From d0fde795d21d6271934f132557ce9542ceb358eb Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 04:02:26 -0400 Subject: xfrm_user: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/xfrm/xfrm_user.c | 105 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 62 insertions(+), 43 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7128dde0fe1a..44293b3fd6a1 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -756,40 +756,50 @@ static int copy_to_user_state_extra(struct xfrm_state *x, { copy_to_user_state(x, p); - if (x->coaddr) - NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); + if (x->coaddr && + nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr)) + goto nla_put_failure; - if (x->lastused) - NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); + if (x->lastused && + nla_put_u64(skb, XFRMA_LASTUSED, x->lastused)) + goto nla_put_failure; - if (x->aead) - NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead); - if (x->aalg) { - if (copy_to_user_auth(x->aalg, skb)) - goto nla_put_failure; + if (x->aead && + nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead)) + goto nla_put_failure; - NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC, - xfrm_alg_auth_len(x->aalg), x->aalg); - } - if (x->ealg) - NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg); - if (x->calg) - NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); + if (x->aalg && + (copy_to_user_auth(x->aalg, skb) || + nla_put(skb, XFRMA_ALG_AUTH_TRUNC, + xfrm_alg_auth_len(x->aalg), x->aalg))) + goto nla_put_failure; - if (x->encap) - NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); + if (x->ealg && + nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg)) + goto nla_put_failure; - if (x->tfcpad) - NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad); + if (x->calg && + nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg)) + goto nla_put_failure; + + if (x->encap && + nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap)) + goto nla_put_failure; + + if (x->tfcpad && + nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad)) + goto nla_put_failure; if (xfrm_mark_put(skb, &x->mark)) goto nla_put_failure; - if (x->replay_esn) - NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, - xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); + if (x->replay_esn && + nla_put(skb, XFRMA_REPLAY_ESN_VAL, + xfrm_replay_state_esn_len(x->replay_esn), + x->replay_esn)) + goto nla_put_failure; - if (x->security && copy_sec_ctx(x->security, skb) < 0) + if (x->security && copy_sec_ctx(x->security, skb)) goto nla_put_failure; return 0; @@ -912,8 +922,9 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, sph.spdhcnt = si.spdhcnt; sph.spdhmcnt = si.spdhmcnt; - NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); - NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); + if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) || + nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -967,8 +978,9 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net, sh.sadhmcnt = si.sadhmcnt; sh.sadhcnt = si.sadhcnt; - NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); - NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); + if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) || + nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -1690,21 +1702,27 @@ static int build_aevent(struct 
sk_buff *skb, struct xfrm_state *x, const struct id->reqid = x->props.reqid; id->flags = c->data.aevent; - if (x->replay_esn) - NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, - xfrm_replay_state_esn_len(x->replay_esn), - x->replay_esn); - else - NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); - - NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); + if (x->replay_esn) { + if (nla_put(skb, XFRMA_REPLAY_ESN_VAL, + xfrm_replay_state_esn_len(x->replay_esn), + x->replay_esn)) + goto nla_put_failure; + } else { + if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), + &x->replay)) + goto nla_put_failure; + } + if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft)) + goto nla_put_failure; - if (id->flags & XFRM_AE_RTHR) - NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); + if ((id->flags & XFRM_AE_RTHR) && + nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff)) + goto nla_put_failure; - if (id->flags & XFRM_AE_ETHR) - NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, - x->replay_maxage * 10 / HZ); + if ((id->flags & XFRM_AE_ETHR) && + nla_put_u32(skb, XFRMA_ETIMER_THRESH, + x->replay_maxage * 10 / HZ)) + goto nla_put_failure; if (xfrm_mark_put(skb, &x->mark)) goto nla_put_failure; @@ -2835,8 +2853,9 @@ static int build_report(struct sk_buff *skb, u8 proto, ur->proto = proto; memcpy(&ur->sel, sel, sizeof(ur->sel)); - if (addr) - NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); + if (addr && + nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr)) + goto nla_put_failure; return nlmsg_end(skb, nlh); -- cgit v1.2.2 From 9360ffd1859720f6520cf59241909b74dae369d0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 04:41:26 -0400 Subject: wireless: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/wireless/nl80211.c | 1204 +++++++++++++++++++++++++--------------------- net/wireless/wext-core.c | 3 +- 2 files changed, 662 insertions(+), 545 deletions(-) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e49da2797022..a4aab1d36285 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -356,20 +356,26 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, static int nl80211_msg_put_channel(struct sk_buff *msg, struct ieee80211_channel *chan) { - NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, - chan->center_freq); + if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, + chan->center_freq)) + goto nla_put_failure; - if (chan->flags & IEEE80211_CHAN_DISABLED) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); - if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); - if (chan->flags & IEEE80211_CHAN_NO_IBSS) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); - if (chan->flags & IEEE80211_CHAN_RADAR) - NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); + if ((chan->flags & IEEE80211_CHAN_DISABLED) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_RADAR) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) + goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, - DBM_TO_MBM(chan->max_power)); + if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, + DBM_TO_MBM(chan->max_power))) + goto nla_put_failure; return 0; @@ -621,8 +627,8 @@ static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes) i = 0; while (ifmodes) { - if (ifmodes & 1) - NLA_PUT_FLAG(msg, i); + if ((ifmodes & 1) && nla_put_flag(msg, i)) + goto nla_put_failure; ifmodes >>= 1; i++; } @@ -665,8 +671,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, nl_limit = nla_nest_start(msg, j + 1); if (!nl_limit) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX, - c->limits[j].max); + if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, + c->limits[j].max)) + goto nla_put_failure; if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, c->limits[j].types)) goto nla_put_failure; @@ -675,13 +682,14 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, nla_nest_end(msg, nl_limits); - if (c->beacon_int_infra_match) - NLA_PUT_FLAG(msg, - NL80211_IFACE_COMB_STA_AP_BI_MATCH); - NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, - c->num_different_channels); - NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM, - c->max_interfaces); + if (c->beacon_int_infra_match && + nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) + goto nla_put_failure; + if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, + c->num_different_channels) || + nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, + c->max_interfaces)) + goto nla_put_failure; nla_nest_end(msg, nl_combi); } @@ -712,64 +720,74 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); - NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, - cfg80211_rdev_list_generation); - - NLA_PUT_U8(msg, 
NL80211_ATTR_WIPHY_RETRY_SHORT, - dev->wiphy.retry_short); - NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, - dev->wiphy.retry_long); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, - dev->wiphy.frag_threshold); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, - dev->wiphy.rts_threshold); - NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, - dev->wiphy.coverage_class); - NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, - dev->wiphy.max_scan_ssids); - NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, - dev->wiphy.max_sched_scan_ssids); - NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, - dev->wiphy.max_scan_ie_len); - NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, - dev->wiphy.max_sched_scan_ie_len); - NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS, - dev->wiphy.max_match_sets); - - if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) - NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); - if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) - NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH); - if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) - NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD); - if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) - NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT); - if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) - NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT); - if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) - NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP); - - NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, - sizeof(u32) * dev->wiphy.n_cipher_suites, - dev->wiphy.cipher_suites); - - NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, - dev->wiphy.max_num_pmkids); - - if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) - NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE); - - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, - dev->wiphy.available_antennas_tx); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, - dev->wiphy.available_antennas_rx); - - if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) - NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, - dev->wiphy.probe_resp_offload); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) || + nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, + cfg80211_rdev_list_generation) || + nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, + dev->wiphy.retry_short) || + nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, + dev->wiphy.retry_long) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + dev->wiphy.frag_threshold) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, + dev->wiphy.rts_threshold) || + nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, + dev->wiphy.coverage_class) || + nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, + dev->wiphy.max_scan_ssids) || + nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, + dev->wiphy.max_sched_scan_ssids) || + nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, + dev->wiphy.max_scan_ie_len) || + nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, + dev->wiphy.max_sched_scan_ie_len) || + nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, + dev->wiphy.max_match_sets)) + goto nla_put_failure; + + if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && + nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && + nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && + nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD)) + goto nla_put_failure; + if 
((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && + nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && + nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) + goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && + nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) + goto nla_put_failure; + + if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, + sizeof(u32) * dev->wiphy.n_cipher_suites, + dev->wiphy.cipher_suites)) + goto nla_put_failure; + + if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, + dev->wiphy.max_num_pmkids)) + goto nla_put_failure; + + if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && + nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, + dev->wiphy.available_antennas_tx) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, + dev->wiphy.available_antennas_rx)) + goto nla_put_failure; + + if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && + nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, + dev->wiphy.probe_resp_offload)) + goto nla_put_failure; if ((dev->wiphy.available_antennas_tx || dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { @@ -777,8 +795,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, int res; res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); if (!res) { - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, + tx_ant) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, + rx_ant)) + goto nla_put_failure; } } @@ -799,17 +820,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, goto nla_put_failure; /* add HT info */ - if (dev->wiphy.bands[band]->ht_cap.ht_supported) { - NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, - sizeof(dev->wiphy.bands[band]->ht_cap.mcs), - &dev->wiphy.bands[band]->ht_cap.mcs); - NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, - dev->wiphy.bands[band]->ht_cap.cap); - NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, - dev->wiphy.bands[band]->ht_cap.ampdu_factor); - NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, - dev->wiphy.bands[band]->ht_cap.ampdu_density); - } + if (dev->wiphy.bands[band]->ht_cap.ht_supported && + (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, + sizeof(dev->wiphy.bands[band]->ht_cap.mcs), + &dev->wiphy.bands[band]->ht_cap.mcs) || + nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, + dev->wiphy.bands[band]->ht_cap.cap) || + nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, + dev->wiphy.bands[band]->ht_cap.ampdu_factor) || + nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, + dev->wiphy.bands[band]->ht_cap.ampdu_density))) + goto nla_put_failure; /* add frequencies */ nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); @@ -842,11 +863,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, goto nla_put_failure; rate = &dev->wiphy.bands[band]->bitrates[i]; - NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, - rate->bitrate); - if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) - NLA_PUT_FLAG(msg, - NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); + if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, + rate->bitrate)) + goto nla_put_failure; + if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && + nla_put_flag(msg, + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE)) + goto nla_put_failure; nla_nest_end(msg, 
nl_rate); } @@ -866,7 +889,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, do { \ if (dev->ops->op) { \ i++; \ - NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ + if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ + goto nla_put_failure; \ } \ } while (0) @@ -894,7 +918,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); + if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) + goto nla_put_failure; } CMD(set_channel, SET_CHANNEL); CMD(set_wds_peer, SET_WDS_PEER); @@ -908,7 +933,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(set_noack_map, SET_NOACK_MAP); if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS); + if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) + goto nla_put_failure; } #ifdef CONFIG_NL80211_TESTMODE @@ -919,23 +945,27 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (dev->ops->connect || dev->ops->auth) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); + if (nla_put_u32(msg, i, NL80211_CMD_CONNECT)) + goto nla_put_failure; } if (dev->ops->disconnect || dev->ops->deauth) { i++; - NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); + if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) + goto nla_put_failure; } nla_nest_end(msg, nl_cmds); if (dev->ops->remain_on_channel && - dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) - NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, - dev->wiphy.max_remain_on_channel_duration); + (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && + nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, + dev->wiphy.max_remain_on_channel_duration)) + goto nla_put_failure; - if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) - NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); + if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && + nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) + goto nla_put_failure; if (mgmt_stypes) { u16 stypes; @@ -953,9 +983,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, i = 0; stypes = mgmt_stypes[ift].tx; while (stypes) { - if (stypes & 1) - NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, - (i << 4) | IEEE80211_FTYPE_MGMT); + if ((stypes & 1) && + nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, + (i << 4) | IEEE80211_FTYPE_MGMT)) + goto nla_put_failure; stypes >>= 1; i++; } @@ -975,9 +1006,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, i = 0; stypes = mgmt_stypes[ift].rx; while (stypes) { - if (stypes & 1) - NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, - (i << 4) | IEEE80211_FTYPE_MGMT); + if ((stypes & 1) && + nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, + (i << 4) | IEEE80211_FTYPE_MGMT)) + goto nla_put_failure; stypes >>= 1; i++; } @@ -994,22 +1026,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (!nl_wowlan) goto nla_put_failure; - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED); - if 
(dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); - if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); + if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || + ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) + goto nla_put_failure; if (dev->wiphy.wowlan.n_patterns) { struct nl80211_wowlan_pattern_support pat = { .max_patterns = dev->wiphy.wowlan.n_patterns, @@ -1018,8 +1051,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, .max_pattern_len = dev->wiphy.wowlan.pattern_max_len, }; - NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, - sizeof(pat), &pat); + if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, + sizeof(pat), &pat)) + goto nla_put_failure; } nla_nest_end(msg, nl_wowlan); @@ -1032,16 +1066,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (nl80211_put_iface_combinations(&dev->wiphy, msg)) goto nla_put_failure; - if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) - NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME, - dev->wiphy.ap_sme_capa); + if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && + nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, + dev->wiphy.ap_sme_capa)) + goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); + if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, + dev->wiphy.features)) + goto nla_put_failure; - if (dev->wiphy.ht_capa_mod_mask) - NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK, - sizeof(*dev->wiphy.ht_capa_mod_mask), - dev->wiphy.ht_capa_mod_mask); + if (dev->wiphy.ht_capa_mod_mask && + nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, + sizeof(*dev->wiphy.ht_capa_mod_mask), + dev->wiphy.ht_capa_mod_mask)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -1484,14 +1522,15 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); - NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, - rdev->devlist_generation ^ - (cfg80211_rdev_list_generation << 2)); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + 
nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) || + nla_put_u32(msg, NL80211_ATTR_IFTYPE, + dev->ieee80211_ptr->iftype) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, + rdev->devlist_generation ^ + (cfg80211_rdev_list_generation << 2))) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -1789,35 +1828,34 @@ static void get_key_callback(void *c, struct key_params *params) struct nlattr *key; struct get_key_cookie *cookie = c; - if (params->key) - NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, - params->key_len, params->key); - - if (params->seq) - NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, - params->seq_len, params->seq); - - if (params->cipher) - NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, - params->cipher); + if ((params->key && + nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, + params->key_len, params->key)) || + (params->seq && + nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, + params->seq_len, params->seq)) || + (params->cipher && + nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER, + params->cipher))) + goto nla_put_failure; key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); if (!key) goto nla_put_failure; - if (params->key) - NLA_PUT(cookie->msg, NL80211_KEY_DATA, - params->key_len, params->key); - - if (params->seq) - NLA_PUT(cookie->msg, NL80211_KEY_SEQ, - params->seq_len, params->seq); - - if (params->cipher) - NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, - params->cipher); + if ((params->key && + nla_put(cookie->msg, NL80211_KEY_DATA, + params->key_len, params->key)) || + (params->seq && + nla_put(cookie->msg, NL80211_KEY_SEQ, + params->seq_len, params->seq)) || + (params->cipher && + nla_put_u32(cookie->msg, NL80211_KEY_CIPHER, + params->cipher))) + goto nla_put_failure; - NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); + if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx)) + goto nla_put_failure; nla_nest_end(cookie->msg, key); @@ -1875,10 +1913,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) cookie.msg = msg; cookie.idx = key_idx; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); - if (mac_addr) - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx)) + goto nla_put_failure; + if (mac_addr && + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) + goto nla_put_failure; if (pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) @@ -2368,15 +2408,15 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ bitrate = cfg80211_calculate_bitrate(info); - if (bitrate > 0) - NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); - - if (info->flags & RATE_INFO_FLAGS_MCS) - NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs); - if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) - NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); - if (info->flags & RATE_INFO_FLAGS_SHORT_GI) - NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); + if ((bitrate > 0 && + nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) || + ((info->flags & RATE_INFO_FLAGS_MCS) && + nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) || + ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) && + nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) || + ((info->flags & RATE_INFO_FLAGS_SHORT_GI) && + nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))) + goto nla_put_failure; nla_nest_end(msg, rate); return true; @@ -2398,43 
+2438,50 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation)) + goto nla_put_failure; sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); if (!sinfoattr) goto nla_put_failure; - if (sinfo->filled & STATION_INFO_CONNECTED_TIME) - NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME, - sinfo->connected_time); - if (sinfo->filled & STATION_INFO_INACTIVE_TIME) - NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, - sinfo->inactive_time); - if (sinfo->filled & STATION_INFO_RX_BYTES) - NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES, - sinfo->rx_bytes); - if (sinfo->filled & STATION_INFO_TX_BYTES) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES, - sinfo->tx_bytes); - if (sinfo->filled & STATION_INFO_LLID) - NLA_PUT_U16(msg, NL80211_STA_INFO_LLID, - sinfo->llid); - if (sinfo->filled & STATION_INFO_PLID) - NLA_PUT_U16(msg, NL80211_STA_INFO_PLID, - sinfo->plid); - if (sinfo->filled & STATION_INFO_PLINK_STATE) - NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, - sinfo->plink_state); + if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) && + nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME, + sinfo->connected_time)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) && + nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME, + sinfo->inactive_time)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_RX_BYTES) && + nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, + sinfo->rx_bytes)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_BYTES) && + nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, + sinfo->tx_bytes)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_LLID) && + nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_PLID) && + nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_PLINK_STATE) && + nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE, + sinfo->plink_state)) + goto nla_put_failure; switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: - if (sinfo->filled & STATION_INFO_SIGNAL) - NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL, - sinfo->signal); - if (sinfo->filled & STATION_INFO_SIGNAL_AVG) - NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, - sinfo->signal_avg); + if ((sinfo->filled & STATION_INFO_SIGNAL) && + nla_put_u8(msg, NL80211_STA_INFO_SIGNAL, + sinfo->signal)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) && + nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG, + sinfo->signal_avg)) + goto nla_put_failure; break; default: break; @@ -2449,49 +2496,56 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; } - if (sinfo->filled & STATION_INFO_RX_PACKETS) - NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, - sinfo->rx_packets); - if (sinfo->filled & STATION_INFO_TX_PACKETS) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS, - sinfo->tx_packets); - if (sinfo->filled & STATION_INFO_TX_RETRIES) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES, - sinfo->tx_retries); - if (sinfo->filled & STATION_INFO_TX_FAILED) - NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, - 
sinfo->tx_failed); - if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) - NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS, - sinfo->beacon_loss_count); + if ((sinfo->filled & STATION_INFO_RX_PACKETS) && + nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS, + sinfo->rx_packets)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_PACKETS) && + nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS, + sinfo->tx_packets)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_RETRIES) && + nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES, + sinfo->tx_retries)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_TX_FAILED) && + nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED, + sinfo->tx_failed)) + goto nla_put_failure; + if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) && + nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS, + sinfo->beacon_loss_count)) + goto nla_put_failure; if (sinfo->filled & STATION_INFO_BSS_PARAM) { bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) goto nla_put_failure; - if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) - NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT); - if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) - NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE); - if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) - NLA_PUT_FLAG(msg, - NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME); - NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, - sinfo->bss_param.dtim_period); - NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, - sinfo->bss_param.beacon_interval); + if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) || + ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) || + ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) && + nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) || + nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, + sinfo->bss_param.dtim_period) || + nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, + sinfo->bss_param.beacon_interval)) + goto nla_put_failure; nla_nest_end(msg, bss_param); } - if (sinfo->filled & STATION_INFO_STA_FLAGS) - NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS, - sizeof(struct nl80211_sta_flag_update), - &sinfo->sta_flags); + if ((sinfo->filled & STATION_INFO_STA_FLAGS) && + nla_put(msg, NL80211_STA_INFO_STA_FLAGS, + sizeof(struct nl80211_sta_flag_update), + &sinfo->sta_flags)) + goto nla_put_failure; nla_nest_end(msg, sinfoattr); - if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES) - NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, - sinfo->assoc_req_ies); + if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) && + nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, + sinfo->assoc_req_ies)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -2913,36 +2967,37 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); - NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); - - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) || + nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) || + nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation)) + goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); if 
(!pinfoattr) goto nla_put_failure; - if (pinfo->filled & MPATH_INFO_FRAME_QLEN) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, - pinfo->frame_qlen); - if (pinfo->filled & MPATH_INFO_SN) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN, - pinfo->sn); - if (pinfo->filled & MPATH_INFO_METRIC) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, - pinfo->metric); - if (pinfo->filled & MPATH_INFO_EXPTIME) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, - pinfo->exptime); - if (pinfo->filled & MPATH_INFO_FLAGS) - NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, - pinfo->flags); - if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) - NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, - pinfo->discovery_timeout); - if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) - NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, - pinfo->discovery_retries); + if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) && + nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN, + pinfo->frame_qlen)) + goto nla_put_failure; + if (((pinfo->filled & MPATH_INFO_SN) && + nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) || + ((pinfo->filled & MPATH_INFO_METRIC) && + nla_put_u32(msg, NL80211_MPATH_INFO_METRIC, + pinfo->metric)) || + ((pinfo->filled & MPATH_INFO_EXPTIME) && + nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME, + pinfo->exptime)) || + ((pinfo->filled & MPATH_INFO_FLAGS) && + nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS, + pinfo->flags)) || + ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) && + nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, + pinfo->discovery_timeout)) || + ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) && + nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, + pinfo->discovery_retries))) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); @@ -3268,47 +3323,48 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); if (!pinfoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, - cur_params.dot11MeshRetryTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, - cur_params.dot11MeshConfirmTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, - cur_params.dot11MeshHoldingTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, - cur_params.dot11MeshMaxPeerLinks); - NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES, - cur_params.dot11MeshMaxRetries); - NLA_PUT_U8(msg, NL80211_MESHCONF_TTL, - cur_params.dot11MeshTTL); - NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL, - cur_params.element_ttl); - NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, - cur_params.auto_open_plinks); - NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, - cur_params.dot11MeshHWMPmaxPREQretries); - NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, - cur_params.path_refresh_time); - NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, - cur_params.min_discovery_timeout); - NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, - cur_params.dot11MeshHWMPactivePathTimeout); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, - cur_params.dot11MeshHWMPpreqMinInterval); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - cur_params.dot11MeshHWMPperrMinInterval); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, - cur_params.dot11MeshHWMPnetDiameterTraversalTime); - NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, - cur_params.dot11MeshHWMPRootMode); - NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, - cur_params.dot11MeshHWMPRannInterval); - 
NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, - cur_params.dot11MeshGateAnnouncementProtocol); - NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING, - cur_params.dot11MeshForwarding); - NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, - cur_params.rssi_threshold); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, + cur_params.dot11MeshRetryTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, + cur_params.dot11MeshConfirmTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, + cur_params.dot11MeshHoldingTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, + cur_params.dot11MeshMaxPeerLinks) || + nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES, + cur_params.dot11MeshMaxRetries) || + nla_put_u8(msg, NL80211_MESHCONF_TTL, + cur_params.dot11MeshTTL) || + nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL, + cur_params.element_ttl) || + nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, + cur_params.auto_open_plinks) || + nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + cur_params.dot11MeshHWMPmaxPREQretries) || + nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, + cur_params.path_refresh_time) || + nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + cur_params.min_discovery_timeout) || + nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, + cur_params.dot11MeshHWMPactivePathTimeout) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, + cur_params.dot11MeshHWMPpreqMinInterval) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, + cur_params.dot11MeshHWMPperrMinInterval) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + cur_params.dot11MeshHWMPnetDiameterTraversalTime) || + nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, + cur_params.dot11MeshHWMPRootMode) || + nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, + cur_params.dot11MeshHWMPRannInterval) || + nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, + cur_params.dot11MeshGateAnnouncementProtocol) || + nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, + cur_params.dot11MeshForwarding) || + nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, + cur_params.rssi_threshold)) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); @@ -3539,11 +3595,12 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) if (!hdr) goto put_failure; - NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, - cfg80211_regdomain->alpha2); - if (cfg80211_regdomain->dfs_region) - NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION, - cfg80211_regdomain->dfs_region); + if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, + cfg80211_regdomain->alpha2) || + (cfg80211_regdomain->dfs_region && + nla_put_u8(msg, NL80211_ATTR_DFS_REGION, + cfg80211_regdomain->dfs_region))) + goto nla_put_failure; nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); if (!nl_reg_rules) @@ -3563,18 +3620,19 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) if (!nl_reg_rule) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS, - reg_rule->flags); - NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START, - freq_range->start_freq_khz); - NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END, - freq_range->end_freq_khz); - NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, - freq_range->max_bandwidth_khz); - NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, - power_rule->max_antenna_gain); - NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, - power_rule->max_eirp); + 
if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS, + reg_rule->flags) || + nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START, + freq_range->start_freq_khz) || + nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END, + freq_range->end_freq_khz) || + nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, + freq_range->max_bandwidth_khz) || + nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, + power_rule->max_antenna_gain) || + nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, + power_rule->max_eirp)) + goto nla_put_failure; nla_nest_end(msg, nl_reg_rule); } @@ -4145,37 +4203,44 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, genl_dump_check_consistent(cb, hdr, &nl80211_fam); - NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) + goto nla_put_failure; bss = nla_nest_start(msg, NL80211_ATTR_BSS); if (!bss) goto nla_put_failure; - if (!is_zero_ether_addr(res->bssid)) - NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid); - if (res->information_elements && res->len_information_elements) - NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, - res->len_information_elements, - res->information_elements); - if (res->beacon_ies && res->len_beacon_ies && - res->beacon_ies != res->information_elements) - NLA_PUT(msg, NL80211_BSS_BEACON_IES, - res->len_beacon_ies, res->beacon_ies); - if (res->tsf) - NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); - if (res->beacon_interval) - NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); - NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); - NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); - NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, - jiffies_to_msecs(jiffies - intbss->ts)); + if ((!is_zero_ether_addr(res->bssid) && + nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) || + (res->information_elements && res->len_information_elements && + nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, + res->len_information_elements, + res->information_elements)) || + (res->beacon_ies && res->len_beacon_ies && + res->beacon_ies != res->information_elements && + nla_put(msg, NL80211_BSS_BEACON_IES, + res->len_beacon_ies, res->beacon_ies))) + goto nla_put_failure; + if (res->tsf && + nla_put_u64(msg, NL80211_BSS_TSF, res->tsf)) + goto nla_put_failure; + if (res->beacon_interval && + nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval)) + goto nla_put_failure; + if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) || + nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) || + nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO, + jiffies_to_msecs(jiffies - intbss->ts))) + goto nla_put_failure; switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: - NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal); + if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) + goto nla_put_failure; break; case CFG80211_SIGNAL_TYPE_UNSPEC: - NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal); + if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal)) + goto nla_put_failure; break; default: break; @@ -4184,14 +4249,16 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, switch (wdev->iftype) { case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - if (intbss == wdev->current_bss) - NLA_PUT_U32(msg, NL80211_BSS_STATUS, - 
NL80211_BSS_STATUS_ASSOCIATED); + if (intbss == wdev->current_bss && + nla_put_u32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_ASSOCIATED)) + goto nla_put_failure; break; case NL80211_IFTYPE_ADHOC: - if (intbss == wdev->current_bss) - NLA_PUT_U32(msg, NL80211_BSS_STATUS, - NL80211_BSS_STATUS_IBSS_JOINED); + if (intbss == wdev->current_bss && + nla_put_u32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_IBSS_JOINED)) + goto nla_put_failure; break; default: break; @@ -4260,34 +4327,43 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq, if (!hdr) return -ENOMEM; - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) + goto nla_put_failure; infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); if (!infoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY, - survey->channel->center_freq); - if (survey->filled & SURVEY_INFO_NOISE_DBM) - NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE, - survey->noise); - if (survey->filled & SURVEY_INFO_IN_USE) - NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME, - survey->channel_time); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, - survey->channel_time_busy); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, - survey->channel_time_ext_busy); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX, - survey->channel_time_rx); - if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) - NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX, - survey->channel_time_tx); + if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY, + survey->channel->center_freq)) + goto nla_put_failure; + + if ((survey->filled & SURVEY_INFO_NOISE_DBM) && + nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_IN_USE) && + nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME, + survey->channel_time)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, + survey->channel_time_busy)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, + survey->channel_time_ext_busy)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX, + survey->channel_time_rx)) + goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) && + nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX, + survey->channel_time_tx)) + goto nla_put_failure; nla_nest_end(msg, infoattr); @@ -4968,7 +5044,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, NL80211_CMD_TESTMODE); struct nlattr *tmdata; - if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) { + if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { genlmsg_cancel(skb, hdr); break; } @@ -5019,7 +5095,8 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev, return NULL; } - NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) + goto nla_put_failure; 
data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); ((void **)skb->cb)[0] = rdev; @@ -5398,7 +5475,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, if (err) goto free_msg; - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -5685,7 +5763,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) goto free_msg; if (msg) { - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); @@ -5790,7 +5869,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info) else ps_state = NL80211_PS_DISABLED; - NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state); + if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state)) + goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); @@ -5937,20 +6017,21 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) if (!nl_wowlan) goto nla_put_failure; - if (rdev->wowlan->any) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); - if (rdev->wowlan->disconnect) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); - if (rdev->wowlan->magic_pkt) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); - if (rdev->wowlan->gtk_rekey_failure) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); - if (rdev->wowlan->eap_identity_req) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); - if (rdev->wowlan->four_way_handshake) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); - if (rdev->wowlan->rfkill_release) - NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); + if ((rdev->wowlan->any && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || + (rdev->wowlan->disconnect && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || + (rdev->wowlan->magic_pkt && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || + (rdev->wowlan->gtk_rekey_failure && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || + (rdev->wowlan->eap_identity_req && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || + (rdev->wowlan->four_way_handshake && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || + (rdev->wowlan->rfkill_release && + nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) + goto nla_put_failure; if (rdev->wowlan->n_patterns) { struct nlattr *nl_pats, *nl_pat; int i, pat_len; @@ -5965,12 +6046,13 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) if (!nl_pat) goto nla_put_failure; pat_len = rdev->wowlan->patterns[i].pattern_len; - NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK, - DIV_ROUND_UP(pat_len, 8), - rdev->wowlan->patterns[i].mask); - NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN, - pat_len, - rdev->wowlan->patterns[i].pattern); + if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK, + DIV_ROUND_UP(pat_len, 8), + rdev->wowlan->patterns[i].mask) || + nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, + pat_len, + rdev->wowlan->patterns[i].pattern)) + goto nla_put_failure; nla_nest_end(msg, nl_pat); } nla_nest_end(msg, nl_pats); @@ -6243,7 +6325,8 @@ static int nl80211_probe_client(struct sk_buff *skb, if (err) goto free_msg; - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -6911,19 +6994,24 @@ static int nl80211_add_scan_req(struct sk_buff *msg, nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); if (!nest) goto 
nla_put_failure; - for (i = 0; i < req->n_ssids; i++) - NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); + for (i = 0; i < req->n_ssids; i++) { + if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) + goto nla_put_failure; + } nla_nest_end(msg, nest); nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nest) goto nla_put_failure; - for (i = 0; i < req->n_channels; i++) - NLA_PUT_U32(msg, i, req->channels[i]->center_freq); + for (i = 0; i < req->n_channels; i++) { + if (nla_put_u32(msg, i, req->channels[i]->center_freq)) + goto nla_put_failure; + } nla_nest_end(msg, nest); - if (req->ie) - NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); + if (req->ie && + nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) + goto nla_put_failure; return 0; nla_put_failure: @@ -6942,8 +7030,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; /* ignore errors and send incomplete event anyway */ nl80211_add_scan_req(msg, rdev); @@ -6967,8 +7056,9 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg, if (!hdr) return -1; - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -7091,26 +7181,33 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) } /* Userspace can always count this one always being set */ - NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator); - - if (request->alpha2[0] == '0' && request->alpha2[1] == '0') - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_WORLD); - else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_CUSTOM_WORLD); - else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || - request->intersect) - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_INTERSECTION); - else { - NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, - NL80211_REGDOM_TYPE_COUNTRY); - NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2); - } - - if (wiphy_idx_valid(request->wiphy_idx)) - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); + if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator)) + goto nla_put_failure; + + if (request->alpha2[0] == '0' && request->alpha2[1] == '0') { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_WORLD)) + goto nla_put_failure; + } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_CUSTOM_WORLD)) + goto nla_put_failure; + } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || + request->intersect) { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_INTERSECTION)) + goto nla_put_failure; + } else { + if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_COUNTRY) || + nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, + request->alpha2)) + goto nla_put_failure; + } + + if (wiphy_idx_valid(request->wiphy_idx) && + nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ 
-7144,9 +7241,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_FRAME, len, buf)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7224,10 +7322,11 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7275,15 +7374,15 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - if (bssid) - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); - NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); - if (req_ie) - NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); - if (resp_ie) - NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || + nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) || + (req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || + (resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7315,13 +7414,14 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); - if (req_ie) - NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); - if (resp_ie) - NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) || + (req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || + (resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7352,14 +7452,14 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - if (from_ap && reason) - NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); - if (from_ap) - NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); - if (ie) - NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + (from_ap && reason && + nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) || + (from_ap && + nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) || + (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie))) + goto 
nla_put_failure; genlmsg_end(msg, hdr); @@ -7390,9 +7490,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7423,11 +7524,12 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr); - if (ie_len && ie) - NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) || + (ie_len && ie && + nla_put(msg, NL80211_ATTR_IE, ie_len , ie))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7458,15 +7560,14 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - if (addr) - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type); - if (key_id != -1) - NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id); - if (tsc) - NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) || + nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) || + (key_id != -1 && + nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) || + (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7501,7 +7602,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy, * Since we are applying the beacon hint to a wiphy we know its * wiphy_idx is valid */ - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) + goto nla_put_failure; /* Before */ nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); @@ -7553,14 +7655,16 @@ static void nl80211_send_remain_on_chan_event( return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) || + nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) + goto nla_put_failure; - if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) - NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); + if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL && + nla_put_u32(msg, NL80211_ATTR_DURATION, duration)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7631,8 +7735,9 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - 
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); + if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7668,9 +7773,10 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, return true; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) + goto nla_put_failure; err = genlmsg_end(msg, hdr); if (err < 0) { @@ -7719,12 +7825,13 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, return -ENOMEM; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); - if (sig_dbm) - NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || + (sig_dbm && + nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || + nla_put(msg, NL80211_ATTR_FRAME, len, buf)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7754,12 +7861,12 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - if (ack) - NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_FRAME, len, buf) || + nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || + (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -7791,15 +7898,17 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); if (!pinfoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, - rssi_event); + if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, + rssi_event)) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); @@ -7832,16 +7941,18 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) + goto nla_put_failure; rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); if (!rekey_attr) goto nla_put_failure; - NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR, - NL80211_REPLAY_CTR_LEN, replay_ctr); + if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR, + NL80211_REPLAY_CTR_LEN, replay_ctr)) + 
goto nla_put_failure; nla_nest_end(msg, rekey_attr); @@ -7874,17 +7985,19 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + goto nla_put_failure; attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE); if (!attr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index); - NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid); - if (preauth) - NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH); + if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) || + nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) || + (preauth && + nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH))) + goto nla_put_failure; nla_nest_end(msg, attr); @@ -7918,15 +8031,17 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) + goto nla_put_failure; pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); if (!pinfoattr) goto nla_put_failure; - NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets); + if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) + goto nla_put_failure; nla_nest_end(msg, pinfoattr); @@ -7960,12 +8075,12 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); - NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - if (acked) - NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || + nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || + (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) + goto nla_put_failure; err = genlmsg_end(msg, hdr); if (err < 0) { @@ -8005,12 +8120,13 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy, return; } - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); - if (freq) - NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); - if (sig_dbm) - NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm); - NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + (freq && + nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || + (sig_dbm && + nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || + nla_put(msg, NL80211_ATTR_FRAME, len, frame)) + goto nla_put_failure; genlmsg_end(msg, hdr); diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 0af7f54e4f61..9f544c95171c 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -402,7 +402,8 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, r->ifi_flags = dev_get_flags(dev); r->ifi_change = 0; /* Wireless changes don't affect those flags */ - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); + if (nla_put_string(skb, IFLA_IFNAME, dev->name)) + goto nla_put_failure; return nlh; nla_put_failure: -- 
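The conversions in this series are mechanical, but the rationale in the commit messages bears illustrating. NLA_PUT*() expanded to a statement that performed goto nla_put_failure on its own, so every call site silently depended on that label being in scope and reviewers could not see the jump in the calling code. The sketch below is a standalone illustration, not kernel source: nla_put() is a stub, attribute type 1 is arbitrary, and the macro shown is only an approximation of the removed definition. It contrasts the hidden-goto form with the explicit if (nla_put(...)) goto nla_put_failure; form these patches adopt.

#include <stdio.h>

/* Stand-in for the kernel API; in the kernel, nla_put() returns a
 * negative errno (e.g. -EMSGSIZE) when the attribute does not fit. */
struct sk_buff { int room; };

static int nla_put(struct sk_buff *skb, int attrtype, int attrlen,
		   const void *data)
{
	(void)attrtype; (void)data;
	if (skb->room < attrlen)
		return -1;
	skb->room -= attrlen;
	return 0;
}

/* Approximation of the removed macro: note the hidden jump. */
#define NLA_PUT(skb, attrtype, attrlen, data)			\
	do {							\
		if (nla_put(skb, attrtype, attrlen, data) < 0)	\
			goto nla_put_failure;			\
	} while (0)

/* Old style: the goto is invisible at the call site. */
static int fill_old(struct sk_buff *skb, const int *val)
{
	NLA_PUT(skb, 1, sizeof(*val), val);	/* may jump! */
	return 0;

nla_put_failure:
	return -1;
}

/* New style, as in these patches: control flow is explicit. */
static int fill_new(struct sk_buff *skb, const int *val)
{
	if (nla_put(skb, 1, sizeof(*val), val))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

int main(void)
{
	struct sk_buff small = { .room = 2 };
	int v = 42;

	printf("old: %d, new: %d\n",
	       fill_old(&small, &v), fill_new(&small, &v));
	return 0;
}

Both variants behave identically when the attribute fits; the gain is that the error path in fill_new() is visible at the call site and to static checkers, which is what makes the converted code easier to audit.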
cgit v1.2.2 From 1b34ec43c9b3de44a5420841ab293d1b2035a94c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 05:11:39 -0400 Subject: pkt_sched: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/sched/act_api.c | 9 ++++++--- net/sched/act_csum.c | 6 ++++-- net/sched/act_gact.c | 9 ++++++--- net/sched/act_ipt.c | 14 ++++++++------ net/sched/act_mirred.c | 6 ++++-- net/sched/act_nat.c | 6 ++++-- net/sched/act_pedit.c | 6 ++++-- net/sched/act_police.c | 13 ++++++++----- net/sched/act_simple.c | 8 +++++--- net/sched/act_skbedit.c | 27 ++++++++++++++++----------- net/sched/cls_api.c | 3 ++- net/sched/cls_basic.c | 5 +++-- net/sched/cls_flow.c | 35 +++++++++++++++++++++-------------- net/sched/cls_fw.c | 15 +++++++++------ net/sched/cls_route.c | 16 ++++++++++------ net/sched/cls_rsvp.h | 16 ++++++++++------ net/sched/cls_tcindex.c | 14 ++++++++------ net/sched/cls_u32.c | 40 ++++++++++++++++++++++++---------------- net/sched/em_meta.c | 19 ++++++++++++------- net/sched/ematch.c | 6 ++++-- net/sched/sch_api.c | 9 ++++++--- net/sched/sch_atm.c | 21 +++++++++++++-------- net/sched/sch_cbq.c | 18 ++++++++++++------ net/sched/sch_choke.c | 5 +++-- net/sched/sch_drr.c | 3 ++- net/sched/sch_dsmark.c | 18 +++++++++++------- net/sched/sch_fifo.c | 3 ++- net/sched/sch_generic.c | 3 ++- net/sched/sch_gred.c | 6 ++++-- net/sched/sch_hfsc.c | 6 ++++-- net/sched/sch_htb.c | 6 ++++-- net/sched/sch_mqprio.c | 3 ++- net/sched/sch_multiq.c | 3 ++- net/sched/sch_netem.c | 21 ++++++++++++++------- net/sched/sch_prio.c | 3 ++- net/sched/sch_qfq.c | 5 +++-- net/sched/sch_red.c | 5 +++-- net/sched/sch_sfb.c | 3 ++- net/sched/sch_sfq.c | 3 ++- net/sched/sch_tbf.c | 3 ++- 40 files changed, 263 insertions(+), 157 deletions(-) (limited to 'net') diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 93fdf131bd75..5cfb160df063 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -127,7 +127,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; - NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); + if (nla_put_string(skb, TCA_KIND, a->ops->kind)) + goto nla_put_failure; for (i = 0; i < (hinfo->hmask + 1); i++) { p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; @@ -139,7 +140,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, p = s_p; } } - NLA_PUT_U32(skb, TCA_FCNT, n_i); + if (nla_put_u32(skb, TCA_FCNT, n_i)) + goto nla_put_failure; nla_nest_end(skb, nest); return n_i; @@ -437,7 +439,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) if (a->ops == NULL || a->ops->dump == NULL) return err; - NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); + if (nla_put_string(skb, TCA_KIND, a->ops->kind)) + goto nla_put_failure; if (tcf_action_copy_stats(skb, a, 0)) goto nla_put_failure; nest = nla_nest_start(skb, TCA_OPTIONS); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 453a73431ac4..882124ceb70c 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -550,11 +550,13 @@ static int tcf_csum_dump(struct sk_buff *skb, }; struct tcf_t t; - NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - 
NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t); + if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index b77f5a06a658..f10fb8256442 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -162,7 +162,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int }; struct tcf_t t; - NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; #ifdef CONFIG_GACT_PROB if (gact->tcfg_ptype) { struct tc_gact_p p_opt = { @@ -171,13 +172,15 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int .ptype = gact->tcfg_ptype, }; - NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); + if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt)) + goto nla_put_failure; } #endif t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); - NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 60f8f616e8fa..0beba0e5312e 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -267,15 +267,17 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int c.refcnt = ipt->tcf_refcnt - ref; strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); - NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); - NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index); - NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook); - NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c); - NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname); + if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) || + nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) || + nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) || + nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) || + nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname)) + goto nla_put_failure; tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); - NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm); + if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm)) + goto nla_put_failure; kfree(t); return skb->len; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index e051398fdf6b..d583aea3b3df 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -227,11 +227,13 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i }; struct tcf_t t; - NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(m->tcf_tm.expires); - NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); + if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 001d1b354869..b5d029eb44f2 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -284,11 +284,13 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, }; struct tcf_t t; - NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); + if (nla_put(skb, 
TCA_NAT_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 10d3aed86560..26aa2f6ce257 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -215,11 +215,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->refcnt = p->tcf_refcnt - ref; opt->bindcnt = p->tcf_bindcnt - bind; - NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt); + if (nla_put(skb, TCA_PEDIT_PARMS, s, opt)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); - NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t)) + goto nla_put_failure; kfree(opt); return skb->len; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 6fb3f5af0f85..a9de23297d47 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -356,11 +356,14 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) opt.rate = police->tcfp_R_tab->rate; if (police->tcfp_P_tab) opt.peakrate = police->tcfp_P_tab->rate; - NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); - if (police->tcfp_result) - NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); - if (police->tcfp_ewma_rate) - NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate); + if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) + goto nla_put_failure; + if (police->tcfp_result && + nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result)) + goto nla_put_failure; + if (police->tcfp_ewma_rate && + nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 73e0a3ab4d55..3922f2a2821b 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -172,12 +172,14 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, }; struct tcf_t t; - NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); - NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); + if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) || + nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); - NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); + if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 35dbbe91027e..476e0fac6712 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -166,20 +166,25 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, }; struct tcf_t t; - NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); - if (d->flags & SKBEDIT_F_PRIORITY) - NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), - &d->priority); - if (d->flags & SKBEDIT_F_QUEUE_MAPPING) - NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, - sizeof(d->queue_mapping), &d->queue_mapping); - if (d->flags & SKBEDIT_F_MARK) - NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), - &d->mark); + if 
(nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + if ((d->flags & SKBEDIT_F_PRIORITY) && + nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), + &d->priority)) + goto nla_put_failure; + if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) && + nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING, + sizeof(d->queue_mapping), &d->queue_mapping)) + goto nla_put_failure; + if ((d->flags & SKBEDIT_F_MARK) && + nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), + &d->mark)) + goto nla_put_failure; t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); - NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t); + if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a69d44f1dac5..f452f696b4b3 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -357,7 +357,8 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; tcm->tcm_parent = tp->classid; tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); - NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); + if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) + goto nla_put_failure; tcm->tcm_handle = fh; if (RTM_DELTFILTER != event) { tcm->tcm_handle = 0; diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index ea1f70b5a5f4..590960a22a77 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -257,8 +257,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - if (f->res.classid) - NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid); + if (f->res.classid && + nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 1d8bd0dbcd1f..ccd08c8dc6a7 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -572,25 +572,32 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask); - NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode); + if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) || + nla_put_u32(skb, TCA_FLOW_MODE, f->mode)) + goto nla_put_failure; if (f->mask != ~0 || f->xor != 0) { - NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask); - NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor); + if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) || + nla_put_u32(skb, TCA_FLOW_XOR, f->xor)) + goto nla_put_failure; } - if (f->rshift) - NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift); - if (f->addend) - NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend); + if (f->rshift && + nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift)) + goto nla_put_failure; + if (f->addend && + nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend)) + goto nla_put_failure; - if (f->divisor) - NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor); - if (f->baseclass) - NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); + if (f->divisor && + nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor)) + goto nla_put_failure; + if (f->baseclass && + nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass)) + goto nla_put_failure; - if (f->perturb_period) - NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ); + if (f->perturb_period && + nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ)) + goto nla_put_failure; if 
(tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 389af152ec45..8384a4797240 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -346,14 +346,17 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - if (f->res.classid) - NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid); + if (f->res.classid && + nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid)) + goto nla_put_failure; #ifdef CONFIG_NET_CLS_IND - if (strlen(f->indev)) - NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev); + if (strlen(f->indev) && + nla_put_string(skb, TCA_FW_INDEV, f->indev)) + goto nla_put_failure; #endif /* CONFIG_NET_CLS_IND */ - if (head->mask != 0xFFFFFFFF) - NLA_PUT_U32(skb, TCA_FW_MASK, head->mask); + if (head->mask != 0xFFFFFFFF && + nla_put_u32(skb, TCA_FW_MASK, head->mask)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 13ab66e9df58..36fec4227401 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -571,17 +571,21 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, if (!(f->handle & 0x8000)) { id = f->id & 0xFF; - NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); + if (nla_put_u32(skb, TCA_ROUTE4_TO, id)) + goto nla_put_failure; } if (f->handle & 0x80000000) { - if ((f->handle >> 16) != 0xFFFF) - NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); + if ((f->handle >> 16) != 0xFFFF && + nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif)) + goto nla_put_failure; } else { id = f->id >> 16; - NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); + if (nla_put_u32(skb, TCA_ROUTE4_FROM, id)) + goto nla_put_failure; } - if (f->res.classid) - NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid); + if (f->res.classid && + nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index b01427924f81..18ab93ec8d7e 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -615,18 +615,22 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst); + if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst)) + goto nla_put_failure; pinfo.dpi = s->dpi; pinfo.spi = f->spi; pinfo.protocol = s->protocol; pinfo.tunnelid = s->tunnelid; pinfo.tunnelhdr = f->tunnelhdr; pinfo.pad = 0; - NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); - if (f->res.classid) - NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); - if (((f->handle >> 8) & 0xFF) != 16) - NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); + if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo)) + goto nla_put_failure; + if (f->res.classid && + nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid)) + goto nla_put_failure; + if (((f->handle >> 8) & 0xFF) != 16 && + nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index dbe199234c63..fe29420d0b0e 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -438,10 +438,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, if (!fh) { t->tcm_handle = ~0; /* whatever ... 
*/ - NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash); - NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask); - NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift); - NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through); + if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) || + nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) || + nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) || + nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through)) + goto nla_put_failure; nla_nest_end(skb, nest); } else { if (p->perfect) { @@ -460,8 +461,9 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, } } pr_debug("handle = %d\n", t->tcm_handle); - if (r->res.class) - NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid); + if (r->res.class && + nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid)) + goto nla_put_failure; if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) goto nla_put_failure; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 939b627b4795..591b006a8c5a 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -733,36 +733,44 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; u32 divisor = ht->divisor + 1; - NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); + if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) + goto nla_put_failure; } else { - NLA_PUT(skb, TCA_U32_SEL, - sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), - &n->sel); + if (nla_put(skb, TCA_U32_SEL, + sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), + &n->sel)) + goto nla_put_failure; if (n->ht_up) { u32 htid = n->handle & 0xFFFFF000; - NLA_PUT_U32(skb, TCA_U32_HASH, htid); + if (nla_put_u32(skb, TCA_U32_HASH, htid)) + goto nla_put_failure; } - if (n->res.classid) - NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid); - if (n->ht_down) - NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle); + if (n->res.classid && + nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid)) + goto nla_put_failure; + if (n->ht_down && + nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle)) + goto nla_put_failure; #ifdef CONFIG_CLS_U32_MARK - if (n->mark.val || n->mark.mask) - NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark); + if ((n->mark.val || n->mark.mask) && + nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark)) + goto nla_put_failure; #endif if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0) goto nla_put_failure; #ifdef CONFIG_NET_CLS_IND - if (strlen(n->indev)) - NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); + if (strlen(n->indev) && + nla_put_string(skb, TCA_U32_INDEV, n->indev)) + goto nla_put_failure; #endif #ifdef CONFIG_CLS_U32_PERF - NLA_PUT(skb, TCA_U32_PCNT, - sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), - n->pf); + if (nla_put(skb, TCA_U32_PCNT, + sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), + n->pf)) + goto nla_put_failure; #endif } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 1363bf14e61b..4790c696cbce 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -585,8 +585,9 @@ static void meta_var_apply_extras(struct meta_value *v, static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv) { - if (v->val && v->len) - NLA_PUT(skb, tlv, v->len, (void *) v->val); + if (v->val && v->len && + nla_put(skb, tlv, v->len, (void *) v->val)) + goto nla_put_failure; return 0; nla_put_failure: @@ -636,10 +637,13 @@ static void meta_int_apply_extras(struct meta_value *v, static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) { - if (v->len == sizeof(unsigned long)) - NLA_PUT(skb, tlv, 
sizeof(unsigned long), &v->val); - else if (v->len == sizeof(u32)) - NLA_PUT_U32(skb, tlv, v->val); + if (v->len == sizeof(unsigned long)) { + if (nla_put(skb, tlv, sizeof(unsigned long), &v->val)) + goto nla_put_failure; + } else if (v->len == sizeof(u32)) { + if (nla_put_u32(skb, tlv, v->val)) + goto nla_put_failure; + } return 0; @@ -831,7 +835,8 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em) memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left)); memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right)); - NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr); + if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr)) + goto nla_put_failure; ops = meta_type_ops(&meta->lvalue); if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 || diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 88d93eb92507..aca233c2b848 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -441,7 +441,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) if (top_start == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); + if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr)) + goto nla_put_failure; list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); if (list_start == NULL) @@ -457,7 +458,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) .flags = em->flags }; - NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); + if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr)) + goto nla_put_failure; if (em->ops && em->ops->dump) { if (em->ops->dump(skb, em) < 0) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 3d8981fde301..d2daefcc205f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -426,7 +426,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) nest = nla_nest_start(skb, TCA_STAB); if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); + if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts)) + goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; @@ -1201,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, tcm->tcm_parent = clid; tcm->tcm_handle = q->handle; tcm->tcm_info = atomic_read(&q->refcnt); - NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); + if (nla_put_string(skb, TCA_KIND, q->ops->id)) + goto nla_put_failure; if (q->ops->dump && q->ops->dump(q, skb) < 0) goto nla_put_failure; q->qstats.qlen = q->q.qlen; @@ -1505,7 +1507,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, tcm->tcm_parent = q->handle; tcm->tcm_handle = q->handle; tcm->tcm_info = 0; - NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); + if (nla_put_string(skb, TCA_KIND, q->ops->id)) + goto nla_put_failure; if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) goto nla_put_failure; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index e25e49061a0d..a77a4fbc069a 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -601,7 +601,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); + if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr)) + goto nla_put_failure; if (flow->vcc) { struct sockaddr_atmpvc pvc; int state; @@ -610,15 +611,19 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, pvc.sap_addr.itf = flow->vcc->dev ? 
flow->vcc->dev->number : -1; pvc.sap_addr.vpi = flow->vcc->vpi; pvc.sap_addr.vci = flow->vcc->vci; - NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); + if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc)) + goto nla_put_failure; state = ATM_VF2VS(flow->vcc->flags); - NLA_PUT_U32(skb, TCA_ATM_STATE, state); + if (nla_put_u32(skb, TCA_ATM_STATE, state)) + goto nla_put_failure; + } + if (flow->excess) { + if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid)) + goto nla_put_failure; + } else { + if (nla_put_u32(skb, TCA_ATM_EXCESS, 0)) + goto nla_put_failure; } - if (flow->excess) - NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); - else - NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); - nla_nest_end(skb, nest); return skb->len; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 24d94c097b35..6aabd77d1cfd 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1425,7 +1425,8 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) { unsigned char *b = skb_tail_pointer(skb); - NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); + if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1450,7 +1451,8 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) opt.minidle = (u32)(-cl->minidle); opt.offtime = cl->offtime; opt.change = ~0; - NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1468,7 +1470,8 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) opt.priority = cl->priority + 1; opt.cpriority = cl->cpriority + 1; opt.weight = cl->weight; - NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1485,7 +1488,8 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) opt.priority2 = cl->priority2 + 1; opt.pad = 0; opt.penalty = cl->penalty; - NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: @@ -1502,7 +1506,8 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) opt.split = cl->split ? 
cl->split->common.classid : 0; opt.defmap = cl->defmap; opt.defchange = ~0; - NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt)) + goto nla_put_failure; } return skb->len; @@ -1521,7 +1526,8 @@ static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) opt.police = cl->police; opt.__res1 = 0; opt.__res2 = 0; - NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); + if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt)) + goto nla_put_failure; } return skb->len; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 7e267d7b9c75..81445cc8196f 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -515,8 +515,9 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) if (opts == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); - NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); + if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) || + nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P)) + goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 6b7fe4a84f13..c2189879359b 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -260,7 +260,8 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg, nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum); + if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum)) + goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 2c790204d042..389b856c6653 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -429,8 +429,9 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); - NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); + if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) || + nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1])) + goto nla_put_failure; return nla_nest_end(skb, opts); @@ -447,13 +448,16 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); + if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices)) + goto nla_put_failure; - if (p->default_index != NO_DEFAULT_INDEX) - NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); + if (p->default_index != NO_DEFAULT_INDEX && + nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index)) + goto nla_put_failure; - if (p->set_tc_index) - NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); + if (p->set_tc_index && + nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX)) + goto nla_put_failure; return nla_nest_end(skb, opts); diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 66effe2da8e0..e15a9eb29087 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -85,7 +85,8 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tc_fifo_qopt opt = { .limit = sch->limit }; - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 67fc573e013a..0eb1202c22a6 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -512,7 +512,8 @@ static int 
pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 0b15236be7b6..55e3310edc94 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -521,14 +521,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); + if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt)) + goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; max_p[i] = q ? q->parms.max_P : 0; } - NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); + if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p)) + goto nla_put_failure; parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 9bdca2e011e9..8db3e2c72827 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1305,7 +1305,8 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) tsc.m1 = sm2m(sc->sm1); tsc.d = dx2d(sc->dx); tsc.m2 = sm2m(sc->sm2); - NLA_PUT(skb, attr, sizeof(tsc), &tsc); + if (nla_put(skb, attr, sizeof(tsc), &tsc)) + goto nla_put_failure; return skb->len; @@ -1573,7 +1574,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) } qopt.defcls = q->defcls; - NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); + if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 29b942ce9e82..2ea6f196e3c8 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1051,7 +1051,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); + if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt)) + goto nla_put_failure; nla_nest_end(skb, nest); spin_unlock_bh(root_lock); @@ -1090,7 +1091,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, opt.quantum = cl->quantum; opt.prio = cl->prio; opt.level = cl->level; - NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; nla_nest_end(skb, nest); spin_unlock_bh(root_lock); diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 28de43092330..d1831ca966d4 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -247,7 +247,8 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) opt.offset[i] = dev->tc_to_txq[i].offset; } - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; nla_put_failure: diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 49131d7a7446..2a2b096d9a66 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -284,7 +284,8 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) opt.bands = q->bands; opt.max_bands = q->max_bands; - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c 
index 5da548fa7ae9..110973145a4b 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -834,7 +834,8 @@ static int dump_loss_model(const struct netem_sched_data *q, .p23 = q->clg.a5, }; - NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi); + if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi)) + goto nla_put_failure; break; } case CLG_GILB_ELL: { @@ -845,7 +846,8 @@ static int dump_loss_model(const struct netem_sched_data *q, .k1 = q->clg.a4, }; - NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge); + if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge)) + goto nla_put_failure; break; } } @@ -874,26 +876,31 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) qopt.loss = q->loss; qopt.gap = q->gap; qopt.duplicate = q->duplicate; - NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); + if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) + goto nla_put_failure; cor.delay_corr = q->delay_cor.rho; cor.loss_corr = q->loss_cor.rho; cor.dup_corr = q->dup_cor.rho; - NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); + if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor)) + goto nla_put_failure; reorder.probability = q->reorder; reorder.correlation = q->reorder_cor.rho; - NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); + if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder)) + goto nla_put_failure; corrupt.probability = q->corrupt; corrupt.correlation = q->corrupt_cor.rho; - NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); + if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt)) + goto nla_put_failure; rate.rate = q->rate; rate.packet_overhead = q->packet_overhead; rate.cell_size = q->cell_size; rate.cell_overhead = q->cell_overhead; - NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); + if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) + goto nla_put_failure; if (dump_loss_model(q, skb) != 0) goto nla_put_failure; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index b5d56a22b1d2..79359b69ad8d 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -247,7 +247,8 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) opt.bands = q->bands; memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e68cb440756a..9af01f3df18c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -429,8 +429,9 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg, nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w); - NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax); + if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) || + nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax)) + goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a5cc3012cf42..633e32defdcc 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -272,8 +272,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; - NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); - NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P); + if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) || + nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P)) + goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: diff --git a/net/sched/sch_sfb.c 
b/net/sched/sch_sfb.c index d7eea99333e9..74305c883bd3 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -570,7 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb) sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start(skb, TCA_OPTIONS); - NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 02a21abea65e..d3a1bc26dbfc 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -812,7 +812,8 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) memcpy(&opt.stats, &q->stats, sizeof(opt.stats)); opt.flags = q->flags; - NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) + goto nla_put_failure; return skb->len; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index b8e156319d7b..4b056c15e90c 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -359,7 +359,8 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) memset(&opt.peakrate, 0, sizeof(opt.peakrate)); opt.mtu = q->mtu; opt.buffer = q->buffer; - NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); + if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; -- cgit v1.2.2 From 7f116b5b6c2418e9d9ba2c2ba82cbbad45e36fc5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 23:15:10 -0400 Subject: phonet: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/phonet/pn_netlink.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index d61f6761777d..cfdf135fcd69 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -116,7 +116,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, ifm->ifa_flags = IFA_F_PERMANENT; ifm->ifa_scope = RT_SCOPE_LINK; ifm->ifa_index = dev->ifindex; - NLA_PUT_U8(skb, IFA_LOCAL, addr); + if (nla_put_u8(skb, IFA_LOCAL, addr)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: @@ -183,8 +184,9 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; - NLA_PUT_U8(skb, RTA_DST, dst); - NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); + if (nla_put_u8(skb, RTA_DST, dst) || + nla_put_u32(skb, RTA_OIF, dev->ifindex)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: -- cgit v1.2.2 From 028d6a6767456d6c84a72d3451f19fe7ca7b47db Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 23:20:48 -0400 Subject: openvswitch: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/openvswitch/datapath.c | 34 ++++++++++++++++++++-------------- net/openvswitch/flow.c | 18 +++++++++++------- 2 files changed, 31 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2c030505b335..f5ca1257debf 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -779,15 +779,18 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, tcp_flags = flow->tcp_flags; spin_unlock_bh(&flow->lock); - if (used) - NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); + if (used && + nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) + goto nla_put_failure; - if (stats.n_packets) - NLA_PUT(skb, OVS_FLOW_ATTR_STATS, - sizeof(struct ovs_flow_stats), &stats); + if (stats.n_packets && + nla_put(skb, OVS_FLOW_ATTR_STATS, + sizeof(struct ovs_flow_stats), &stats)) + goto nla_put_failure; - if (tcp_flags) - NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); + if (tcp_flags && + nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags)) + goto nla_put_failure; /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. This is unusual for @@ -1169,7 +1172,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, goto nla_put_failure; get_dp_stats(dp, &dp_stats); - NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); + if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats)) + goto nla_put_failure; return genlmsg_end(skb, ovs_header); @@ -1469,14 +1473,16 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, ovs_header->dp_ifindex = get_dpifindex(vport->dp); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); - NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); - NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); + if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) || + nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) || + nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) || + nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid)) + goto nla_put_failure; ovs_vport_get_stats(vport, &vport_stats); - NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), - &vport_stats); + if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), + &vport_stats)) + goto nla_put_failure; err = ovs_vport_get_options(vport, skb); if (err == -EMSGSIZE) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 1252c3081ef1..7cb416381e87 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -1174,11 +1174,13 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) struct ovs_key_ethernet *eth_key; struct nlattr *nla, *encap; - if (swkey->phy.priority) - NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); + if (swkey->phy.priority && + nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) + goto nla_put_failure; - if (swkey->phy.in_port != USHRT_MAX) - NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); + if (swkey->phy.in_port != USHRT_MAX && + nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) + goto nla_put_failure; nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) @@ -1188,8 +1190,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) 
memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); - NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) || + nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci)) + goto nla_put_failure; encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); if (!swkey->eth.tci) goto unencap; @@ -1200,7 +1203,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (swkey->eth.type == htons(ETH_P_802_2)) goto unencap; - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type)) + goto nla_put_failure; if (swkey->eth.type == htons(ETH_P_IP)) { struct ovs_key_ipv4 *ipv4_key; -- cgit v1.2.2 From 1e6428d82b3c61ac3bb725b64ffb634b60cbd678 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 23:23:57 -0400 Subject: nfc: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/nfc/netlink.c | 70 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 6404052d6c07..8937664674fa 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -63,19 +63,23 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, genl_dump_check_consistent(cb, hdr, &nfc_genl_family); - NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx); - NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols); - NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); - NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); - if (target->nfcid1_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, - target->nfcid1); - if (target->sensb_res_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, - target->sensb_res); - if (target->sensf_res_len > 0) - NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, - target->sensf_res); + if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) || + nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) || + nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res)) + goto nla_put_failure; + if (target->nfcid1_len > 0 && + nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, + target->nfcid1)) + goto nla_put_failure; + if (target->sensb_res_len > 0 && + nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, + target->sensb_res)) + goto nla_put_failure; + if (target->sensf_res_len > 0 && + nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, + target->sensf_res)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -170,7 +174,8 @@ int nfc_genl_targets_found(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -197,10 +202,11 @@ int nfc_genl_device_added(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); - NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up); + if 
(nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || + nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || + nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -229,7 +235,8 @@ int nfc_genl_device_removed(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -259,10 +266,11 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, if (cb) genl_dump_check_consistent(cb, hdr, &nfc_genl_family); - NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); - NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up); + if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || + nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || + nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) + goto nla_put_failure; return genlmsg_end(msg, hdr); @@ -339,11 +347,14 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); - if (rf_mode == NFC_RF_INITIATOR) - NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx); - NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode); - NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; + if (rf_mode == NFC_RF_INITIATOR && + nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) + goto nla_put_failure; + if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) || + nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode)) + goto nla_put_failure; genlmsg_end(msg, hdr); @@ -376,7 +387,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev) if (!hdr) goto free_msg; - NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; genlmsg_end(msg, hdr); -- cgit v1.2.2 From 444653f696d60217e145b050fb82967eaf34eb3f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 23:25:11 -0400 Subject: genetlink: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/netlink/genetlink.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 9f40441d7a7d..8340ace837f2 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -635,11 +635,12 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, if (hdr == NULL) return -1; - NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name); - NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id); - NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version); - NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize); - NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr); + if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) || + nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) || + nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) || + nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) || + nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr)) + goto nla_put_failure; if (!list_empty(&family->ops_list)) { struct nlattr *nla_ops; @@ -657,8 +658,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); - NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); + if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) || + nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags)) + goto nla_put_failure; nla_nest_end(skb, nest); } @@ -682,9 +684,10 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); - NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, - grp->name); + if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || + nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, + grp->name)) + goto nla_put_failure; nla_nest_end(skb, nest); } @@ -710,8 +713,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, if (hdr == NULL) return -1; - NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name); - NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id); + if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) || + nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id)) + goto nla_put_failure; nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); if (nla_grps == NULL) @@ -721,9 +725,10 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, if (nest == NULL) goto nla_put_failure; - NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); - NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, - grp->name); + if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) || + nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, + grp->name)) + goto nla_put_failure; nla_nest_end(skb, nest); nla_nest_end(skb, nla_grps); -- cgit v1.2.2 From a447189e073bf603e335f20f924b71d385d6b2ef Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 23:27:38 -0400 Subject: nfnetlink_queue: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/netfilter/nfnetlink_queue.c | 59 ++++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index a80b0cb03f17..8d6bcf32c0ed 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -288,58 +288,67 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, indev = entry->indev; if (indev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) + goto nla_put_failure; #else if (entry->pf == PF_BRIDGE) { /* Case 1: indev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, - htonl(indev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, + htonl(indev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, - htonl(br_port_get_rcu(indev)->br->dev->ifindex)); + nla_put_be32(skb, NFQA_IFINDEX_INDEV, + htonl(br_port_get_rcu(indev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, - htonl(indev->ifindex)); - if (entskb->nf_bridge && entskb->nf_bridge->physindev) - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, - htonl(entskb->nf_bridge->physindev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, + htonl(indev->ifindex))) + goto nla_put_failure; + if (entskb->nf_bridge && entskb->nf_bridge->physindev && + nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV, + htonl(entskb->nf_bridge->physindev->ifindex))) + goto nla_put_failure; } #endif } if (outdev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) + goto nla_put_failure; #else if (entry->pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, - htonl(outdev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, + htonl(outdev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, - htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); + nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, + htonl(br_port_get_rcu(outdev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: outdev is bridge group, we need to look for * physical output device (when called from ipv4) */ - NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, - htonl(outdev->ifindex)); - if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) - NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, - htonl(entskb->nf_bridge->physoutdev->ifindex)); + if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, + htonl(outdev->ifindex))) + goto nla_put_failure; + if (entskb->nf_bridge && entskb->nf_bridge->physoutdev && + nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV, + htonl(entskb->nf_bridge->physoutdev->ifindex))) + goto nla_put_failure; } #endif } - if (entskb->mark) - NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); + if (entskb->mark && + nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark))) + goto nla_put_failure; if (indev && entskb->dev && entskb->mac_header != entskb->network_header) { @@ -347,7 +356,8 @@ 
nfqnl_build_packet_message(struct nfqnl_instance *queue, int len = dev_parse_header(entskb, phw.hw_addr); if (len) { phw.hw_addrlen = htons(len); - NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); + if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) + goto nla_put_failure; } } @@ -357,7 +367,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); - NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); + if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts)) + goto nla_put_failure; } if (data_len) { -- cgit v1.2.2 From 1db20a52950e329f308900f1c603bf05bb1bebc3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2012 23:31:16 -0400 Subject: nfnetlink_log: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nfnetlink_log.c | 100 ++++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 42 deletions(-) (limited to 'net') diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 66b2c54c544f..3c3cfc0cc9b5 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -391,67 +391,78 @@ __build_packet_message(struct nfulnl_instance *inst, pmsg.hw_protocol = skb->protocol; pmsg.hook = hooknum; - NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); + if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg)) + goto nla_put_failure; - if (prefix) - NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); + if (prefix && + nla_put(inst->skb, NFULA_PREFIX, plen, prefix)) + goto nla_put_failure; if (indev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(indev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, + htonl(indev->ifindex))) + goto nla_put_failure; #else if (pf == PF_BRIDGE) { /* Case 1: outdev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, - htonl(indev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV, + htonl(indev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(br_port_get_rcu(indev)->br->dev->ifindex)); + nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, + htonl(br_port_get_rcu(indev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, - htonl(indev->ifindex)); - if (skb->nf_bridge && skb->nf_bridge->physindev) - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, - htonl(skb->nf_bridge->physindev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, + htonl(indev->ifindex))) + goto nla_put_failure; + if (skb->nf_bridge && skb->nf_bridge->physindev && + nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV, + htonl(skb->nf_bridge->physindev->ifindex))) + goto nla_put_failure; } #endif } if (outdev) { #ifndef CONFIG_BRIDGE_NETFILTER - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(outdev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, + htonl(outdev->ifindex))) + goto nla_put_failure; #else if (pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, - 
htonl(outdev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, + htonl(outdev->ifindex)) || /* this is the bridge group "brX" */ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); + nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, + htonl(br_port_get_rcu(outdev)->br->dev->ifindex))) + goto nla_put_failure; } else { /* Case 2: indev is a bridge group, we need to look * for physical device (when called from ipv4) */ - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, - htonl(outdev->ifindex)); - if (skb->nf_bridge && skb->nf_bridge->physoutdev) - NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, - htonl(skb->nf_bridge->physoutdev->ifindex)); + if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, + htonl(outdev->ifindex))) + goto nla_put_failure; + if (skb->nf_bridge && skb->nf_bridge->physoutdev && + nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, + htonl(skb->nf_bridge->physoutdev->ifindex))) + goto nla_put_failure; } #endif } - if (skb->mark) - NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); + if (skb->mark && + nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark))) + goto nla_put_failure; if (indev && skb->dev && skb->mac_header != skb->network_header) { @@ -459,16 +470,18 @@ __build_packet_message(struct nfulnl_instance *inst, int len = dev_parse_header(skb, phw.hw_addr); if (len > 0) { phw.hw_addrlen = htons(len); - NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); + if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) + goto nla_put_failure; } } if (indev && skb_mac_header_was_set(skb)) { - NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)); - NLA_PUT_BE16(inst->skb, NFULA_HWLEN, - htons(skb->dev->hard_header_len)); - NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, - skb_mac_header(skb)); + if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || + nla_put_be16(inst->skb, NFULA_HWLEN, + htons(skb->dev->hard_header_len)) || + nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, + skb_mac_header(skb))) + goto nla_put_failure; } if (skb->tstamp.tv64) { @@ -477,7 +490,8 @@ __build_packet_message(struct nfulnl_instance *inst, ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); - NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); + if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts)) + goto nla_put_failure; } /* UID */ @@ -487,22 +501,24 @@ __build_packet_message(struct nfulnl_instance *inst, struct file *file = skb->sk->sk_socket->file; __be32 uid = htonl(file->f_cred->fsuid); __be32 gid = htonl(file->f_cred->fsgid); - /* need to unlock here since NLA_PUT may goto */ read_unlock_bh(&skb->sk->sk_callback_lock); - NLA_PUT_BE32(inst->skb, NFULA_UID, uid); - NLA_PUT_BE32(inst->skb, NFULA_GID, gid); + if (nla_put_be32(inst->skb, NFULA_UID, uid) || + nla_put_be32(inst->skb, NFULA_GID, gid)) + goto nla_put_failure; } else read_unlock_bh(&skb->sk->sk_callback_lock); } /* local sequence number */ - if (inst->flags & NFULNL_CFG_F_SEQ) - NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++)); + if ((inst->flags & NFULNL_CFG_F_SEQ) && + nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++))) + goto nla_put_failure; /* global sequence number */ - if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) - NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, - htonl(atomic_inc_return(&global_seq))); + if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) && + nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL, + htonl(atomic_inc_return(&global_seq)))) + 
goto nla_put_failure; if (data_len) { struct nlattr *nla; -- cgit v1.2.2 From 48f03bdad8b3b0f5a96db45d517149eccd7f4ca8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:46:00 -0400 Subject: nfnetlink_cttimeout: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nfnetlink_cttimeout.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 2b9e79f5ef05..3e655288d1d6 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -170,11 +170,12 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name); - NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)); - NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto); - NLA_PUT_BE32(skb, CTA_TIMEOUT_USE, - htonl(atomic_read(&timeout->refcnt))); + if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) || + nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || + nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || + nla_put_be32(skb, CTA_TIMEOUT_USE, + htonl(atomic_read(&timeout->refcnt)))) + goto nla_put_failure; if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { struct nlattr *nest_parms; -- cgit v1.2.2 From 7c8011895330d3069b137233e2673f40ee6f4c91 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:46:29 -0400 Subject: nfnetlink_acct: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nfnetlink_acct.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index d98c868c148b..b2e7310ca0b8 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -109,7 +109,8 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - NLA_PUT_STRING(skb, NFACCT_NAME, acct->name); + if (nla_put_string(skb, NFACCT_NAME, acct->name)) + goto nla_put_failure; if (type == NFNL_MSG_ACCT_GET_CTRZERO) { pkts = atomic64_xchg(&acct->pkts, 0); @@ -118,9 +119,10 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, pkts = atomic64_read(&acct->pkts); bytes = atomic64_read(&acct->bytes); } - NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts)); - NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes)); - NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))); + if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) || + nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) || + nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) + goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; -- cgit v1.2.2 From 3c60a17b1b55ac4a72382502c952222b7649d63b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:48:06 -0400 Subject: nf_conntrack_proto_udp{,lite}: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/netfilter/nf_conntrack_proto_udp.c | 9 +++++---- net/netfilter/nf_conntrack_proto_udplite.c | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index a9073dc1548d..7259a6bdeb49 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -181,10 +181,11 @@ udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED, - htonl(timeouts[UDP_CT_UNREPLIED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED, - htonl(timeouts[UDP_CT_REPLIED] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED, + htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED, + htonl(timeouts[UDP_CT_REPLIED] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index e0606392cda0..4d60a5376aa6 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -185,10 +185,11 @@ udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, - htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, - htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, + htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, + htonl(timeouts[UDPLITE_CT_REPLIED] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From 4925a459e96767e84b112c023c3766d4aeefb611 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:50:08 -0400 Subject: nf_conntrack_proto_tcp: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/netfilter/nf_conntrack_proto_tcp.c | 68 +++++++++++++++++----------------- 1 file changed, 35 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 361eade62a09..cc558162493b 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1147,21 +1147,22 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state); - - NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, - ct->proto.tcp.seen[0].td_scale); - - NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, - ct->proto.tcp.seen[1].td_scale); + if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) || + nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, + ct->proto.tcp.seen[0].td_scale) || + nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, + ct->proto.tcp.seen[1].td_scale)) + goto nla_put_failure; tmp.flags = ct->proto.tcp.seen[0].flags; - NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, - sizeof(struct nf_ct_tcp_flags), &tmp); + if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, + sizeof(struct nf_ct_tcp_flags), &tmp)) + goto nla_put_failure; tmp.flags = ct->proto.tcp.seen[1].flags; - NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, - sizeof(struct nf_ct_tcp_flags), &tmp); + if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, + sizeof(struct nf_ct_tcp_flags), &tmp)) + goto nla_put_failure; spin_unlock_bh(&ct->lock); nla_nest_end(skb, nest_parms); @@ -1310,28 +1311,29 @@ tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT, - htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV, - htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED, - htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT, - htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT, - htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK, - htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT, - htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE, - htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2, - htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS, - htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK, - htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT, + htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV, + htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED, + htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT, + htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT, + htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK, + htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT, + htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE, + htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) || + nla_put_be32(skb, 
CTA_TIMEOUT_TCP_SYN_SENT2, + htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS, + htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK, + htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From 5e8d1eb5fb51e5b9d266a611133fea808320ab97 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:51:07 -0400 Subject: nf_conntrack_proto_sctp: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_proto_sctp.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 72b5088592dc..996db2fa21f7 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -482,15 +482,12 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state); - - NLA_PUT_BE32(skb, - CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, - ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]); - - NLA_PUT_BE32(skb, - CTA_PROTOINFO_SCTP_VTAG_REPLY, - ct->proto.sctp.vtag[IP_CT_DIR_REPLY]); + if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) || + nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, + ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) || + nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY, + ct->proto.sctp.vtag[IP_CT_DIR_REPLY])) + goto nla_put_failure; spin_unlock_bh(&ct->lock); @@ -578,9 +575,10 @@ sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) const unsigned int *timeouts = data; int i; - for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i Date: Sun, 1 Apr 2012 18:52:03 -0400 Subject: nf_conntrack_proto_gre: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_proto_gre.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 659648c4b14a..4bf6b4e4b776 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -321,10 +321,11 @@ gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED, - htonl(timeouts[GRE_CT_UNREPLIED] / HZ)); - NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED, - htonl(timeouts[GRE_CT_REPLIED] / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED, + htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) || + nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED, + htonl(timeouts[GRE_CT_REPLIED] / HZ))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From f577694143b8024e95b2c0b680ab2071424bb1c7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:52:31 -0400 Subject: nf_conntrack_proto_generic: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/netfilter/nf_conntrack_proto_generic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 835e24c58f0d..d8923d54b358 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -90,7 +90,8 @@ generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)); + if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ))) + goto nla_put_failure; return 0; -- cgit v1.2.2 From 516ee48f0be93ea5b41eaa5f7c5e06246447e575 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:53:24 -0400 Subject: nf_conntrack_proto_dccp: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_proto_dccp.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 24fdce256cb0..a58998d0912f 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -643,11 +643,12 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); - NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, - ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); - NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, - cpu_to_be64(ct->proto.dccp.handshake_seq)); + if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) || + nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE, + ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || + nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, + cpu_to_be64(ct->proto.dccp.handshake_seq))) + goto nla_put_failure; nla_nest_end(skb, nest_parms); spin_unlock_bh(&ct->lock); return 0; @@ -739,9 +740,10 @@ dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) const unsigned int *timeouts = data; int i; - for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i Date: Sun, 1 Apr 2012 18:57:48 -0400 Subject: nf_conntrack_netlink: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/netfilter/nf_conntrack_netlink.c | 83 +++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index ca7e8354e4f8..462ec2dbe561 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -66,7 +66,8 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb, nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; - NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum); + if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum)) + goto nla_put_failure; if (likely(l4proto->tuple_to_nlattr)) ret = l4proto->tuple_to_nlattr(skb, tuple); @@ -126,7 +127,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb, static inline int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status)); + if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status))) + goto nla_put_failure; return 0; nla_put_failure: @@ -141,7 +143,8 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) if (timeout < 0) timeout = 0; - NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout)); + if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout))) + goto nla_put_failure; return 0; nla_put_failure: @@ -190,7 +193,8 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED); if (!nest_helper) goto nla_put_failure; - NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name); + if (nla_put_string(skb, CTA_HELP_NAME, helper->name)) + goto nla_put_failure; if (helper->to_nlattr) helper->to_nlattr(skb, ct); @@ -214,8 +218,9 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes, if (!nest_count) goto nla_put_failure; - NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)); - NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)); + if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) || + nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes))) + goto nla_put_failure; nla_nest_end(skb, nest_count); @@ -260,11 +265,10 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) if (!nest_count) goto nla_put_failure; - NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)); - if (tstamp->stop != 0) { - NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP, - cpu_to_be64(tstamp->stop)); - } + if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) || + (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP, + cpu_to_be64(tstamp->stop)))) + goto nla_put_failure; nla_nest_end(skb, nest_count); return 0; @@ -277,7 +281,8 @@ nla_put_failure: static inline int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark)); + if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) + goto nla_put_failure; return 0; nla_put_failure: @@ -304,7 +309,8 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) if (!nest_secctx) goto nla_put_failure; - NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx); + if (nla_put_string(skb, CTA_SECCTX_NAME, secctx)) + goto nla_put_failure; nla_nest_end(skb, nest_secctx); ret = 0; @@ -349,12 +355,13 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type) if (!nest_parms) goto nla_put_failure; - NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS, - htonl(natseq->correction_pos)); - NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, - 
htonl(natseq->offset_before)); - NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER, - htonl(natseq->offset_after)); + if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS, + htonl(natseq->correction_pos)) || + nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, + htonl(natseq->offset_before)) || + nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER, + htonl(natseq->offset_after))) + goto nla_put_failure; nla_nest_end(skb, nest_parms); @@ -390,7 +397,8 @@ ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct) static inline int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct)); + if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct))) + goto nla_put_failure; return 0; nla_put_failure: @@ -400,7 +408,8 @@ nla_put_failure: static inline int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) { - NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))); + if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -440,8 +449,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, goto nla_put_failure; nla_nest_end(skb, nest_parms); - if (nf_ct_zone(ct)) - NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); + if (nf_ct_zone(ct) && + nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct)))) + goto nla_put_failure; if (ctnetlink_dump_status(skb, ct) < 0 || ctnetlink_dump_timeout(skb, ct) < 0 || @@ -617,8 +627,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) goto nla_put_failure; nla_nest_end(skb, nest_parms); - if (nf_ct_zone(ct)) - NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); + if (nf_ct_zone(ct) && + nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct)))) + goto nla_put_failure; if (ctnetlink_dump_id(skb, ct) < 0) goto nla_put_failure; @@ -1705,7 +1716,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, if (!nest_parms) goto nla_put_failure; - NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)); + if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir))) + goto nla_put_failure; nat_tuple.src.l3num = nf_ct_l3num(master); nat_tuple.src.u3.ip = exp->saved_ip; @@ -1718,21 +1730,24 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, nla_nest_end(skb, nest_parms); } #endif - NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); - NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); - NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)); - NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class)); + if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || + nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) || + nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || + nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) + goto nla_put_failure; help = nfct_help(master); if (help) { struct nf_conntrack_helper *helper; helper = rcu_dereference(help->helper); - if (helper) - NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name); + if (helper && + nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name)) + goto nla_put_failure; } expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); - if (expfn != NULL) - NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name); + if (expfn != NULL && + nla_put_string(skb, CTA_EXPECT_FN, expfn->name)) + goto nla_put_failure; return 0; -- cgit v1.2.2 From bae65be896cc420f58460cb6f6ac03e71d1bf240 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 18:58:28 -0400 Subject: nf_conntrack_core: Stop using NLA_PUT*(). 
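Since every commit in this series repeats the same rationale, a single illustration may help. The sketch below is a standalone userspace toy, not kernel code: the NLA_PUT() body only approximates what include/net/netlink.h defined at the time, nla_put() is mocked so that every call fails, and fill_old()/fill_new() are invented names used purely for comparison. It shows the hidden jump these commit messages object to, and the explicit form the patches convert to.

/* Standalone userspace toy; names mirror the kernel's netlink helpers but
 * nothing here is the real API from include/net/netlink.h. */
#include <stdio.h>

/* Mocked attribute writer: pretend the message buffer is full, so every
 * put fails, much as the real nla_put() fails with -EMSGSIZE. */
static int nla_put(void *skb, int attrtype, int attrlen, const void *data)
{
	(void)skb; (void)attrtype; (void)attrlen; (void)data;
	return -1;
}

/* Roughly what the NLA_PUT() family expanded to: the error check and the
 * goto are hidden inside the macro, and the caller must still provide a
 * nla_put_failure: label somewhere in the function. */
#define NLA_PUT(skb, attrtype, attrlen, data)				\
	do {								\
		if (nla_put(skb, attrtype, attrlen, data) < 0)		\
			goto nla_put_failure;				\
	} while (0)

/* Before the conversion: each line can jump away without a visible if. */
static int fill_old(void *skb)
{
	NLA_PUT(skb, 1, 4, "abcd");
	NLA_PUT(skb, 2, 4, "efgh");
	return 0;

nla_put_failure:
	return -1;
}

/* After the conversion: the checks are explicit, and several puts can be
 * chained with ||, since nla_put() returns nonzero only on failure. */
static int fill_new(void *skb)
{
	if (nla_put(skb, 1, 4, "abcd") ||
	    nla_put(skb, 2, 4, "efgh"))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

int main(void)
{
	printf("old: %d, new: %d\n", fill_old(NULL), fill_new(NULL));
	return 0;
}

Both toy functions return -1 once the mock reports a full buffer; the difference is that fill_new() keeps the branch and the goto visible at the call site, which is what makes the converted code easier to audit.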
These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index cbdb754dbb10..b0ab8c43abe7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1151,8 +1151,9 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port); - NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port); + if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || + nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From 969e8e255c0a18c30cf78feca2d2e546b5746fc4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 19:03:15 -0400 Subject: ipvs: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller Acked-by: Simon Horman --- net/netfilter/ipvs/ip_vs_ctl.c | 108 ++++++++++++++++++++++------------------- 1 file changed, 58 insertions(+), 50 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b3afe189af61..964d426d237f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2816,17 +2816,17 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, ip_vs_copy_stats(&ustats, stats); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts); - NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes); - NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps); - NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps); - + if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) || + nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) || + nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) || + nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) || + nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) || + nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) || + nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) || + nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps)) + goto nla_put_failure; nla_nest_end(skb, nl_stats); return 0; @@ -2847,23 +2847,25 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb, if (!nl_service) return -EMSGSIZE; - NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); - + if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af)) + goto nla_put_failure; if (svc->fwmark) { - NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); + if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark)) + goto nla_put_failure; } else { - NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); - NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); - 
NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); + if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) || + nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) || + nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port)) + goto nla_put_failure; } - NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); - if (svc->pe) - NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name); - NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); - NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); - NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); - + if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) || + (svc->pe && + nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) || + nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || + nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || + nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask)) + goto nla_put_failure; if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) goto nla_put_failure; @@ -3038,21 +3040,22 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) if (!nl_dest) return -EMSGSIZE; - NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); - NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); - - NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, - atomic_read(&dest->activeconns)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, - atomic_read(&dest->inactconns)); - NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, - atomic_read(&dest->persistconns)); - + if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) || + nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) || + nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD, + (atomic_read(&dest->conn_flags) & + IP_VS_CONN_F_FWD_MASK)) || + nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT, + atomic_read(&dest->weight)) || + nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) || + nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) || + nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, + atomic_read(&dest->activeconns)) || + nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS, + atomic_read(&dest->inactconns)) || + nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, + atomic_read(&dest->persistconns))) + goto nla_put_failure; if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) goto nla_put_failure; @@ -3181,10 +3184,10 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state, if (!nl_daemon) return -EMSGSIZE; - NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); - NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); - NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); - + if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) || + nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) || + nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid)) + goto nla_put_failure; nla_nest_end(skb, nl_daemon); return 0; @@ -3473,21 +3476,26 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) __ip_vs_get_timeouts(net, &t); #ifdef CONFIG_IP_VS_PROTO_TCP - NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); - NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, - t.tcp_fin_timeout); + if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, + 
t.tcp_timeout) || + nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, + t.tcp_fin_timeout)) + goto nla_put_failure; #endif #ifdef CONFIG_IP_VS_PROTO_UDP - NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); + if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout)) + goto nla_put_failure; #endif break; } case IPVS_CMD_GET_INFO: - NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); - NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, - ip_vs_conn_tab_size); + if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION, + IP_VS_VERSION_CODE) || + nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, + ip_vs_conn_tab_size)) + goto nla_put_failure; break; } -- cgit v1.2.2 From 7cf7899d9ee31c88c86ea8459fc4db4bd11cc240 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 19:54:46 -0400 Subject: ipset: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/netfilter/ipset/ip_set_bitmap_ip.c | 33 +++++++------- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 43 ++++++++++-------- net/netfilter/ipset/ip_set_bitmap_port.c | 29 ++++++------ net/netfilter/ipset/ip_set_core.c | 43 ++++++++++-------- net/netfilter/ipset/ip_set_hash_ip.c | 20 +++++---- net/netfilter/ipset/ip_set_hash_ipport.c | 37 +++++++++------- net/netfilter/ipset/ip_set_hash_ipportip.c | 45 ++++++++++--------- net/netfilter/ipset/ip_set_hash_ipportnet.c | 69 +++++++++++++++-------------- net/netfilter/ipset/ip_set_hash_net.c | 45 ++++++++++--------- net/netfilter/ipset/ip_set_hash_netiface.c | 52 ++++++++++++---------- net/netfilter/ipset/ip_set_hash_netport.c | 61 +++++++++++++------------ net/netfilter/ipset/ip_set_list_set.c | 23 +++++----- 12 files changed, 271 insertions(+), 229 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index a72a4dff0031..7e1b061aeeba 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -109,8 +109,9 @@ bitmap_ip_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id * map->hosts)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id * map->hosts))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -194,10 +195,11 @@ bitmap_ip_tlist(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id * map->hosts)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(members[id]))); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id * map->hosts)) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(members[id])))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, adt); @@ -334,15 +336,16 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); - if (map->netmask != 32) - NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) + map->memsize)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, 
htonl(map->first_ip)) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) || + (map->netmask != 32 && + nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + map->memsize)) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 81324c12c5be..0bb16c469a89 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -186,11 +186,12 @@ bitmap_ipmac_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id)); - if (elem->match == MAC_FILLED) - NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, - elem->ether); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id)) || + (elem->match == MAC_FILLED && + nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, + elem->ether))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -314,14 +315,16 @@ bitmap_ipmac_tlist(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, - htonl(map->first_ip + id)); - if (elem->match == MAC_FILLED) - NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, - elem->ether); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, + htonl(map->first_ip + id)) || + (elem->match == MAC_FILLED && + nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, + elem->ether))) + goto nla_put_failure; timeout = elem->match == MAC_UNSET ? elem->timeout : ip_set_timeout_get(elem->timeout); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)); + if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, atd); @@ -438,14 +441,16 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) - + (map->last_ip - map->first_ip + 1) * map->dsize)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + + ((map->last_ip - map->first_ip + 1) * + map->dsize))) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 382ec28ba72e..b9f1fce7053b 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -96,8 +96,9 @@ bitmap_port_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, - htons(map->first_port + id)); + if (nla_put_net16(skb, IPSET_ATTR_PORT, + htons(map->first_port + id))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, 
atd); @@ -183,10 +184,11 @@ bitmap_port_tlist(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, - htons(map->first_port + id)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(members[id]))); + if (nla_put_net16(skb, IPSET_ATTR_PORT, + htons(map->first_port + id)) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(members[id])))) + goto nla_put_failure; ipset_nest_end(skb, nested); } ipset_nest_end(skb, adt); @@ -320,13 +322,14 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) + map->memsize)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); + if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) || + nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + map->memsize)) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index e6c1c9605a58..eb66b9790a6f 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1092,19 +1092,21 @@ dump_last: ret = -EMSGSIZE; goto release_refcount; } - NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); - NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name); + if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) || + nla_put_string(skb, IPSET_ATTR_SETNAME, set->name)) + goto nla_put_failure; if (dump_flags & IPSET_FLAG_LIST_SETNAME) goto next_set; switch (cb->args[2]) { case 0: /* Core header data */ - NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME, - set->type->name); - NLA_PUT_U8(skb, IPSET_ATTR_FAMILY, - set->family); - NLA_PUT_U8(skb, IPSET_ATTR_REVISION, - set->revision); + if (nla_put_string(skb, IPSET_ATTR_TYPENAME, + set->type->name) || + nla_put_u8(skb, IPSET_ATTR_FAMILY, + set->family) || + nla_put_u8(skb, IPSET_ATTR_REVISION, + set->revision)) + goto nla_put_failure; ret = set->variant->head(set, skb); if (ret < 0) goto release_refcount; @@ -1410,11 +1412,12 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb, IPSET_CMD_HEADER); if (!nlh2) goto nlmsg_failure; - NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); - NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name); - NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name); - NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family); - NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision); + if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) || + nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) || + nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) || + nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || + nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision)) + goto nla_put_failure; nlmsg_end(skb2, nlh2); ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); @@ -1469,11 +1472,12 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb, IPSET_CMD_TYPE); if (!nlh2) goto nlmsg_failure; - NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, 
IPSET_PROTOCOL); - NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename); - NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family); - NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max); - NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min); + if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) || + nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) || + nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) || + nla_put_u8(skb2, IPSET_ATTR_REVISION, max) || + nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min)) + goto nla_put_failure; nlmsg_end(skb2, nlh2); pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); @@ -1517,7 +1521,8 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb, IPSET_CMD_PROTOCOL); if (!nlh2) goto nlmsg_failure; - NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); + if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL)) + goto nla_put_failure; nlmsg_end(skb2, nlh2); ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 5139dea6019e..507fe93794aa 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -81,7 +81,8 @@ hash_ip4_data_zero_out(struct hash_ip4_elem *elem) static inline bool hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) { - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip)) + goto nla_put_failure; return 0; nla_put_failure: @@ -94,9 +95,10 @@ hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data) const struct hash_ip4_telem *tdata = (const struct hash_ip4_telem *)data; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; @@ -262,7 +264,8 @@ ip6_netmask(union nf_inet_addr *ip, u8 prefix) static bool hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) { - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6)) + goto nla_put_failure; return 0; nla_put_failure: @@ -275,9 +278,10 @@ hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data) const struct hash_ip6_telem *e = (const struct hash_ip6_telem *)data; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 9c27e249c171..68f284c97490 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -93,9 +93,10 @@ static bool hash_ipport4_data_list(struct sk_buff *skb, const struct hash_ipport4_elem *data) { - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -109,12 +110,12 @@ hash_ipport4_data_tlist(struct sk_buff *skb, const struct 
hash_ipport4_telem *tdata = (const struct hash_ipport4_telem *)data; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -308,9 +309,10 @@ static bool hash_ipport6_data_list(struct sk_buff *skb, const struct hash_ipport6_elem *data) { - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -324,11 +326,12 @@ hash_ipport6_data_tlist(struct sk_buff *skb, const struct hash_ipport6_telem *e = (const struct hash_ipport6_telem *)data; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 9134057c0728..1eec4b9e0dca 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -94,10 +94,11 @@ static bool hash_ipportip4_data_list(struct sk_buff *skb, const struct hash_ipportip4_elem *data) { - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -111,13 +112,13 @@ hash_ipportip4_data_tlist(struct sk_buff *skb, const struct hash_ipportip4_telem *tdata = (const struct hash_ipportip4_telem *)data; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -319,10 +320,11 @@ static bool hash_ipportip6_data_list(struct sk_buff *skb, const struct hash_ipportip6_elem *data) { - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, 
&data->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) + goto nla_put_failure; return 0; nla_put_failure: @@ -336,12 +338,13 @@ hash_ipportip6_data_tlist(struct sk_buff *skb, const struct hash_ipportip6_telem *e = (const struct hash_ipportip6_telem *)data; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 5d05e6969862..62d66ecef369 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -124,13 +124,14 @@ hash_ipportnet4_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -145,16 +146,16 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb, (const struct hash_ipportnet4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -436,13 +437,14 @@ hash_ipportnet6_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -457,15 +459,16 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb, (const struct hash_ipportnet6_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 7c3d945517cf..6607a814be57 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -111,10 +111,11 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -128,13 +129,13 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data) (const struct hash_net4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -339,10 +340,11 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data) { u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -356,12 +358,13 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data) (const struct hash_net6_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index f24037ff4322..6093f3daa911 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -252,11 +252,12 @@ hash_netiface4_data_list(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -273,13 +274,14 @@ hash_netiface4_data_tlist(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout)))) + goto nla_put_failure; return 0; @@ -555,11 +557,12 @@ hash_netiface6_data_list(struct sk_buff *skb, if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -576,13 +579,14 @@ hash_netiface6_data_tlist(struct sk_buff *skb, if (data->nomatch) flags |= 
IPSET_FLAG_NOMATCH; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); - NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || + nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index ce2e77100b64..ae3c644adc14 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -124,12 +124,13 @@ hash_netport4_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -144,15 +145,15 @@ hash_netport4_data_tlist(struct sk_buff *skb, (const struct hash_netport4_telem *)data; u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(tdata->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); - + if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) || + nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(tdata->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -402,12 +403,13 @@ hash_netport6_data_list(struct sk_buff *skb, { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: @@ -422,14 +424,15 @@ hash_netport6_data_tlist(struct sk_buff *skb, (const struct hash_netport6_telem *)data; u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; - NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); - NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); - NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); - NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(e->timeout))); - if (flags) - NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); + if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) || + nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || + nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || + nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, + htonl(ip_set_timeout_get(e->timeout))) || + (flags && + nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 7e095f9005f0..6cb1225765f9 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -402,12 +402,13 @@ list_set_head(struct ip_set *set, struct sk_buff *skb) nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; - NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); - if (with_timeout(map->timeout)) - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); - NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); - NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, - htonl(sizeof(*map) + map->size * map->dsize)); + if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || + (with_timeout(map->timeout) && + nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) || + nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, + htonl(sizeof(*map) + map->size * map->dsize))) + goto nla_put_failure; ipset_nest_end(skb, nested); return 0; @@ -442,13 +443,15 @@ list_set_list(const struct ip_set *set, } else goto nla_put_failure; } - NLA_PUT_STRING(skb, IPSET_ATTR_NAME, - ip_set_name_byindex(e->id)); + if (nla_put_string(skb, IPSET_ATTR_NAME, + ip_set_name_byindex(e->id))) + goto nla_put_failure; if (with_timeout(map->timeout)) { const struct set_telem *te = (const struct set_telem *) e; - NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, - htonl(ip_set_timeout_get(te->timeout))); + __be32 to = htonl(ip_set_timeout_get(te->timeout)); + if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to)) + goto nla_put_failure; } ipset_nest_end(skb, nested); } -- cgit v1.2.2 From 60aed2abb3f6a713c3a9beda1436866079ee146c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 19:59:31 -0400 Subject: l2tp: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_netlink.c | 114 +++++++++++++++++++++++++++--------------------- 1 file changed, 64 insertions(+), 50 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 93a41a09458b..bc8c3348f835 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -231,24 +231,28 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, if (IS_ERR(hdr)) return PTR_ERR(hdr); - NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); - NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); - NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); + if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || + nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) || + nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap)) + goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); - NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); - NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes) || + nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, + tunnel->stats.rx_seq_discards) || + nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, + tunnel->stats.rx_oos_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors)) + goto nla_put_failure; nla_nest_end(skb, nest); sk = tunnel->sock; @@ -259,13 +263,16 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, switch (tunnel->encap) { case L2TP_ENCAPTYPE_UDP: - NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); - NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); - NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); + if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || + nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) || + nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, + (sk->sk_no_check != UDP_CSUM_NOXMIT))) + goto nla_put_failure; /* NOBREAK */ case L2TP_ENCAPTYPE_IP: - NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); - NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); + if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) || + nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr)) + goto nla_put_failure; break; } @@ -563,43 +570,50 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags if (IS_ERR(hdr)) return PTR_ERR(hdr); - NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); - 
NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); - NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); - NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); - NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); - NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); - if (session->mru) - NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); - - if (session->ifname && session->ifname[0]) - NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); - if (session->cookie_len) - NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); - if (session->peer_cookie_len) - NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); - NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); - NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); - NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); + if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, + session->peer_session_id) || + nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) || + nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) || + nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) || + (session->mru && + nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) + goto nla_put_failure; + + if ((session->ifname && session->ifname[0] && + nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || + (session->cookie_len && + nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, + &session->cookie[0])) || + (session->peer_cookie_len && + nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, + &session->peer_cookie[0])) || + nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) || + nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) || + nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) || #ifdef CONFIG_XFRM - if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) - NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); + (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) && + nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) || #endif - if (session->reorder_timeout) - NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); - + (session->reorder_timeout && + nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout))) + goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; - NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); - NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); - NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); - NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); - NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes) || + nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, + session->stats.rx_seq_discards) || + nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, + session->stats.rx_oos_packets) || + 
nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors)) + goto nla_put_failure; nla_nest_end(skb, nest); return genlmsg_end(skb, hdr); -- cgit v1.2.2 From 1eb4c977778b5ab8e8fba9022687f0a5941d681a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:03:01 -0400 Subject: dcbnl: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/dcb/dcbnl.c | 72 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index d86053002c16..36f37af63bf2 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1205,13 +1205,15 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, if (!app) goto nla_put_failure; - if (app_info_type) - NLA_PUT(skb, app_info_type, sizeof(info), &info); - - for (i = 0; i < app_count; i++) - NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app), - &table[i]); + if (app_info_type && + nla_put(skb, app_info_type, sizeof(info), &info)) + goto nla_put_failure; + for (i = 0; i < app_count; i++) { + if (nla_put(skb, app_entry_type, sizeof(struct dcb_app), + &table[i])) + goto nla_put_failure; + } nla_nest_end(skb, app); } err = 0; @@ -1230,8 +1232,8 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) int dcbx; int err = -EMSGSIZE; - NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); - + if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) + goto nla_put_failure; ieee = nla_nest_start(skb, DCB_ATTR_IEEE); if (!ieee) goto nla_put_failure; @@ -1239,15 +1241,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getets) { struct ieee_ets ets; err = ops->ieee_getets(netdev, &ets); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) + goto nla_put_failure; } if (ops->ieee_getpfc) { struct ieee_pfc pfc; err = ops->ieee_getpfc(netdev, &pfc); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) + goto nla_put_failure; } app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); @@ -1278,15 +1282,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_peer_getets) { struct ieee_ets ets; err = ops->ieee_peer_getets(netdev, &ets); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) + goto nla_put_failure; } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; err = ops->ieee_peer_getpfc(netdev, &pfc); - if (!err) - NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc); + if (!err && + nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) + goto nla_put_failure; } if (ops->peer_getappinfo && ops->peer_getapptable) { @@ -1340,10 +1346,11 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); - NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); + if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || + nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || + nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || + 
nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) + goto nla_put_failure; nla_nest_end(skb, tc_nest); } @@ -1356,7 +1363,8 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); - NLA_PUT_U8(skb, i, tc_pct); + if (nla_put_u8(skb, i, tc_pct)) + goto nla_put_failure; } nla_nest_end(skb, pg); return 0; @@ -1373,8 +1381,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) int dcbx, i, err = -EMSGSIZE; u8 value; - NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); - + if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) + goto nla_put_failure; cee = nla_nest_start(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; @@ -1401,7 +1409,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); - NLA_PUT_U8(skb, i, value); + if (nla_put_u8(skb, i, value)) + goto nla_put_failure; } nla_nest_end(skb, pfc_nest); } @@ -1454,8 +1463,9 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) - if (!ops->getfeatcfg(netdev, i, &value)) - NLA_PUT_U8(skb, i, value); + if (!ops->getfeatcfg(netdev, i, &value) && + nla_put_u8(skb, i, value)) + goto nla_put_failure; nla_nest_end(skb, feat); } @@ -1464,15 +1474,17 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->cee_peer_getpg) { struct cee_pg pg; err = ops->cee_peer_getpg(netdev, &pg); - if (!err) - NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg); + if (!err && + nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) + goto nla_put_failure; } if (ops->cee_peer_getpfc) { struct cee_pfc pfc; err = ops->cee_peer_getpfc(netdev, &pfc); - if (!err) - NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc); + if (!err && + nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) + goto nla_put_failure; } if (ops->peer_getappinfo && ops->peer_getapptable) { -- cgit v1.2.2 From 9a6308d74edb791c05d0e292e6263efc69640942 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:06:28 -0400 Subject: neighbour: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/core/neighbour.c | 75 ++++++++++++++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 35 deletions(-) (limited to 'net') diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 0a68045782d1..ac71765d6fd0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1768,29 +1768,29 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) if (nest == NULL) return -ENOBUFS; - if (parms->dev) - NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); - - NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); - NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes); - /* approximative value for deprecated QUEUE_LEN (in packets) */ - NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, - DIV_ROUND_UP(parms->queue_len_bytes, - SKB_TRUESIZE(ETH_FRAME_LEN))); - NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); - NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); - NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); - NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes); - NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time); - NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME, - parms->base_reachable_time); - NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime); - NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time); - NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time); - NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay); - NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay); - NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime); - + if ((parms->dev && + nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) || + nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) || + nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) || + /* approximative value for deprecated QUEUE_LEN (in packets) */ + nla_put_u32(skb, NDTPA_QUEUE_LEN, + DIV_ROUND_UP(parms->queue_len_bytes, + SKB_TRUESIZE(ETH_FRAME_LEN))) || + nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) || + nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) || + nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) || + nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) || + nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) || + nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, + parms->base_reachable_time) || + nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) || + nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, + parms->delay_probe_time) || + nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) || + nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) || + nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) || + nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime)) + goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: @@ -1815,12 +1815,12 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndtmsg->ndtm_pad1 = 0; ndtmsg->ndtm_pad2 = 0; - NLA_PUT_STRING(skb, NDTA_NAME, tbl->id); - NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval); - NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1); - NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2); - NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3); - + if (nla_put_string(skb, NDTA_NAME, tbl->id) || + nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) || + nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || + nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || + nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) + goto nla_put_failure; { unsigned long now = 
jiffies; unsigned int flush_delta = now - tbl->last_flush; @@ -1841,7 +1841,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); rcu_read_unlock_bh(); - NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc); + if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) + goto nla_put_failure; } { @@ -1866,7 +1867,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndst.ndts_forced_gc_runs += st->forced_gc_runs; } - NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst); + if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst)) + goto nla_put_failure; } BUG_ON(tbl->parms.dev); @@ -2137,7 +2139,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, ndm->ndm_type = neigh->type; ndm->ndm_ifindex = neigh->dev->ifindex; - NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key); + if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) + goto nla_put_failure; read_lock_bh(&neigh->lock); ndm->ndm_state = neigh->nud_state; @@ -2157,8 +2160,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; read_unlock_bh(&neigh->lock); - NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes)); - NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); + if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || + nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); @@ -2187,7 +2191,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, ndm->ndm_ifindex = pn->dev->ifindex; ndm->ndm_state = NUD_NONE; - NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key); + if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) + goto nla_put_failure; return nlmsg_end(skb, nlh); -- cgit v1.2.2 From a6574349d068cb393ae547ee556e682e5fdb2ff3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:12:00 -0400 Subject: rtnetlink: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 67 +++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 32 deletions(-) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a63c6efd2ea..71a1920a23a1 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -608,7 +608,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) for (i = 0; i < RTAX_MAX; i++) { if (metrics[i]) { valid++; - NLA_PUT_U32(skb, i+1, metrics[i]); + if (nla_put_u32(skb, i+1, metrics[i])) + goto nla_put_failure; } } @@ -808,7 +809,8 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) vf_port = nla_nest_start(skb, IFLA_VF_PORT); if (!vf_port) goto nla_put_failure; - NLA_PUT_U32(skb, IFLA_PORT_VF, vf); + if (nla_put_u32(skb, IFLA_PORT_VF, vf)) + goto nla_put_failure; err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); if (err == -EMSGSIZE) goto nla_put_failure; @@ -892,25 +894,22 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, ifm->ifi_flags = dev_get_flags(dev); ifm->ifi_change = change; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); - NLA_PUT_U8(skb, IFLA_OPERSTATE, - netif_running(dev) ? 
dev->operstate : IF_OPER_DOWN); - NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - NLA_PUT_U32(skb, IFLA_GROUP, dev->group); - - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - - if (dev->master) - NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); - - if (dev->qdisc) - NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id); - - if (dev->ifalias) - NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || + nla_put_u8(skb, IFLA_OPERSTATE, + netif_running(dev) ? dev->operstate : IF_OPER_DOWN) || + nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + nla_put_u32(skb, IFLA_GROUP, dev->group) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink)) || + (dev->master && + nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) || + (dev->qdisc && + nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || + (dev->ifalias && + nla_put_string(skb, IFLA_IFALIAS, dev->ifalias))) + goto nla_put_failure; if (1) { struct rtnl_link_ifmap map = { @@ -921,12 +920,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, .dma = dev->dma, .port = dev->if_port, }; - NLA_PUT(skb, IFLA_MAP, sizeof(map), &map); + if (nla_put(skb, IFLA_MAP, sizeof(map), &map)) + goto nla_put_failure; } if (dev->addr_len) { - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); + if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || + nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast)) + goto nla_put_failure; } attr = nla_reserve(skb, IFLA_STATS, @@ -943,8 +944,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, goto nla_put_failure; copy_rtnl_link_stats64(nla_data(attr), stats); - if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) - NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); + if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && + nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) + goto nla_put_failure; if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) { @@ -987,12 +989,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, nla_nest_cancel(skb, vfinfo); goto nla_put_failure; } - NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); - NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); - NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), - &vf_tx_rate); - NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), - &vf_spoofchk); + if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) || + nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) || + nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), + &vf_tx_rate) || + nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), + &vf_spoofchk)) + goto nla_put_failure; nla_nest_end(skb, vf); } nla_nest_end(skb, vfinfo); -- cgit v1.2.2 From b21dddb9dfe50ca1e205faf4b25900895494d25b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:15:14 -0400 Subject: decnet: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/decnet/dn_dev.c | 14 +++++++------- net/decnet/dn_rules.c | 10 +++++----- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 74d321a60e7b..16efcda2f060 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -695,13 +695,13 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; - if (ifa->ifa_address) - NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); - if (ifa->ifa_local) - NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); - if (ifa->ifa_label[0]) - NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); - + if ((ifa->ifa_address && + nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) || + (ifa->ifa_local && + nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) || + (ifa->ifa_label[0] && + nla_put_string(skb, IFA_LABEL, ifa->ifa_label))) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index f65c9ddaee41..7399e3d51922 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -204,11 +204,11 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, frh->src_len = r->src_len; frh->tos = 0; - if (r->dst_len) - NLA_PUT_LE16(skb, FRA_DST, r->dst); - if (r->src_len) - NLA_PUT_LE16(skb, FRA_SRC, r->src); - + if ((r->dst_len && + nla_put_le16(skb, FRA_DST, r->dst)) || + (r->src_len && + nla_put_le16(skb, FRA_SRC, r->src))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From c78679e8f31b86c7a46e77a3096011f911854187 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:27:33 -0400 Subject: ipv6: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 32 +++++++++++++++----------------- net/ipv6/fib6_rules.c | 15 +++++++-------- net/ipv6/ip6mr.c | 9 +++++---- net/ipv6/ndisc.c | 5 +++-- net/ipv6/route.c | 38 +++++++++++++++++++++++--------------- 5 files changed, 53 insertions(+), 46 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6a3bb6077e19..153060f946e0 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3989,14 +3989,14 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev) struct nlattr *nla; struct ifla_cacheinfo ci; - NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); - + if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags)) + goto nla_put_failure; ci.max_reasm_len = IPV6_MAXPLEN; ci.tstamp = cstamp_delta(idev->tstamp); ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time); - NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); if (nla == NULL) goto nla_put_failure; @@ -4061,15 +4061,13 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - - if (dev->addr_len) - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + (dev->addr_len && + nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink))) + goto nla_put_failure; protoinfo = nla_nest_start(skb, IFLA_PROTINFO); if (protoinfo == NULL) goto nla_put_failure; @@ -4182,12 +4180,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, if (pinfo->autoconf) pmsg->prefix_flags |= IF_PREFIX_AUTOCONF; - NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix); - + if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix)) + goto nla_put_failure; ci.preferred_time = ntohl(pinfo->prefered); ci.valid_time = ntohl(pinfo->valid); - NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b6c573152067..01a2de1f5e87 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -215,14 +215,13 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, frh->src_len = rule6->src.plen; frh->tos = rule6->tclass; - if (rule6->dst.plen) - NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr), - &rule6->dst.addr); - - if (rule6->src.plen) - NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), - &rule6->src.addr); - + if ((rule6->dst.plen && + nla_put(skb, FRA_DST, sizeof(struct in6_addr), + &rule6->dst.addr)) || + (rule6->src.plen && + nla_put(skb, FRA_SRC, sizeof(struct in6_addr), + &rule6->src.addr))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 5aa3981a3922..ff6ddf93f269 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2216,14 +2216,15 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, rtm->rtm_src_len = 
128; rtm->rtm_tos = 0; rtm->rtm_table = mrt->id; - NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + if (nla_put_u32(skb, RTA_TABLE, mrt->id)) + goto nla_put_failure; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = RTPROT_UNSPEC; rtm->rtm_flags = 0; - NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); - NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); - + if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) || + nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp)) + goto nla_put_failure; if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) goto nla_put_failure; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3dcdb81ec3e8..1d6fb0c94da1 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1099,8 +1099,9 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3); - NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), - &ipv6_hdr(ra)->saddr); + if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), + &ipv6_hdr(ra)->saddr)) + goto nla_put_failure; nlmsg_end(skb, nlh); rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3992e26a6039..4d70c06f0436 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2413,7 +2413,8 @@ static int rt6_fill_node(struct net *net, else table = RT6_TABLE_UNSPEC; rtm->rtm_table = table; - NLA_PUT_U32(skb, RTA_TABLE, table); + if (nla_put_u32(skb, RTA_TABLE, table)) + goto nla_put_failure; if (rt->rt6i_flags & RTF_REJECT) rtm->rtm_type = RTN_UNREACHABLE; else if (rt->rt6i_flags & RTF_LOCAL) @@ -2436,16 +2437,20 @@ static int rt6_fill_node(struct net *net, rtm->rtm_flags |= RTM_F_CLONED; if (dst) { - NLA_PUT(skb, RTA_DST, 16, dst); + if (nla_put(skb, RTA_DST, 16, dst)) + goto nla_put_failure; rtm->rtm_dst_len = 128; } else if (rtm->rtm_dst_len) - NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr); + if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr)) + goto nla_put_failure; #ifdef CONFIG_IPV6_SUBTREES if (src) { - NLA_PUT(skb, RTA_SRC, 16, src); + if (nla_put(skb, RTA_SRC, 16, src)) + goto nla_put_failure; rtm->rtm_src_len = 128; - } else if (rtm->rtm_src_len) - NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr); + } else if (rtm->rtm_src_len && + nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr)) + goto nla_put_failure; #endif if (iif) { #ifdef CONFIG_IPV6_MROUTE @@ -2463,17 +2468,20 @@ static int rt6_fill_node(struct net *net, } } else #endif - NLA_PUT_U32(skb, RTA_IIF, iif); + if (nla_put_u32(skb, RTA_IIF, iif)) + goto nla_put_failure; } else if (dst) { struct in6_addr saddr_buf; - if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0) - NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); + if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && + nla_put(skb, RTA_PREFSRC, 16, &saddr_buf)) + goto nla_put_failure; } if (rt->rt6i_prefsrc.plen) { struct in6_addr saddr_buf; saddr_buf = rt->rt6i_prefsrc.addr; - NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); + if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf)) + goto nla_put_failure; } if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) @@ -2489,11 +2497,11 @@ static int rt6_fill_node(struct net *net, } rcu_read_unlock(); - if (rt->dst.dev) - NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); - - NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); - + if (rt->dst.dev && + nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) + goto nla_put_failure; + if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) + goto nla_put_failure; if (!(rt->rt6i_flags & RTF_EXPIRES)) expires = 0; else if (rt->dst.expires - jiffies < INT_MAX) -- 
cgit v1.2.2 From e549a6b3a5acff66f0427091e44f814943a26a86 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:28:52 -0400 Subject: netfilter: ipv6: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 9 +++++---- net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 12 ++++++------ 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 4111050a9fc5..fe925e492520 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -278,10 +278,11 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { static int ipv6_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, - &tuple->src.u3.ip6); - NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, - &tuple->dst.u3.ip6); + if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, + &tuple->src.u3.ip6) || + nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, + &tuple->dst.u3.ip6)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 92cc9f2931ae..3e81904fbbcd 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -234,10 +234,10 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, static int icmpv6_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { - NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id); - NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type); - NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code); - + if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) || + nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) || + nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code)) + goto nla_put_failure; return 0; nla_put_failure: @@ -300,8 +300,8 @@ icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)); - + if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From f3756b79e8f76cb92830383c215deba146fe0a26 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:39:02 -0400 Subject: ipv4: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/ipv4/devinet.c | 20 +++++++++----------- net/ipv4/fib_rules.c | 16 ++++++++-------- net/ipv4/fib_semantics.c | 47 ++++++++++++++++++++++++++--------------------- net/ipv4/ip_gre.c | 23 ++++++++++++----------- net/ipv4/ipmr.c | 9 +++++---- net/ipv4/route.c | 45 +++++++++++++++++++++++++++------------------ 6 files changed, 87 insertions(+), 73 deletions(-) (limited to 'net') diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index d4fad5c77447..3ffaad0ef98f 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1267,17 +1267,15 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; - if (ifa->ifa_address) - NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address); - - if (ifa->ifa_local) - NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local); - - if (ifa->ifa_broadcast) - NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast); - - if (ifa->ifa_label[0]) - NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); + if ((ifa->ifa_address && + nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) || + (ifa->ifa_local && + nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) || + (ifa->ifa_broadcast && + nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) || + (ifa->ifa_label[0] && + nla_put_string(skb, IFA_LABEL, ifa->ifa_label))) + goto nla_put_failure; return nlmsg_end(skb, nlh); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 799fc790b3cf..2d043f71ef70 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -221,15 +221,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, frh->src_len = rule4->src_len; frh->tos = rule4->tos; - if (rule4->dst_len) - NLA_PUT_BE32(skb, FRA_DST, rule4->dst); - - if (rule4->src_len) - NLA_PUT_BE32(skb, FRA_SRC, rule4->src); - + if ((rule4->dst_len && + nla_put_be32(skb, FRA_DST, rule4->dst)) || + (rule4->src_len && + nla_put_be32(skb, FRA_SRC, rule4->src))) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (rule4->tclassid) - NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid); + if (rule4->tclassid && + nla_put_u32(skb, FRA_FLOW, rule4->tclassid)) + goto nla_put_failure; #endif return 0; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index a8c5c1d6715b..63aa48acc98a 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -932,33 +932,36 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, rtm->rtm_table = tb_id; else rtm->rtm_table = RT_TABLE_COMPAT; - NLA_PUT_U32(skb, RTA_TABLE, tb_id); + if (nla_put_u32(skb, RTA_TABLE, tb_id)) + goto nla_put_failure; rtm->rtm_type = type; rtm->rtm_flags = fi->fib_flags; rtm->rtm_scope = fi->fib_scope; rtm->rtm_protocol = fi->fib_protocol; - if (rtm->rtm_dst_len) - NLA_PUT_BE32(skb, RTA_DST, dst); - - if (fi->fib_priority) - NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority); - + if (rtm->rtm_dst_len && + nla_put_be32(skb, RTA_DST, dst)) + goto nla_put_failure; + if (fi->fib_priority && + nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) + goto nla_put_failure; if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) goto nla_put_failure; - if (fi->fib_prefsrc) - NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc); - + if (fi->fib_prefsrc && + nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc)) + goto nla_put_failure; if (fi->fib_nhs == 1) { - if (fi->fib_nh->nh_gw) - NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw); - - if (fi->fib_nh->nh_oif) - NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif); + if (fi->fib_nh->nh_gw && + nla_put_be32(skb, RTA_GATEWAY, 
fi->fib_nh->nh_gw)) + goto nla_put_failure; + if (fi->fib_nh->nh_oif && + nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif)) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (fi->fib_nh[0].nh_tclassid) - NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid); + if (fi->fib_nh[0].nh_tclassid && + nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid)) + goto nla_put_failure; #endif } #ifdef CONFIG_IP_ROUTE_MULTIPATH @@ -979,11 +982,13 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, rtnh->rtnh_hops = nh->nh_weight - 1; rtnh->rtnh_ifindex = nh->nh_oif; - if (nh->nh_gw) - NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw); + if (nh->nh_gw && + nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw)) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (nh->nh_tclassid) - NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid); + if (nh->nh_tclassid && + nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) + goto nla_put_failure; #endif /* length of rtnetlink header + attributes */ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index b57532d4742c..02d07c6f630f 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1654,17 +1654,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm *p = &t->parms; - NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link); - NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags); - NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags); - NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key); - NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key); - NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr); - NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr); - NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl); - NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos); - NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF))); - + if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || + nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) || + nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || + nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || + nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || + nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) || + nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) || + nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) || + nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) || + nla_put_u8(skb, IFLA_GRE_PMTUDISC, + !!(p->iph.frag_off & htons(IP_DF)))) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 0518a4fb177b..dcf4d7fe3917 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2120,15 +2120,16 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, rtm->rtm_src_len = 32; rtm->rtm_tos = 0; rtm->rtm_table = mrt->id; - NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + if (nla_put_u32(skb, RTA_TABLE, mrt->id)) + goto nla_put_failure; rtm->rtm_type = RTN_MULTICAST; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = RTPROT_UNSPEC; rtm->rtm_flags = 0; - NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); - NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); - + if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) || + nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp)) + goto nla_put_failure; if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) goto nla_put_failure; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3b110a46362c..e5647b4e51c6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2973,7 +2973,8 @@ static int rt_fill_info(struct net *net, r->rtm_src_len = 0; r->rtm_tos = rt->rt_key_tos; r->rtm_table = RT_TABLE_MAIN; - NLA_PUT_U32(skb, RTA_TABLE, 
RT_TABLE_MAIN); + if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN)) + goto nla_put_failure; r->rtm_type = rt->rt_type; r->rtm_scope = RT_SCOPE_UNIVERSE; r->rtm_protocol = RTPROT_UNSPEC; @@ -2981,31 +2982,38 @@ static int rt_fill_info(struct net *net, if (rt->rt_flags & RTCF_NOTIFY) r->rtm_flags |= RTM_F_NOTIFY; - NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst); - + if (nla_put_be32(skb, RTA_DST, rt->rt_dst)) + goto nla_put_failure; if (rt->rt_key_src) { r->rtm_src_len = 32; - NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src); + if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src)) + goto nla_put_failure; } - if (rt->dst.dev) - NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); + if (rt->dst.dev && + nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) + goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID - if (rt->dst.tclassid) - NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); + if (rt->dst.tclassid && + nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) + goto nla_put_failure; #endif - if (rt_is_input_route(rt)) - NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); - else if (rt->rt_src != rt->rt_key_src) - NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); - - if (rt->rt_dst != rt->rt_gateway) - NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); + if (rt_is_input_route(rt)) { + if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst)) + goto nla_put_failure; + } else if (rt->rt_src != rt->rt_key_src) { + if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src)) + goto nla_put_failure; + } + if (rt->rt_dst != rt->rt_gateway && + nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) + goto nla_put_failure; if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) goto nla_put_failure; - if (rt->rt_mark) - NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); + if (rt->rt_mark && + nla_put_be32(skb, RTA_MARK, rt->rt_mark)) + goto nla_put_failure; error = rt->dst.error; if (peer) { @@ -3046,7 +3054,8 @@ static int rt_fill_info(struct net *net, } } else #endif - NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif); + if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) + goto nla_put_failure; } if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, -- cgit v1.2.2 From d317e4f68f5445e536dbd45381e7c900a33413db Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:40:20 -0400 Subject: netfilter: ipv4: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 5 +++-- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 12 ++++++------ 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index de9da21113a1..5c45e30cf788 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -303,8 +303,9 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) static int ipv4_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip); - NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip); + if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) || + nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip)) + goto nla_put_failure; return 0; nla_put_failure: diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 7cbe9cb261c2..0847e373d33c 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -228,10 +228,10 @@ icmp_error(struct net *net, struct nf_conn *tmpl, static int icmp_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { - NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id); - NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type); - NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code); - + if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) || + nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) || + nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code)) + goto nla_put_failure; return 0; nla_put_failure: @@ -293,8 +293,8 @@ icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; - NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)); - + if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ))) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From be51da0f3e346eb520c4ffdaecb8ba6fb4337a76 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:45:25 -0400 Subject: ieee802154: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/ieee802154/nl-mac.c | 146 +++++++++++++++++++++++------------------------- net/ieee802154/nl-phy.c | 29 +++++----- 2 files changed, 85 insertions(+), 90 deletions(-) (limited to 'net') diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index adaf46214905..ca92587720f4 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -63,15 +63,14 @@ int ieee802154_nl_assoc_indic(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, - addr->hwaddr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap); + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, + addr->hwaddr) || + nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); @@ -92,14 +91,13 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -119,20 +117,22 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - if (addr->addr_type == IEEE802154_ADDR_LONG) - NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, - addr->hwaddr); - else - NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, - addr->short_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr)) + goto nla_put_failure; + if (addr->addr_type == IEEE802154_ADDR_LONG) { + if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, + addr->hwaddr)) + goto nla_put_failure; + } else { + if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, + addr->short_addr)) + goto nla_put_failure; + } + if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -151,13 +151,12 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, 
dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -177,13 +176,13 @@ int ieee802154_nl_beacon_indic(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); - NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) || + nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -204,19 +203,17 @@ int ieee802154_nl_scan_confirm(struct net_device *dev, if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); - NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); - NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); - - if (edl) - NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) || + nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) || + nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) || + nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) || + (edl && + nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl))) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -235,13 +232,12 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) if (!msg) return -ENOBUFS; - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - - NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); - + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u8(msg, IEEE802154_ATTR_STATUS, status)) + goto nla_put_failure; return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: @@ -266,16 +262,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, phy = ieee802154_mlme_ops(dev)->get_phy(dev); BUG_ON(!phy); - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, 
wpan_phy_name(phy)); - NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); - - NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, - dev->dev_addr); - NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, - ieee802154_mlme_ops(dev)->get_short_addr(dev)); - NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, - ieee802154_mlme_ops(dev)->get_pan_id(dev)); + if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || + nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || + nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, + dev->dev_addr) || + nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, + ieee802154_mlme_ops(dev)->get_short_addr(dev)) || + nla_put_u16(msg, IEEE802154_ATTR_PAN_ID, + ieee802154_mlme_ops(dev)->get_pan_id(dev))) + goto nla_put_failure; wpan_phy_put(phy); return genlmsg_end(msg, hdr); diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index c64a38d57aa3..3bdc4303c339 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -53,18 +53,18 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid, goto out; mutex_lock(&phy->pib_lock); - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - - NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page); - NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel); + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) || + nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel)) + goto nla_put_failure; for (i = 0; i < 32; i++) { if (phy->channels_supported[i]) buf[pages++] = phy->channels_supported[i] | (i << 27); } - if (pages) - NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, - pages * sizeof(uint32_t), buf); - + if (pages && + nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, + pages * sizeof(uint32_t), buf)) + goto nla_put_failure; mutex_unlock(&phy->pib_lock); kfree(buf); return genlmsg_end(msg, hdr); @@ -245,9 +245,9 @@ static int ieee802154_add_iface(struct sk_buff *skb, goto dev_unregister; } - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); - + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) + goto nla_put_failure; dev_put(dev); wpan_phy_put(phy); @@ -333,10 +333,9 @@ static int ieee802154_del_iface(struct sk_buff *skb, rtnl_unlock(); - - NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); - NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name); - + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || + nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name)) + goto nla_put_failure; wpan_phy_put(phy); return ieee802154_nl_reply(msg, info); -- cgit v1.2.2 From 0e3cea7b3cdb2344919e96b3af33de64736ae3a3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:47:01 -0400 Subject: fib_rules: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/core/fib_rules.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index c02e63c908da..72cceb79d0d4 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -542,7 +542,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh = nlmsg_data(nlh); frh->family = ops->family; frh->table = rule->table; - NLA_PUT_U32(skb, FRA_TABLE, rule->table); + if (nla_put_u32(skb, FRA_TABLE, rule->table)) + goto nla_put_failure; frh->res1 = 0; frh->res2 = 0; frh->action = rule->action; @@ -553,31 +554,28 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh->flags |= FIB_RULE_UNRESOLVED; if (rule->iifname[0]) { - NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname); - + if (nla_put_string(skb, FRA_IIFNAME, rule->iifname)) + goto nla_put_failure; if (rule->iifindex == -1) frh->flags |= FIB_RULE_IIF_DETACHED; } if (rule->oifname[0]) { - NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname); - + if (nla_put_string(skb, FRA_OIFNAME, rule->oifname)) + goto nla_put_failure; if (rule->oifindex == -1) frh->flags |= FIB_RULE_OIF_DETACHED; } - if (rule->pref) - NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); - - if (rule->mark) - NLA_PUT_U32(skb, FRA_FWMARK, rule->mark); - - if (rule->mark_mask || rule->mark) - NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask); - - if (rule->target) - NLA_PUT_U32(skb, FRA_GOTO, rule->target); - + if ((rule->pref && + nla_put_u32(skb, FRA_PRIORITY, rule->pref)) || + (rule->mark && + nla_put_u32(skb, FRA_FWMARK, rule->mark)) || + ((rule->mark_mask || rule->mark) && + nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) || + (rule->target && + nla_put_u32(skb, FRA_GOTO, rule->target))) + goto nla_put_failure; if (ops->fill(rule, skb, frh) < 0) goto nla_put_failure; -- cgit v1.2.2 From 14ad6647f3c919b4ee987e2058fbc6c10f29bb53 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:47:35 -0400 Subject: gen_stats: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/core/gen_stats.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 0452eb27a272..ddedf211e588 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -27,7 +27,8 @@ static inline int gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) { - NLA_PUT(d->skb, type, size, buf); + if (nla_put(d->skb, type, size, buf)) + goto nla_put_failure; return 0; nla_put_failure: -- cgit v1.2.2 From 2809cec5b52b2d02fa0d7dab6e146abfc93c5b86 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:48:13 -0400 Subject: caif: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/caif/chnl_net.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 20618dd3088b..93e9c6dc9ddf 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -421,14 +421,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev) struct chnl_net *priv; u8 loop; priv = netdev_priv(dev); - NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, - priv->conn_req.sockaddr.u.dgm.connection_id); - NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, - priv->conn_req.sockaddr.u.dgm.connection_id); + if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID, + priv->conn_req.sockaddr.u.dgm.connection_id) || + nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID, + priv->conn_req.sockaddr.u.dgm.connection_id)) + goto nla_put_failure; loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; - NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); - - + if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop)) + goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; -- cgit v1.2.2 From 2eb812e6501597e8b4bc4fdef2db8158c1a3afb6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:49:54 -0400 Subject: bridge: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. Miller --- net/bridge/br_fdb.c | 8 ++++---- net/bridge/br_netlink.c | 25 +++++++++++-------------- 2 files changed, 15 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5ba0c844d508..80dbce4974ce 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, ndm->ndm_ifindex = fdb->dst ? 
fdb->dst->dev->ifindex : br->dev->ifindex; ndm->ndm_state = fdb_to_nud(fdb); - NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr); - + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr)) + goto nla_put_failure; ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ci.ndm_confirmed = 0; ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); ci.ndm_refcnt = 0; - NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); - + if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index a1daf8227ed1..346b368d8698 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; - NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); - NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex); - NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); - NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate); - - if (dev->addr_len) - NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); - - if (dev->ifindex != dev->iflink) - NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); - - if (event == RTM_NEWLINK) - NLA_PUT_U8(skb, IFLA_PROTINFO, port->state); - + if (nla_put_string(skb, IFLA_IFNAME, dev->name) || + nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) || + nla_put_u32(skb, IFLA_MTU, dev->mtu) || + nla_put_u8(skb, IFLA_OPERSTATE, operstate) || + (dev->addr_len && + nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || + (dev->ifindex != dev->iflink && + nla_put_u32(skb, IFLA_LINK, dev->iflink)) || + (event == RTM_NEWLINK && + nla_put_u8(skb, IFLA_PROTINFO, port->state))) + goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: -- cgit v1.2.2 From 61849dda1dd4ee286521fdfde388dcfc1d891bad Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 1 Apr 2012 20:50:45 -0400 Subject: vlan: Stop using NLA_PUT*(). These macros contain a hidden goto, and are thus extremely error prone and make code hard to audit. Signed-off-by: David S. 
Miller --- net/8021q/vlan_netlink.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 50711368ad6a..708c80ea1874 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) struct nlattr *nest; unsigned int i; - NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id); + if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id)) + goto nla_put_failure; if (vlan->flags) { f.flags = vlan->flags; f.mask = ~0; - NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); + if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f)) + goto nla_put_failure; } if (vlan->nr_ingress_mappings) { nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); @@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) m.from = i; m.to = vlan->ingress_priority_map[i]; - NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, - sizeof(m), &m); + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; } nla_nest_end(skb, nest); } @@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) m.from = pm->priority; m.to = (pm->vlan_qos >> 13) & 0x7; - NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, - sizeof(m), &m); + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; } } nla_nest_end(skb, nest); -- cgit v1.2.2 From 3e866703cbf8a832cd857426c5a9989b16037138 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:01 +0000 Subject: net/ipv6/sit.c: Checkpatch cleanup sit.c:118: ERROR: "foo * bar" should be "foo *bar" sit.c:694: ERROR: "(foo*)" should be "(foo *)" sit.c:724: ERROR: "(foo*)" should be "(foo *)" Signed-off-by: Eldad Zack Signed-off-by: David S. Miller --- net/ipv6/sit.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index c4ffd1743528..f9608db9dcfb 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -115,7 +115,7 @@ static struct net_device_stats *ipip6_get_stats(struct net_device *dev) /* * Must be invoked with rcu_read_lock */ -static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, +static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, struct net_device *dev, __be32 remote, __be32 local) { unsigned int h0 = HASH(remote); @@ -691,7 +691,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } - addr6 = (const struct in6_addr*)&neigh->primary_key; + addr6 = (const struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if ((addr_type & IPV6_ADDR_UNICAST) && @@ -721,7 +721,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } - addr6 = (const struct in6_addr*)&neigh->primary_key; + addr6 = (const struct in6_addr *)&neigh->primary_key; addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { -- cgit v1.2.2 From d94d34a0664b296fd1593a96ccc9c97a94dfd43d Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:02 +0000 Subject: net/ipv6/addrconf_core.c: Checkpatch cleanup addrconf_core.c:13: ERROR: space required before the open parenthesis '(' Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/ipv6/addrconf_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 399287e595d7..7981bde57575 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -10,7 +10,7 @@ static inline unsigned ipv6_addr_scope2type(unsigned scope) { - switch(scope) { + switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | IPV6_ADDR_LOOPBACK); -- cgit v1.2.2 From b5a4257cef5e8d6efb2ac9322b00377279ffd234 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:03 +0000 Subject: net/ipv6/datagram.c: Checkpatch cleanups datagram.c:101: ERROR: "(foo*)" should be "(foo *)" datagram.c:521: ERROR: space required before the open parenthesis '(' datagram.c:830: WARNING: braces {} are not necessary for single statement blocks datagram.c:849: WARNING: braces {} are not necessary for single statement blocks Signed-off-by: Eldad Zack Signed-off-by: David S. Miller --- net/ipv6/datagram.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 76832c8dc89d..f6210d6fd7d8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -98,7 +98,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sin.sin_port = usin->sin6_port; err = ip4_datagram_connect(sk, - (struct sockaddr*) &sin, + (struct sockaddr *) &sin, sizeof(sin)); ipv4_connected: @@ -518,7 +518,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) unsigned len; u8 *ptr = nh + off; - switch(nexthdr) { + switch (nexthdr) { case IPPROTO_DSTOPTS: nexthdr = ptr[0]; len = (ptr[1] + 1) << 3; @@ -827,9 +827,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk, int tc; err = -EINVAL; - if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) goto exit_f; - } tc = *(int *)CMSG_DATA(cmsg); if (tc < -1 || tc > 0xff) @@ -846,9 +845,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk, int df; err = -EINVAL; - if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) goto exit_f; - } df = *(int *)CMSG_DATA(cmsg); if (df < 0 || df > 1) -- cgit v1.2.2 From ac3c8172ff5efa8078e5380677dff0badf70b729 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:04 +0000 Subject: net/ipv6/exthdrs.c: Checkpatch cleanups exthdrs.c:726: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable exthdrs.c:741: ERROR: "(foo*)" should be "(foo *)" exthdrs.c:741: ERROR: "(foo*)" should be "(foo *)" exthdrs.c:744: ERROR: "(foo**)" should be "(foo **)" exthdrs.c:746: ERROR: "(foo**)" should be "(foo **)" exthdrs.c:748: ERROR: "(foo**)" should be "(foo **)" exthdrs.c:750: ERROR: "(foo**)" should be "(foo **)" exthdrs.c:755: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable exthdrs.c:896: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/ipv6/exthdrs.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 3d641b6e9b09..c486b8e1817f 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -722,7 +722,6 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, if (opt->hopopt) ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); } - EXPORT_SYMBOL(ipv6_push_nfrag_opts); void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) @@ -738,20 +737,19 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); if (opt2) { - long dif = (char*)opt2 - (char*)opt; + long dif = (char *)opt2 - (char *)opt; memcpy(opt2, opt, opt->tot_len); if (opt2->hopopt) - *((char**)&opt2->hopopt) += dif; + *((char **)&opt2->hopopt) += dif; if (opt2->dst0opt) - *((char**)&opt2->dst0opt) += dif; + *((char **)&opt2->dst0opt) += dif; if (opt2->dst1opt) - *((char**)&opt2->dst1opt) += dif; + *((char **)&opt2->dst1opt) += dif; if (opt2->srcrt) - *((char**)&opt2->srcrt) += dif; + *((char **)&opt2->srcrt) += dif; } return opt2; } - EXPORT_SYMBOL_GPL(ipv6_dup_options); static int ipv6_renew_option(void *ohdr, @@ -892,5 +890,4 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; return orig; } - EXPORT_SYMBOL_GPL(fl6_update_dst); -- cgit v1.2.2 From 81bd60b515e5e71ecdb5176be51361dfd6830b00 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:05 +0000 Subject: net/ipv6/exthdrs_core.c: Checkpatch cleanups exthdrs_core.c:113: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable exthdrs_core.c:114: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable Signed-off-by: Eldad Zack Signed-off-by: David S. Miller --- net/ipv6/exthdrs_core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 72957f4a7c6c..7b1a884634d5 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -21,6 +21,7 @@ int ipv6_ext_hdr(u8 nexthdr) (nexthdr == NEXTHDR_NONE) || (nexthdr == NEXTHDR_DEST); } +EXPORT_SYMBOL(ipv6_ext_hdr); /* * Skip any extension headers. This is used by the ICMP module. @@ -109,6 +110,4 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, *nexthdrp = nexthdr; return start; } - -EXPORT_SYMBOL(ipv6_ext_hdr); EXPORT_SYMBOL(ipv6_skip_exthdr); -- cgit v1.2.2 From 911c8541efc78014b29d44510be4d6d4c9d539da Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:06 +0000 Subject: net/ipv6/fib6_rules.c: Checkpatch cleanup fib6_rules.c:26: ERROR: open brace '{' following struct go on the same line Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/ipv6/fib6_rules.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 01a2de1f5e87..0ff1cfd55bc4 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -22,8 +22,7 @@ #include #include -struct fib6_rule -{ +struct fib6_rule { struct fib_rule common; struct rt6key src; struct rt6key dst; -- cgit v1.2.2 From a2d91a09dac71bf34a9a3e4b8fdda36f02788b52 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:07 +0000 Subject: net/ipv6/icmp.c: Checkpatch cleanups icmp.c:501: ERROR: "(foo*)" should be "(foo *)" icmp.c:582: ERROR: "(foo*)" should be "(foo *)" icmp.c:954: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable Signed-off-by: Eldad Zack Signed-off-by: David S. Miller --- net/ipv6/icmp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index af88934e4d79..6dd59f160da4 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -499,7 +499,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, - np->tclass, NULL, &fl6, (struct rt6_info*)dst, + np->tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); @@ -580,7 +580,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, - (struct rt6_info*)dst, MSG_DONTWAIT, + (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { @@ -951,7 +951,6 @@ int icmpv6_err_convert(u8 type, u8 code, int *err) return fatal; } - EXPORT_SYMBOL(icmpv6_err_convert); #ifdef CONFIG_SYSCTL -- cgit v1.2.2 From 8e5e8f30d0dea1db2c8e2a78802e7e57f796f339 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sun, 1 Apr 2012 07:49:08 +0000 Subject: net/ipv6/addrconf.c: Checkpatch cleanups net/ipv6/addrconf.c:340: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable net/ipv6/addrconf.c:342: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:444: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:1337: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable net/ipv6/addrconf.c:1526: ERROR: "(foo*)" should be "(foo *)" net/ipv6/addrconf.c:1671: ERROR: open brace '{' following function declarations go on the next line net/ipv6/addrconf.c:1914: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:2368: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:2370: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:2416: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:2437: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:2573: ERROR: "foo * bar" should be "foo *bar" net/ipv6/addrconf.c:3797: ERROR: "foo* bar" should be "foo *bar" Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 153060f946e0..fcd230a6235a 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -336,10 +336,9 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) snmp6_free_dev(idev); kfree_rcu(idev, rcu); } - EXPORT_SYMBOL(in6_dev_finish_destroy); -static struct inet6_dev * ipv6_add_dev(struct net_device *dev) +static struct inet6_dev *ipv6_add_dev(struct net_device *dev) { struct inet6_dev *ndev; @@ -441,7 +440,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) return ndev; } -static struct inet6_dev * ipv6_find_idev(struct net_device *dev) +static struct inet6_dev *ipv6_find_idev(struct net_device *dev) { struct inet6_dev *idev; @@ -1333,7 +1332,6 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) rcu_read_unlock(); return onlink; } - EXPORT_SYMBOL(ipv6_chk_prefix); struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, @@ -1523,7 +1521,7 @@ static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) if (dev->addr_len != ARCNET_ALEN) return -1; memset(eui, 0, 7); - eui[7] = *(u8*)dev->dev_addr; + eui[7] = *(u8 *)dev->dev_addr; return 0; } @@ -1668,7 +1666,8 @@ out: in6_dev_put(idev); } -static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) { +static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) +{ int ret = 0; if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) @@ -1911,7 +1910,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) /* Try to figure out our local address for this prefix */ if (pinfo->autoconf && in6_dev->cnf.autoconf) { - struct inet6_ifaddr * ifp; + struct inet6_ifaddr *ifp; struct in6_addr addr; int create = 0, update_lft = 0; @@ -2365,9 +2364,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) } for_each_netdev(net, dev) { - struct in_device * in_dev = __in_dev_get_rtnl(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { - struct in_ifaddr * ifa; + struct in_ifaddr *ifa; int flag = scope; @@ -2413,7 +2412,7 @@ static void init_loopback(struct net_device *dev) static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) { - struct inet6_ifaddr * ifp; + struct inet6_ifaddr *ifp; u32 addr_flags = IFA_F_PERMANENT; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD @@ -2434,7 +2433,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr static void addrconf_dev_config(struct net_device *dev) { struct in6_addr addr; - struct inet6_dev * idev; + struct inet6_dev *idev; ASSERT_RTNL(); @@ -2570,7 +2569,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) } static int addrconf_notify(struct notifier_block *this, unsigned long event, - void * data) + void *data) { struct net_device *dev = (struct net_device *) data; struct inet6_dev *idev = __in6_dev_get(dev); @@ -3794,7 +3793,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) return inet6_dump_addr(skb, cb, type); } -static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, +static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(in_skb->sk); -- cgit v1.2.2 From edbc0bb3fb72ec4645a242520cf1d0b9b6b02261 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Thu, 
29 Mar 2012 12:51:30 +0000 Subject: net: Report dev->promiscuity in netlink reports. The standard ways of probing a device's promiscuity (ifi_flags, for instance) does not report the actual state of the device. This patch adds dev->promiscuity to the netlink netdevice report so that users can know for certain if the device is acting PROMISC or not. Signed-off-by: Ben Greear Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 71a1920a23a1..b76f8fa3fc64 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -784,6 +784,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + nla_total_size(4) /* IFLA_MASTER */ + + nla_total_size(4) /* IFLA_PROMISCUITY */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(ext_filter_mask @@ -901,6 +902,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u32(skb, IFLA_GROUP, dev->group) || + nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || (dev->ifindex != dev->iflink && nla_put_u32(skb, IFLA_LINK, dev->iflink)) || (dev->master && @@ -1117,6 +1119,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_PORT_SELF] = { .type = NLA_NESTED }, [IFLA_AF_SPEC] = { .type = NLA_NESTED }, [IFLA_EXT_MASK] = { .type = NLA_U32 }, + [IFLA_PROMISCUITY] = { .type = NLA_U32 }, }; EXPORT_SYMBOL(ifla_policy); -- cgit v1.2.2 From eb6a24816b247c0be6b2e97e68933072874bbe54 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 3 Apr 2012 05:28:28 +0000 Subject: af_unix: reduce high order page allocations unix_dgram_sendmsg() currently builds linear skbs, and this can stress page allocator with high order page allocations. When memory gets fragmented, this can eventually fail. We can try to use order-2 allocations for skb head (SKB_MAX_ALLOC) plus up to 16 page fragments to lower pressure on buddy allocator. This patch has no effect on messages of less than 16064 bytes. (on 64bit arches with PAGE_SIZE=4096) For bigger messages (from 16065 to 81600 bytes), this patch brings reliability at the expense of performance penalty because of extra pages allocations. netperf -t DG_STREAM -T 0,2 -- -m 16064 -s 200000 ->4086040 Messages / 10s netperf -t DG_STREAM -T 0,2 -- -m 16068 -s 200000 ->3901747 Messages / 10s Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/unix/af_unix.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d510353ef431..eadb9020cd64 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1442,6 +1442,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, long timeo; struct scm_cookie tmp_scm; int max_level; + int data_len = 0; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1475,7 +1476,13 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (len > sk->sk_sndbuf - 32) goto out; - skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); + if (len > SKB_MAX_ALLOC) + data_len = min_t(size_t, + len - SKB_MAX_ALLOC, + MAX_SKB_FRAGS * PAGE_SIZE); + + skb = sock_alloc_send_pskb(sk, len - data_len, data_len, + msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; @@ -1485,8 +1492,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, max_level = err + 1; unix_get_secdata(siocb->scm, skb); - skb_reset_transport_header(skb); - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); + skb_put(skb, len - data_len); + skb->data_len = data_len; + skb->len = len; + err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len); if (err) goto out_free; -- cgit v1.2.2 From 302d663740cfaf2c364df6bb61cd339014ed714c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 31 Mar 2012 11:01:19 +0000 Subject: filter: Allow to create sk-unattached filters Today, BPF filters are bind to sockets. Since BPF machine becomes handy for other purposes, this patch allows to create unattached filter. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/filter.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index 5dea45279215..cfbea889a0eb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -587,6 +587,67 @@ void sk_filter_release_rcu(struct rcu_head *rcu) } EXPORT_SYMBOL(sk_filter_release_rcu); +static int __sk_prepare_filter(struct sk_filter *fp) +{ + int err; + + fp->bpf_func = sk_run_filter; + + err = sk_chk_filter(fp->insns, fp->len); + if (err) + return err; + + bpf_jit_compile(fp); + return 0; +} + +/** + * sk_unattached_filter_create - create an unattached filter + * @fprog: the filter program + * @sk: the socket to use + * + * Create a filter independent ofr any socket. We first run some + * sanity checks on it to make sure it does not explode on us later. + * If an error occurs or there is insufficient memory for the filter + * a negative errno code is returned. On success the return is zero. + */ +int sk_unattached_filter_create(struct sk_filter **pfp, + struct sock_fprog *fprog) +{ + struct sk_filter *fp; + unsigned int fsize = sizeof(struct sock_filter) * fprog->len; + int err; + + /* Make sure new filter is there and in the right amounts. 
*/ + if (fprog->filter == NULL) + return -EINVAL; + + fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); + if (!fp) + return -ENOMEM; + memcpy(fp->insns, fprog->filter, fsize); + + atomic_set(&fp->refcnt, 1); + fp->len = fprog->len; + + err = __sk_prepare_filter(fp); + if (err) + goto free_mem; + + *pfp = fp; + return 0; +free_mem: + kfree(fp); + return err; +} +EXPORT_SYMBOL_GPL(sk_unattached_filter_create); + +void sk_unattached_filter_destroy(struct sk_filter *fp) +{ + sk_filter_release(fp); +} +EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy); + /** * sk_attach_filter - attach a socket filter * @fprog: the filter program @@ -617,16 +678,13 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) atomic_set(&fp->refcnt, 1); fp->len = fprog->len; - fp->bpf_func = sk_run_filter; - err = sk_chk_filter(fp->insns, fp->len); + err = __sk_prepare_filter(fp); if (err) { sk_filter_uncharge(sk, fp); return err; } - bpf_jit_compile(fp); - old_fp = rcu_dereference_protected(sk->sk_filter, sock_owned_by_user(sk)); rcu_assign_pointer(sk->sk_filter, fp); -- cgit v1.2.2 From ffe06c17afbbbd4d73cdc339419be232847d667a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 31 Mar 2012 11:01:20 +0000 Subject: filter: add XOR operation Add XOR instruction fo BPF machine. Needed for computing packet hashes. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/filter.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index cfbea889a0eb..5099c4b4a53f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -315,6 +315,9 @@ load_b: case BPF_S_ANC_CPU: A = raw_smp_processor_id(); continue; + case BPF_S_ANC_ALU_XOR_X: + A ^= X; + continue; case BPF_S_ANC_NLATTR: { struct nlattr *nla; @@ -559,6 +562,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) ANCILLARY(HATYPE); ANCILLARY(RXHASH); ANCILLARY(CPU); + ANCILLARY(ALU_XOR_X); } } ftest->code = code; -- cgit v1.2.2 From 2173bff5dcff4ca02226978f39648b66a8c3ca1e Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Tue, 3 Apr 2012 23:13:00 +0000 Subject: ipv6: Fix 'inet6_rtm_getroute' to release 'rt->dst' in case of 'alloc_skb' failure In 72331bc [ipv6: Fix RTM_GETROUTE's interpretation of RTA_IIF to be consistent with ipv4] the code of 'inet6_rtm_getroute()' was re-ordered such that the reference to 'rt->dst' is incremented prior skb allocation. Hence, if 'alloc_skb()' fails, must drop a reference from 'rt->dst'. Add the missing 'dst_release()' call. Signed-off-by: Shmulik Ladkani Signed-off-by: David S. Miller --- net/ipv6/route.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4d70c06f0436..8c5df6f3a2de 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2606,6 +2606,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { + dst_release(&rt->dst); err = -ENOBUFS; goto errout; } -- cgit v1.2.2 From c8f3a8c31069137fe0100e6920558f1a7487ef3c Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Tue, 3 Apr 2012 22:59:17 +0000 Subject: ethtool: Introduce a method for getting time stamping capabilities. This commit adds a new ethtool ioctl that exposes the SO_TIMESTAMPING capabilities of a network interface. In addition, user space programs can use this ioctl to discover the PTP Hardware Clock (PHC) device associated with the interface. 
Since software receive time stamps are handled by the stack, the generic ethtool code can answer the query correctly in case the MAC or PHY drivers lack special time stamping features. Signed-off-by: Richard Cochran Reviewed-by: Ben Hutchings Signed-off-by: David S. Miller --- net/core/ethtool.c | 48 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 6d6d7d25caaa..a723b1321691 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include @@ -1278,6 +1280,40 @@ out: return ret; } +static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) +{ + int err = 0; + struct ethtool_ts_info info; + const struct ethtool_ops *ops = dev->ethtool_ops; + struct phy_device *phydev = dev->phydev; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GET_TS_INFO; + + if (phydev && phydev->drv && phydev->drv->ts_info) { + + err = phydev->drv->ts_info(phydev, &info); + + } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) { + + err = ops->get_ts_info(dev, &info); + + } else { + info.so_timestamping = + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info.phc_index = -1; + } + + if (err) + return err; + + if (copy_to_user(useraddr, &info, sizeof(info))) + err = -EFAULT; + + return err; +} + /* The main entry point in this file. Called from net/core/dev.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -1295,11 +1331,13 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) return -EFAULT; if (!dev->ethtool_ops) { - /* ETHTOOL_GDRVINFO does not require any driver support. - * It is also unprivileged and does not change anything, - * so we can take a shortcut to it. */ + /* A few commands do not require any driver support, + * are unprivileged, and do not change anything, so we + * can take a shortcut to them. */ if (ethcmd == ETHTOOL_GDRVINFO) return ethtool_get_drvinfo(dev, useraddr); + else if (ethcmd == ETHTOOL_GET_TS_INFO) + return ethtool_get_ts_info(dev, useraddr); else return -EOPNOTSUPP; } @@ -1330,6 +1368,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: case ETHTOOL_GFEATURES: + case ETHTOOL_GET_TS_INFO: break; default: if (!capable(CAP_NET_ADMIN)) @@ -1496,6 +1535,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GET_DUMP_DATA: rc = ethtool_get_dump_data(dev, useraddr); break; + case ETHTOOL_GET_TS_INFO: + rc = ethtool_get_ts_info(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } -- cgit v1.2.2 From 02eacbd0c405d378d2357e8e0fac5de981bd40f8 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Tue, 3 Apr 2012 22:59:22 +0000 Subject: ethtool: Add a common function for drivers with transmit time stamping. Currently, most drivers do not support transmit SO_TIMESTAMPING. For those that do support it, there is one appropriate response to the get_ts_info query. This patch adds a common function providing this response. Signed-off-by: Richard Cochran Reviewed-by: Ben Hutchings Signed-off-by: David S. 
Miller --- net/core/ethtool.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'net') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index a723b1321691..beacdd93cd8f 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -38,6 +38,17 @@ u32 ethtool_op_get_link(struct net_device *dev) } EXPORT_SYMBOL(ethtool_op_get_link); +int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; +} +EXPORT_SYMBOL(ethtool_op_get_ts_info); + /* Handlers for each ethtool command */ #define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) -- cgit v1.2.2 From d4a968658ce7417d78638387343b6197839111af Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Wed, 4 Apr 2012 21:33:28 +0000 Subject: net/route: export symbol ip_tos2prio Need to export this to enable drivers use rt_tos2priority() Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e5647b4e51c6..634aa824b17e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -230,7 +230,7 @@ const __u8 ip_tos2prio[16] = { TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK) }; - +EXPORT_SYMBOL(ip_tos2prio); /* * Route cache. -- cgit v1.2.2 From 08f10affe45051e18e0d8291c0a53aecef1b8a14 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Wed, 4 Apr 2012 21:33:30 +0000 Subject: net/dcb: Add an optional max rate attribute Although not specified in 8021Qaz spec, it could be useful to enable drivers whose HW supports setting a rate limit for an ETS TC. This patch adds this optional attribute to DCB netlink. To use it, drivers should implement and register the callbacks ieee_setmaxrate and ieee_getmaxrate. The units are 64 bits long and specified in Kbps to enable usage over both slow and very fast networks. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- net/dcb/dcbnl.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'net') diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 36f37af63bf2..8dfa1da7c40d 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = { [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, + [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)}, }; static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { @@ -1246,6 +1247,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) goto nla_put_failure; } + if (ops->ieee_getmaxrate) { + struct ieee_maxrate maxrate; + err = ops->ieee_getmaxrate(netdev, &maxrate); + if (!err) { + err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, + sizeof(maxrate), &maxrate); + if (err) + goto nla_put_failure; + } + } + if (ops->ieee_getpfc) { struct ieee_pfc pfc; err = ops->ieee_getpfc(netdev, &pfc); @@ -1601,6 +1613,14 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, goto err; } + if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { + struct ieee_maxrate *maxrate = + nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); + err = ops->ieee_setmaxrate(netdev, maxrate); + if (err) + goto err; + } + if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); -- cgit v1.2.2 From 51c56b004e2c9a46207bb8a116589c2f84b92e5d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 5 Apr 2012 11:35:15 +0200 Subject: net: remove k{un}map_skb_frag() Since commit 3e4d3af501 (mm: stack based kmap_atomic()) we dont have to disable BH anymore while mapping skb frags. We can remove kmap_skb_frag() / kunmap_skb_frag() helpers and use kmap_atomic() / kunmap_atomic() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/appletalk/ddp.c | 6 +++--- net/core/kmap_skb.h | 19 ------------------- net/core/skbuff.c | 42 +++++++++++++++++++++--------------------- 3 files changed, 24 insertions(+), 43 deletions(-) delete mode 100644 net/core/kmap_skb.h (limited to 'net') diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index bfa9ab93eda5..0301b328cf0f 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -63,7 +63,7 @@ #include #include #include -#include "../core/kmap_skb.h" +#include struct datalink_proto *ddp_dl, *aarp_dl; static const struct proto_ops atalk_dgram_ops; @@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); sum = atalk_sum_partial(vaddr + frag->page_offset + offset - start, copy, sum); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); if (!(len -= copy)) return sum; diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h deleted file mode 100644 index 52d0a4459041..000000000000 --- a/net/core/kmap_skb.h +++ /dev/null @@ -1,19 +0,0 @@ -#include - -static inline void *kmap_skb_frag(const skb_frag_t *frag) -{ -#ifdef CONFIG_HIGHMEM - BUG_ON(in_irq()); - - local_bh_disable(); -#endif - return kmap_atomic(skb_frag_page(frag)); -} - -static inline void kunmap_skb_frag(void *vaddr) -{ - kunmap_atomic(vaddr); -#ifdef CONFIG_HIGHMEM - local_bh_enable(); -#endif -} diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a690cae91cdd..b2595adb605f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -68,8 +68,7 @@ #include #include #include - -#include "kmap_skb.h" +#include static struct kmem_cache *skbuff_head_cache __read_mostly; static struct kmem_cache *skbuff_fclone_cache __read_mostly; @@ -708,10 +707,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) } return -ENOMEM; } - vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); + vaddr = kmap_atomic(skb_frag_page(f)); memcpy(page_address(page), vaddr + f->page_offset, skb_frag_size(f)); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); page->private = (unsigned long)head; head = page; } @@ -1486,21 +1485,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); - end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); + end = start + skb_frag_size(f); if ((copy = end - offset) > 0) { u8 *vaddr; if (copy > len) copy = len; - vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); + vaddr = kmap_atomic(skb_frag_page(f)); memcpy(to, - vaddr + skb_shinfo(skb)->frags[i].page_offset+ - offset - start, copy); - kunmap_skb_frag(vaddr); + vaddr + f->page_offset + offset - start, + copy); + kunmap_atomic(vaddr); if ((len -= copy) == 0) return 0; @@ -1805,10 +1805,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); memcpy(vaddr + frag->page_offset + offset - start, from, copy); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); if ((len -= copy) == 0) return 0; @@ -1868,21 +1868,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); - end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); + end = start + skb_frag_size(frag); if ((copy = end - 
offset) > 0) { __wsum csum2; u8 *vaddr; - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); csum2 = csum_partial(vaddr + frag->page_offset + offset - start, copy, 0); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; @@ -1954,12 +1954,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, if (copy > len) copy = len; - vaddr = kmap_skb_frag(frag); + vaddr = kmap_atomic(skb_frag_page(frag)); csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset + offset - start, to, copy, 0); - kunmap_skb_frag(vaddr); + kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; @@ -2479,7 +2479,7 @@ next_skb: if (abs_offset < block_limit) { if (!st->frag_data) - st->frag_data = kmap_skb_frag(frag); + st->frag_data = kmap_atomic(skb_frag_page(frag)); *data = (u8 *) st->frag_data + frag->page_offset + (abs_offset - st->stepped_offset); @@ -2488,7 +2488,7 @@ next_skb: } if (st->frag_data) { - kunmap_skb_frag(st->frag_data); + kunmap_atomic(st->frag_data); st->frag_data = NULL; } @@ -2497,7 +2497,7 @@ next_skb: } if (st->frag_data) { - kunmap_skb_frag(st->frag_data); + kunmap_atomic(st->frag_data); st->frag_data = NULL; } @@ -2525,7 +2525,7 @@ EXPORT_SYMBOL(skb_seq_read); void skb_abort_seq_read(struct skb_seq_state *st) { if (st->frag_data) - kunmap_skb_frag(st->frag_data); + kunmap_atomic(st->frag_data); } EXPORT_SYMBOL(skb_abort_seq_read); -- cgit v1.2.2 From ce713ee5a10f3a171df94b0d501034aab2388c16 Mon Sep 17 00:00:00 2001 From: "RongQing.Li" Date: Thu, 5 Apr 2012 17:36:29 +0800 Subject: net: replace continue with break to reduce unnecessary loop in xxx_xmarksources The conditional which decides to skip inactive filters does not change with the change of loop index, so it is unnecessary to check them many times. Signed-off-by: RongQing.Li Signed-off-by: David S. Miller --- net/ipv4/igmp.c | 2 +- net/ipv6/mcast.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 450e5d21ed2a..e9b90a8ca55a 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -775,7 +775,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs) if (psf->sf_count[MCAST_INCLUDE] || pmc->sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) - continue; + break; if (srcs[i] == psf->sf_inaddr) { scount++; break; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 16c33e308121..6264d8fd2a2d 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1061,7 +1061,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, if (psf->sf_count[MCAST_INCLUDE] || pmc->mca_sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) - continue; + break; if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { scount++; break; -- cgit v1.2.2 From 6f0756a38fc4cf016070f218bf78930b4c1f9a0f Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 15 Mar 2012 05:50:36 +0530 Subject: mac80211: do not send pspoll when powersave is disabled There might be latency at AP side to update TIM IE which could cause the station to send pspoll frame even after the wakeup. If the powersave is disabled, the nullfunc notification alone is sufficient to receive frames from the AP. And if the pspoll frame was already sent, no need to resend the frame till it was acked by AP. 
Cc: Jouni Malinen Cc: Kalle Valo Signed-off-by: Rajkumar Manoharan Signed-off-by: John W. Linville --- net/mac80211/mlme.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 576fb25456dd..48d0ee3f15dc 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2472,7 +2472,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); ieee80211_send_nullfunc(local, sdata, 0); - } else { + } else if (!local->pspolling && sdata->u.mgd.powersave) { local->pspolling = true; /* -- cgit v1.2.2 From f69b9c79c99b607a8d6b1d9e1913861154af8c63 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Thu, 15 Mar 2012 06:15:26 +0530 Subject: mac80211: flush to get the tx status of nullfunc frame immediately Sometimes the probe frame (nullfunc) is stuck at the hw queue. so that the mac80211 terminates the connection as it wont see the tx status. Instead of waiting for long period for ack status, lets call flush to get nullfunc status immediately. It also helps to send the nullfunc till max tries reached. Signed-off-by: Rajkumar Manoharan Signed-off-by: John W. Linville --- net/mac80211/mlme.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 48d0ee3f15dc..f733aeb50e85 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1575,6 +1575,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) ifmgd->probe_send_count++; ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); run_again(ifmgd, ifmgd->probe_timeout); + if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) + drv_flush(sdata->local, false); } static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, -- cgit v1.2.2 From 074d46d1d23f27488a3f314e29cae2453541f17d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 15 Mar 2012 19:45:16 +0100 Subject: wireless: rename ht_info to ht_operation Since some of the HT code pre-dates 802.11n-2009 some names are wrong. The one that bothers me most is that "HT operation" is called "HT information" in our code and that causes confusion. Rename "HT information" to "HT operation" and also the control_chan field to primary_chan to match the name used in the spec. Signed-off-by: Johannes Berg Acked-by: Bing Zhao Signed-off-by: John W. 
Linville --- net/mac80211/ibss.c | 16 +++++++-------- net/mac80211/ieee80211_i.h | 13 ++++++------ net/mac80211/mesh.c | 12 ++++++------ net/mac80211/mesh.h | 2 +- net/mac80211/mesh_plink.c | 4 ++-- net/mac80211/mlme.c | 49 +++++++++++++++++++++++----------------------- net/mac80211/tx.c | 4 ++-- net/mac80211/util.c | 43 ++++++++++++++++++++-------------------- 8 files changed, 70 insertions(+), 73 deletions(-) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 33fd8d9f714e..547cd7e3018a 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -160,13 +160,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, if (channel_type && sband->ht_cap.ht_supported) { pos = skb_put(skb, 4 + sizeof(struct ieee80211_ht_cap) + - sizeof(struct ieee80211_ht_info)); + sizeof(struct ieee80211_ht_operation)); pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); - pos = ieee80211_ie_build_ht_info(pos, - &sband->ht_cap, - chan, - channel_type); + pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, + chan, channel_type); } if (local->hw.queues >= 4) { @@ -441,13 +439,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, if (sta && elems->wmm_info) set_sta_flag(sta, WLAN_STA_WME); - if (sta && elems->ht_info_elem && elems->ht_cap_elem && + if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { /* we both use HT */ struct ieee80211_sta_ht_cap sta_ht_cap_new; enum nl80211_channel_type channel_type = - ieee80211_ht_info_to_channel_type( - elems->ht_info_elem); + ieee80211_ht_oper_to_channel_type( + elems->ht_operation); ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, @@ -1063,7 +1061,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 4 /* IBSS params */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 2 + sizeof(struct ieee80211_ht_operation) + params->ie_len); if (!skb) return -ENOMEM; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d9798a307f20..0ae822c47930 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -397,7 +397,7 @@ struct ieee80211_mgd_auth_data { struct ieee80211_mgd_assoc_data { struct cfg80211_bss *bss; const u8 *supp_rates; - const u8 *ht_information_ie; + const u8 *ht_operation_ie; unsigned long timeout; int tries; @@ -1117,7 +1117,7 @@ struct ieee802_11_elems { u8 *wmm_info; u8 *wmm_param; struct ieee80211_ht_cap *ht_cap_elem; - struct ieee80211_ht_info *ht_info_elem; + struct ieee80211_ht_operation *ht_operation; struct ieee80211_meshconf_ie *mesh_config; u8 *mesh_id; u8 *peering; @@ -1470,10 +1470,9 @@ size_t ieee80211_ie_split(const u8 *ies, size_t ielen, size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap); -u8 *ieee80211_ie_build_ht_info(u8 *pos, - struct ieee80211_sta_ht_cap *ht_cap, - struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type); +u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type); /* internal work items */ void ieee80211_work_init(struct ieee80211_local *local); @@ -1501,7 +1500,7 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_channel_type chantype); enum 
nl80211_channel_type -ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); +ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper); enum nl80211_channel_type ieee80211_get_tx_channel_type( struct ieee80211_local *local, enum nl80211_channel_type channel_type); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e5fbb7cf3562..b05fa9ef866c 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -98,9 +98,9 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat goto mismatch; /* disallow peering with mismatched channel types for now */ - if (ie->ht_info_elem && + if (ie->ht_operation && (local->_oper_channel_type != - ieee80211_ht_info_to_channel_type(ie->ht_info_elem))) + ieee80211_ht_oper_to_channel_type(ie->ht_operation))) goto mismatch; return true; @@ -371,7 +371,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb, return 0; } -int mesh_add_ht_info_ie(struct sk_buff *skb, +int mesh_add_ht_oper_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; @@ -385,11 +385,11 @@ int mesh_add_ht_info_ie(struct sk_buff *skb, if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) return 0; - if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info)) + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation)) return -ENOMEM; - pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info)); - ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type); + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); + ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type); return 0; } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 8d53b71378e3..2bd5d8b864f6 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -220,7 +220,7 @@ int mesh_add_ds_params_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); int mesh_add_ht_cap_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); -int mesh_add_ht_info_ie(struct sk_buff *skb, +int mesh_add_ht_oper_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 4e53c4cbca9e..923269b62b43 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -187,7 +187,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 2 + sdata->u.mesh.mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 2 + sizeof(struct ieee80211_ht_operation) + 2 + 8 + /* peering IE */ sdata->u.mesh.ie_len); if (!skb) @@ -263,7 +263,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, if (action != WLAN_SP_MESH_PEERING_CLOSE) { if (mesh_add_ht_cap_ie(skb, sdata) || - mesh_add_ht_info_ie(skb, sdata)) + mesh_add_ht_oper_ie(skb, sdata)) return -1; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f733aeb50e85..c59bc509ed6f 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -177,7 +177,7 @@ static int ecw2cw(int ecw) * HT abilities for a specific band. 
*/ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, - struct ieee80211_ht_info *hti, + struct ieee80211_ht_operation *ht_oper, const u8 *bssid, u16 ap_ht_cap_flags, bool beacon_htcap_ie) { @@ -185,7 +185,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband; struct sta_info *sta; u32 changed = 0; - int hti_cfreq; + int ht_cfreq; u16 ht_opmode; bool enable_ht = true; enum nl80211_channel_type prev_chantype; @@ -196,10 +196,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, prev_chantype = sdata->vif.bss_conf.channel_type; - hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, - sband->band); + ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, + sband->band); /* check that channel matches the right operating channel */ - if (local->hw.conf.channel->center_freq != hti_cfreq) { + if (local->hw.conf.channel->center_freq != ht_cfreq) { /* Some APs mess this up, evidently. * Netgear WNDR3700 sometimes reports 4 higher than * the actual channel, for instance. @@ -207,11 +207,11 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, printk(KERN_DEBUG "%s: Wrong control channel in association" " response: configured center-freq: %d" - " hti-cfreq: %d hti->control_chan: %d" + " ht-cfreq: %d ht->control_chan: %d" " band: %d. Disabling HT.\n", sdata->name, local->hw.conf.channel->center_freq, - hti_cfreq, hti->control_chan, + ht_cfreq, ht_oper->primary_chan, sband->band); enable_ht = false; } @@ -222,8 +222,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && !ieee80111_cfg_override_disables_ht40(sdata) && (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && - (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { - switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + (ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { + switch (ht_oper->ht_param & + IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: rx_channel_type = NL80211_CHAN_HT40PLUS; break; @@ -278,7 +279,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); } - ht_opmode = le16_to_cpu(hti->operation_mode); + ht_opmode = le16_to_cpu(ht_oper->operation_mode); /* if bss configuration changed store the new one */ if (sdata->ht_opmode_valid != enable_ht || @@ -316,12 +317,12 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, } static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, const u8 *ht_info_ie, + struct sk_buff *skb, const u8 *ht_oper_ie, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps) { - struct ieee80211_ht_info *ht_info; + struct ieee80211_ht_operation *ht_oper; u8 *pos; u32 flags = channel->flags; u16 cap; @@ -329,21 +330,21 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); - if (!ht_info_ie) + if (!ht_oper_ie) return; - if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) + if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation)) return; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); - ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); + ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2); /* determine capability flags */ cap = ht_cap.cap; - 
switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: if (flags & IEEE80211_CHAN_NO_HT40PLUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; @@ -557,7 +558,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie, + ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie, sband, local->oper_channel, ifmgd->ap_smps); /* if present, add any custom non-vendor IEs that go after HT */ @@ -2094,9 +2095,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_set_wmm_default(sdata, false); changed |= BSS_CHANGED_QOS; - if (elems.ht_info_elem && elems.wmm_param && + if (elems.ht_operation && elems.wmm_param && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, + changed |= ieee80211_enable_ht(sdata, elems.ht_operation, cbss->bssid, ap_ht_cap_flags, false); @@ -2321,7 +2322,7 @@ static const u64 care_about_ies = (1ULL << WLAN_EID_CHANNEL_SWITCH) | (1ULL << WLAN_EID_PWR_CONSTRAINT) | (1ULL << WLAN_EID_HT_CAPABILITY) | - (1ULL << WLAN_EID_HT_INFORMATION); + (1ULL << WLAN_EID_HT_OPERATION); static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, @@ -2506,7 +2507,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, erp_valid, erp_value); - if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && + if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { struct sta_info *sta; struct ieee80211_supported_band *sband; @@ -2529,7 +2530,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); - changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, + changed |= ieee80211_enable_ht(sdata, elems.ht_operation, bssid, ap_ht_cap_flags, true); } @@ -3339,8 +3340,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4); assoc_data->supp_rates = bss->supp_rates; assoc_data->supp_rates_len = bss->supp_rates_len; - assoc_data->ht_information_ie = - ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION); + assoc_data->ht_operation_ie = + ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION); if (bss->wmm_used && bss->uapsd_supported && (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 782a60198df4..a9b27273320e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2390,7 +2390,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, 2 + 3 + /* DS params */ 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + - 2 + sizeof(struct ieee80211_ht_info) + + 2 + sizeof(struct ieee80211_ht_operation) + 2 + sdata->u.mesh.mesh_id_len + 2 + sizeof(struct ieee80211_meshconf_ie) + sdata->u.mesh.ie_len); @@ -2419,7 +2419,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, ieee80211_add_ext_srates_ie(&sdata->vif, skb) || mesh_add_rsn_ie(skb, sdata) || mesh_add_ht_cap_ie(skb, sdata) || - mesh_add_ht_info_ie(skb, sdata) || + mesh_add_ht_oper_ie(skb, sdata) || mesh_add_meshid_ie(skb, sdata) || mesh_add_meshconf_ie(skb, sdata) || mesh_add_vendor_ies(skb, sdata)) { diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 
32f7a3b3d43c..5e23cf6389d0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -684,9 +684,9 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, else elem_parse_failed = true; break; - case WLAN_EID_HT_INFORMATION: - if (elen >= sizeof(struct ieee80211_ht_info)) - elems->ht_info_elem = (void *)pos; + case WLAN_EID_HT_OPERATION: + if (elen >= sizeof(struct ieee80211_ht_operation)) + elems->ht_operation = (void *)pos; else elem_parse_failed = true; break; @@ -1611,57 +1611,56 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, return pos; } -u8 *ieee80211_ie_build_ht_info(u8 *pos, - struct ieee80211_sta_ht_cap *ht_cap, +u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, struct ieee80211_channel *channel, enum nl80211_channel_type channel_type) { - struct ieee80211_ht_info *ht_info; + struct ieee80211_ht_operation *ht_oper; /* Build HT Information */ - *pos++ = WLAN_EID_HT_INFORMATION; - *pos++ = sizeof(struct ieee80211_ht_info); - ht_info = (struct ieee80211_ht_info *)pos; - ht_info->control_chan = + *pos++ = WLAN_EID_HT_OPERATION; + *pos++ = sizeof(struct ieee80211_ht_operation); + ht_oper = (struct ieee80211_ht_operation *)pos; + ht_oper->primary_chan = ieee80211_frequency_to_channel(channel->center_freq); switch (channel_type) { case NL80211_CHAN_HT40MINUS: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; break; case NL80211_CHAN_HT40PLUS: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; break; case NL80211_CHAN_HT20: default: - ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; + ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; break; } if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; + ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; /* * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and * RIFS Mode are reserved in IBSS mode, therefore keep them at 0 */ - ht_info->operation_mode = 0x0000; - ht_info->stbc_param = 0x0000; + ht_oper->operation_mode = 0x0000; + ht_oper->stbc_param = 0x0000; /* It seems that Basic MCS set and Supported MCS set are identical for the first 10 bytes */ - memset(&ht_info->basic_set, 0, 16); - memcpy(&ht_info->basic_set, &ht_cap->mcs, 10); + memset(&ht_oper->basic_set, 0, 16); + memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); - return pos + sizeof(struct ieee80211_ht_info); + return pos + sizeof(struct ieee80211_ht_operation); } enum nl80211_channel_type -ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) +ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper) { enum nl80211_channel_type channel_type; - if (!ht_info) + if (!ht_oper) return NL80211_CHAN_NO_HT; - switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_NONE: channel_type = NL80211_CHAN_HT20; break; -- cgit v1.2.2 From d17087e78d3961bd42f99cc3cf8cbf2d7d8ef55e Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Thu, 15 Mar 2012 16:22:05 -0700 Subject: mac80211: Add iface name when calling WARN-ON. This lets the user know which interface has failed the check_sdata_in_driver check. Signed-off-by: Ben Greear Signed-off-by: John W. 
Linville --- net/mac80211/driver-ops.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index af4691fed645..e8dbda1b5b8a 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -7,7 +7,9 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) { - WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)); + WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), + "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", + sdata->dev->name, sdata->flags); } static inline struct ieee80211_sub_if_data * -- cgit v1.2.2 From 52a3f20c09559465f821b54838decb397054e7de Mon Sep 17 00:00:00 2001 From: Marco Porsch Date: Fri, 16 Mar 2012 15:30:26 +0100 Subject: mac80211: end service period only after sending last buffered frame Signed-off-by: Marco Porsch Signed-off-by: John W. Linville --- net/mac80211/sta_info.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 38137cb5f6f0..7fd7ac48f893 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1195,13 +1195,15 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, ieee80211_is_qos_nullfunc(hdr->frame_control)) qoshdr = ieee80211_get_qos_ctl(hdr); - /* set EOSP for the frame */ - if (reason == IEEE80211_FRAME_RELEASE_UAPSD && - qoshdr && skb_queue_empty(&frames)) - *qoshdr |= IEEE80211_QOS_CTL_EOSP; - - info->flags |= IEEE80211_TX_STATUS_EOSP | - IEEE80211_TX_CTL_REQ_TX_STATUS; + /* end service period after last frame */ + if (skb_queue_empty(&frames)) { + if (reason == IEEE80211_FRAME_RELEASE_UAPSD && + qoshdr) + *qoshdr |= IEEE80211_QOS_CTL_EOSP; + + info->flags |= IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; + } if (qoshdr) tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK); -- cgit v1.2.2 From c6fb08aaa879d1a70ed01e9ac1080066abf7ef78 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 18 Mar 2012 22:58:04 +0100 Subject: cfg80211: use compare_ether_addr on MAC addresses instead of memcmp Because of the constant size and guaranteed 16 bit alignment, the inline compare_ether_addr function is much cheaper than calling memcmp. Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville --- net/wireless/mlme.c | 32 +++++++++++++++++--------------- net/wireless/scan.c | 2 +- 2 files changed, 18 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index f5a7ac3a0939..e14fdcc1d7cd 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -100,7 +101,7 @@ void __cfg80211_send_deauth(struct net_device *dev, ASSERT_WDEV_LOCK(wdev); if (wdev->current_bss && - memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; @@ -115,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev, reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); - from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; + from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0; __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, @@ -154,7 +155,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, return; if (wdev->current_bss && - memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) { cfg80211_sme_disassoc(dev, wdev->current_bss); cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); @@ -165,7 +166,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; + from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0; __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } EXPORT_SYMBOL(__cfg80211_send_disassoc); @@ -285,7 +286,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, return -EINVAL; if (wdev->current_bss && - memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) + compare_ether_addr(bssid, wdev->current_bss->pub.bssid) == 0) return -EALREADY; memset(&req, 0, sizeof(req)); @@ -362,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, memset(&req, 0, sizeof(req)); if (wdev->current_bss && prev_bssid && - memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) { + compare_ether_addr(wdev->current_bss->pub.bssid, prev_bssid) == 0) { /* * Trying to reassociate: Allow this to proceed and let the old * association to be dropped when the new one is completed. 
@@ -446,7 +447,8 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, if (local_state_change) { if (wdev->current_bss && - memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + compare_ether_addr(wdev->current_bss->pub.bssid, bssid) + == 0) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; @@ -495,7 +497,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, req.local_state_change = local_state_change; req.ie = ie; req.ie_len = ie_len; - if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) + if (compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) req.bss = &wdev->current_bss->pub; else return -ENOTCONN; @@ -758,8 +760,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, break; } - if (memcmp(wdev->current_bss->pub.bssid, - mgmt->bssid, ETH_ALEN)) { + if (compare_ether_addr(wdev->current_bss->pub.bssid, + mgmt->bssid)) { err = -ENOTCONN; break; } @@ -772,8 +774,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, break; /* for station, check that DA is the AP */ - if (memcmp(wdev->current_bss->pub.bssid, - mgmt->da, ETH_ALEN)) { + if (compare_ether_addr(wdev->current_bss->pub.bssid, + mgmt->da)) { err = -ENOTCONN; break; } @@ -781,11 +783,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP_VLAN: - if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN)) + if (compare_ether_addr(mgmt->bssid, dev->dev_addr)) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: - if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) { + if (compare_ether_addr(mgmt->sa, mgmt->bssid)) { err = -EINVAL; break; } @@ -804,7 +806,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, return err; } - if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) + if (compare_ether_addr(mgmt->sa, dev->dev_addr) != 0) return -EINVAL; /* Transmit the Action frame as requested by user space */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 70faadf16a32..fdbcfe692a36 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -378,7 +378,7 @@ static int cmp_bss_core(struct cfg80211_bss *a, b->len_information_elements); } - return memcmp(a->bssid, b->bssid, ETH_ALEN); + return compare_ether_addr(a->bssid, b->bssid); } static int cmp_bss(struct cfg80211_bss *a, -- cgit v1.2.2 From fcb2c9e1025cd529890303ffbde813a98cdffed4 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 18 Mar 2012 22:58:05 +0100 Subject: mac80211: reduce code duplication in debugfs code Signed-off-by: Felix Fietkau Signed-off-by: John W. 
Linville --- net/mac80211/debugfs_netdev.c | 71 ++++++++----------------------------------- 1 file changed, 13 insertions(+), 58 deletions(-) (limited to 'net') diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index a32eeda04aa3..6ed0455902c6 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -499,26 +499,23 @@ IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); #endif - -#define DEBUGFS_ADD(name) \ - debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ - sdata, &name##_ops); - #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, sdata->debugfs.dir, \ sdata, &name##_ops); -static void add_sta_files(struct ieee80211_sub_if_data *sdata) +#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) + +static void add_common_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); DEBUGFS_ADD(rc_rateidx_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mask_5ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); +} +static void add_sta_files(struct ieee80211_sub_if_data *sdata) +{ DEBUGFS_ADD(bssid); DEBUGFS_ADD(aid); DEBUGFS_ADD(last_beacon); @@ -531,15 +528,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) static void add_ap_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); - DEBUGFS_ADD(num_sta_authorized); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); @@ -549,48 +537,14 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata) static void add_ibss_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); - DEBUGFS_ADD_MODE(tsf, 0600); } static void add_wds_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); - DEBUGFS_ADD(peer); } -static void add_vlan_files(struct ieee80211_sub_if_data *sdata) -{ - DEBUGFS_ADD(drop_unencrypted); - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); - DEBUGFS_ADD(rc_rateidx_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mask_5ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); - DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); -} - -static void add_monitor_files(struct ieee80211_sub_if_data *sdata) -{ - DEBUGFS_ADD(flags); - DEBUGFS_ADD(state); - DEBUGFS_ADD(channel_type); -} - #ifdef CONFIG_MAC80211_MESH static void add_mesh_files(struct ieee80211_sub_if_data *sdata) @@ -651,6 +605,13 @@ static void add_files(struct ieee80211_sub_if_data *sdata) if (!sdata->debugfs.dir) return; + DEBUGFS_ADD(flags); + DEBUGFS_ADD(state); + DEBUGFS_ADD(channel_type); + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + add_common_files(sdata); + switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: #ifdef CONFIG_MAC80211_MESH @@ -671,12 +632,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata) 
case NL80211_IFTYPE_WDS: add_wds_files(sdata); break; - case NL80211_IFTYPE_MONITOR: - add_monitor_files(sdata); - break; - case NL80211_IFTYPE_AP_VLAN: - add_vlan_files(sdata); - break; default: break; } -- cgit v1.2.2 From 12d3952fc4a1cd96234bc7023bf7eefeb0bb6355 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 18 Mar 2012 22:58:06 +0100 Subject: mac80211: optimize aggregation session timeout handling Calling mod_timer from the rx/tx hotpath is somewhat expensive, and the timeout doesn't need to be so precise. Switch to a different strategy: Schedule the timer initially, store jiffies of all last rx/tx activity which would previously modify the timer, and let the timer re-arm itself after checking the last rx/tx timestamp. Make the session timers deferrable to avoid causing extra wakeups on systems running on battery. This visibly reduces CPU load under high network load on small embedded systems. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville --- net/mac80211/agg-rx.c | 18 ++++++++++++++++-- net/mac80211/agg-tx.c | 18 ++++++++++++++++-- net/mac80211/ieee80211_i.h | 3 ++- net/mac80211/rx.c | 3 +-- net/mac80211/sta_info.h | 4 ++++ net/mac80211/tx.c | 3 +-- 6 files changed, 40 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 64d3ce5ea1a0..a070d4f460ea 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -142,6 +142,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); + struct tid_ampdu_rx *tid_rx; + unsigned long timeout; + + tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); + if (!tid_rx) + return; + + timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); + if (time_is_after_jiffies(timeout)) { + mod_timer(&tid_rx->session_timer, timeout); + return; + } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); @@ -291,7 +303,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, /* rx timer */ tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&tid_agg_rx->session_timer); + init_timer_deferrable(&tid_agg_rx->session_timer); /* rx reorder timer */ tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired; @@ -335,8 +347,10 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, /* activate it for RX */ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); - if (timeout) + if (timeout) { mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); + tid_agg_rx->last_rx = jiffies; + } end: mutex_unlock(&sta->ampdu_mlme.mtx); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 76be61744198..9628a1892441 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -417,6 +417,18 @@ static void sta_tx_agg_session_timer_expired(unsigned long data) u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); + struct tid_ampdu_tx *tid_tx; + unsigned long timeout; + + tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid); + if (!tid_tx) + return; + + timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); + if (time_is_after_jiffies(timeout)) { + mod_timer(&tid_tx->session_timer, timeout); + return; + } #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "tx session timer expired 
on tid %d\n", (u16)*ptid); @@ -542,7 +554,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, /* tx timer */ tid_tx->session_timer.function = sta_tx_agg_session_timer_expired; tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; - init_timer(&tid_tx->session_timer); + init_timer_deferrable(&tid_tx->session_timer); /* assign a dialog token */ sta->ampdu_mlme.dialog_token_allocator++; @@ -884,9 +896,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, sta->ampdu_mlme.addba_req_num[tid] = 0; - if (tid_tx->timeout) + if (tid_tx->timeout) { mod_timer(&tid_tx->session_timer, TU_TO_EXP_TIME(tid_tx->timeout)); + tid_tx->last_tx = jiffies; + } } else { ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 0ae822c47930..23765e3ca7bc 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -52,7 +52,8 @@ struct ieee80211_local; * increased memory use (about 2 kB of RAM per entry). */ #define IEEE80211_FRAGMENT_MAX 4 -#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) +#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) +#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) #define IEEE80211_DEFAULT_UAPSD_QUEUES \ (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bcfe8c77c839..8da3b36c287a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -793,8 +793,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) /* reset session timer */ if (tid_agg_rx->timeout) - mod_timer(&tid_agg_rx->session_timer, - TU_TO_EXP_TIME(tid_agg_rx->timeout)); + tid_agg_rx->last_rx = jiffies; /* if this mpdu is fragmented - terminate rx aggregation session */ sc = le16_to_cpu(hdr->seq_ctrl); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index ab0576827baf..e21652bccf7c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -101,6 +101,7 @@ enum ieee80211_sta_info_flags { * @dialog_token: dialog token for aggregation session * @timeout: session timeout value to be filled in ADDBA requests * @state: session state (see above) + * @last_tx: jiffies of last tx activity * @stop_initiator: initiator of a session stop * @tx_stop: TX DelBA frame when stopping * @buf_size: reorder buffer size at receiver @@ -122,6 +123,7 @@ struct tid_ampdu_tx { struct timer_list addba_resp_timer; struct sk_buff_head pending; unsigned long state; + unsigned long last_tx; u16 timeout; u8 dialog_token; u8 stop_initiator; @@ -139,6 +141,7 @@ struct tid_ampdu_tx { * @reorder_time: jiffies when skb was added * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) * @reorder_timer: releases expired frames from the reorder buffer. + * @last_rx: jiffies of last rx activity * @head_seq_num: head sequence number in reordering buffer. * @stored_mpdu_num: number of MPDUs in reordering buffer * @ssn: Starting Sequence Number expected to be aggregated. 
@@ -163,6 +166,7 @@ struct tid_ampdu_rx { unsigned long *reorder_time; struct timer_list session_timer; struct timer_list reorder_timer; + unsigned long last_rx; u16 head_seq_num; u16 stored_mpdu_num; u16 ssn; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a9b27273320e..da2447a7bade 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1118,8 +1118,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, /* reset session timer */ if (reset_agg_timer && tid_tx->timeout) - mod_timer(&tid_tx->session_timer, - TU_TO_EXP_TIME(tid_tx->timeout)); + tid_tx->last_tx = jiffies; return queued; } -- cgit v1.2.2 From 70b12f2612a6b352d16342b5952cf9f9de6c1d56 Mon Sep 17 00:00:00 2001 From: Ronald Wahl Date: Mon, 19 Mar 2012 14:37:20 +0100 Subject: mac80211: when receiving DTIM disable power-save mode only if it was enabled When receiving DTIM we currently disable power save mode in the hardware unconditionally, i.e. also when the hardware was not sleeping. This causes trouble with at least one wireless chipset (Ralink RT3572). When the hardware is not sleeping and we send a wakeup command (e.g. this happens after a scan) then a significant decrease of the link quality or a disconnect may occur. Disabling power save mode only when it was enabled prevents this issue. Signed-off-by: Ronald Wahl Reviewed-by: Gertjan van Wingerde Signed-off-by: John W. Linville --- net/mac80211/mlme.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c59bc509ed6f..bb7e5189e27e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2471,9 +2471,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { if (directed_tim) { if (local->hw.conf.dynamic_ps_timeout > 0) { - local->hw.conf.flags &= ~IEEE80211_CONF_PS; - ieee80211_hw_config(local, - IEEE80211_CONF_CHANGE_PS); + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, + IEEE80211_CONF_CHANGE_PS); + } ieee80211_send_nullfunc(local, sdata, 0); } else if (!local->pspolling && sdata->u.mgd.powersave) { local->pspolling = true; -- cgit v1.2.2 From 292c41acddfdbe0fb42d4c4ad9b896168fd16e91 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Mon, 19 Mar 2012 21:38:46 +0800 Subject: mac80211: fix the sparse warnings on endian handling in RANN propagation The HWMP sequence number of received RANN element is compared to decide whether to be propagated. The sequence number is required to covert from 32bit little endian data into CPUs endianness for comparison. The same applies to the RANN metric. Signed-off-by: Chun-Yeow Yeoh Signed-off-by: Javier Cardona Signed-off-by: John W. 
Linville --- net/mac80211/mesh_hwmp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 1c6f3d02aebf..f80a9e3da359 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -748,10 +748,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, flags = rann->rann_flags; root_is_gate = !!(flags & RANN_FLAG_IS_GATE); orig_addr = rann->rann_addr; - orig_sn = rann->rann_seq; + orig_sn = le32_to_cpu(rann->rann_seq); hopcount = rann->rann_hopcount; hopcount++; - metric = rann->rann_metric; + metric = le32_to_cpu(rann->rann_metric); /* Ignore our own RANNs */ if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) -- cgit v1.2.2 From d2a079fd48c05235b86016a33a79cb86a86e15a8 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Fri, 23 Mar 2012 18:48:51 +0800 Subject: mac80211: fix the RANN propagation issues This patch is intended to solve the follwing issues in RANN propagation: [1] The interval in propagated RANN should be based on the interval of received RANN. [2] The aggregated path metric for propagated RANN is as received plus own link metric towards the transmitting mesh STA (not root mesh STA). [3] The comparison of path metric for RANN with same sequence number should be done before deciding whether to propagate or not. Signed-off-by: Chun-Yeow Yeoh Signed-off-by: John W. Linville --- net/mac80211/mesh.h | 2 ++ net/mac80211/mesh_hwmp.c | 24 ++++++++++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 2bd5d8b864f6..3e52439ed112 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -86,6 +86,7 @@ enum mesh_deferred_task_flags { * mpath itself. No need to take this lock when adding or removing * an mpath to a hash bucket on a path table. 
* @rann_snd_addr: the RANN sender address + * @rann_metric: the aggregated path metric towards the root node * @is_root: the destination station of this path is a root node * @is_gate: the destination station of this path is a mesh gate * @@ -112,6 +113,7 @@ struct mesh_path { enum mesh_path_flags flags; spinlock_t state_lock; u8 rann_snd_addr[ETH_ALEN]; + u32 rann_metric; bool is_root; bool is_gate; }; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index f80a9e3da359..a80da3784a25 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -86,8 +86,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae) #define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0) #define MSEC_TO_TU(x) (x*1000/1024) -#define SN_GT(x, y) ((long) (y) - (long) (x) < 0) -#define SN_LT(x, y) ((long) (x) - (long) (y) < 0) +#define SN_GT(x, y) ((s32)(y - x) < 0) +#define SN_LT(x, y) ((s32)(x - y) < 0) #define net_traversal_jiffies(s) \ msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) @@ -732,11 +732,12 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, struct ieee80211_rann_ie *rann) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; struct mesh_path *mpath; u8 ttl, flags, hopcount; u8 *orig_addr; - u32 orig_sn, metric; - u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval; + u32 orig_sn, metric, metric_txsta, interval; bool root_is_gate; ttl = rann->rann_ttl; @@ -749,6 +750,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, root_is_gate = !!(flags & RANN_FLAG_IS_GATE); orig_addr = rann->rann_addr; orig_sn = le32_to_cpu(rann->rann_seq); + interval = le32_to_cpu(rann->rann_interval); hopcount = rann->rann_hopcount; hopcount++; metric = le32_to_cpu(rann->rann_metric); @@ -761,6 +763,14 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, orig_addr, mgmt->sa, root_is_gate); rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) { + rcu_read_unlock(); + return; + } + + metric_txsta = airtime_link_metric_get(local, sta); + mpath = mesh_path_lookup(orig_addr, sdata); if (!mpath) { mesh_path_add(orig_addr, sdata); @@ -780,14 +790,16 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); } - if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) { + if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && + metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) { mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, cpu_to_le32(orig_sn), 0, NULL, 0, broadcast_addr, hopcount, ttl, cpu_to_le32(interval), - cpu_to_le32(metric + mpath->metric), + cpu_to_le32(metric + metric_txsta), 0, sdata); mpath->sn = orig_sn; + mpath->rann_metric = metric + metric_txsta; } /* Using individually addressed PREQ for root node */ -- cgit v1.2.2 From 80007efeff0568375b08faf93c7aad65602cb97e Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 23 Mar 2012 07:23:31 -0700 Subject: cfg80211: warn if db.txt is empty with CONFIG_CFG80211_INTERNAL_REGDB It has happened twice now where elaborate troubleshooting has undergone on systems where CONFIG_CFG80211_INTERNAL_REGDB [0] has been set but yet net/wireless/db.txt was not updated. Despite the documentation on this it seems system integrators could use some more help with this, so throw out a kernel warning at boot time when their database is empty. 
This does mean that the error-prone system integrator won't likely realize the issue until they boot the machine but -- it does not seem to make sense to enable a build bug breaking random build testing. [0] http://wireless.kernel.org/en/developers/Regulatory/CRDA#CONFIG_CFG80211_INTERNAL_REGDB Cc: Stephen Rothwell Cc: Youngsin Lee Cc: Raja Mani Cc: Senthil Kumar Balasubramanian Cc: Vipin Mehta Cc: yahuan@qca.qualcomm.com Cc: jjan@qca.qualcomm.com Cc: vthiagar@qca.qualcomm.com Cc: henrykim@qualcomm.com Cc: jouni@qca.qualcomm.com Cc: athiruve@qca.qualcomm.com Cc: cjkim@qualcomm.com Cc: philipk@qca.qualcomm.com Cc: sunnykim@qualcomm.com Cc: sskwak@qualcomm.com Cc: kkim@qualcomm.com Cc: mattbyun@qualcomm.com Cc: ryanlee@qualcomm.com Cc: simbap@qualcomm.com Cc: krislee@qualcomm.com Cc: conner@qualcomm.com Cc: hojinkim@qualcomm.com Cc: honglee@qualcomm.com Cc: johnwkim@qualcomm.com Cc: jinyong@qca.qualcomm.com Cc: stable@vger.kernel.org Signed-off-by: Luis R. Rodriguez Signed-off-by: John W. Linville --- net/wireless/reg.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'net') diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e9a0ac83b84c..15f347477a99 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -388,7 +388,15 @@ static void reg_regdb_query(const char *alpha2) schedule_work(®_regdb_work); } + +/* Feel free to add any other sanity checks here */ +static void reg_regdb_size_check(void) +{ + /* We should ideally BUILD_BUG_ON() but then random builds would fail */ + WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it..."); +} #else +static inline void reg_regdb_size_check(void) {} static inline void reg_regdb_query(const char *alpha2) {} #endif /* CONFIG_CFG80211_INTERNAL_REGDB */ @@ -2322,6 +2330,8 @@ int __init regulatory_init(void) spin_lock_init(®_requests_lock); spin_lock_init(®_pending_beacons_lock); + reg_regdb_size_check(); + cfg80211_regdomain = cfg80211_world_regdom; user_alpha2[0] = '9'; -- cgit v1.2.2 From 81ddbb5c1188dfaa98c67832a751117fcacda75d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 26 Mar 2012 18:47:18 +0200 Subject: mac80211: don't always advertise remain-on-channel Not all devices are really capable of implementing remain-on-channel, even if it is implemented in SW, as they can't necessarily deal with channel changes while associated. Remove the WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL and add it only if either the driver has remain_on_channel implemented in the driver/device. Also add it to all drivers that advertise P2P right now since those definitely have to have it working. Signed-off-by: Johannes Berg Acked-by: Luciano Coelho Signed-off-by: John W. 
Linville --- net/mac80211/main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 16336480c631..d019f0d3a0fe 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -557,8 +557,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION | WIPHY_FLAG_REPORTS_OBSS | - WIPHY_FLAG_OFFCHAN_TX | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + WIPHY_FLAG_OFFCHAN_TX; + + if (ops->remain_on_channel) + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features = NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_HT_IBSS; -- cgit v1.2.2 From 98aed9fd0104ef2d8e6b5154c9713e0e27e5f9c8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 27 Mar 2012 14:18:36 +0200 Subject: mac80211: fix mesh TX coding style Fix bad indentation & pointless if nesting. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/tx.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index da2447a7bade..82389adbe582 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1467,12 +1467,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) if (ieee80211_vif_is_mesh(&sdata->vif) && ieee80211_is_data(hdr->frame_control) && - !is_multicast_ether_addr(hdr->addr1)) - if (mesh_nexthop_resolve(skb, sdata)) { - /* skb queued: don't free */ - rcu_read_unlock(); - return; - } + !is_multicast_ether_addr(hdr->addr1) && + mesh_nexthop_resolve(skb, sdata)) { + /* skb queued: don't free */ + rcu_read_unlock(); + return; + } ieee80211_set_qos_hdr(sdata, skb); ieee80211_tx(sdata, skb, false); -- cgit v1.2.2 From 4875d30df594eb47746b16073067e316968edd53 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 27 Mar 2012 14:18:37 +0200 Subject: mac80211: clean up uAPSD TX code Clean up the code formatting and also replace the constant 0 by IEEE80211_AC_VO. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/tx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 82389adbe582..b35d319cea8f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -230,9 +230,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) * changed via debugfs, user needs to reassociate manually to have * everything in sync. */ - if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) - && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) - && skb_get_queue_mapping(tx->skb) == 0) + if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) && + (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) && + skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) return TX_CONTINUE; if (local->hw.conf.flags & IEEE80211_CONF_PS) { -- cgit v1.2.2 From 4670cf7a84dfbc3c5a9b50a12afdee0003a40ed8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 27 Mar 2012 14:18:38 +0200 Subject: mac80211: make ieee80211_downgrade_queue static There's no reason for it to not be static. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/wme.c | 40 ++++++++++++++++++++-------------------- net/mac80211/wme.h | 3 --- 2 files changed, 20 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 89511be3111e..b3d4ee044e7c 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -52,6 +52,26 @@ static int wme_downgrade_ac(struct sk_buff *skb) } } +static u16 ieee80211_downgrade_queue(struct ieee80211_local *local, + struct sk_buff *skb) +{ + /* in case we are a client verify acm is not set for this ac */ + while (unlikely(local->wmm_acm & BIT(skb->priority))) { + if (wme_downgrade_ac(skb)) { + /* + * This should not really happen. The AP has marked all + * lower ACs to require admission control which is not + * a reasonable configuration. Allow the frame to be + * transmitted using AC_BK as a workaround. + */ + break; + } + } + + /* look up which queue to use for frames with this 1d tag */ + return ieee802_1d_to_ac[skb->priority]; +} + /* Indicate which queue to use for this fully formed 802.11 frame */ u16 ieee80211_select_queue_80211(struct ieee80211_local *local, struct sk_buff *skb, @@ -139,26 +159,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, return ieee80211_downgrade_queue(local, skb); } -u16 ieee80211_downgrade_queue(struct ieee80211_local *local, - struct sk_buff *skb) -{ - /* in case we are a client verify acm is not set for this ac */ - while (unlikely(local->wmm_acm & BIT(skb->priority))) { - if (wme_downgrade_ac(skb)) { - /* - * This should not really happen. The AP has marked all - * lower ACs to require admission control which is not - * a reasonable configuration. Allow the frame to be - * transmitted using AC_BK as a workaround. - */ - break; - } - } - - /* look up which queue to use for frames with this 1d tag */ - return ieee802_1d_to_ac[skb->priority]; -} - void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 94edceb617ff..ca80818b7b66 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h @@ -22,8 +22,5 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); -u16 ieee80211_downgrade_queue(struct ieee80211_local *local, - struct sk_buff *skb); - #endif /* _WME_H */ -- cgit v1.2.2 From 78307daadf08cd471a2adfcebef8453fae9c8314 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 27 Mar 2012 14:18:39 +0200 Subject: mac80211: inline ieee80211_add_pending_skbs This is a trivial wrapper function, inline it. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/ieee80211_i.h | 7 +++++-- net/mac80211/util.c | 6 ------ 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 23765e3ca7bc..54f5b5b299d5 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1432,11 +1432,14 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb); -void ieee80211_add_pending_skbs(struct ieee80211_local *local, - struct sk_buff_head *skbs); void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, struct sk_buff_head *skbs, void (*fn)(void *data), void *data); +static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs) +{ + ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); +} void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5e23cf6389d0..4cb0fd2f5af9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -404,12 +404,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } -void ieee80211_add_pending_skbs(struct ieee80211_local *local, - struct sk_buff_head *skbs) -{ - ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); -} - void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, enum queue_stop_reason reason) { -- cgit v1.2.2 From 1d98fb122d8f0c33504576da4107bc807176be1d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 27 Mar 2012 14:18:40 +0200 Subject: mac80211: use AC constants Use the AC constants instead of hard-coding the numbers with comments. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/util.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 4cb0fd2f5af9..1d4b8b7a5a33 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -790,20 +790,20 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, aCWmin = 15; switch (queue) { - case 3: /* AC_BK */ + case IEEE80211_AC_BK: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 7; break; default: /* never happens but let's not leave undefined */ - case 2: /* AC_BE */ + case IEEE80211_AC_BE: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; qparam.txop = 0; qparam.aifs = 3; break; - case 1: /* AC_VI */ + case IEEE80211_AC_VI: qparam.cw_max = aCWmin; qparam.cw_min = (aCWmin + 1) / 2 - 1; if (use_11b) @@ -812,7 +812,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, qparam.txop = 3008/32; qparam.aifs = 2; break; - case 0: /* AC_VO */ + case IEEE80211_AC_VO: qparam.cw_max = (aCWmin + 1) / 2 - 1; qparam.cw_min = (aCWmin + 1) / 4 - 1; if (use_11b) -- cgit v1.2.2 From 24398e39c8ee4a9d9123eed322b859ece4d16cac Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 10:58:36 +0200 Subject: mac80211: set HT channel before association Changing the channel type during operation is confusing to some drivers and will be hard to handle in multi-channel scenarios. Instead of changing the channel, set it to the right HT channel before authenticating/associating and don't change it -- just update the 20/40 MHz restrictions in rate control as needed when changed by the AP. 
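Concretely, "set it to the right HT channel before authenticating/associating" means mapping the secondary-channel offset from the HT operation IE (taken from the beacon/probe response data) to an nl80211 channel type once, up front. The helper below is only a condensed sketch of that decision -- the function name is made up, and the real logic added to ieee80211_prep_connection() further down additionally validates the primary channel against the BSS's control channel:

static enum nl80211_channel_type
sta_ht_oper_to_chantype(const struct ieee80211_ht_operation *ht_oper,
                        const struct ieee80211_sta_ht_cap *own_ht_cap)
{
        if (!ht_oper || !own_ht_cap->ht_supported)
                return NL80211_CHAN_NO_HT;

        /* local device cannot do 40 MHz at all */
        if (!(own_ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
                return NL80211_CHAN_HT20;

        switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                return NL80211_CHAN_HT40PLUS;
        case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
                return NL80211_CHAN_HT40MINUS;
        default:
                return NL80211_CHAN_HT20;
        }
}

If the HT40+/- choice later fails in ieee80211_set_channel_type() because another virtual interface needs a conflicting channel type, the patch falls back to HT20 and sets IEEE80211_STA_DISABLE_40MHZ, which also makes ieee80211_add_ht_ie() clear the 40 MHz capability bits (SUP_WIDTH_20_40, SGI_40) in the association request.
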
This also fixes a problem that Paul missed in his fix for the "regulatory makes us deaf" issue -- when we couldn't use 40 MHz we still associated saying we were using 40 MHz, which could in similarly broken APs make us never even connect successfully. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/ht.c | 9 -- net/mac80211/ieee80211_i.h | 10 +- net/mac80211/mlme.c | 226 ++++++++++++++++++++++----------------------- 3 files changed, 114 insertions(+), 131 deletions(-) (limited to 'net') diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index f25fff7607d8..9b603366943c 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -19,15 +19,6 @@ #include "ieee80211_i.h" #include "rate.h" -bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata) -{ - const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40); - if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) && - !(sdata->u.mgd.ht_capa.cap_info & flg)) - return true; - return false; -} - static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap, u16 flag) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 54f5b5b299d5..a67ba7c85a1e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -379,6 +379,7 @@ enum ieee80211_sta_flags { IEEE80211_STA_UAPSD_ENABLED = BIT(7), IEEE80211_STA_NULLFUNC_ACKED = BIT(8), IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), + IEEE80211_STA_DISABLE_40MHZ = BIT(10), }; struct ieee80211_mgd_auth_data { @@ -511,6 +512,8 @@ struct ieee80211_if_managed { int rssi_min_thold, rssi_max_thold; int last_ave_beacon_signal; + enum nl80211_channel_type tx_chantype; + struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ }; @@ -667,12 +670,6 @@ struct ieee80211_sub_if_data { char name[IFNAMSIZ]; - /* - * keep track of whether the HT opmode (stored in - * vif.bss_info.ht_operation_mode) is valid. - */ - bool ht_opmode_valid; - /* to detect idle changes */ bool old_idle; @@ -1300,7 +1297,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); /* HT */ -bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata); void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_ht_cap *ht_cap); void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index bb7e5189e27e..30259a73195c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -171,110 +171,57 @@ static int ecw2cw(int ecw) return (1 << ecw) - 1; } -/* - * ieee80211_enable_ht should be called only after the operating band - * has been determined as ht configuration depends on the hw's - * HT abilities for a specific band. 
- */ -static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, - struct ieee80211_ht_operation *ht_oper, - const u8 *bssid, u16 ap_ht_cap_flags, - bool beacon_htcap_ie) +static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, + struct ieee80211_ht_operation *ht_oper, + const u8 *bssid, bool reconfig) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct sta_info *sta; u32 changed = 0; - int ht_cfreq; u16 ht_opmode; - bool enable_ht = true; - enum nl80211_channel_type prev_chantype; - enum nl80211_channel_type rx_channel_type = NL80211_CHAN_NO_HT; - enum nl80211_channel_type tx_channel_type; + enum nl80211_channel_type channel_type; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - prev_chantype = sdata->vif.bss_conf.channel_type; + channel_type = local->hw.conf.channel_type; + if (WARN_ON_ONCE(channel_type == NL80211_CHAN_NO_HT)) + return 0; - ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, - sband->band); - /* check that channel matches the right operating channel */ - if (local->hw.conf.channel->center_freq != ht_cfreq) { - /* Some APs mess this up, evidently. - * Netgear WNDR3700 sometimes reports 4 higher than - * the actual channel, for instance. - */ - printk(KERN_DEBUG - "%s: Wrong control channel in association" - " response: configured center-freq: %d" - " ht-cfreq: %d ht->control_chan: %d" - " band: %d. Disabling HT.\n", - sdata->name, - local->hw.conf.channel->center_freq, - ht_cfreq, ht_oper->primary_chan, - sband->band); - enable_ht = false; - } - - if (enable_ht) { - rx_channel_type = NL80211_CHAN_HT20; - - if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && - !ieee80111_cfg_override_disables_ht40(sdata) && - (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && - (ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { - switch (ht_oper->ht_param & - IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - rx_channel_type = NL80211_CHAN_HT40PLUS; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - rx_channel_type = NL80211_CHAN_HT40MINUS; - break; - } - } - } - - tx_channel_type = ieee80211_get_tx_channel_type(local, rx_channel_type); - - if (local->tmp_channel) - local->tmp_channel_type = rx_channel_type; + channel_type = ieee80211_get_tx_channel_type(local, channel_type); - if (!ieee80211_set_channel_type(local, sdata, rx_channel_type)) { - /* can only fail due to HT40+/- mismatch */ - rx_channel_type = NL80211_CHAN_HT20; - WARN_ON(!ieee80211_set_channel_type(local, sdata, - rx_channel_type)); - } + /* This can change during the lifetime of the BSS */ + if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) + channel_type = NL80211_CHAN_HT20; - if (beacon_htcap_ie && (prev_chantype != rx_channel_type)) { - /* - * Whenever the AP announces the HT mode change that can be - * 40MHz intolerant or etc., it would be safer to stop tx - * queues before doing hw config to avoid buffer overflow. - */ - ieee80211_stop_queues_by_reason(&sdata->local->hw, + if (!reconfig || (sdata->u.mgd.tx_chantype != channel_type)) { + if (reconfig) { + /* + * Whenever the AP announces the HT mode changed + * (e.g. 40 MHz intolerant) stop queues to avoid + * sending out frames while the rate control is + * reconfiguring. 
+ */ + ieee80211_stop_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); - /* flush out all packets */ - synchronize_net(); - - drv_flush(local, false); - } + /* flush out all packets */ + synchronize_net(); - /* channel_type change automatically detected */ - ieee80211_hw_config(local, 0); + drv_flush(local, false); + } - if (prev_chantype != tx_channel_type) { rcu_read_lock(); sta = sta_info_get(sdata, bssid); if (sta) rate_control_rate_update(local, sband, sta, IEEE80211_RC_HT_CHANGED, - tx_channel_type); + channel_type); rcu_read_unlock(); - if (beacon_htcap_ie) + sdata->u.mgd.tx_chantype = channel_type; + + if (reconfig) ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); } @@ -282,12 +229,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, ht_opmode = le16_to_cpu(ht_oper->operation_mode); /* if bss configuration changed store the new one */ - if (sdata->ht_opmode_valid != enable_ht || - sdata->vif.bss_conf.ht_operation_mode != ht_opmode || - prev_chantype != rx_channel_type) { + if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) { changed |= BSS_CHANGED_HT; sdata->vif.bss_conf.ht_operation_mode = ht_opmode; - sdata->ht_opmode_valid = enable_ht; } return changed; @@ -359,6 +303,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, break; } + /* + * If 40 MHz was disabled associate as though we weren't + * capable of 40 MHz -- some broken APs will never fall + * back to trying to transmit in 20 MHz. + */ + if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + /* set SM PS mode properly */ cap &= ~IEEE80211_HT_CAP_SM_PS; switch (smps) { @@ -1436,7 +1390,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.assoc = false; /* on the next assoc, re-program HT parameters */ - sdata->ht_opmode_valid = false; memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); @@ -2003,7 +1956,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; u32 changed = 0; int err; - u16 ap_ht_cap_flags; /* AssocResp and ReassocResp have identical structure */ @@ -2054,8 +2006,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems.ht_cap_elem, &sta->sta.ht_cap); - ap_ht_cap_flags = sta->sta.ht_cap.cap; - rate_control_rate_init(sta); if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) @@ -2097,9 +2047,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (elems.ht_operation && elems.wmm_param && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - changed |= ieee80211_enable_ht(sdata, elems.ht_operation, - cbss->bssid, ap_ht_cap_flags, - false); + changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, + cbss->bssid, false); /* set AID and assoc capability, * ieee80211_set_associated() will tell the driver */ @@ -2511,29 +2460,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { - struct sta_info *sta; struct ieee80211_supported_band *sband; - u16 ap_ht_cap_flags; - - rcu_read_lock(); - - sta = sta_info_get(sdata, bssid); - if (WARN_ON(!sta)) { - rcu_read_unlock(); - return; - } sband 
= local->hw.wiphy->bands[local->hw.conf.channel->band]; - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, - elems.ht_cap_elem, &sta->sta.ht_cap); - - ap_ht_cap_flags = sta->sta.ht_cap.cap; - - rcu_read_unlock(); - - changed |= ieee80211_enable_ht(sdata, elems.ht_operation, - bssid, ap_ht_cap_flags, true); + changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, + bssid, true); } /* Note: country IE parsing is done for us by cfg80211 */ @@ -3065,6 +2997,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; bool have_sta = false; int err; + int ht_cfreq; + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + const u8 *ht_oper_ie; + const struct ieee80211_ht_operation *ht_oper = NULL; + struct ieee80211_supported_band *sband; if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) return -EINVAL; @@ -3086,17 +3023,76 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, mutex_unlock(&local->mtx); /* switch to the right channel */ + sband = local->hw.wiphy->bands[cbss->channel->band]; + + ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ; + + if (sband->ht_cap.ht_supported) { + ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION, + cbss->information_elements, + cbss->len_information_elements); + if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) + ht_oper = (void *)(ht_oper_ie + 2); + } + + if (ht_oper) { + ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, + cbss->channel->band); + /* check that channel matches the right operating channel */ + if (cbss->channel->center_freq != ht_cfreq) { + /* + * It's possible that some APs are confused here; + * Netgear WNDR3700 sometimes reports 4 higher than + * the actual channel in association responses, but + * since we look at probe response/beacon data here + * it should be OK. + */ + printk(KERN_DEBUG + "%s: Wrong control channel: center-freq: %d" + " ht-cfreq: %d ht->primary_chan: %d" + " band: %d. 
Disabling HT.\n", + sdata->name, cbss->channel->center_freq, + ht_cfreq, ht_oper->primary_chan, + cbss->channel->band); + ht_oper = NULL; + } + } + + if (ht_oper) { + channel_type = NL80211_CHAN_HT20; + + if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { + switch (ht_oper->ht_param & + IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + channel_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + channel_type = NL80211_CHAN_HT40MINUS; + break; + } + } + } + + if (!ieee80211_set_channel_type(local, sdata, channel_type)) { + /* can only fail due to HT40+/- mismatch */ + channel_type = NL80211_CHAN_HT20; + printk(KERN_DEBUG + "%s: disabling 40 MHz due to multi-vif mismatch\n", + sdata->name); + ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; + WARN_ON(!ieee80211_set_channel_type(local, sdata, + channel_type)); + } + local->oper_channel = cbss->channel; - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + ieee80211_hw_config(local, 0); if (!have_sta) { - struct ieee80211_supported_band *sband; u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; - sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; - ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, &rates, &basic_rates, -- cgit v1.2.2 From 64f68e5d15bee47e0d6d0c57a1cf52cedd9b3527 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 10:58:37 +0200 Subject: mac80211: remove channel type argument from rate_update The channel type argument to the rate_update() callback isn't really the correct way to give the rate control algorithm about the desired RX bandwidth of the peer. Remove this argument, and instead update the STA capabilities with 20/40 appropriately. The SMPS update done by this callback works in the same way, so this makes the callback cleaner. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/chan.c | 26 ------------------- net/mac80211/ieee80211_i.h | 5 ---- net/mac80211/mlme.c | 51 ++++++++++++++++++++++++++------------ net/mac80211/rate.h | 5 ++-- net/mac80211/rc80211_minstrel_ht.c | 15 +++-------- net/mac80211/rx.c | 7 ++---- net/mac80211/sta_info.h | 2 ++ 7 files changed, 45 insertions(+), 66 deletions(-) (limited to 'net') diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e00ce8c3e28e..c76cf7230c7d 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -135,29 +135,3 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local, return result; } - -/* - * ieee80211_get_tx_channel_type returns the channel type we should - * use for packet transmission, given the channel capability and - * whatever regulatory flags we have been given. 
- */ -enum nl80211_channel_type ieee80211_get_tx_channel_type( - struct ieee80211_local *local, - enum nl80211_channel_type channel_type) -{ - switch (channel_type) { - case NL80211_CHAN_HT40PLUS: - if (local->hw.conf.channel->flags & - IEEE80211_CHAN_NO_HT40PLUS) - return NL80211_CHAN_HT20; - break; - case NL80211_CHAN_HT40MINUS: - if (local->hw.conf.channel->flags & - IEEE80211_CHAN_NO_HT40MINUS) - return NL80211_CHAN_HT20; - break; - default: - break; - } - return channel_type; -} diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a67ba7c85a1e..867b8eec1e9e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -512,8 +512,6 @@ struct ieee80211_if_managed { int rssi_min_thold, rssi_max_thold; int last_ave_beacon_signal; - enum nl80211_channel_type tx_chantype; - struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ }; @@ -1501,9 +1499,6 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local, enum nl80211_channel_type chantype); enum nl80211_channel_type ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper); -enum nl80211_channel_type ieee80211_get_tx_channel_type( - struct ieee80211_local *local, - enum nl80211_channel_type channel_type); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 30259a73195c..9cc5dda68219 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -180,21 +180,38 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; u32 changed = 0; u16 ht_opmode; - enum nl80211_channel_type channel_type; + bool disable_40 = false; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - channel_type = local->hw.conf.channel_type; - if (WARN_ON_ONCE(channel_type == NL80211_CHAN_NO_HT)) - return 0; - - channel_type = ieee80211_get_tx_channel_type(local, channel_type); + switch (sdata->vif.bss_conf.channel_type) { + case NL80211_CHAN_HT40PLUS: + if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS) + disable_40 = true; + break; + case NL80211_CHAN_HT40MINUS: + if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS) + disable_40 = true; + break; + default: + break; + } /* This can change during the lifetime of the BSS */ if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) - channel_type = NL80211_CHAN_HT20; + disable_40 = true; - if (!reconfig || (sdata->u.mgd.tx_chantype != channel_type)) { + mutex_lock(&local->sta_mtx); + sta = sta_info_get(sdata, bssid); + + WARN_ON_ONCE(!sta); + + if (sta && !sta->supports_40mhz) + disable_40 = true; + + if (sta && (!reconfig || + (disable_40 != !!(sta->sta.ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) { if (reconfig) { /* * Whenever the AP announces the HT mode changed @@ -211,20 +228,19 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, drv_flush(local, false); } - rcu_read_lock(); - sta = sta_info_get(sdata, bssid); - if (sta) - rate_control_rate_update(local, sband, sta, - IEEE80211_RC_HT_CHANGED, - channel_type); - rcu_read_unlock(); + if (disable_40) + sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + else + sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - sdata->u.mgd.tx_chantype = channel_type; + rate_control_rate_update(local, sband, sta, + IEEE80211_RC_HT_CHANGED); if (reconfig) ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); } + 
mutex_unlock(&local->sta_mtx); ht_opmode = le16_to_cpu(ht_oper->operation_mode); @@ -2006,6 +2022,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems.ht_cap_elem, &sta->sta.ht_cap); + sta->supports_40mhz = + sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + rate_control_rate_init(sta); if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index fbb1efdc4d04..27b66be8ac8f 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -63,8 +63,7 @@ static inline void rate_control_rate_init(struct sta_info *sta) static inline void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, - struct sta_info *sta, u32 changed, - enum nl80211_channel_type oper_chan_type) + struct sta_info *sta, u32 changed) { struct rate_control_ref *ref = local->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; @@ -72,7 +71,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local, if (ref && ref->ops->rate_update) ref->ops->rate_update(ref->priv, sband, ista, - priv_sta, changed, oper_chan_type); + priv_sta, changed); } static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 16e0b277b9a8..3b3dcae13bbc 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -686,8 +686,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, static void minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - enum nl80211_channel_type oper_chan_type) + struct ieee80211_sta *sta, void *priv_sta) { struct minstrel_priv *mp = priv; struct minstrel_ht_sta_priv *msp = priv_sta; @@ -735,10 +734,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) mi->tx_flags |= IEEE80211_TX_CTL_LDPC; - if (oper_chan_type != NL80211_CHAN_HT40MINUS && - oper_chan_type != NL80211_CHAN_HT40PLUS) - sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; @@ -788,17 +783,15 @@ static void minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { - struct minstrel_priv *mp = priv; - - minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type); + minstrel_ht_update_caps(priv, sband, sta, priv_sta); } static void minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, - u32 changed, enum nl80211_channel_type oper_chan_type) + u32 changed) { - minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); + minstrel_ht_update_caps(priv, sband, sta, priv_sta); } static void * diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8da3b36c287a..54a049123a60 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2268,11 +2268,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) sband = rx->local->hw.wiphy->bands[status->band]; - rate_control_rate_update( - local, sband, rx->sta, - IEEE80211_RC_SMPS_CHANGED, - ieee80211_get_tx_channel_type( - local, local->_oper_channel_type)); + rate_control_rate_update(local, sband, rx->sta, + IEEE80211_RC_SMPS_CHANGED); goto handled; } default: diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 
e21652bccf7c..b1b4b1413c74 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -369,6 +369,8 @@ struct sta_info { unsigned int lost_packets; unsigned int beacon_loss_count; + bool supports_40mhz; + /* keep last! */ struct ieee80211_sta sta; }; -- cgit v1.2.2 From 7213cf2cb0dfbb4d6b55a1da000d34338f76c0e3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 10:58:38 +0200 Subject: mac80211: remove queue stop on rate control update We currently stop the queue when changing the rate control between 20/40 MHz in the BSS. This seems to have been necessary when we actually changed the channel, but now that we just update the station it doesn't seem right any more. Remove it. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/ieee80211_i.h | 1 - net/mac80211/mlme.c | 19 ------------------- 2 files changed, 20 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 867b8eec1e9e..93b075e14d09 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -757,7 +757,6 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_AGGREGATION, IEEE80211_QUEUE_STOP_REASON_SUSPEND, IEEE80211_QUEUE_STOP_REASON_SKB_ADD, - IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE, }; #ifdef CONFIG_MAC80211_LEDS diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9cc5dda68219..594af5f4079c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -212,21 +212,6 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, if (sta && (!reconfig || (disable_40 != !!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) { - if (reconfig) { - /* - * Whenever the AP announces the HT mode changed - * (e.g. 40 MHz intolerant) stop queues to avoid - * sending out frames while the rate control is - * reconfiguring. - */ - ieee80211_stop_queues_by_reason(&sdata->local->hw, - IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); - - /* flush out all packets */ - synchronize_net(); - - drv_flush(local, false); - } if (disable_40) sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; @@ -235,10 +220,6 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, rate_control_rate_update(local, sband, sta, IEEE80211_RC_HT_CHANGED); - - if (reconfig) - ieee80211_wake_queues_by_reason(&sdata->local->hw, - IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); } mutex_unlock(&local->sta_mtx); -- cgit v1.2.2 From 8f727ef3c4859f2c397a7609beb845dcd66729f5 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 30 Mar 2012 08:43:32 +0200 Subject: mac80211: notify driver of rate control updates Devices that have internal rate control need to be notified when the bandwidth or SMPS state changes just like external rate control algorithms get a notification now. Add this notification and clarify the change bits while at it, the HT_CHANGED bit really meant only bandwidth changed. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/driver-ops.h | 15 +++++++++++++++ net/mac80211/driver-trace.h | 28 ++++++++++++++++++++++++++++ net/mac80211/mlme.c | 2 +- net/mac80211/rate.h | 2 ++ 4 files changed, 46 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index e8dbda1b5b8a..0eb2bc003058 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -476,6 +476,21 @@ int drv_sta_state(struct ieee80211_local *local, return ret; } +static inline void drv_sta_rc_update(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, u32 changed) +{ + sdata = get_bss_sdata(sdata); + check_sdata_in_driver(sdata); + + trace_drv_sta_rc_update(local, sdata, sta, changed); + if (local->ops->sta_rc_update) + local->ops->sta_rc_update(&local->hw, &sdata->vif, + sta, changed); + + trace_drv_return_void(local); +} + static inline int drv_conf_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 queue, const struct ieee80211_tx_queue_params *params) diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 21d6f5290a1c..7ea544d86436 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -624,6 +624,34 @@ TRACE_EVENT(drv_sta_state, ) ); +TRACE_EVENT(drv_sta_rc_update, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u32 changed), + + TP_ARGS(local, sdata, sta, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, changed) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->changed = changed; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed + ) +); + TRACE_EVENT(drv_sta_add, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 594af5f4079c..4974f998c7dd 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -219,7 +219,7 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; rate_control_rate_update(local, sband, sta, - IEEE80211_RC_HT_CHANGED); + IEEE80211_RC_BW_CHANGED); } mutex_unlock(&local->sta_mtx); diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 27b66be8ac8f..6e4fd32c6617 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -17,6 +17,7 @@ #include #include "ieee80211_i.h" #include "sta_info.h" +#include "driver-ops.h" struct rate_control_ref { struct ieee80211_local *local; @@ -72,6 +73,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local, if (ref && ref->ops->rate_update) ref->ops->rate_update(ref->priv, sband, ista, priv_sta, changed); + drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); } static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, -- cgit v1.2.2 From a3304b0a17495183a2270d4a25978795226597a4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 11:04:24 +0200 Subject: cfg80211/nl80211: clarify TX queue API With the plan to change mac80211's queue API to not map ACs to queues 1:1, it seems necessary to clarify some APIs that act on ACs rather than on queues to spell that out explicitly. Do this. Also verify that the AC number given is valid. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/cfg.c | 10 +++++----- net/mac80211/driver-ops.h | 6 +++--- net/mac80211/driver-trace.h | 13 ++++++------- net/wireless/nl80211.c | 7 +++++-- 4 files changed, 19 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 677d65929780..ef40db5ab3c7 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1449,14 +1449,14 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, */ p.uapsd = false; - if (params->queue >= local->hw.queues) + if (params->ac >= local->hw.queues) return -EINVAL; - sdata->tx_conf[params->queue] = p; - if (drv_conf_tx(local, sdata, params->queue, &p)) { + sdata->tx_conf[params->ac] = p; + if (drv_conf_tx(local, sdata, params->ac, &p)) { wiphy_debug(local->hw.wiphy, - "failed to set TX queue parameters for queue %d\n", - params->queue); + "failed to set TX queue parameters for AC %d\n", + params->ac); return -EINVAL; } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 0eb2bc003058..8ad40f68f2c3 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -492,7 +492,7 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local, } static inline int drv_conf_tx(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, u16 queue, + struct ieee80211_sub_if_data *sdata, u16 ac, const struct ieee80211_tx_queue_params *params) { int ret = -EOPNOTSUPP; @@ -501,10 +501,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, check_sdata_in_driver(sdata); - trace_drv_conf_tx(local, sdata, queue, params); + trace_drv_conf_tx(local, sdata, ac, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, &sdata->vif, - queue, params); + ac, params); trace_drv_return_int(local, ret); return ret; } diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 7ea544d86436..d1f017a11988 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -705,15 +705,14 @@ TRACE_EVENT(drv_sta_remove, TRACE_EVENT(drv_conf_tx, TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - u16 queue, - const struct ieee80211_tx_queue_params *params), + u16 ac, const struct ieee80211_tx_queue_params *params), - TP_ARGS(local, sdata, queue, params), + TP_ARGS(local, sdata, ac, params), TP_STRUCT__entry( LOCAL_ENTRY VIF_ENTRY - __field(u16, queue) + __field(u16, ac) __field(u16, txop) __field(u16, cw_min) __field(u16, cw_max) @@ -724,7 +723,7 @@ TRACE_EVENT(drv_conf_tx, TP_fast_assign( LOCAL_ASSIGN; VIF_ASSIGN; - __entry->queue = queue; + __entry->ac = ac; __entry->txop = params->txop; __entry->cw_max = params->cw_max; __entry->cw_min = params->cw_min; @@ -733,8 +732,8 @@ TRACE_EVENT(drv_conf_tx, ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT " queue:%d", - LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue + LOCAL_PR_FMT VIF_PR_FMT " AC:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac ) ); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e49da2797022..344697df1177 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1104,17 +1104,20 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { static int parse_txq_params(struct nlattr *tb[], struct ieee80211_txq_params *txq_params) { - if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] || + if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || !tb[NL80211_TXQ_ATTR_AIFS]) return -EINVAL; - txq_params->queue = 
nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]); + txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); + if (txq_params->ac >= NL80211_NUM_ACS) + return -EINVAL; + return 0; } -- cgit v1.2.2 From 54bcbc695e2ca88e1c8f05a93d38a04ac6b1aa0e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 11:04:25 +0200 Subject: mac80211: refuse TX queue configuration on non-QoS HW Drivers that don't support QoS also don't support setting up their ACs, catch that early. While at it, remove the input check since cfg80211 does it now. Also fix up the restart code to not try to set up the queues in this case. Finally also change the tx_conf array to have IEEE80211_NUM_ACS entries instead of # of queues since that's what it really needs. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/cfg.c | 6 +++--- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/util.c | 30 ++++++++++++++++++------------ 3 files changed, 22 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index ef40db5ab3c7..12226b7743c6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1437,6 +1437,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, if (!local->ops->conf_tx) return -EOPNOTSUPP; + if (local->hw.queues < IEEE80211_NUM_ACS) + return -EOPNOTSUPP; + memset(&p, 0, sizeof(p)); p.aifs = params->aifs; p.cw_max = params->cwmax; @@ -1449,9 +1452,6 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, */ p.uapsd = false; - if (params->ac >= local->hw.queues) - return -EINVAL; - sdata->tx_conf[params->ac] = p; if (drv_conf_tx(local, sdata, params->ac, &p)) { wiphy_debug(local->hw.wiphy, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 93b075e14d09..8e7af7cee013 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -687,7 +687,7 @@ struct ieee80211_sub_if_data { __be16 control_port_protocol; bool control_port_no_encrypt; - struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES]; + struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; struct work_struct work; struct sk_buff_head skb_queue; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 1d4b8b7a5a33..2b62307825d4 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -769,19 +769,22 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_queue_params qparam; - int queue; + int ac; bool use_11b; int aCWmin, aCWmax; if (!local->ops->conf_tx) return; + if (local->hw.queues < IEEE80211_NUM_ACS) + return; + memset(&qparam, 0, sizeof(qparam)); use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); - for (queue = 0; queue < local->hw.queues; queue++) { + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { /* Set defaults according to 802.11-2007 Table 7-37 */ aCWmax = 1023; if (use_11b) @@ -789,7 +792,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, else aCWmin = 15; - switch (queue) { + switch (ac) { case IEEE80211_AC_BK: qparam.cw_max = aCWmax; qparam.cw_min = aCWmin; @@ -825,8 +828,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, qparam.uapsd = false; - sdata->tx_conf[queue] = qparam; - 
drv_conf_tx(local, sdata, queue, &qparam); + sdata->tx_conf[ac] = qparam; + drv_conf_tx(local, sdata, ac, &qparam); } /* after reinitialize QoS TX queues setting to default, @@ -1226,14 +1229,17 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->sta_mtx); /* reconfigure tx conf */ - list_for_each_entry(sdata, &local->interfaces, list) { - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MONITOR || - !ieee80211_sdata_running(sdata)) - continue; + if (hw->queues >= IEEE80211_NUM_ACS) { + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MONITOR || + !ieee80211_sdata_running(sdata)) + continue; - for (i = 0; i < hw->queues; i++) - drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]); + for (i = 0; i < IEEE80211_NUM_ACS; i++) + drv_conf_tx(local, sdata, i, + &sdata->tx_conf[i]); + } } /* reconfigure hardware */ -- cgit v1.2.2 From ded81f6ba934e792e441f20178683608cbc0b5cb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 11:04:26 +0200 Subject: mac80211: decouple # of netdev queues from HW queues When we get more hardware queues, we'll still want to only have netdev queues per AC, so set it up in that way. If the hardware doesn't support QoS (by not supporting at least 4 queues) the netdevs get a single queue only (this is no change in behavior as there are no drivers with 2 or 3 queues today.) Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/iface.c | 6 +++++- net/mac80211/wme.c | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 401c01f0731e..efa9409865ac 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1133,11 +1133,15 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, struct net_device *ndev; struct ieee80211_sub_if_data *sdata = NULL; int ret, i; + int txqs = 1; ASSERT_RTNL(); + if (local->hw.queues >= IEEE80211_NUM_ACS) + txqs = IEEE80211_NUM_ACS; + ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, - name, ieee80211_if_setup, local->hw.queues, 1); + name, ieee80211_if_setup, txqs, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index b3d4ee044e7c..16b48395a468 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -108,7 +108,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, if (local->hw.queues < 4 || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ - return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE); + return 0; } rcu_read_lock(); -- cgit v1.2.2 From ada151252655b63409860e0795993cb369e667cc Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 11:04:27 +0200 Subject: mac80211: debounce queue stop/wake When the queue status changes we need to do a fair bit of work, so ignore no-op changes early. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/util.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 2b62307825d4..ef725cabb099 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -276,6 +276,9 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, if (WARN_ON(queue >= hw->queues)) return; + if (!test_bit(reason, &local->queue_stop_reasons[queue])) + return; + __clear_bit(reason, &local->queue_stop_reasons[queue]); if (local->queue_stop_reasons[queue] != 0) @@ -323,6 +326,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, if (WARN_ON(queue >= hw->queues)) return; + if (test_bit(reason, &local->queue_stop_reasons[queue])) + return; + __set_bit(reason, &local->queue_stop_reasons[queue]); rcu_read_lock(); -- cgit v1.2.2 From 4644ae89033872a62b4fea6ca96b958e115efdc0 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 11:04:28 +0200 Subject: mac80211: lazily stop queues in add_pending When adding pending SKBs there's no need to stop all queues, we only need to stop those that we're adding frames to. Implement that by lazily stopping a queue as we add an SKB. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/util.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ef725cabb099..471a831066dd 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -385,10 +385,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, int queue, i; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); - for (i = 0; i < hw->queues; i++) - __ieee80211_stop_queue(hw, i, - IEEE80211_QUEUE_STOP_REASON_SKB_ADD); - while ((skb = skb_dequeue(skbs))) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -398,6 +394,10 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, } queue = skb_get_queue_mapping(skb); + + __ieee80211_stop_queue(hw, queue, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD); + __skb_queue_tail(&local->pending[queue], skb); } -- cgit v1.2.2 From 32c5057b22a60b23353dda93c57e475856ca286c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Mar 2012 11:04:29 +0200 Subject: mac80211: use IEEE80211_NUM_ACS When comparing hw->queues to determine if the device is QoS capable, use IEEE80211_NUM_ACS instead of just 4. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/ibss.c | 2 +- net/mac80211/iface.c | 2 +- net/mac80211/mlme.c | 7 ++++--- net/mac80211/tx.c | 2 +- net/mac80211/wme.c | 4 ++-- 5 files changed, 9 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 547cd7e3018a..e910449dead0 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -167,7 +167,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, chan, channel_type); } - if (local->hw.queues >= 4) { + if (local->hw.queues >= IEEE80211_NUM_ACS) { pos = skb_put(skb, 9); *pos++ = WLAN_EID_VENDOR_SPECIFIC; *pos++ = 7; /* len */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index efa9409865ac..efb433d3dc25 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -676,7 +676,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; - if (local->hw.queues < 4) + if (local->hw.queues < IEEE80211_NUM_ACS) return 0; if (skb->len < 4 || diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 4974f998c7dd..93d484c8a0b8 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1134,7 +1134,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, if (!local->ops->conf_tx) return; - if (local->hw.queues < 4) + if (local->hw.queues < IEEE80211_NUM_ACS) return; if (!wmm_param) @@ -3312,7 +3312,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, /* Also disable HT if we don't support it or the AP doesn't use WMM */ sband = local->hw.wiphy->bands[req->bss->channel->band]; if (!sband->ht_cap.ht_supported || - local->hw.queues < 4 || !bss->wmm_used) + local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) ifmgd->flags |= IEEE80211_STA_DISABLE_11N; memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); @@ -3335,7 +3335,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->ap_smps = ifmgd->req_smps; assoc_data->capability = req->bss->capability; - assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4); + assoc_data->wmm = bss->wmm_used && + (local->hw.queues >= IEEE80211_NUM_ACS); assoc_data->supp_rates = bss->supp_rates; assoc_data->supp_rates_len = bss->supp_rates_len; assoc_data->ht_operation_ie = diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b35d319cea8f..14a01c81f959 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1928,7 +1928,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, wme_sta = true; /* receiver and we are QoS enabled, use a QoS type frame */ - if (wme_sta && local->hw.queues >= 4) { + if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 16b48395a468..c3d643a6536c 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -79,7 +79,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local, { u8 *p; - if (local->hw.queues < 4) + if (local->hw.queues < IEEE80211_NUM_ACS) return 0; if (!ieee80211_is_data(hdr->frame_control)) { @@ -106,7 +106,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, const u8 *ra = NULL; bool qos = false; - if (local->hw.queues < 4 || skb->len < 6) { + if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { skb->priority = 0; /* required for correct WPA/11i MIC */ return 0; } -- cgit v1.2.2 From 88c868c43ba38ac3bab07bab4c45b4bc44c94357 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: 
Thu, 29 Mar 2012 16:30:41 +0200 Subject: mac80211: sanity check for null SSID While associated we should never have empty SSID, but life can be full of surprises, and is allways better to print a warning than crash. Before memcpy() in ieee80211_probereq_get() check ssid_len instead of ssid pointer, sice pointer it always passed by "ssidie + 2" expression to send probe functions, so practically never can be NULL. Signed-off-by: Stanislaw Gruszka Signed-off-by: John W. Linville --- net/mac80211/mlme.c | 19 ++++++++++++++++--- net/mac80211/tx.c | 2 +- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 93d484c8a0b8..12ca9820689a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1518,9 +1518,16 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) ifmgd->nullfunc_failed = false; ieee80211_send_nullfunc(sdata->local, sdata, 0); } else { + int ssid_len; + ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); - ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0, - (u32) -1, true, false); + if (WARN_ON_ONCE(ssid == NULL)) + ssid_len = 0; + else + ssid_len = ssid[1]; + + ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, + 0, (u32) -1, true, false); } ifmgd->probe_send_count++; @@ -1596,6 +1603,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct sk_buff *skb; const u8 *ssid; + int ssid_len; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL; @@ -1606,8 +1614,13 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, return NULL; ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); + if (WARN_ON_ONCE(ssid == NULL)) + ssid_len = 0; + else + ssid_len = ssid[1]; + skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid, - (u32) -1, ssid + 2, ssid[1], + (u32) -1, ssid + 2, ssid_len, NULL, 0, true); return skb; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 14a01c81f959..e0b89780b472 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2602,7 +2602,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, pos = skb_put(skb, ie_ssid_len); *pos++ = WLAN_EID_SSID; *pos++ = ssid_len; - if (ssid) + if (ssid_len) memcpy(pos, ssid, ssid_len); pos += ssid_len; -- cgit v1.2.2 From 9bdd3a6bf8513a0a9eda031d15b36e4677854243 Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Sat, 31 Mar 2012 11:31:31 -0700 Subject: mac80211: Allow tsf increments via debugfs Reading and writing back the tsf value via tsf is too slow if one wants to make small increments to this timer. With this change you can use the syntax "+=" or "-=" to add or substract a value from the tsf counter. Signed-off-by: Javier Cardona Signed-off-by: John W. 
Linville --- net/mac80211/debugfs_netdev.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'net') diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 6ed0455902c6..b0fc205411c1 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -424,6 +424,7 @@ static ssize_t ieee80211_if_parse_tsf( struct ieee80211_local *local = sdata->local; unsigned long long tsf; int ret; + int tsf_is_delta = 0; if (strncmp(buf, "reset", 5) == 0) { if (local->ops->reset_tsf) { @@ -431,9 +432,20 @@ static ssize_t ieee80211_if_parse_tsf( wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); } } else { + if (buflen > 10 && buf[1] == '=') { + if (buf[0] == '+') + tsf_is_delta = 1; + else if (buf[0] == '-') + tsf_is_delta = -1; + else + return -EINVAL; + buf += 2; + } ret = kstrtoull(buf, 10, &tsf); if (ret < 0) return -EINVAL; + if (tsf_is_delta) + tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; if (local->ops->set_tsf) { drv_set_tsf(local, sdata, tsf); wiphy_info(local->hw.wiphy, -- cgit v1.2.2 From dbf498fbafa2c23139d5a990e94ed78bafbbea19 Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Sat, 31 Mar 2012 11:31:32 -0700 Subject: mac80211: Implement mesh synchronization framework This patch adds MBSS extensible synchronization framework (Sec. 13.13.2 of IEEE Std. 802.11-2012). The framework is implemented via an ops table which defines the following functions: rx_bcn_presp() - this is called every time a mesh beacon is received. adjust_tbtt() - this is called immediately before a beacon is about to be transmitted. The default neighbor offset synchronization defined in the standard is implemented. We also provide template functions for vendor specific methods. When neighbor offset synchronization is active (which is the default) mesh neighbors in the same MBSS will track timing offsets to each other and compensate clock drift. In our tests we observed that this mesh synchronization implementation successfully corrected drifts between stations of ~2PPM while introducing a jitter of ~20us. It is also possible to test this framework on mac80211_hwsim simulated phys to see how it behaves under different topologies, over poor links, etc. Signed-off-by: Marco Porsch Signed-off-by: Pavel Zubarev Signed-off-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/Kconfig | 11 ++ net/mac80211/Makefile | 3 +- net/mac80211/debugfs_sta.c | 5 +- net/mac80211/ieee80211_i.h | 23 ++++ net/mac80211/mesh.c | 21 +++- net/mac80211/mesh.h | 19 +++ net/mac80211/mesh_sync.c | 296 +++++++++++++++++++++++++++++++++++++++++++++ net/mac80211/sta_info.h | 5 + net/mac80211/tx.c | 5 + 9 files changed, 380 insertions(+), 8 deletions(-) create mode 100644 net/mac80211/mesh_sync.c (limited to 'net') diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 96ddb72760b9..8d249d705980 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -225,6 +225,17 @@ config MAC80211_VERBOSE_MHWMP_DEBUG Do not select this option. +config MAC80211_VERBOSE_MESH_SYNC_DEBUG + bool "Verbose mesh mesh synchronization debugging" + depends on MAC80211_DEBUG_MENU + depends on MAC80211_MESH + ---help--- + Selecting this option causes mac80211 to print out very verbose mesh + synchronization debugging messages (when mac80211 is taking part in a + mesh network). + + Do not select this option. 
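To make the ops table from the commit message concrete: a synchronization method is just an ID paired with the rx_bcn_presp() and adjust_tbtt() callbacks, and ieee80211_start_mesh() resolves the active mesh_sp_id via ieee80211_mesh_sync_ops_get() (see the mesh.c hunk below). The lookup can be laid out roughly as in this sketch; the adjust_tbtt handler name and the vendor comment are placeholders rather than code from this patch:

static struct sync_method sync_methods[] = {
        {
                .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
                .ops = {
                        .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
                        /* placeholder name for this method's TBTT handler */
                        .adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
                }
        },
        /* vendor-specific methods would add further entries here */
};

struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sync_methods); i++)
                if (sync_methods[i].method == method)
                        return &sync_methods[i].ops;

        return NULL;
}
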
+ config MAC80211_VERBOSE_TDLS_DEBUG bool "Verbose TDLS debugging" depends on MAC80211_DEBUG_MENU diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 1be7a454aa77..3e9d931bba35 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -38,7 +38,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \ mesh.o \ mesh_pathtbl.o \ mesh_plink.o \ - mesh_hwmp.o + mesh_hwmp.o \ + mesh_sync.o mac80211-$(CONFIG_PM) += pm.o diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 6d45804d09bc..ceeefd424103 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -63,7 +63,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" int res = scnprintf(buf, sizeof(buf), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), TEST(SHORT_PREAMBLE), @@ -71,7 +71,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), - TEST(INSERTED), TEST(RATE_CONTROL)); + TEST(INSERTED), TEST(RATE_CONTROL), + TEST(TOFFSET_KNOWN)); #undef TEST return simple_read_from_buffer(userbuf, count, ppos, buf, res); } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8e7af7cee013..ea9623cbd969 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -554,6 +554,24 @@ struct ieee80211_if_ibss { } state; }; +/** + * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface + * + * these declarations define the interface, which enables + * vendor-specific mesh synchronization + * + */ +struct ieee802_11_elems; +struct ieee80211_mesh_sync_ops { + void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status); + void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata); + /* add other framework functions here */ +}; + struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; @@ -602,6 +620,11 @@ struct ieee80211_if_mesh { IEEE80211_MESH_SEC_AUTHED = 0x1, IEEE80211_MESH_SEC_SECURED = 0x2, } security; + /* Extensible Synchronization Framework */ + struct ieee80211_mesh_sync_ops *sync_ops; + s64 sync_offset_clockdrift_max; + spinlock_t sync_offset_lock; + bool adjusting_tbtt; }; #ifdef CONFIG_MAC80211_MESH diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index b05fa9ef866c..386dbca1eab3 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -13,9 +13,6 @@ #include "ieee80211_i.h" #include "mesh.h" -#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 -#define MESHCONF_CAPAB_FORWARDING 0x08 - #define TMR_RUNNING_HK 0 #define TMR_RUNNING_MP 1 #define TMR_RUNNING_MPR 2 @@ -251,8 +248,10 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) /* Mesh capability */ ifmsh->accepting_plinks = mesh_plink_availables(sdata); *pos = MESHCONF_CAPAB_FORWARDING; - *pos++ |= ifmsh->accepting_plinks ? + *pos |= ifmsh->accepting_plinks ? MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; + *pos++ |= ifmsh->adjusting_tbtt ? 
+ MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; *pos++ = 0x00; return 0; @@ -573,8 +572,11 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ieee80211_configure_filter(local); ifmsh->mesh_cc_id = 0; /* Disabled */ - ifmsh->mesh_sp_id = 0; /* Neighbor Offset */ ifmsh->mesh_auth_id = 0; /* Disabled */ + /* register sync ops from extensible synchronization framework */ + ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); + ifmsh->adjusting_tbtt = false; + ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); ieee80211_queue_work(&local->hw, &sdata->work); @@ -616,6 +618,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status) { struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems elems; struct ieee80211_channel *channel; u32 supp_rates = 0; @@ -654,6 +657,10 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, supp_rates = ieee80211_sta_get_rates(local, &elems, band); mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); } + + if (ifmsh->sync_ops) + ifmsh->sync_ops->rx_bcn_presp(sdata, + stype, mgmt, &elems, rx_status); } static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, @@ -721,6 +728,9 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) ieee80211_mesh_rootpath(sdata); + + if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) + mesh_sync_adjust_tbtt(sdata); } void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) @@ -761,4 +771,5 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) (unsigned long) sdata); INIT_LIST_HEAD(&ifmsh->preq_queue.list); spin_lock_init(&ifmsh->mesh_preq_queue_lock); + spin_lock_init(&ifmsh->sync_offset_lock); } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 3e52439ed112..fa7d9704c175 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -18,6 +18,20 @@ /* Data structures */ +/** + * enum mesh_config_capab_flags - mesh config IE capability flags + * + * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish + * additional mesh peerings with other mesh STAs + * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs + * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing + */ +enum mesh_config_capab_flags { + MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0), + MESHCONF_CAPAB_FORWARDING = BIT(3), + MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5), +}; + /** * enum mesh_path_flags - mac80211 mesh path flags * @@ -56,12 +70,15 @@ enum mesh_path_flags { * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to * grow * @MESH_WORK_ROOT: the mesh root station needs to send a frame + * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other + * mesh nodes */ enum mesh_deferred_task_flags { MESH_WORK_HOUSEKEEPING, MESH_WORK_GROW_MPATH_TABLE, MESH_WORK_GROW_MPP_TABLE, MESH_WORK_ROOT, + MESH_WORK_DRIFT_ADJUST, }; /** @@ -234,6 +251,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); +struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); /* Mesh paths */ int mesh_nexthop_lookup(struct sk_buff 
*skb, @@ -327,6 +345,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); void mesh_plink_quiesce(struct sta_info *sta); void mesh_plink_restart(struct sta_info *sta); +void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); #else #define mesh_allocated 0 static inline void diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c new file mode 100644 index 000000000000..f78b0139856f --- /dev/null +++ b/net/mac80211/mesh_sync.c @@ -0,0 +1,296 @@ +/* + * Copyright 2011-2012, Pavel Zubarev + * Copyright 2011-2012, Marco Porsch + * Copyright 2011-2012, cozybit Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "ieee80211_i.h" +#include "mesh.h" +#include "driver-ops.h" + +#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG +#define msync_dbg(fmt, args...) \ + printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args) +#else +#define msync_dbg(fmt, args...) do { (void)(0); } while (0) +#endif + +/* This is not in the standard. It represents a tolerable tbtt drift below + * which we do no TSF adjustment. + */ +#define TBTT_MINIMUM_ADJUSTMENT 10 + +struct sync_method { + u8 method; + struct ieee80211_mesh_sync_ops ops; +}; + +/** + * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT + * + * @ie: information elements of a management frame from the mesh peer + */ +static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) +{ + return (ie->mesh_config->meshconf_cap & + MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; +} + +void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + /* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */ + u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500; + u64 tsf; + u64 tsfdelta; + + spin_lock_bh(&ifmsh->sync_offset_lock); + + if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) { + msync_dbg("TBTT : max clockdrift=%lld; adjusting", + (long long) ifmsh->sync_offset_clockdrift_max); + tsfdelta = -ifmsh->sync_offset_clockdrift_max; + ifmsh->sync_offset_clockdrift_max = 0; + } else { + msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu", + (long long) ifmsh->sync_offset_clockdrift_max, + (unsigned long long) beacon_int_fraction); + tsfdelta = -beacon_int_fraction; + ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction; + } + + tsf = drv_get_tsf(local, sdata); + if (tsf != -1ULL) + drv_set_tsf(local, sdata, tsf + tsfdelta); + spin_unlock_bh(&ifmsh->sync_offset_lock); +} + +static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u64 t_t, t_r; + + WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); + + /* standard mentions only beacons */ + if (stype != IEEE80211_STYPE_BEACON) + return; + + /* The current tsf is a first approximation for the timestamp + * for the received beacon. Further down we try to get a + * better value from the rx_status->mactime field if + * available. 
Also we have to call drv_get_tsf() before + * entering the rcu-read section.*/ + t_r = drv_get_tsf(local, sdata); + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) + goto no_sync; + + /* check offset sync conditions (13.13.2.2.1) + * + * TODO also sync to + * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors + */ + + if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) { + clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr); + goto no_sync; + } + + if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) { + /* + * The mactime is defined as the time the first data symbol + * of the frame hits the PHY, and the timestamp of the beacon + * is defined as "the time that the data symbol containing the + * first bit of the timestamp is transmitted to the PHY plus + * the transmitting STA's delays through its local PHY from the + * MAC-PHY interface to its interface with the WM" (802.11 + * 11.1.2) + * + * T_r, in 13.13.2.2.2, is just defined as "the frame reception + * time" but we unless we interpret that time to be the same + * time of the beacon timestamp, the offset calculation will be + * off. Below we adjust t_r to be "the time at which the first + * symbol of the timestamp element in the beacon is received". + * This correction depends on the rate. + * + * Based on similar code in ibss.c + */ + int rate; + + if (rx_status->flag & RX_FLAG_HT) { + /* TODO: + * In principle there could be HT-beacons (Dual Beacon + * HT Operation options), but for now ignore them and + * just use the primary (i.e. non-HT) beacons for + * synchronization. + * */ + goto no_sync; + } else + rate = local->hw.wiphy->bands[rx_status->band]-> + bitrates[rx_status->rate_idx].bitrate; + + /* 24 bytes of header * 8 bits/byte * + * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/ + t_r = rx_status->mactime + (24 * 8 * 10 / rate); + } + + /* Timing offset calculation (see 13.13.2.2.2) */ + t_t = le64_to_cpu(mgmt->u.beacon.timestamp); + sta->t_offset = t_t - t_r; + + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + s64 t_clockdrift = sta->t_offset_setpoint + - sta->t_offset; + + msync_dbg("STA %pM : sta->t_offset=%lld," + " sta->t_offset_setpoint=%lld," + " t_clockdrift=%lld", + sta->sta.addr, + (long long) sta->t_offset, + (long long) + sta->t_offset_setpoint, + (long long) t_clockdrift); + rcu_read_unlock(); + + spin_lock_bh(&ifmsh->sync_offset_lock); + if (t_clockdrift > + ifmsh->sync_offset_clockdrift_max) + ifmsh->sync_offset_clockdrift_max + = t_clockdrift; + spin_unlock_bh(&ifmsh->sync_offset_lock); + + } else { + sta->t_offset_setpoint = sta->t_offset; + set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + msync_dbg("STA %pM : offset was invalid, " + " sta->t_offset=%lld", + sta->sta.addr, + (long long) sta->t_offset); + rcu_read_unlock(); + } + return; + +no_sync: + rcu_read_unlock(); +} + +static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + WARN_ON(ifmsh->mesh_sp_id + != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET); + BUG_ON(!rcu_read_lock_held()); + + spin_lock_bh(&ifmsh->sync_offset_lock); + + if (ifmsh->sync_offset_clockdrift_max > + TBTT_MINIMUM_ADJUSTMENT) { + /* Since ajusting the tsf here would + * require a possibly blocking call + * to the driver tsf setter, we punt + * the tsf adjustment to the mesh tasklet + */ + msync_dbg("TBTT : kicking off TBTT " + "adjustment with " + "clockdrift_max=%lld", + ifmsh->sync_offset_clockdrift_max); + 
set_bit(MESH_WORK_DRIFT_ADJUST, + &ifmsh->wrkq_flags); + } else { + msync_dbg("TBTT : max clockdrift=%lld; " + "too small to adjust", + (long long) + ifmsh->sync_offset_clockdrift_max); + ifmsh->sync_offset_clockdrift_max = 0; + } + spin_unlock_bh(&ifmsh->sync_offset_lock); +} + +static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u8 offset; + + if (!ifmsh->ie || !ifmsh->ie_len) + return NULL; + + offset = ieee80211_ie_split_vendor(ifmsh->ie, + ifmsh->ie_len, 0); + + if (!offset) + return NULL; + + return ifmsh->ie + offset + 2; +} + +static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + struct ieee802_11_elems *elems, + struct ieee80211_rx_status *rx_status) +{ + const u8 *oui; + + WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); + msync_dbg("called mesh_sync_vendor_rx_bcn_presp"); + oui = mesh_get_vendor_oui(sdata); + /* here you would implement the vendor offset tracking for this oui */ +} + +static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata) +{ + const u8 *oui; + + WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); + msync_dbg("called mesh_sync_vendor_adjust_tbtt"); + oui = mesh_get_vendor_oui(sdata); + /* here you would implement the vendor tsf adjustment for this oui */ +} + +/* global variable */ +static struct sync_method sync_methods[] = { + { + .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, + .ops = { + .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp, + .adjust_tbtt = &mesh_sync_offset_adjust_tbtt, + } + }, + { + .method = IEEE80211_SYNC_METHOD_VENDOR, + .ops = { + .rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp, + .adjust_tbtt = &mesh_sync_vendor_adjust_tbtt, + } + }, +}; + +struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) +{ + struct ieee80211_mesh_sync_ops *ops = NULL; + u8 i; + + for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { + if (sync_methods[i].method == method) { + ops = &sync_methods[i].ops; + break; + } + } + return ops; +} diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index b1b4b1413c74..f75f5d9ac06d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -55,6 +55,7 @@ * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. * @WLAN_STA_INSERTED: This station is inserted into the hash table. * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. + * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid. 
*/ enum ieee80211_sta_info_flags { WLAN_STA_AUTH, @@ -76,6 +77,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_4ADDR_EVENT, WLAN_STA_INSERTED, WLAN_STA_RATE_CONTROL, + WLAN_STA_TOFFSET_KNOWN, }; #define STA_TID_NUM 16 @@ -268,6 +270,7 @@ struct sta_ampdu_mlme { * @plink_timeout: timeout of peer link * @plink_timer: peer link watch timer * @plink_timer_was_running: used by suspend/resume to restore timers + * @t_offset: timing offset relative to this host * @debugfs: debug filesystem info * @dead: set to true when sta is unlinked * @uploaded: set to true when sta is uploaded to the driver @@ -357,6 +360,8 @@ struct sta_info { enum nl80211_plink_state plink_state; u32 plink_timeout; struct timer_list plink_timer; + s64 t_offset; + s64 t_offset_setpoint; #endif #ifdef CONFIG_MAC80211_DEBUGFS diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e0b89780b472..daab5adeb93c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2373,6 +2373,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, IEEE80211_STYPE_BEACON); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_mgmt *mgmt; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u8 *pos; int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + sizeof(mgmt->u.beacon); @@ -2382,6 +2383,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, goto out; #endif + if (ifmsh->sync_ops) + ifmsh->sync_ops->adjust_tbtt( + sdata); + skb = dev_alloc_skb(local->tx_headroom + hdr_len + 2 + /* NULL SSID */ -- cgit v1.2.2 From d299a1f21ea7ffd5114d099b2f92c867c495e8b3 Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Sat, 31 Mar 2012 11:31:33 -0700 Subject: {nl,cfg}80211: Support for mesh synchronization Report Toffset to userspace. Let userspace select the mesh synchronization method. Signed-off-by: Marco Porsch Signed-off-by: Pavel Zubarev Signed-off-by: Javier Cardona Signed-off-by: John W. 
Linville --- net/mac80211/cfg.c | 8 ++++++++ net/wireless/mesh.c | 3 +++ net/wireless/nl80211.c | 16 ++++++++++++++++ 3 files changed, 27 insertions(+) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 12226b7743c6..83e08dcb2f5d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -412,6 +412,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->llid = le16_to_cpu(sta->llid); sinfo->plid = le16_to_cpu(sta->plid); sinfo->plink_state = sta->plink_state; + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + sinfo->filled |= STATION_INFO_T_OFFSET; + sinfo->t_offset = sta->t_offset; + } #endif } @@ -1235,6 +1239,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); + ifmsh->mesh_sp_id = setup->sync_method; ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->security = IEEE80211_MESH_SEC_NONE; @@ -1279,6 +1284,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, conf->dot11MeshTTL = nconf->element_ttl; if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) conf->auto_open_plinks = nconf->auto_open_plinks; + if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) + conf->dot11MeshNbrOffsetMaxNeighbor = + nconf->dot11MeshNbrOffsetMaxNeighbor; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) conf->dot11MeshHWMPmaxPREQretries = nconf->dot11MeshHWMPmaxPREQretries; diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index ba21ab22187b..8c747fa9319b 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -38,6 +38,7 @@ #define MESH_MAX_PREQ_RETRIES 4 +#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50 const struct mesh_config default_mesh_config = { .dot11MeshRetryTimeout = MESH_RET_T, @@ -48,6 +49,7 @@ const struct mesh_config default_mesh_config = { .element_ttl = MESH_DEFAULT_ELEMENT_TTL, .auto_open_plinks = true, .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, + .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX, .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, @@ -62,6 +64,7 @@ const struct mesh_config default_mesh_config = { }; const struct mesh_setup default_mesh_setup = { + .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, .path_metric = IEEE80211_PATH_METRIC_AIRTIME, .ie = NULL, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 344697df1177..b12a05243d71 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2490,6 +2490,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS, sizeof(struct nl80211_sta_flag_update), &sinfo->sta_flags); + if (sinfo->filled & STATION_INFO_T_OFFSET) + NLA_PUT_U64(msg, NL80211_STA_INFO_T_OFFSET, + sinfo->t_offset); nla_nest_end(msg, sinfoattr); if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES) @@ -3288,6 +3291,8 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, cur_params.element_ttl); NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, cur_params.auto_open_plinks); + NLA_PUT_U32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, + cur_params.dot11MeshNbrOffsetMaxNeighbor); NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, cur_params.dot11MeshHWMPmaxPREQretries); NLA_PUT_U32(msg, 
NL80211_MESHCONF_PATH_REFRESH_TIME, @@ -3332,6 +3337,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_TTL] = { .type = NLA_U8 }, [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 }, [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, + [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 }, [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, @@ -3349,6 +3355,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A static const struct nla_policy nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { + [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, @@ -3401,6 +3408,9 @@ do {\ mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, + mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, + nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, nla_get_u8); @@ -3458,6 +3468,12 @@ static int nl80211_parse_mesh_setup(struct genl_info *info, nl80211_mesh_setup_params_policy)) return -EINVAL; + if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC]) + setup->sync_method = + (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ? + IEEE80211_SYNC_METHOD_VENDOR : + IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET; + if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) setup->path_sel_proto = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? -- cgit v1.2.2 From 10e3cd6a251a2a24e5461e5ad242ea8708ff1866 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sat, 31 Mar 2012 16:10:33 +0300 Subject: batman-adv: clean up Kconfig Signed-off-by: Antonio Quartulli --- net/batman-adv/Kconfig | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 2b68d068eaf3..a04d28f392e6 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -7,19 +7,18 @@ config BATMAN_ADV depends on NET select CRC16 default n - ---help--- + help + B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is + a routing protocol for multi-hop ad-hoc mesh networks. The + networks may be wired or wireless. See + http://www.open-mesh.org/ for more information and user space + tools. - B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is - a routing protocol for multi-hop ad-hoc mesh networks. The - networks may be wired or wireless. See - http://www.open-mesh.org/ for more information and user space - tools. config BATMAN_ADV_DEBUG bool "B.A.T.M.A.N. debugging" - depends on BATMAN_ADV != n - ---help--- - + depends on BATMAN_ADV + help This is an option for use by developers; most people should say N here. This enables compilation of support for outputting debugging information to the kernel log. The -- cgit v1.2.2 From c1faead3335608d9c9bb58e07bbf9663d53d4b26 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Mon, 30 Jan 2012 20:59:17 +0100 Subject: batman-adv: use ETH_ALEN instead of hardcoded numeric constants In packet.h the numeric constant 6 is used instead of the more portable ETH_ALEN define. 
This patch substitute any hardcoded value with such define. Signed-off-by: Antonio Quartulli Acked-by: Sven Eckelmann --- net/batman-adv/packet.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 441f3db1bd91..45e75ed6637d 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -100,8 +100,8 @@ struct batman_ogm_packet { struct batman_header header; uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ uint32_t seqno; - uint8_t orig[6]; - uint8_t prev_sender[6]; + uint8_t orig[ETH_ALEN]; + uint8_t prev_sender[ETH_ALEN]; uint8_t gw_flags; /* flags related to gateway class */ uint8_t tq; uint8_t tt_num_changes; @@ -114,8 +114,8 @@ struct batman_ogm_packet { struct icmp_packet { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ - uint8_t dst[6]; - uint8_t orig[6]; + uint8_t dst[ETH_ALEN]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; uint8_t uid; uint8_t reserved; @@ -128,8 +128,8 @@ struct icmp_packet { struct icmp_packet_rr { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ - uint8_t dst[6]; - uint8_t orig[6]; + uint8_t dst[ETH_ALEN]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; uint8_t uid; uint8_t rr_cur; @@ -139,16 +139,16 @@ struct icmp_packet_rr { struct unicast_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ - uint8_t dest[6]; + uint8_t dest[ETH_ALEN]; } __packed; struct unicast_frag_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ - uint8_t dest[6]; + uint8_t dest[ETH_ALEN]; uint8_t flags; uint8_t align; - uint8_t orig[6]; + uint8_t orig[ETH_ALEN]; uint16_t seqno; } __packed; @@ -156,7 +156,7 @@ struct bcast_packet { struct batman_header header; uint8_t reserved; uint32_t seqno; - uint8_t orig[6]; + uint8_t orig[ETH_ALEN]; } __packed; struct vis_packet { @@ -165,9 +165,9 @@ struct vis_packet { uint32_t seqno; /* sequence number */ uint8_t entries; /* number of entries behind this struct */ uint8_t reserved; - uint8_t vis_orig[6]; /* originator that announces its neighbors */ - uint8_t target_orig[6]; /* who should receive this packet */ - uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ + uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ + uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */ + uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ } __packed; struct tt_query_packet { -- cgit v1.2.2 From 0079d2cef1514422668c7beedd61bfde5aa2c146 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sat, 4 Feb 2012 17:34:52 +0100 Subject: batman-adv: Replace bitarray operations with bitmap bitarray.c consists mostly of functionality that is already available as part of the standard kernel API. batman-adv could use architecture optimized code and reduce the binary size by switching to the standard functions. 
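A minimal self-contained sketch of the generic <linux/bitmap.h> helpers this commit switches to, shown on a stand-alone sequence window. EXAMPLE_WINDOW_SIZE and the example_* names are illustrative; batman-adv itself uses TQ_LOCAL_WINDOW_SIZE together with the bat_set_bit()/bat_test_bit() wrappers introduced in the diff below.

/* illustrative only -- not part of the patch */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_WINDOW_SIZE 64

static DECLARE_BITMAP(example_seen, EXAMPLE_WINDOW_SIZE);

/* mark one sequence-number offset as received (replaces bit_mark()) */
static void example_mark(int32_t offset)
{
	if (offset < 0 || offset >= EXAMPLE_WINDOW_SIZE)
		return;
	set_bit(offset, example_seen);
}

/* slide the window forward by n sequence numbers (replaces bit_shift()) */
static void example_slide(int32_t n)
{
	bitmap_shift_left(example_seen, example_seen, n, EXAMPLE_WINDOW_SIZE);
}

/* count received packets in the window (replaces bit_packet_count()) */
static int example_count(void)
{
	return bitmap_weight(example_seen, EXAMPLE_WINDOW_SIZE);
}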
Signed-off-by: Sven Eckelmann Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 15 +++--- net/batman-adv/bitarray.c | 118 ++++---------------------------------------- net/batman-adv/bitarray.h | 26 +++++++--- net/batman-adv/main.h | 2 +- net/batman-adv/routing.c | 6 +-- net/batman-adv/types.h | 4 +- 6 files changed, 41 insertions(+), 130 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a6d5d63fb6ad..fab1071f601e 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -850,9 +850,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, hlist_for_each_entry_rcu(tmp_neigh_node, node, &orig_node->neigh_list, list) { - is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, - orig_node->last_real_seqno, - batman_ogm_packet->seqno); + is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_ogm_packet->seqno); if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && (tmp_neigh_node->if_incoming == if_incoming)) @@ -866,7 +866,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, seq_diff, set_mark); tmp_neigh_node->real_packet_count = - bit_packet_count(tmp_neigh_node->real_bits); + bitmap_weight(tmp_neigh_node->real_bits, + TQ_LOCAL_WINDOW_SIZE); } rcu_read_unlock(); @@ -998,11 +999,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); word = &(orig_neigh_node->bcast_own[offset]); - bit_mark(word, - if_incoming_seqno - + bat_set_bit(word, + if_incoming_seqno - batman_ogm_packet->seqno - 2); orig_neigh_node->bcast_own_sum[if_incoming->if_num] = - bit_packet_count(word); + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); } diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 6d0aa216b232..07ae6e1b8aca 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -24,100 +24,13 @@ #include -/* returns true if the corresponding bit in the given seq_bits indicates true - * and curr_seqno is within range of last_seqno */ -int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, - uint32_t curr_seqno) -{ - int32_t diff, word_offset, word_num; - - diff = last_seqno - curr_seqno; - if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) { - return 0; - } else { - /* which word */ - word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE; - /* which position in the selected word */ - word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE; - - if (test_bit(word_offset, &seq_bits[word_num])) - return 1; - else - return 0; - } -} - -/* turn corresponding bit on, so we can remember that we got the packet */ -void bit_mark(unsigned long *seq_bits, int32_t n) -{ - int32_t word_offset, word_num; - - /* if too old, just drop it */ - if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) - return; - - /* which word */ - word_num = n / WORD_BIT_SIZE; - /* which position in the selected word */ - word_offset = n % WORD_BIT_SIZE; - - set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */ -} - /* shift the packet array by n places. 
*/ -static void bit_shift(unsigned long *seq_bits, int32_t n) +static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n) { - int32_t word_offset, word_num; - int32_t i; - if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) return; - word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */ - word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */ - - for (i = NUM_WORDS - 1; i > word_num; i--) { - /* going from old to new, so we don't overwrite the data we copy - * from. - * - * left is high, right is low: FEDC BA98 7654 3210 - * ^^ ^^ - * vvvv - * ^^^^ = from, vvvvv =to, we'd have word_num==1 and - * word_offset==WORD_BIT_SIZE/2 ????? in this example. - * (=24 bits) - * - * our desired output would be: 9876 5432 1000 0000 - * */ - - seq_bits[i] = - (seq_bits[i - word_num] << word_offset) + - /* take the lower port from the left half, shift it left - * to its final position */ - (seq_bits[i - word_num - 1] >> - (WORD_BIT_SIZE-word_offset)); - /* and the upper part of the right half and shift it left to - * its position */ - /* for our example that would be: word[0] = 9800 + 0076 = - * 9876 */ - } - /* now for our last word, i==word_num, we only have its "left" half. - * that's the 1000 word in our example.*/ - - seq_bits[i] = (seq_bits[i - word_num] << word_offset); - - /* pad the rest with 0, if there is anything */ - i--; - - for (; i >= 0; i--) - seq_bits[i] = 0; -} - -static void bit_reset_window(unsigned long *seq_bits) -{ - int i; - for (i = 0; i < NUM_WORDS; i++) - seq_bits[i] = 0; + bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE); } @@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { if (set_mark) - bit_mark(seq_bits, -seq_num_diff); + bat_set_bit(seq_bits, -seq_num_diff); return 0; } @@ -145,10 +58,10 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, * set the mark if required */ if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { - bit_shift(seq_bits, seq_num_diff); + bat_bitmap_shift_left(seq_bits, seq_num_diff); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -159,9 +72,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, bat_dbg(DBG_BATMAN, bat_priv, "We missed a lot of packets (%i) !\n", seq_num_diff - 1); - bit_reset_window(seq_bits); + bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -176,9 +89,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, bat_dbg(DBG_BATMAN, bat_priv, "Other host probably restarted!\n"); - bit_reset_window(seq_bits); + bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE); if (set_mark) - bit_mark(seq_bits, 0); + bat_set_bit(seq_bits, 0); return 1; } @@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, /* never reached */ return 0; } - -/* count the hamming weight, how many good packets did we receive? just count - * the 1's. 
- */ -int bit_packet_count(const unsigned long *seq_bits) -{ - int i, hamming = 0; - - for (i = 0; i < NUM_WORDS; i++) - hamming += hweight_long(seq_bits[i]); - - return hamming; -} diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index c6135728a680..1835c15cda41 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -22,23 +22,33 @@ #ifndef _NET_BATMAN_ADV_BITARRAY_H_ #define _NET_BATMAN_ADV_BITARRAY_H_ -#define WORD_BIT_SIZE (sizeof(unsigned long) * 8) - /* returns true if the corresponding bit in the given seq_bits indicates true * and curr_seqno is within range of last_seqno */ -int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, - uint32_t curr_seqno); +static inline int bat_test_bit(const unsigned long *seq_bits, + uint32_t last_seqno, uint32_t curr_seqno) +{ + int32_t diff; + + diff = last_seqno - curr_seqno; + if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) + return 0; + else + return test_bit(diff, seq_bits); +} /* turn corresponding bit on, so we can remember that we got the packet */ -void bit_mark(unsigned long *seq_bits, int32_t n); +static inline void bat_set_bit(unsigned long *seq_bits, int32_t n) +{ + /* if too old, just drop it */ + if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE) + return; + set_bit(n, seq_bits); /* turn the position on */ +} /* receive and process one packet, returns 1 if received seq_num is considered * new, 0 if old */ int bit_get_packet(void *priv, unsigned long *seq_bits, int32_t seq_num_diff, int set_mark); -/* count the hamming weight, how many good packets did we receive? */ -int bit_packet_count(const unsigned long *seq_bits); - #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 94fa1c2393a6..0a20a19197aa 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -65,7 +65,7 @@ #define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ -#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) +#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE) #define LOG_BUF_LEN 8192 /* has to be a power of 2 */ diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 7f8e15899417..b0370e3c59e8 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -52,7 +52,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) bit_get_packet(bat_priv, word, 1, 0); orig_node->bcast_own_sum[hard_iface->if_num] = - bit_packet_count(word); + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->ogm_cnt_lock); } rcu_read_unlock(); @@ -1047,8 +1047,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_lock_bh(&orig_node->bcast_seqno_lock); /* check whether the packet is a duplicate */ - if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, - ntohl(bcast_packet->seqno))) + if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno, + ntohl(bcast_packet->seqno))) goto spin_unlock; seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 302efb523475..24c8a31a3d91 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -90,7 +90,7 @@ struct orig_node { bool tt_poss_change; uint32_t last_real_seqno; uint8_t last_ttl; - unsigned long bcast_bits[NUM_WORDS]; + DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE); uint32_t last_bcast_seqno; struct hlist_head neigh_list; struct list_head frag_list; @@ -132,7 +132,7 @@ struct neigh_node { uint8_t last_ttl; struct list_head 
bonding_list; unsigned long last_valid; - unsigned long real_bits[NUM_WORDS]; + DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE); atomic_t refcount; struct rcu_head rcu; struct orig_node *orig_node; -- cgit v1.2.2 From de7aae6570412a22fd8854d62dc73c8afe508253 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 5 Feb 2012 18:55:22 +0100 Subject: batman-adv: Remove declaration of only locally used functions Signed-off-by: Sven Eckelmann Acked-by: Antonio Quartulli Signed-off-by: Antonio Quartulli --- net/batman-adv/translation-table.c | 22 ++++++++++++++-------- net/batman-adv/translation-table.h | 8 -------- 2 files changed, 14 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 1f8692127840..5f8c540b9f17 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -30,6 +30,8 @@ #include +static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node); static void _tt_global_del(struct bat_priv *bat_priv, struct tt_global_entry *tt_global_entry, const char *message); @@ -645,9 +647,10 @@ out: tt_global_entry_free_ref(tt_global_entry); } -void tt_global_del(struct bat_priv *bat_priv, - struct orig_node *orig_node, const unsigned char *addr, - const char *message, bool roaming) +static void tt_global_del(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *addr, + const char *message, bool roaming) { struct tt_global_entry *tt_global_entry = NULL; struct tt_local_entry *tt_local_entry = NULL; @@ -848,7 +851,8 @@ out: } /* Calculates the checksum of the local table of a given orig_node */ -uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) +static uint16_t tt_global_crc(struct bat_priv *bat_priv, + struct orig_node *orig_node) { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_global_hash; @@ -936,8 +940,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv) spin_unlock_bh(&bat_priv->tt_req_list_lock); } -void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes) +static void tt_save_orig_buffer(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const unsigned char *tt_buff, + uint8_t tt_num_changes) { uint16_t tt_buff_len = tt_len(tt_num_changes); @@ -1627,8 +1633,8 @@ unlock: return ret; } -void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, - struct orig_node *orig_node) +static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node) { struct neigh_node *neigh_node = NULL; struct sk_buff *skb = NULL; diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index c753633b1da1..bfebe26edd8e 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -39,23 +39,15 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, int tt_global_seq_print_text(struct seq_file *seq, void *offset); void tt_global_del_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const char *message); -void tt_global_del(struct bat_priv *bat_priv, - struct orig_node *orig_node, const unsigned char *addr, - const char *message, bool roaming); struct orig_node *transtable_search(struct bat_priv *bat_priv, const uint8_t *src, const uint8_t *addr); -void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_buff, 
uint8_t tt_num_changes); uint16_t tt_local_crc(struct bat_priv *bat_priv); -uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); void tt_free(struct bat_priv *bat_priv); bool send_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_request); bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); void handle_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_response); -void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, - struct orig_node *orig_node); void tt_commit_changes(struct bat_priv *bat_priv); bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, -- cgit v1.2.2 From 8681a1c4dd258c573e80b4a7af7e7127770b67a8 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Fri, 27 Jan 2012 23:11:55 +0800 Subject: batman-adv: encourage batman to take shorter routes by changing the default hop penalty Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/soft-interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index a5590f4193f1..82c097d6ec93 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -834,7 +834,7 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->gw_sel_class, 20); atomic_set(&bat_priv->gw_bandwidth, 41); atomic_set(&bat_priv->orig_interval, 1000); - atomic_set(&bat_priv->hop_penalty, 10); + atomic_set(&bat_priv->hop_penalty, 30); atomic_set(&bat_priv->log_level, 0); atomic_set(&bat_priv->fragmentation, 1); atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); -- cgit v1.2.2 From a7f6ee9493677ba40625d810258de5bd521cc1b0 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:18 +0100 Subject: batman-adv: remove old bridge loop avoidance code The functionality is to be replaced by an improved implementation, so first clean up. 
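A hedged illustration of the hop-penalty default change a little further up (raised from 10 to 30). It assumes the usual batman-adv TQ scaling of tq * (TQ_MAX_VALUE - hop_penalty) / TQ_MAX_VALUE with TQ_MAX_VALUE == 255; that helper is not shown in the hunk, so treat the formula as an assumption. The point is that a larger penalty makes multi-hop paths decay faster, so shorter routes win the TQ comparison more often.

/* illustrative user-space arithmetic only -- not part of any patch */
#include <stdio.h>

int main(void)
{
	const int tq_max = 255;
	int penalty, hops, tq;

	for (penalty = 10; penalty <= 30; penalty += 20) {
		tq = tq_max;
		for (hops = 0; hops < 3; hops++)
			tq = tq * (tq_max - penalty) / tq_max;
		/* prints 225/255 for penalty 10 and 174/255 for penalty 30 */
		printf("hop_penalty=%d: TQ after 3 hops = %d/255\n",
		       penalty, tq);
	}
	return 0;
}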
Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_debugfs.c | 8 - net/batman-adv/main.c | 5 - net/batman-adv/main.h | 2 - net/batman-adv/originator.c | 2 - net/batman-adv/routing.c | 5 +- net/batman-adv/routing.h | 1 - net/batman-adv/soft-interface.c | 476 +--------------------------------------- net/batman-adv/soft-interface.h | 2 - net/batman-adv/types.h | 21 -- 9 files changed, 5 insertions(+), 517 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c index c3b0548b175d..165ff62a2bed 100644 --- a/net/batman-adv/bat_debugfs.c +++ b/net/batman-adv/bat_debugfs.c @@ -238,12 +238,6 @@ static int gateways_open(struct inode *inode, struct file *file) return single_open(file, gw_client_seq_print_text, net_dev); } -static int softif_neigh_open(struct inode *inode, struct file *file) -{ - struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, softif_neigh_seq_print_text, net_dev); -} - static int transtable_global_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; @@ -282,7 +276,6 @@ struct bat_debuginfo bat_debuginfo_##_name = { \ static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); -static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); @@ -290,7 +283,6 @@ static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_originators, &bat_debuginfo_gateways, - &bat_debuginfo_softif_neigh, &bat_debuginfo_transtable_global, &bat_debuginfo_transtable_local, &bat_debuginfo_vis_data, diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 6d51caaf8cec..94d4968a953a 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -96,13 +96,10 @@ int mesh_init(struct net_device *soft_iface) spin_lock_init(&bat_priv->gw_list_lock); spin_lock_init(&bat_priv->vis_hash_lock); spin_lock_init(&bat_priv->vis_list_lock); - spin_lock_init(&bat_priv->softif_neigh_lock); - spin_lock_init(&bat_priv->softif_neigh_vid_lock); INIT_HLIST_HEAD(&bat_priv->forw_bat_list); INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); INIT_HLIST_HEAD(&bat_priv->gw_list); - INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); INIT_LIST_HEAD(&bat_priv->tt_changes_list); INIT_LIST_HEAD(&bat_priv->tt_req_list); INIT_LIST_HEAD(&bat_priv->tt_roam_list); @@ -145,8 +142,6 @@ void mesh_free(struct net_device *soft_iface) tt_free(bat_priv); - softif_neigh_purge(bat_priv); - atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); } diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 0a20a19197aa..7a6a25f22fc2 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -80,8 +80,6 @@ #define MAX_AGGREGATION_BYTES 512 #define MAX_AGGREGATION_MS 100 -#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ - /* don't reset again within 30 seconds */ #define RESET_PROTECTION_MS 30000 #define EXPECTED_SEQNO_RANGE 65536 diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 43c0a4f1399e..82390818874b 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -375,8 +375,6 @@ static void _purge_orig(struct bat_priv 
*bat_priv) gw_node_purge(bat_priv); gw_election(bat_priv); - - softif_neigh_purge(bat_priv); } static void purge_orig(struct work_struct *work) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index b0370e3c59e8..71d4211beb23 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -30,6 +30,9 @@ #include "vis.h" #include "unicast.h" +static int route_unicast_packet(struct sk_buff *skb, + struct hard_iface *recv_if); + void slide_own_bcast_window(struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); @@ -798,7 +801,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) return 0; } -int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 92ac100d83da..3d729cb17113 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -25,7 +25,6 @@ void slide_own_bcast_window(struct hard_iface *hard_iface); void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node); -int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 82c097d6ec93..e56cb88ef2ba 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -73,439 +73,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len) return 0; } -static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) -{ - if (atomic_dec_and_test(&softif_neigh->refcount)) - kfree_rcu(softif_neigh, rcu); -} - -static void softif_neigh_vid_free_rcu(struct rcu_head *rcu) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh; - struct hlist_node *node, *node_tmp; - struct bat_priv *bat_priv; - - softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu); - bat_priv = softif_neigh_vid->bat_priv; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_for_each_entry_safe(softif_neigh, node, node_tmp, - &softif_neigh_vid->softif_neigh_list, list) { - hlist_del_rcu(&softif_neigh->list); - softif_neigh_free_ref(softif_neigh); - } - spin_unlock_bh(&bat_priv->softif_neigh_lock); - - kfree(softif_neigh_vid); -} - -static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid) -{ - if (atomic_dec_and_test(&softif_neigh_vid->refcount)) - call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu); -} - -static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct hlist_node *node; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - if (softif_neigh_vid->vid != vid) - continue; - - if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) - continue; - - goto out; - } - - softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC); - if (!softif_neigh_vid) - goto out; - - softif_neigh_vid->vid = vid; - softif_neigh_vid->bat_priv = bat_priv; - - /* initialize with 2 - caller decrements counter by one */ - 
atomic_set(&softif_neigh_vid->refcount, 2); - INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list); - INIT_HLIST_NODE(&softif_neigh_vid->list); - spin_lock_bh(&bat_priv->softif_neigh_vid_lock); - hlist_add_head_rcu(&softif_neigh_vid->list, - &bat_priv->softif_neigh_vids); - spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); - -out: - rcu_read_unlock(); - return softif_neigh_vid; -} - -static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, - const uint8_t *addr, short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh = NULL; - struct hlist_node *node; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh, node, - &softif_neigh_vid->softif_neigh_list, - list) { - if (!compare_eth(softif_neigh->addr, addr)) - continue; - - if (!atomic_inc_not_zero(&softif_neigh->refcount)) - continue; - - softif_neigh->last_seen = jiffies; - goto unlock; - } - - softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC); - if (!softif_neigh) - goto unlock; - - memcpy(softif_neigh->addr, addr, ETH_ALEN); - softif_neigh->last_seen = jiffies; - /* initialize with 2 - caller decrements counter by one */ - atomic_set(&softif_neigh->refcount, 2); - - INIT_HLIST_NODE(&softif_neigh->list); - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_add_head_rcu(&softif_neigh->list, - &softif_neigh_vid->softif_neigh_list); - spin_unlock_bh(&bat_priv->softif_neigh_lock); - -unlock: - rcu_read_unlock(); -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); - return softif_neigh; -} - -static struct softif_neigh *softif_neigh_get_selected( - struct softif_neigh_vid *softif_neigh_vid) -{ - struct softif_neigh *softif_neigh; - - rcu_read_lock(); - softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); - - if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount)) - softif_neigh = NULL; - - rcu_read_unlock(); - return softif_neigh; -} - -static struct softif_neigh *softif_neigh_vid_get_selected( - struct bat_priv *bat_priv, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh = NULL; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - softif_neigh = softif_neigh_get_selected(softif_neigh_vid); -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); - return softif_neigh; -} - -static void softif_neigh_vid_select(struct bat_priv *bat_priv, - struct softif_neigh *new_neigh, - short vid) -{ - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *curr_neigh; - - softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); - if (!softif_neigh_vid) - goto out; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - - if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) - new_neigh = NULL; - - curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh, - 1); - rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh); - - if ((curr_neigh) && (!new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Removing mesh exit point on vid: %d (prev: %pM).\n", - vid, curr_neigh->addr); - else if ((curr_neigh) && (new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Changing mesh exit point on vid: %d from %pM to %pM.\n", - vid, curr_neigh->addr, new_neigh->addr); - else if ((!curr_neigh) && (new_neigh)) - bat_dbg(DBG_ROUTES, bat_priv, - "Setting mesh exit point on vid: %d to %pM.\n", - vid, new_neigh->addr); - - if (curr_neigh) - 
softif_neigh_free_ref(curr_neigh); - - spin_unlock_bh(&bat_priv->softif_neigh_lock); - -out: - if (softif_neigh_vid) - softif_neigh_vid_free_ref(softif_neigh_vid); -} - -static void softif_neigh_vid_deselect(struct bat_priv *bat_priv, - struct softif_neigh_vid *softif_neigh_vid) -{ - struct softif_neigh *curr_neigh; - struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp; - struct hard_iface *primary_if = NULL; - struct hlist_node *node; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* find new softif_neigh immediately to avoid temporary loops */ - rcu_read_lock(); - curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); - - hlist_for_each_entry_rcu(softif_neigh_tmp, node, - &softif_neigh_vid->softif_neigh_list, - list) { - if (softif_neigh_tmp == curr_neigh) - continue; - - /* we got a neighbor but its mac is 'bigger' than ours */ - if (memcmp(primary_if->net_dev->dev_addr, - softif_neigh_tmp->addr, ETH_ALEN) < 0) - continue; - - if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount)) - continue; - - softif_neigh = softif_neigh_tmp; - goto unlock; - } - -unlock: - rcu_read_unlock(); -out: - softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid); - - if (primary_if) - hardif_free_ref(primary_if); - if (softif_neigh) - softif_neigh_free_ref(softif_neigh); -} - -int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) -{ - struct net_device *net_dev = (struct net_device *)seq->private; - struct bat_priv *bat_priv = netdev_priv(net_dev); - struct softif_neigh_vid *softif_neigh_vid; - struct softif_neigh *softif_neigh; - struct hard_iface *primary_if; - struct hlist_node *node, *node_tmp; - struct softif_neigh *curr_softif_neigh; - int ret = 0, last_seen_secs, last_seen_msecs; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) { - ret = seq_printf(seq, - "BATMAN mesh %s disabled - please specify interfaces to enable it\n", - net_dev->name); - goto out; - } - - if (primary_if->if_status != IF_ACTIVE) { - ret = seq_printf(seq, - "BATMAN mesh %s disabled - primary interface not active\n", - net_dev->name); - goto out; - } - - seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - seq_printf(seq, " %-15s %s on vid: %d\n", - "Originator", "last-seen", softif_neigh_vid->vid); - - curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); - - hlist_for_each_entry_rcu(softif_neigh, node_tmp, - &softif_neigh_vid->softif_neigh_list, - list) { - last_seen_secs = jiffies_to_msecs(jiffies - - softif_neigh->last_seen) / 1000; - last_seen_msecs = jiffies_to_msecs(jiffies - - softif_neigh->last_seen) % 1000; - seq_printf(seq, "%s %pM %3i.%03is\n", - curr_softif_neigh == softif_neigh - ? 
"=>" : " ", softif_neigh->addr, - last_seen_secs, last_seen_msecs); - } - - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - - seq_printf(seq, "\n"); - } - rcu_read_unlock(); - -out: - if (primary_if) - hardif_free_ref(primary_if); - return ret; -} - -void softif_neigh_purge(struct bat_priv *bat_priv) -{ - struct softif_neigh *softif_neigh, *curr_softif_neigh; - struct softif_neigh_vid *softif_neigh_vid; - struct hlist_node *node, *node_tmp, *node_tmp2; - int do_deselect; - - rcu_read_lock(); - hlist_for_each_entry_rcu(softif_neigh_vid, node, - &bat_priv->softif_neigh_vids, list) { - if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) - continue; - - curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); - do_deselect = 0; - - spin_lock_bh(&bat_priv->softif_neigh_lock); - hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2, - &softif_neigh_vid->softif_neigh_list, - list) { - if ((!has_timed_out(softif_neigh->last_seen, - SOFTIF_NEIGH_TIMEOUT)) && - (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) - continue; - - if (curr_softif_neigh == softif_neigh) { - bat_dbg(DBG_ROUTES, bat_priv, - "Current mesh exit point on vid: %d '%pM' vanished.\n", - softif_neigh_vid->vid, - softif_neigh->addr); - do_deselect = 1; - } - - hlist_del_rcu(&softif_neigh->list); - softif_neigh_free_ref(softif_neigh); - } - spin_unlock_bh(&bat_priv->softif_neigh_lock); - - /* soft_neigh_vid_deselect() needs to acquire the - * softif_neigh_lock */ - if (do_deselect) - softif_neigh_vid_deselect(bat_priv, softif_neigh_vid); - - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - - softif_neigh_vid_free_ref(softif_neigh_vid); - } - rcu_read_unlock(); - - spin_lock_bh(&bat_priv->softif_neigh_vid_lock); - hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp, - &bat_priv->softif_neigh_vids, list) { - if (!hlist_empty(&softif_neigh_vid->softif_neigh_list)) - continue; - - hlist_del_rcu(&softif_neigh_vid->list); - softif_neigh_vid_free_ref(softif_neigh_vid); - } - spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); - -} - -static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, - short vid) -{ - struct bat_priv *bat_priv = netdev_priv(dev); - struct ethhdr *ethhdr = (struct ethhdr *)skb->data; - struct batman_ogm_packet *batman_ogm_packet; - struct softif_neigh *softif_neigh = NULL; - struct hard_iface *primary_if = NULL; - struct softif_neigh *curr_softif_neigh = NULL; - - if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) - batman_ogm_packet = (struct batman_ogm_packet *) - (skb->data + ETH_HLEN + VLAN_HLEN); - else - batman_ogm_packet = (struct batman_ogm_packet *) - (skb->data + ETH_HLEN); - - if (batman_ogm_packet->header.version != COMPAT_VERSION) - goto out; - - if (batman_ogm_packet->header.packet_type != BAT_OGM) - goto out; - - if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) - goto out; - - if (is_my_mac(batman_ogm_packet->orig)) - goto out; - - softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid); - if (!softif_neigh) - goto out; - - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh == softif_neigh) - goto out; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* we got a neighbor but its mac is 'bigger' than ours */ - if (memcmp(primary_if->net_dev->dev_addr, - softif_neigh->addr, ETH_ALEN) < 0) - goto out; - - /* close own batX device and use softif_neigh as exit node */ - if (!curr_softif_neigh) { - softif_neigh_vid_select(bat_priv, 
softif_neigh, vid); - goto out; - } - - /* switch to new 'smallest neighbor' */ - if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0) - softif_neigh_vid_select(bat_priv, softif_neigh, vid); - -out: - kfree_skb(skb); - if (softif_neigh) - softif_neigh_free_ref(softif_neigh); - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); - if (primary_if) - hardif_free_ref(primary_if); - return; -} - static int interface_open(struct net_device *dev) { netif_start_queue(dev); @@ -562,7 +129,6 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct hard_iface *primary_if = NULL; struct bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; - struct softif_neigh *curr_softif_neigh = NULL; unsigned int header_len = 0; int data_len = skb->len, ret; short vid = -1; @@ -583,17 +149,8 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* fall through */ case ETH_P_BATMAN: - softif_batman_recv(skb, soft_iface, vid); - goto end; - } - - /** - * if we have a another chosen mesh exit node in range - * it will transport the packets to the mesh - */ - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh) goto dropped; + } /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); @@ -675,8 +232,6 @@ dropped: dropped_freed: bat_priv->stats.tx_dropped++; end: - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); if (primary_if) hardif_free_ref(primary_if); return NETDEV_TX_OK; @@ -687,12 +242,9 @@ void interface_rx(struct net_device *soft_iface, int hdr_size) { struct bat_priv *bat_priv = netdev_priv(soft_iface); - struct unicast_packet *unicast_packet; struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; - struct softif_neigh *curr_softif_neigh = NULL; short vid = -1; - int ret; /* check if enough space is available for pulling, and pull */ if (!pskb_may_pull(skb, hdr_size)) @@ -716,30 +268,6 @@ void interface_rx(struct net_device *soft_iface, goto dropped; } - /** - * if we have a another chosen mesh exit node in range - * it will transport the packets to the non-mesh network - */ - curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); - if (curr_softif_neigh) { - skb_push(skb, hdr_size); - unicast_packet = (struct unicast_packet *)skb->data; - - if ((unicast_packet->header.packet_type != BAT_UNICAST) && - (unicast_packet->header.packet_type != BAT_UNICAST_FRAG)) - goto dropped; - - skb_reset_mac_header(skb); - - memcpy(unicast_packet->dest, - curr_softif_neigh->addr, ETH_ALEN); - ret = route_unicast_packet(skb, recv_if); - if (ret == NET_RX_DROP) - goto dropped; - - goto out; - } - /* skb->dev & skb->pkt_type are set here */ if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) goto dropped; @@ -765,8 +293,6 @@ void interface_rx(struct net_device *soft_iface, dropped: kfree_skb(skb); out: - if (curr_softif_neigh) - softif_neigh_free_ref(curr_softif_neigh); return; } diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 756eab5b8dd4..020300673884 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -23,8 +23,6 @@ #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ int my_skb_head_push(struct sk_buff *skb, unsigned int len); -int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); -void softif_neigh_purge(struct bat_priv *bat_priv); void interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct hard_iface *recv_if, int hdr_size); diff --git 
a/net/batman-adv/types.h b/net/batman-adv/types.h index 24c8a31a3d91..feac2f4030c8 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -174,7 +174,6 @@ struct bat_priv { struct hlist_head forw_bat_list; struct hlist_head forw_bcast_list; struct hlist_head gw_list; - struct hlist_head softif_neigh_vids; struct list_head tt_changes_list; /* tracks changes in a OGM int */ struct list_head vis_send_list; struct hashtable_t *orig_hash; @@ -191,8 +190,6 @@ struct bat_priv { spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ spinlock_t vis_hash_lock; /* protects vis_hash */ spinlock_t vis_list_lock; /* protects vis_info::recv_list */ - spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ - spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ atomic_t num_local_tt; /* Checksum of the local table, recomputed before sending a new OGM */ atomic_t tt_crc; @@ -327,24 +324,6 @@ struct recvlist_node { uint8_t mac[ETH_ALEN]; }; -struct softif_neigh_vid { - struct hlist_node list; - struct bat_priv *bat_priv; - short vid; - atomic_t refcount; - struct softif_neigh __rcu *softif_neigh; - struct rcu_head rcu; - struct hlist_head softif_neigh_list; -}; - -struct softif_neigh { - struct hlist_node list; - uint8_t addr[ETH_ALEN]; - unsigned long last_seen; - atomic_t refcount; - struct rcu_head rcu; -}; - struct bat_algo_ops { struct hlist_node list; char *name; -- cgit v1.2.2 From 23721387c409087fd3b97e274f34d3ddc0970b74 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:19 +0100 Subject: batman-adv: add basic bridge loop avoidance code This second version of the bridge loop avoidance for batman-adv avoids loops between the mesh and a backbone (usually a LAN). By connecting multiple batman-adv mesh nodes to the same ethernet segment a loop can be created when the soft-interface is bridged into that ethernet segment. A simple visualization of the loop involving the most common case - a LAN as ethernet segment: node1 <-- LAN --> node2 | | wifi <-- mesh --> wifi Packets from the LAN (e.g. ARP broadcasts) will circle forever from node1 or node2 over the mesh back into the LAN. With this patch, batman recognizes backbone gateways, nodes which are part of the mesh and backbone/LAN at the same time. Each backbone gateway "claims" clients from within the mesh to handle them exclusively. By restricting that only responsible backbone gateways may handle their claimed clients traffic, loops are effectively avoided. Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/Kconfig | 2 +- net/batman-adv/Makefile | 1 + net/batman-adv/bat_sysfs.c | 2 +- net/batman-adv/bridge_loop_avoidance.c | 1296 ++++++++++++++++++++++++++++++++ net/batman-adv/bridge_loop_avoidance.h | 37 + net/batman-adv/hard-interface.c | 18 +- net/batman-adv/main.c | 6 + net/batman-adv/main.h | 6 +- net/batman-adv/originator.c | 1 + net/batman-adv/packet.h | 17 + net/batman-adv/routing.c | 7 + net/batman-adv/soft-interface.c | 12 + net/batman-adv/types.h | 27 + 13 files changed, 1422 insertions(+), 10 deletions(-) create mode 100644 net/batman-adv/bridge_loop_avoidance.c create mode 100644 net/batman-adv/bridge_loop_avoidance.h (limited to 'net') diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index a04d28f392e6..6ff977c1f3bc 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -4,7 +4,7 @@ config BATMAN_ADV tristate "B.A.T.M.A.N. 
Advanced Meshing Protocol" - depends on NET + depends on NET && INET select CRC16 default n help diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index 4e392ebedb64..94b67fd81bf7 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o batman-adv-y += bat_iv_ogm.o batman-adv-y += bat_sysfs.o batman-adv-y += bitarray.o +batman-adv-y += bridge_loop_avoidance.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 68ff759fc304..d12757fbe10a 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -398,7 +398,7 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, store_gw_bwidth); #ifdef CONFIG_BATMAN_ADV_DEBUG -BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); +BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); #endif static struct bat_attribute *mesh_attrs[] = { diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c new file mode 100644 index 000000000000..f84c892be7ea --- /dev/null +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -0,0 +1,1296 @@ +/* + * Copyright (C) 2011 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "hash.h" +#include "hard-interface.h" +#include "originator.h" +#include "bridge_loop_avoidance.h" +#include "send.h" + +#include +#include +#include +#include +#include + +static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; +static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; + +static void bla_periodic_work(struct work_struct *work); +static void bla_send_announce(struct bat_priv *bat_priv, + struct backbone_gw *backbone_gw); + +/* return the index of the claim */ +static inline uint32_t choose_claim(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < ETH_ALEN + sizeof(short); i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + +/* return the index of the backbone gateway */ +static inline uint32_t choose_backbone_gw(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < ETH_ALEN + sizeof(short); i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + + +/* compares address and vid of two backbone gws */ +static int compare_backbone_gw(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct backbone_gw, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); +} + +/* compares address and vid of two claims */ +static int compare_claim(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct claim, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); +} + +/* free a backbone gw */ +static void backbone_gw_free_ref(struct backbone_gw *backbone_gw) +{ + if (atomic_dec_and_test(&backbone_gw->refcount)) + kfree_rcu(backbone_gw, rcu); +} + +/* finally deinitialize the claim */ +static void claim_free_rcu(struct rcu_head *rcu) +{ + struct claim *claim; + + claim = container_of(rcu, struct claim, rcu); + + backbone_gw_free_ref(claim->backbone_gw); + kfree(claim); +} + +/* free a claim, call claim_free_rcu if its the last reference */ +static void claim_free_ref(struct claim *claim) +{ + if (atomic_dec_and_test(&claim->refcount)) + call_rcu(&claim->rcu, claim_free_rcu); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @data: search data (may be local/static data) + * + * looks for a claim in the hash, and returns it if found + * or NULL otherwise. 
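+ * Note: the refcount of the returned claim has been increased, so the
+ * caller has to release it again with claim_free_ref() when done.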
+ */ +static struct claim *claim_hash_find(struct bat_priv *bat_priv, + struct claim *data) +{ + struct hashtable_t *hash = bat_priv->claim_hash; + struct hlist_head *head; + struct hlist_node *node; + struct claim *claim; + struct claim *claim_tmp = NULL; + int index; + + if (!hash) + return NULL; + + index = choose_claim(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + if (!compare_claim(&claim->hash_entry, data)) + continue; + + if (!atomic_inc_not_zero(&claim->refcount)) + continue; + + claim_tmp = claim; + break; + } + rcu_read_unlock(); + + return claim_tmp; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @addr: the address of the originator + * @vid: the VLAN ID + * + * looks for a claim in the hash, and returns it if found + * or NULL otherwise. + */ +static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv, + uint8_t *addr, short vid) +{ + struct hashtable_t *hash = bat_priv->backbone_hash; + struct hlist_head *head; + struct hlist_node *node; + struct backbone_gw search_entry, *backbone_gw; + struct backbone_gw *backbone_gw_tmp = NULL; + int index; + + if (!hash) + return NULL; + + memcpy(search_entry.orig, addr, ETH_ALEN); + search_entry.vid = vid; + + index = choose_backbone_gw(&search_entry, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (!compare_backbone_gw(&backbone_gw->hash_entry, + &search_entry)) + continue; + + if (!atomic_inc_not_zero(&backbone_gw->refcount)) + continue; + + backbone_gw_tmp = backbone_gw; + break; + } + rcu_read_unlock(); + + return backbone_gw_tmp; +} + +/* delete all claims for a backbone */ +static void bla_del_backbone_claims(struct backbone_gw *backbone_gw) +{ + struct hashtable_t *hash; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct claim *claim; + int i; + spinlock_t *list_lock; /* protects write access to the hash lists */ + + hash = backbone_gw->bat_priv->claim_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(claim, node, node_tmp, + head, hash_entry) { + + if (claim->backbone_gw != backbone_gw) + continue; + + claim_free_ref(claim); + hlist_del_rcu(node); + } + spin_unlock_bh(list_lock); + } + + /* all claims gone, intialize CRC */ + backbone_gw->crc = BLA_CRC_INIT; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: the mac address to be announced within the claim + * @vid: the VLAN ID + * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) + * + * sends a claim frame according to the provided info. 
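+ * Note: the claim is encoded as a locally generated ARP reply: source and
+ * destination IP are 0.0.0.0 and the target hardware address carries the
+ * ff:43:05 magic plus the claim type (see struct bla_claim_dst). The frame
+ * is then handed to our own soft interface via netif_rx().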
+ */ +static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, + short vid, int claimtype) +{ + struct sk_buff *skb; + struct ethhdr *ethhdr; + struct hard_iface *primary_if; + struct net_device *soft_iface; + uint8_t *hw_src; + struct bla_claim_dst local_claim_dest; + uint32_t zeroip = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + return; + + memcpy(&local_claim_dest, claim_dest, sizeof(local_claim_dest)); + local_claim_dest.type = claimtype; + + soft_iface = primary_if->soft_iface; + + skb = arp_create(ARPOP_REPLY, ETH_P_ARP, + /* IP DST: 0.0.0.0 */ + zeroip, + primary_if->soft_iface, + /* IP SRC: 0.0.0.0 */ + zeroip, + /* Ethernet DST: Broadcast */ + NULL, + /* Ethernet SRC/HW SRC: originator mac */ + primary_if->net_dev->dev_addr, + /* HW DST: FF:43:05:XX:00:00 + * with XX = claim type + */ + (uint8_t *)&local_claim_dest); + + if (!skb) + goto out; + + ethhdr = (struct ethhdr *)skb->data; + hw_src = (uint8_t *)ethhdr + + sizeof(struct ethhdr) + + sizeof(struct arphdr); + + /* now we pretend that the client would have sent this ... */ + switch (claimtype) { + case CLAIM_TYPE_ADD: + /* normal claim frame + * set Ethernet SRC to the clients mac + */ + memcpy(ethhdr->h_source, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); + break; + case CLAIM_TYPE_DEL: + /* unclaim frame + * set HW SRC to the clients mac + */ + memcpy(hw_src, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid); + break; + case CLAIM_TYPE_ANNOUNCE: + /* announcement frame + * set HW SRC to the special mac containg the crc + */ + memcpy(hw_src, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", + ethhdr->h_source, vid); + break; + case CLAIM_TYPE_REQUEST: + /* request frame + * set HW SRC to the special mac containg the crc + */ + memcpy(hw_src, mac, ETH_ALEN); + memcpy(ethhdr->h_dest, mac, ETH_ALEN); + bat_dbg(DBG_BLA, bat_priv, + "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", + ethhdr->h_source, ethhdr->h_dest, vid); + break; + + } + + if (vid != -1) + skb = vlan_insert_tag(skb, vid); + + skb_reset_mac_header(skb); + skb->protocol = eth_type_trans(skb, soft_iface); + bat_priv->stats.rx_packets++; + bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); + soft_iface->last_rx = jiffies; + + netif_rx(skb); +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: the mac address of the originator + * @vid: the VLAN ID + * + * searches for the backbone gw or creates a new one if it could not + * be found. 
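+ * Note: a newly created entry starts with a refcount of two (one for the
+ * hash table, one for the caller); in both cases the caller has to drop
+ * its reference with backbone_gw_free_ref() when it is done.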
+ */ +static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, + uint8_t *orig, short vid) +{ + struct backbone_gw *entry; + int hash_added; + + entry = backbone_hash_find(bat_priv, orig, vid); + + if (entry) + return entry; + + bat_dbg(DBG_BLA, bat_priv, + "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", + orig, vid); + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return NULL; + + entry->vid = vid; + entry->lasttime = jiffies; + entry->crc = BLA_CRC_INIT; + entry->bat_priv = bat_priv; + atomic_set(&entry->request_sent, 0); + memcpy(entry->orig, orig, ETH_ALEN); + + /* one for the hash, one for returning */ + atomic_set(&entry->refcount, 2); + + hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw, + choose_backbone_gw, entry, &entry->hash_entry); + + if (unlikely(hash_added != 0)) { + /* hash failed, free the structure */ + kfree(entry); + return NULL; + } + + return entry; +} + +/* update or add the own backbone gw to make sure we announce + * where we receive other backbone gws + */ +static void bla_update_own_backbone_gw(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + short vid) +{ + struct backbone_gw *backbone_gw; + + backbone_gw = bla_get_backbone_gw(bat_priv, + primary_if->net_dev->dev_addr, vid); + if (unlikely(!backbone_gw)) + return; + + backbone_gw->lasttime = jiffies; + backbone_gw_free_ref(backbone_gw); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @vid: the vid where the request came on + * + * Repeat all of our own claims, and finally send an ANNOUNCE frame + * to allow the requester another check if the CRC is correct now. + */ +static void bla_answer_request(struct bat_priv *bat_priv, + struct hard_iface *primary_if, short vid) +{ + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + struct claim *claim; + struct backbone_gw *backbone_gw; + int i; + + bat_dbg(DBG_BLA, bat_priv, + "bla_answer_request(): received a claim request, send all of our own claims again\n"); + + backbone_gw = backbone_hash_find(bat_priv, + primary_if->net_dev->dev_addr, vid); + if (!backbone_gw) + return; + + hash = bat_priv->claim_hash; + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + /* only own claims are interesting */ + if (claim->backbone_gw != backbone_gw) + continue; + + bla_send_claim(bat_priv, claim->addr, claim->vid, + CLAIM_TYPE_ADD); + } + rcu_read_unlock(); + } + + /* finally, send an announcement frame */ + bla_send_announce(bat_priv, backbone_gw); + backbone_gw_free_ref(backbone_gw); +} + +/** + * @backbone_gw: the backbone gateway from whom we are out of sync + * + * When the crc is wrong, ask the backbone gateway for a full table update. + * After the request, it will repeat all of his own claims and finally + * send an announcement claim with which we can check again. + */ +static void bla_send_request(struct backbone_gw *backbone_gw) +{ + /* first, remove all old entries */ + bla_del_backbone_claims(backbone_gw); + + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "Sending REQUEST to %pM\n", + backbone_gw->orig); + + /* send request */ + bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig, + backbone_gw->vid, CLAIM_TYPE_REQUEST); + + /* no local broadcasts should be sent or received, for now. 
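+ * bla_num_requests is increased below and checked in bla_rx()/bla_tx(),
+ * which drop broadcast traffic while a request is in flight; it is
+ * decreased again once an ANNOUNCE with a matching CRC arrives (see
+ * handle_announce()) or the backbone gw is purged.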
*/ + if (!atomic_read(&backbone_gw->request_sent)) { + atomic_inc(&backbone_gw->bat_priv->bla_num_requests); + atomic_set(&backbone_gw->request_sent, 1); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @backbone_gw: our backbone gateway which should be announced + * + * This function sends an announcement. It is called from multiple + * places. + */ +static void bla_send_announce(struct bat_priv *bat_priv, + struct backbone_gw *backbone_gw) +{ + uint8_t mac[ETH_ALEN]; + uint16_t crc; + + memcpy(mac, announce_mac, 4); + crc = htons(backbone_gw->crc); + memcpy(&mac[4], (uint8_t *)&crc, 2); + + bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE); + +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @mac: the mac address of the claim + * @vid: the VLAN ID of the frame + * @backbone_gw: the backbone gateway which claims it + * + * Adds a claim in the claim hash. + */ +static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac, + const short vid, struct backbone_gw *backbone_gw) +{ + struct claim *claim; + struct claim search_claim; + int hash_added; + + memcpy(search_claim.addr, mac, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + + /* create a new claim entry if it does not exist yet. */ + if (!claim) { + claim = kzalloc(sizeof(*claim), GFP_ATOMIC); + if (!claim) + return; + + memcpy(claim->addr, mac, ETH_ALEN); + claim->vid = vid; + claim->lasttime = jiffies; + claim->backbone_gw = backbone_gw; + + atomic_set(&claim->refcount, 2); + bat_dbg(DBG_BLA, bat_priv, + "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", + mac, vid); + hash_added = hash_add(bat_priv->claim_hash, compare_claim, + choose_claim, claim, &claim->hash_entry); + + if (unlikely(hash_added != 0)) { + /* only local changes happened. */ + kfree(claim); + return; + } + } else { + claim->lasttime = jiffies; + if (claim->backbone_gw == backbone_gw) + /* no need to register a new backbone */ + goto claim_free_ref; + + bat_dbg(DBG_BLA, bat_priv, + "bla_add_claim(): changing ownership for %pM, vid %d\n", + mac, vid); + + claim->backbone_gw->crc ^= + crc16(0, claim->addr, ETH_ALEN); + backbone_gw_free_ref(claim->backbone_gw); + + } + /* set (new) backbone gw */ + atomic_inc(&backbone_gw->refcount); + claim->backbone_gw = backbone_gw; + + backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + backbone_gw->lasttime = jiffies; + +claim_free_ref: + claim_free_ref(claim); +} + +/* Delete a claim from the claim hash which has the + * given mac address and vid. 
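+ * The client's contribution to the backbone gateway's CRC is removed as
+ * well.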
+ */ +static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac, + const short vid) +{ + struct claim search_claim, *claim; + + memcpy(search_claim.addr, mac, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + if (!claim) + return; + + bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid); + + hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim); + claim_free_ref(claim); /* reference from the hash is gone */ + + claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + + /* don't need the reference from hash_find() anymore */ + claim_free_ref(claim); +} + +/* check for ANNOUNCE frame, return 1 if handled */ +static int handle_announce(struct bat_priv *bat_priv, + uint8_t *an_addr, uint8_t *backbone_addr, short vid) +{ + struct backbone_gw *backbone_gw; + uint16_t crc; + + if (memcmp(an_addr, announce_mac, 4) != 0) + return 0; + + backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); + + if (unlikely(!backbone_gw)) + return 1; + + + /* handle as ANNOUNCE frame */ + backbone_gw->lasttime = jiffies; + crc = ntohs(*((uint16_t *)(&an_addr[4]))); + + bat_dbg(DBG_BLA, bat_priv, + "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n", + vid, backbone_gw->orig, crc); + + if (backbone_gw->crc != crc) { + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n", + backbone_gw->orig, backbone_gw->vid, backbone_gw->crc, + crc); + + bla_send_request(backbone_gw); + } else { + /* if we have sent a request and the crc was OK, + * we can allow traffic again. + */ + if (atomic_read(&backbone_gw->request_sent)) { + atomic_dec(&backbone_gw->bat_priv->bla_num_requests); + atomic_set(&backbone_gw->request_sent, 0); + } + } + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* check for REQUEST frame, return 1 if handled */ +static int handle_request(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *backbone_addr, + struct ethhdr *ethhdr, short vid) +{ + /* check for REQUEST frame */ + if (!compare_eth(backbone_addr, ethhdr->h_dest)) + return 0; + + /* sanity check, this should not happen on a normal switch, + * we ignore it in this case. 
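+ * A REQUEST is addressed to one specific backbone gateway; requests meant
+ * for somebody else are consumed (return 1) but not answered.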
+ */ + if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) + return 1; + + bat_dbg(DBG_BLA, bat_priv, + "handle_request(): REQUEST vid %d (sent by %pM)...\n", + vid, ethhdr->h_source); + + bla_answer_request(bat_priv, primary_if, vid); + return 1; +} + +/* check for UNCLAIM frame, return 1 if handled */ +static int handle_unclaim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *backbone_addr, + uint8_t *claim_addr, short vid) +{ + struct backbone_gw *backbone_gw; + + /* unclaim in any case if it is our own */ + if (primary_if && compare_eth(backbone_addr, + primary_if->net_dev->dev_addr)) + bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL); + + backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid); + + if (!backbone_gw) + return 1; + + /* this must be an UNCLAIM frame */ + bat_dbg(DBG_BLA, bat_priv, + "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", + claim_addr, vid, backbone_gw->orig); + + bla_del_claim(bat_priv, claim_addr, vid); + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* check for CLAIM frame, return 1 if handled */ +static int handle_claim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, uint8_t *backbone_addr, + uint8_t *claim_addr, short vid) +{ + struct backbone_gw *backbone_gw; + + /* register the gateway if not yet available, and add the claim. */ + + backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid); + + if (unlikely(!backbone_gw)) + return 1; + + /* this must be a CLAIM frame */ + bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); + if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) + bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD); + + /* TODO: we could call something like tt_local_del() here. */ + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * + * Check if this is a claim frame, and process it accordingly. + * + * returns 1 if it was a claim frame, otherwise return 0 to + * tell the callee that it can use the frame on its own. + */ +static int bla_process_claim(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct sk_buff *skb) +{ + struct ethhdr *ethhdr; + struct vlan_ethhdr *vhdr; + struct arphdr *arphdr; + uint8_t *hw_src, *hw_dst; + struct bla_claim_dst *bla_dst; + uint16_t proto; + int headlen; + short vid = -1; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { + vhdr = (struct vlan_ethhdr *)ethhdr; + vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + proto = ntohs(vhdr->h_vlan_encapsulated_proto); + headlen = sizeof(*vhdr); + } else { + proto = ntohs(ethhdr->h_proto); + headlen = sizeof(*ethhdr); + } + + if (proto != ETH_P_ARP) + return 0; /* not a claim frame */ + + /* this must be a ARP frame. check if it is a claim. 
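+ * Only Ethernet/IPv4 ARP frames whose target hardware address begins with
+ * the ff:43:05 magic are treated as claim frames; everything else is
+ * handed back to the caller by returning 0.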
*/ + + if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev)))) + return 0; + + /* pskb_may_pull() may have modified the pointers, get ethhdr again */ + ethhdr = (struct ethhdr *)skb_mac_header(skb); + arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen); + + /* Check whether the ARP frame carries a valid + * IP information + */ + + if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) + return 0; + if (arphdr->ar_pro != htons(ETH_P_IP)) + return 0; + if (arphdr->ar_hln != ETH_ALEN) + return 0; + if (arphdr->ar_pln != 4) + return 0; + + hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); + hw_dst = hw_src + ETH_ALEN + 4; + bla_dst = (struct bla_claim_dst *)hw_dst; + + /* check if it is a claim frame. */ + if (memcmp(hw_dst, claim_dest, 3) != 0) + return 0; + + /* become a backbone gw ourselves on this vlan if not happened yet */ + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + + /* check for the different types of claim frames ... */ + switch (bla_dst->type) { + case CLAIM_TYPE_ADD: + if (handle_claim(bat_priv, primary_if, hw_src, + ethhdr->h_source, vid)) + return 1; + break; + case CLAIM_TYPE_DEL: + if (handle_unclaim(bat_priv, primary_if, + ethhdr->h_source, hw_src, vid)) + return 1; + break; + + case CLAIM_TYPE_ANNOUNCE: + if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid)) + return 1; + break; + case CLAIM_TYPE_REQUEST: + if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid)) + return 1; + break; + } + + bat_dbg(DBG_BLA, bat_priv, + "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", + ethhdr->h_source, vid, hw_src, hw_dst); + return 1; +} + +/* Check when we last heard from other nodes, and remove them in case of + * a time out, or clean all backbone gws if now is set. 
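+ * Purging a backbone gw also deletes all of its claims and, if a request
+ * to it was still pending, decreases bla_num_requests so that broadcasts
+ * are allowed again.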
+ */ +static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now) +{ + struct backbone_gw *backbone_gw; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct hashtable_t *hash; + spinlock_t *list_lock; /* protects write access to the hash lists */ + int i; + + hash = bat_priv->backbone_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(backbone_gw, node, node_tmp, + head, hash_entry) { + if (now) + goto purge_now; + if (!has_timed_out(backbone_gw->lasttime, + BLA_BACKBONE_TIMEOUT)) + continue; + + bat_dbg(DBG_BLA, backbone_gw->bat_priv, + "bla_purge_backbone_gw(): backbone gw %pM timed out\n", + backbone_gw->orig); + +purge_now: + /* don't wait for the pending request anymore */ + if (atomic_read(&backbone_gw->request_sent)) + atomic_dec(&bat_priv->bla_num_requests); + + bla_del_backbone_claims(backbone_gw); + + hlist_del_rcu(node); + backbone_gw_free_ref(backbone_gw); + } + spin_unlock_bh(list_lock); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @primary_if: the selected primary interface, may be NULL if now is set + * @now: whether the whole hash shall be wiped now + * + * Check when we heard last time from our own claims, and remove them in case of + * a time out, or clean all claims if now is set + */ +static void bla_purge_claims(struct bat_priv *bat_priv, + struct hard_iface *primary_if, int now) +{ + struct claim *claim; + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + int i; + + hash = bat_priv->claim_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + if (now) + goto purge_now; + if (!compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) + continue; + if (!has_timed_out(claim->lasttime, + BLA_CLAIM_TIMEOUT)) + continue; + + bat_dbg(DBG_BLA, bat_priv, + "bla_purge_claims(): %pM, vid %d, time out\n", + claim->addr, claim->vid); + +purge_now: + handle_unclaim(bat_priv, primary_if, + claim->backbone_gw->orig, + claim->addr, claim->vid); + } + rcu_read_unlock(); + } +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @primary_if: the new selected primary_if + * @oldif: the old primary interface, may be NULL + * + * Update the backbone gateways when the own orig address changes. + * + */ +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) +{ + struct backbone_gw *backbone_gw; + struct hlist_node *node; + struct hlist_head *head; + struct hashtable_t *hash; + int i; + + if (!oldif) { + bla_purge_claims(bat_priv, NULL, 1); + bla_purge_backbone_gw(bat_priv, 1); + return; + } + + hash = bat_priv->backbone_hash; + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + /* own orig still holds the old value. */ + if (!compare_eth(backbone_gw->orig, + oldif->net_dev->dev_addr)) + continue; + + memcpy(backbone_gw->orig, + primary_if->net_dev->dev_addr, ETH_ALEN); + /* send an announce frame so others will ask for our + * claims and update their tables. 
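+ * The ANNOUNCE carries the CRC over all of our claims; peers whose
+ * recorded CRC differs will answer with a REQUEST (see handle_announce()).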
+ */ + bla_send_announce(bat_priv, backbone_gw); + } + rcu_read_unlock(); + } +} + + + +/* (re)start the timer */ +static void bla_start_timer(struct bat_priv *bat_priv) +{ + INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work); + queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work, + msecs_to_jiffies(BLA_PERIOD_LENGTH)); +} + +/* periodic work to do: + * * purge structures when they are too old + * * send announcements + */ +static void bla_periodic_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct bat_priv *bat_priv = + container_of(delayed_work, struct bat_priv, bla_work); + struct hlist_node *node; + struct hlist_head *head; + struct backbone_gw *backbone_gw; + struct hashtable_t *hash; + struct hard_iface *primary_if; + int i; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + bla_purge_claims(bat_priv, primary_if, 0); + bla_purge_backbone_gw(bat_priv, 0); + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto out; + + hash = bat_priv->backbone_hash; + if (!hash) + goto out; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (!compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr)) + continue; + + backbone_gw->lasttime = jiffies; + + bla_send_announce(bat_priv, backbone_gw); + } + rcu_read_unlock(); + } +out: + if (primary_if) + hardif_free_ref(primary_if); + + bla_start_timer(bat_priv); +} + +/* initialize all bla structures */ +int bla_init(struct bat_priv *bat_priv) +{ + bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n"); + + if (bat_priv->claim_hash) + return 1; + + bat_priv->claim_hash = hash_new(128); + bat_priv->backbone_hash = hash_new(32); + + if (!bat_priv->claim_hash || !bat_priv->backbone_hash) + return -1; + + bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n"); + + bla_start_timer(bat_priv); + return 1; +} + +/** + * @skb: the frame to be checked + * @orig_node: the orig_node of the frame + * @hdr_size: maximum length of the frame + * + * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1 + * if the orig_node is also a gateway on the soft interface, otherwise it + * returns 0. + * + */ +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size) +{ + struct ethhdr *ethhdr; + struct vlan_ethhdr *vhdr; + struct backbone_gw *backbone_gw; + short vid = -1; + + if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) + return 0; + + /* first, find out the vid. 
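+ * vid stays -1 for untagged frames; only 802.1Q tagged frames get their
+ * VLAN ID extracted here.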
*/ + if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr))) + return 0; + + ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size); + + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { + if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr))) + return 0; + + vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) + + hdr_size); + vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + } + + /* see if this originator is a backbone gw for this VLAN */ + + backbone_gw = backbone_hash_find(orig_node->bat_priv, + orig_node->orig, vid); + if (!backbone_gw) + return 0; + + backbone_gw_free_ref(backbone_gw); + return 1; +} + +/* free all bla structures (for softinterface free or module unload) */ +void bla_free(struct bat_priv *bat_priv) +{ + struct hard_iface *primary_if; + + cancel_delayed_work_sync(&bat_priv->bla_work); + primary_if = primary_if_get_selected(bat_priv); + + if (bat_priv->claim_hash) { + bla_purge_claims(bat_priv, primary_if, 1); + hash_destroy(bat_priv->claim_hash); + bat_priv->claim_hash = NULL; + } + if (bat_priv->backbone_hash) { + bla_purge_backbone_gw(bat_priv, 1); + hash_destroy(bat_priv->backbone_hash); + bat_priv->backbone_hash = NULL; + } + if (primary_if) + hardif_free_ref(primary_if); +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * @vid: the VLAN ID of the frame + * + * bla_rx avoidance checks if: + * * we have to race for a claim + * * if the frame is allowed on the LAN + * + * in these cases, the skb is further handled by this function and + * returns 1, otherwise it returns 0 and the caller shall further + * process the skb. + * + */ +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +{ + struct ethhdr *ethhdr; + struct claim search_claim, *claim = NULL; + struct hard_iface *primary_if; + int ret; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto handled; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto allow; + + + if (unlikely(atomic_read(&bat_priv->bla_num_requests))) + /* don't allow broadcasts while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto handled; + + memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); + search_claim.vid = vid; + claim = claim_hash_find(bat_priv, &search_claim); + + if (!claim) { + /* possible optimization: race for a claim */ + /* No claim exists yet, claim it for us! + */ + handle_claim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } + + /* if it is our own claim ... */ + if (compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) { + /* ... allow it in any case */ + claim->lasttime = jiffies; + goto allow; + } + + /* if it is a broadcast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + /* ... drop it. the responsible gateway is in charge. */ + goto handled; + } else { + /* seems the client considers us as its best gateway. + * send a claim and update the claim table + * immediately. 
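+ * Calling handle_claim() with our own address as backbone both updates
+ * the local claim entry and broadcasts a CLAIM frame, so the gateway that
+ * was responsible so far learns about the change.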
+ */ + handle_claim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } +allow: + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + ret = 0; + goto out; + +handled: + kfree_skb(skb); + ret = 1; + +out: + if (primary_if) + hardif_free_ref(primary_if); + if (claim) + claim_free_ref(claim); + return ret; +} + +/** + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * @vid: the VLAN ID of the frame + * + * bla_tx checks if: + * * a claim was received which has to be processed + * * the frame is allowed on the mesh + * + * in these cases, the skb is further handled by this function and + * returns 1, otherwise it returns 0 and the caller shall further + * process the skb. + * + */ +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +{ + struct ethhdr *ethhdr; + struct claim search_claim, *claim = NULL; + struct hard_iface *primary_if; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + goto allow; + + /* in VLAN case, the mac header might not be set. */ + skb_reset_mac_header(skb); + + if (bla_process_claim(bat_priv, primary_if, skb)) + goto handled; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(atomic_read(&bat_priv->bla_num_requests))) + /* don't allow broadcasts while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto handled; + + memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); + search_claim.vid = vid; + + claim = claim_hash_find(bat_priv, &search_claim); + + /* if no claim exists, allow it. */ + if (!claim) + goto allow; + + /* check if we are responsible. */ + if (compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) { + /* if yes, the client has roamed and we have + * to unclaim it. + */ + handle_unclaim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } + + /* check if it is a multicast/broadcast frame */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + /* drop it. the responsible gateway has forwarded it into + * the backbone network. + */ + goto handled; + } else { + /* we must allow it. at least if we are + * responsible for the DESTINATION. + */ + goto allow; + } +allow: + bla_update_own_backbone_gw(bat_priv, primary_if, vid); + ret = 0; + goto out; +handled: + ret = 1; +out: + if (primary_if) + hardif_free_ref(primary_if); + if (claim) + claim_free_ref(claim); + return ret; +} diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h new file mode 100644 index 000000000000..488d77895d01 --- /dev/null +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2011 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_BLA_H_ +#define _NET_BATMAN_ADV_BLA_H_ + +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size); +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif); +int bla_init(struct bat_priv *bat_priv); +void bla_free(struct bat_priv *bat_priv); + +#define BLA_CRC_INIT 0 + +#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 377897701a85..8c4b790b98be 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -28,6 +28,7 @@ #include "bat_sysfs.h" #include "originator.h" #include "hash.h" +#include "bridge_loop_avoidance.h" #include @@ -107,7 +108,8 @@ out: return hard_iface; } -static void primary_if_update_addr(struct bat_priv *bat_priv) +static void primary_if_update_addr(struct bat_priv *bat_priv, + struct hard_iface *oldif) { struct vis_packet *vis_packet; struct hard_iface *primary_if; @@ -122,6 +124,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv) memcpy(vis_packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); + bla_update_orig_address(bat_priv, primary_if, oldif); out: if (primary_if) hardif_free_ref(primary_if); @@ -140,14 +143,15 @@ static void primary_if_select(struct bat_priv *bat_priv, curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); - if (curr_hard_iface) - hardif_free_ref(curr_hard_iface); - if (!new_hard_iface) - return; + goto out; bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); - primary_if_update_addr(bat_priv); + primary_if_update_addr(bat_priv, curr_hard_iface); + +out: + if (curr_hard_iface) + hardif_free_ref(curr_hard_iface); } static bool hardif_is_iface_up(const struct hard_iface *hard_iface) @@ -531,7 +535,7 @@ static int hard_if_event(struct notifier_block *this, goto hardif_put; if (hard_iface == primary_if) - primary_if_update_addr(bat_priv); + primary_if_update_addr(bat_priv, NULL); break; default: break; diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 94d4968a953a..e67ca96285b3 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -30,6 +30,7 @@ #include "translation-table.h" #include "hard-interface.h" #include "gateway_client.h" +#include "bridge_loop_avoidance.h" #include "vis.h" #include "hash.h" #include "bat_algo.h" @@ -115,6 +116,9 @@ int mesh_init(struct net_device *soft_iface) if (vis_init(bat_priv) < 1) goto err; + if (bla_init(bat_priv) < 1) + goto err; + atomic_set(&bat_priv->gw_reselect, 0); atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); goto end; @@ -142,6 +146,8 @@ void mesh_free(struct net_device *soft_iface) tt_free(bat_priv); + bla_free(bat_priv); + atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); } diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 7a6a25f22fc2..82723b5dce61 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -80,6 +80,9 @@ #define MAX_AGGREGATION_BYTES 512 #define MAX_AGGREGATION_MS 100 +#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */ +#define 
BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) +#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) /* don't reset again within 30 seconds */ #define RESET_PROTECTION_MS 30000 #define EXPECTED_SEQNO_RANGE 65536 @@ -117,7 +120,8 @@ enum dbg_level { DBG_BATMAN = 1 << 0, DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ DBG_TT = 1 << 2, /* translation table operations */ - DBG_ALL = 7 + DBG_BLA = 1 << 3, /* bridge loop avoidance */ + DBG_ALL = 15 }; /* Kernel headers */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 82390818874b..ce4969885894 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -28,6 +28,7 @@ #include "hard-interface.h" #include "unicast.h" #include "soft-interface.h" +#include "bridge_loop_avoidance.h" static void purge_orig(struct work_struct *work); diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 45e75ed6637d..59800e82371a 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -90,6 +90,23 @@ enum tt_client_flags { TT_CLIENT_PENDING = 1 << 10 }; +/* claim frame types for the bridge loop avoidance */ +enum bla_claimframe { + CLAIM_TYPE_ADD = 0x00, + CLAIM_TYPE_DEL = 0x01, + CLAIM_TYPE_ANNOUNCE = 0x02, + CLAIM_TYPE_REQUEST = 0x03 +}; + +/* the destination hardware field in the ARP frame is used to + * transport the claim type and the group id + */ +struct bla_claim_dst { + uint8_t magic[3]; /* FF:43:05 */ + uint8_t type; /* bla_claimframe */ + uint16_t group; /* group id */ +} __packed; + struct batman_header { uint8_t packet_type; uint8_t version; /* batman version field */ diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 71d4211beb23..a1d8c9b0f902 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -29,6 +29,7 @@ #include "originator.h" #include "vis.h" #include "unicast.h" +#include "bridge_loop_avoidance.h" static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); @@ -1071,6 +1072,12 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) /* rebroadcast packet */ add_bcast_packet_to_list(bat_priv, skb, 1); + /* don't hand the broadcast up if it is from an originator + * from the same backbone. + */ + if (bla_is_backbone_gw(skb, orig_node, hdr_size)) + goto out; + /* broadcast for me */ interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); ret = NET_RX_SUCCESS; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index e56cb88ef2ba..4d639b303bd7 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -36,6 +36,7 @@ #include #include #include "unicast.h" +#include "bridge_loop_avoidance.h" static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); @@ -152,6 +153,9 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) goto dropped; } + if (bla_tx(bat_priv, skb, vid)) + goto dropped; + /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); @@ -287,6 +291,12 @@ void interface_rx(struct net_device *soft_iface, if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped; + /* Let the bridge loop avoidance check the packet. If will + * not handle it, we can safely push it up. 
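+ * bla_rx() frees the skb and returns 1 whenever the frame must not be
+ * passed up (e.g. broadcasts owned by another backbone gateway), so only
+ * a return value of 0 lets the frame reach netif_rx() below.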
+ */ + if (bla_rx(bat_priv, skb, vid)) + goto out; + netif_rx(skb); goto out; @@ -354,6 +364,7 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); + atomic_set(&bat_priv->bridge_loop_avoidance, 0); atomic_set(&bat_priv->ap_isolation, 0); atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); @@ -371,6 +382,7 @@ struct net_device *softif_create(const char *name) atomic_set(&bat_priv->ttvn, 0); atomic_set(&bat_priv->tt_local_changes, 0); atomic_set(&bat_priv->tt_ogm_append_cnt, 0); + atomic_set(&bat_priv->bla_num_requests, 0); bat_priv->tt_buff = NULL; bat_priv->tt_buff_len = 0; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index feac2f4030c8..089dd44a29b1 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -148,6 +148,7 @@ struct bat_priv { atomic_t bonding; /* boolean */ atomic_t fragmentation; /* boolean */ atomic_t ap_isolation; /* boolean */ + atomic_t bridge_loop_avoidance; /* boolean */ atomic_t vis_mode; /* VIS_TYPE_* */ atomic_t gw_mode; /* GW_MODE_* */ atomic_t gw_sel_class; /* uint */ @@ -161,6 +162,7 @@ struct bat_priv { atomic_t ttvn; /* translation table version number */ atomic_t tt_ogm_append_cnt; atomic_t tt_local_changes; /* changes registered in a OGM interval */ + atomic_t bla_num_requests; /* number of bla requests in flight */ /* The tt_poss_change flag is used to detect an ongoing roaming phase. * If true, then I received a Roaming_adv and I have to inspect every * packet directed to me to check whether I am still the true @@ -179,6 +181,8 @@ struct bat_priv { struct hashtable_t *orig_hash; struct hashtable_t *tt_local_hash; struct hashtable_t *tt_global_hash; + struct hashtable_t *claim_hash; + struct hashtable_t *backbone_hash; struct list_head tt_req_list; /* list of pending tt_requests */ struct list_head tt_roam_list; struct hashtable_t *vis_hash; @@ -199,6 +203,7 @@ struct bat_priv { struct delayed_work tt_work; struct delayed_work orig_work; struct delayed_work vis_work; + struct delayed_work bla_work; struct gw_node __rcu *curr_gw; /* rcu protected pointer */ atomic_t gw_reselect; struct hard_iface __rcu *primary_if; /* rcu protected pointer */ @@ -241,6 +246,28 @@ struct tt_global_entry { unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ }; +struct backbone_gw { + uint8_t orig[ETH_ALEN]; + short vid; /* used VLAN ID */ + struct hlist_node hash_entry; + struct bat_priv *bat_priv; + unsigned long lasttime; /* last time we heard of this backbone gw */ + atomic_t request_sent; + atomic_t refcount; + struct rcu_head rcu; + uint16_t crc; /* crc checksum over all claims */ +}; + +struct claim { + uint8_t addr[ETH_ALEN]; + short vid; + struct backbone_gw *backbone_gw; + unsigned long lasttime; /* last time we heard of claim (locals only) */ + struct rcu_head rcu; + atomic_t refcount; + struct hlist_node hash_entry; +}; + struct tt_change_node { struct list_head list; struct tt_change change; -- cgit v1.2.2 From c867305509e9bb748d9349c84cc26beaa95ccd73 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:20 +0100 Subject: batman-adv: make bridge loop avoidance switchable Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_sysfs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index d12757fbe10a..824bfe710687 100644 --- a/net/batman-adv/bat_sysfs.c +++ 
b/net/batman-adv/bat_sysfs.c @@ -386,6 +386,7 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); @@ -404,6 +405,7 @@ BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); static struct bat_attribute *mesh_attrs[] = { &bat_attr_aggregated_ogms, &bat_attr_bonding, + &bat_attr_bridge_loop_avoidance, &bat_attr_fragmentation, &bat_attr_ap_isolation, &bat_attr_vis_mode, -- cgit v1.2.2 From 9bf8e4d4254397684250eae29a0dc12d54a00251 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:21 +0100 Subject: batman-adv: export claim tables through debugfs Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_debugfs.c | 10 +++++++ net/batman-adv/bridge_loop_avoidance.c | 53 ++++++++++++++++++++++++++++++++++ net/batman-adv/bridge_loop_avoidance.h | 1 + 3 files changed, 64 insertions(+) (limited to 'net') diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c index 165ff62a2bed..0e3517773a8b 100644 --- a/net/batman-adv/bat_debugfs.c +++ b/net/batman-adv/bat_debugfs.c @@ -32,6 +32,7 @@ #include "soft-interface.h" #include "vis.h" #include "icmp_socket.h" +#include "bridge_loop_avoidance.h" static struct dentry *bat_debugfs; @@ -244,6 +245,13 @@ static int transtable_global_open(struct inode *inode, struct file *file) return single_open(file, tt_global_seq_print_text, net_dev); } +static int bla_claim_table_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, bla_claim_table_seq_print_text, net_dev); +} + + static int transtable_local_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; @@ -277,6 +285,7 @@ static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); +static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open); static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); @@ -284,6 +293,7 @@ static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_originators, &bat_debuginfo_gateways, &bat_debuginfo_transtable_global, + &bat_debuginfo_bla_claim_table, &bat_debuginfo_transtable_local, &bat_debuginfo_vis_data, NULL, diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index f84c892be7ea..56b9b4924763 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1294,3 +1294,56 @@ out: claim_free_ref(claim); return ret; } + +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->claim_hash; + struct claim *claim; + struct hard_iface *primary_if; + struct hlist_node *node; + struct hlist_head *head; + uint32_t i; + bool is_own; + int ret = 0; + + 
primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, "Claims announced for the mesh %s (orig %pM)\n", + net_dev->name, primary_if->net_dev->dev_addr); + seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", + "Client", "VID", "Originator", "CRC"); + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(claim, node, head, hash_entry) { + is_own = compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr); + seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", + claim->addr, claim->vid, + claim->backbone_gw->orig, + (is_own ? 'x' : ' '), + claim->backbone_gw->crc); + } + rcu_read_unlock(); + } +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 488d77895d01..d9e34803a465 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -26,6 +26,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_is_backbone_gw(struct sk_buff *skb, struct orig_node *orig_node, int hdr_size); +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); void bla_update_orig_address(struct bat_priv *bat_priv, struct hard_iface *primary_if, struct hard_iface *oldif); -- cgit v1.2.2 From db08e6e557ebc8ffedf6530693937d0e51b8f6b9 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sat, 22 Oct 2011 20:12:51 +0200 Subject: batman-adv: allow multiple entries in tt_global_entries as backbone gateways will all independently announce the same clients, also the tt global table must be able to hold multiple originators per client entry. 
Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/translation-table.c | 395 ++++++++++++++++++++++++++++--------- net/batman-adv/types.h | 9 +- 2 files changed, 311 insertions(+), 93 deletions(-) (limited to 'net') diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 5f8c540b9f17..9648b0dc57ef 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -32,10 +32,8 @@ static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, struct orig_node *orig_node); -static void _tt_global_del(struct bat_priv *bat_priv, - struct tt_global_entry *tt_global_entry, - const char *message); static void tt_purge(struct work_struct *work); +static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry); /* returns 1 if they are the same mac addr */ static int compare_tt(const struct hlist_node *node, const void *data2) @@ -125,17 +123,31 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (tt_global_entry->orig_node) - orig_node_free_ref(tt_global_entry->orig_node); - kfree(tt_global_entry); } static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) { - if (atomic_dec_and_test(&tt_global_entry->common.refcount)) + if (atomic_dec_and_test(&tt_global_entry->common.refcount)) { + tt_global_del_orig_list(tt_global_entry); call_rcu(&tt_global_entry->common.rcu, tt_global_entry_free_rcu); + } +} + +static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu) +{ + struct tt_orig_list_entry *orig_entry; + + orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu); + atomic_dec(&orig_entry->orig_node->tt_size); + orig_node_free_ref(orig_entry->orig_node); + kfree(orig_entry); +} + +static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry) +{ + call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu); } static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, @@ -184,6 +196,9 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; int hash_added; tt_local_entry = tt_local_hash_find(bat_priv, addr); @@ -234,14 +249,21 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* Check whether it is a roaming! 
*/ if (tt_global_entry) { - /* This node is probably going to update its tt table */ - tt_global_entry->orig_node->tt_poss_change = true; - /* The global entry has to be marked as ROAMING and has to be - * kept for consistency purpose */ + /* These node are probably going to update their tt table */ + head = &tt_global_entry->orig_list; + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + orig_entry->orig_node->tt_poss_change = true; + + send_roam_adv(bat_priv, tt_global_entry->common.addr, + orig_entry->orig_node); + } + rcu_read_unlock(); + /* The global entry has to be marked as ROAMING and + * has to be kept for consistency purpose + */ tt_global_entry->common.flags |= TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; - send_roam_adv(bat_priv, tt_global_entry->common.addr, - tt_global_entry->orig_node); } out: if (tt_local_entry) @@ -492,33 +514,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv) spin_unlock_bh(&bat_priv->tt_changes_list_lock); } +/* find out if an orig_node is already in the list of a tt_global_entry. + * returns 1 if found, 0 otherwise + */ +static bool tt_global_entry_has_orig(const struct tt_global_entry *entry, + const struct orig_node *orig_node) +{ + struct tt_orig_list_entry *tmp_orig_entry; + const struct hlist_head *head; + struct hlist_node *node; + bool found = false; + + rcu_read_lock(); + head = &entry->orig_list; + hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { + if (tmp_orig_entry->orig_node == orig_node) { + found = true; + break; + } + } + rcu_read_unlock(); + return found; +} + +static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + int ttvn) +{ + struct tt_orig_list_entry *orig_entry; + + orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); + if (!orig_entry) + return; + + INIT_HLIST_NODE(&orig_entry->list); + atomic_inc(&orig_node->refcount); + atomic_inc(&orig_node->tt_size); + orig_entry->orig_node = orig_node; + orig_entry->ttvn = ttvn; + + spin_lock_bh(&tt_global_entry->list_lock); + hlist_add_head_rcu(&orig_entry->list, + &tt_global_entry->orig_list); + spin_unlock_bh(&tt_global_entry->list_lock); +} + /* caller must hold orig_node refcount */ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *tt_addr, uint8_t ttvn, bool roaming, bool wifi) { - struct tt_global_entry *tt_global_entry; - struct orig_node *orig_node_tmp; + struct tt_global_entry *tt_global_entry = NULL; int ret = 0; int hash_added; tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); if (!tt_global_entry) { - tt_global_entry = - kmalloc(sizeof(*tt_global_entry), - GFP_ATOMIC); + tt_global_entry = kzalloc(sizeof(*tt_global_entry), + GFP_ATOMIC); if (!tt_global_entry) goto out; memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); + tt_global_entry->common.flags = NO_FLAGS; - atomic_set(&tt_global_entry->common.refcount, 2); - /* Assign the new orig_node */ - atomic_inc(&orig_node->refcount); - tt_global_entry->orig_node = orig_node; - tt_global_entry->ttvn = ttvn; tt_global_entry->roam_at = 0; + atomic_set(&tt_global_entry->common.refcount, 2); + + INIT_HLIST_HEAD(&tt_global_entry->orig_list); + spin_lock_init(&tt_global_entry->list_lock); hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, choose_orig, &tt_global_entry->common, @@ -529,19 +594,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, tt_global_entry_free_ref(tt_global_entry); goto out_remove; } - 
atomic_inc(&orig_node->tt_size); + + tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn); } else { - if (tt_global_entry->orig_node != orig_node) { - atomic_dec(&tt_global_entry->orig_node->tt_size); - orig_node_tmp = tt_global_entry->orig_node; - atomic_inc(&orig_node->refcount); - tt_global_entry->orig_node = orig_node; - orig_node_free_ref(orig_node_tmp); - atomic_inc(&orig_node->tt_size); + /* there is already a global entry, use this one. */ + + /* If there is the TT_CLIENT_ROAM flag set, there is only one + * originator left in the list and we previously received a + * delete + roaming change for this originator. + * + * We should first delete the old originator before adding the + * new one. + */ + if (tt_global_entry->common.flags & TT_CLIENT_ROAM) { + tt_global_del_orig_list(tt_global_entry); + tt_global_entry->common.flags &= ~TT_CLIENT_ROAM; + tt_global_entry->roam_at = 0; } - tt_global_entry->common.flags = NO_FLAGS; - tt_global_entry->ttvn = ttvn; - tt_global_entry->roam_at = 0; + + if (!tt_global_entry_has_orig(tt_global_entry, orig_node)) + tt_global_add_orig_entry(tt_global_entry, orig_node, + ttvn); } if (wifi) @@ -562,6 +635,34 @@ out: return ret; } +/* print all orig nodes who announce the address for this global entry. + * it is assumed that the caller holds rcu_read_lock(); + */ +static void tt_global_print_entry(struct tt_global_entry *tt_global_entry, + struct seq_file *seq) +{ + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + struct tt_common_entry *tt_common_entry; + uint16_t flags; + uint8_t last_ttvn; + + tt_common_entry = &tt_global_entry->common; + + head = &tt_global_entry->orig_list; + + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + flags = tt_common_entry->flags; + last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); + seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n", + tt_global_entry->common.addr, orig_entry->ttvn, + orig_entry->orig_node->orig, last_ttvn, + (flags & TT_CLIENT_ROAM ? 'R' : '.'), + (flags & TT_CLIENT_WIFI ? 'W' : '.')); + } +} + int tt_global_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; @@ -605,18 +706,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - seq_printf(seq, - " * %pM (%3u) via %pM (%3u) [%c%c]\n", - tt_global_entry->common.addr, - tt_global_entry->ttvn, - tt_global_entry->orig_node->orig, - (uint8_t) atomic_read( - &tt_global_entry->orig_node-> - last_ttvn), - (tt_global_entry->common.flags & - TT_CLIENT_ROAM ? 'R' : '.'), - (tt_global_entry->common.flags & - TT_CLIENT_WIFI ? 
'W' : '.')); + tt_global_print_entry(tt_global_entry, seq); } rcu_read_unlock(); } @@ -626,27 +716,103 @@ out: return ret; } -static void _tt_global_del(struct bat_priv *bat_priv, - struct tt_global_entry *tt_global_entry, - const char *message) +/* deletes the orig list of a tt_global_entry */ +static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry) { - if (!tt_global_entry) - goto out; + struct hlist_head *head; + struct hlist_node *node, *safe; + struct tt_orig_list_entry *orig_entry; - bat_dbg(DBG_TT, bat_priv, - "Deleting global tt entry %pM (via %pM): %s\n", - tt_global_entry->common.addr, tt_global_entry->orig_node->orig, - message); + spin_lock_bh(&tt_global_entry->list_lock); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { + hlist_del_rcu(node); + tt_orig_list_entry_free_ref(orig_entry); + } + spin_unlock_bh(&tt_global_entry->list_lock); - atomic_dec(&tt_global_entry->orig_node->tt_size); +} + +static void tt_global_del_orig_entry(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + const char *message) +{ + struct hlist_head *head; + struct hlist_node *node, *safe; + struct tt_orig_list_entry *orig_entry; + + spin_lock_bh(&tt_global_entry->list_lock); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { + if (orig_entry->orig_node == orig_node) { + bat_dbg(DBG_TT, bat_priv, + "Deleting %pM from global tt entry %pM: %s\n", + orig_node->orig, tt_global_entry->common.addr, + message); + hlist_del_rcu(node); + tt_orig_list_entry_free_ref(orig_entry); + } + } + spin_unlock_bh(&tt_global_entry->list_lock); +} + +static void tt_global_del_struct(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + const char *message) +{ + bat_dbg(DBG_TT, bat_priv, + "Deleting global tt entry %pM: %s\n", + tt_global_entry->common.addr, message); hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, tt_global_entry->common.addr); -out: - if (tt_global_entry) - tt_global_entry_free_ref(tt_global_entry); + tt_global_entry_free_ref(tt_global_entry); + } +/* If the client is to be deleted, we check if it is the last origantor entry + * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer, + * otherwise we simply remove the originator scheduled for deletion. + */ +static void tt_global_del_roaming(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + struct orig_node *orig_node, + const char *message) +{ + bool last_entry = true; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + + /* no local entry exists, case 1: + * Check if this is the last one or if other entries exist. + */ + + rcu_read_lock(); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + if (orig_entry->orig_node != orig_node) { + last_entry = false; + break; + } + } + rcu_read_unlock(); + + if (last_entry) { + /* its the last one, mark for roaming. */ + tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + } else + /* there is another entry, we can simply delete this + * one and can still use the other one. 
+ */ + tt_global_del_orig_entry(bat_priv, tt_global_entry, + orig_node, message); +} + + + static void tt_global_del(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *addr, @@ -656,30 +822,44 @@ static void tt_global_del(struct bat_priv *bat_priv, struct tt_local_entry *tt_local_entry = NULL; tt_global_entry = tt_global_hash_find(bat_priv, addr); - if (!tt_global_entry || tt_global_entry->orig_node != orig_node) + if (!tt_global_entry) goto out; - if (!roaming) - goto out_del; + if (!roaming) { + tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node, + message); + + if (hlist_empty(&tt_global_entry->orig_list)) + tt_global_del_struct(bat_priv, tt_global_entry, + message); + + goto out; + } /* if we are deleting a global entry due to a roam * event, there are two possibilities: - * 1) the client roamed from node A to node B => we mark + * 1) the client roamed from node A to node B => if there + * is only one originator left for this client, we mark * it with TT_CLIENT_ROAM, we start a timer and we * wait for node B to claim it. In case of timeout * the entry is purged. + * + * If there are other originators left, we directly delete + * the originator. * 2) the client roamed to us => we can directly delete * the global entry, since it is useless now. */ + tt_local_entry = tt_local_hash_find(bat_priv, tt_global_entry->common.addr); - if (!tt_local_entry) { - tt_global_entry->common.flags |= TT_CLIENT_ROAM; - tt_global_entry->roam_at = jiffies; - goto out; - } + if (tt_local_entry) { + /* local entry exists, case 2: client roamed to us. */ + tt_global_del_orig_list(tt_global_entry); + tt_global_del_struct(bat_priv, tt_global_entry, message); + } else + /* no local entry exists, case 1: check for roaming */ + tt_global_del_roaming(bat_priv, tt_global_entry, orig_node, + message); -out_del: - _tt_global_del(bat_priv, tt_global_entry, message); out: if (tt_global_entry) @@ -712,11 +892,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv, tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (tt_global_entry->orig_node == orig_node) { + + tt_global_del_orig_entry(bat_priv, tt_global_entry, + orig_node, message); + + if (hlist_empty(&tt_global_entry->orig_list)) { bat_dbg(DBG_TT, bat_priv, - "Deleting global tt entry %pM (via %pM): %s\n", + "Deleting global tt entry %pM: %s\n", tt_global_entry->common.addr, - tt_global_entry->orig_node->orig, message); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); @@ -757,7 +940,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv) bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry (%pM): Roaming timeout\n", tt_global_entry->common.addr); - atomic_dec(&tt_global_entry->orig_node->tt_size); + hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); } @@ -820,6 +1003,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; struct orig_node *orig_node = NULL; + struct neigh_node *router = NULL; + struct hlist_head *head; + struct hlist_node *node; + struct tt_orig_list_entry *orig_entry; + int best_tq; if (src && atomic_read(&bat_priv->ap_isolation)) { tt_local_entry = tt_local_hash_find(bat_priv, src); @@ -836,11 +1024,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) goto out; - if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) - goto out; + best_tq = 
0; - orig_node = tt_global_entry->orig_node; + rcu_read_lock(); + head = &tt_global_entry->orig_list; + hlist_for_each_entry_rcu(orig_entry, node, head, list) { + router = orig_node_get_router(orig_entry->orig_node); + if (!router) + continue; + if (router->tq_avg > best_tq) { + orig_node = orig_entry->orig_node; + best_tq = router->tq_avg; + } + neigh_node_free_ref(router); + } + /* found anything? */ + if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) + orig_node = NULL; + rcu_read_unlock(); out: if (tt_global_entry) tt_global_entry_free_ref(tt_global_entry); @@ -872,20 +1074,26 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv, tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - if (compare_eth(tt_global_entry->orig_node, - orig_node)) { - /* Roaming clients are in the global table for - * consistency only. They don't have to be - * taken into account while computing the - * global crc */ - if (tt_common_entry->flags & TT_CLIENT_ROAM) - continue; - total_one = 0; - for (j = 0; j < ETH_ALEN; j++) - total_one = crc16_byte(total_one, - tt_common_entry->addr[j]); - total ^= total_one; - } + /* Roaming clients are in the global table for + * consistency only. They don't have to be + * taken into account while computing the + * global crc + */ + if (tt_global_entry->common.flags & TT_CLIENT_ROAM) + continue; + + /* find out if this global entry is announced by this + * originator + */ + if (!tt_global_entry_has_orig(tt_global_entry, + orig_node)) + continue; + + total_one = 0; + for (j = 0; j < ETH_ALEN; j++) + total_one = crc16_byte(total_one, + tt_global_entry->common.addr[j]); + total ^= total_one; } rcu_read_unlock(); } @@ -1026,7 +1234,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, common); - return (tt_global_entry->orig_node == orig_node); + return tt_global_entry_has_orig(tt_global_entry, orig_node); } static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, @@ -1802,6 +2010,8 @@ void tt_commit_changes(struct bat_priv *bat_priv) /* Increment the TTVN only once per OGM interval */ atomic_inc(&bat_priv->ttvn); + bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", + (uint8_t)atomic_read(&bat_priv->ttvn)); bat_priv->tt_poss_change = false; } @@ -1879,6 +2089,7 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, } else { /* if we missed more than one change or our tables are not * in sync anymore -> request fresh tt data */ + if (!orig_node->tt_initialised || ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { request_table: diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 089dd44a29b1..35cd831508a9 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -241,9 +241,16 @@ struct tt_local_entry { struct tt_global_entry { struct tt_common_entry common; + struct hlist_head orig_list; + spinlock_t list_lock; /* protects the list */ + unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ +}; + +struct tt_orig_list_entry { struct orig_node *orig_node; uint8_t ttvn; - unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ + struct rcu_head rcu; + struct hlist_node list; }; struct backbone_gw { -- cgit v1.2.2 From 20ff9d593f8ff20c2ef24498f77a8bc30b3a059a Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:23 +0100 Subject: batman-adv: don't let backbone gateways exchange tt entries As the 
backbone gateways are connected to the same backbone, they should announce the same clients on the backbone non-exclusively. Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 50 ++++++++++++++++++++++++++++++++++ net/batman-adv/bridge_loop_avoidance.h | 1 + net/batman-adv/routing.c | 7 +++++ net/batman-adv/translation-table.c | 18 ++++++++++-- 4 files changed, 74 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 56b9b4924763..6186f6e92e33 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -24,6 +24,7 @@ #include "hard-interface.h" #include "originator.h" #include "bridge_loop_avoidance.h" +#include "translation-table.h" #include "send.h" #include @@ -359,6 +360,7 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, uint8_t *orig, short vid) { struct backbone_gw *entry; + struct orig_node *orig_node; int hash_added; entry = backbone_hash_find(bat_priv, orig, vid); @@ -393,6 +395,13 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, return NULL; } + /* this is a gateway now, remove any tt entries */ + orig_node = orig_hash_find(bat_priv, orig); + if (orig_node) { + tt_global_del_orig(bat_priv, orig_node, + "became a backbone gateway"); + orig_node_free_ref(orig_node); + } return entry; } @@ -1049,6 +1058,47 @@ int bla_init(struct bat_priv *bat_priv) return 1; } +/** + * @bat_priv: the bat priv with all the soft interface information + * @orig: originator mac address + * + * check if the originator is a gateway for any VLAN ID. + * + * returns 1 if it is found, 0 otherwise + * + */ + +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig) +{ + struct hashtable_t *hash = bat_priv->backbone_hash; + struct hlist_head *head; + struct hlist_node *node; + struct backbone_gw *backbone_gw; + int i; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + return 0; + + if (!hash) + return 0; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { + if (compare_eth(backbone_gw->orig, orig)) { + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + } + + return 0; +} + + /** * @skb: the frame to be checked * @orig_node: the orig_node of the frame diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index d9e34803a465..b74940f09baf 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -27,6 +27,7 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_is_backbone_gw(struct sk_buff *skb, struct orig_node *orig_node, int hdr_size); int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); void bla_update_orig_address(struct bat_priv *bat_priv, struct hard_iface *primary_if, struct hard_iface *oldif); diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index a1d8c9b0f902..1d1fd04c9c3a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -673,6 +673,13 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) if (!is_my_mac(roam_adv_packet->dst)) return route_unicast_packet(skb, recv_if); + /* check if it is a backbone gateway. 
we don't accept + * roaming advertisement from it, as it has the same + * entries as we have. + */ + if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) + goto out; + orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); if (!orig_node) goto out; diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 9648b0dc57ef..e16a3690bdb2 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -27,6 +27,7 @@ #include "hash.h" #include "originator.h" #include "routing.h" +#include "bridge_loop_avoidance.h" #include @@ -1615,10 +1616,15 @@ out: bool send_tt_response(struct bat_priv *bat_priv, struct tt_query_packet *tt_request) { - if (is_my_mac(tt_request->dst)) + if (is_my_mac(tt_request->dst)) { + /* don't answer backbone gws! */ + if (bla_is_backbone_gw_orig(bat_priv, tt_request->src)) + return true; + return send_my_tt_response(bat_priv, tt_request); - else + } else { return send_other_tt_response(bat_priv, tt_request); + } } static void _tt_update_changes(struct bat_priv *bat_priv, @@ -1722,6 +1728,10 @@ void handle_tt_response(struct bat_priv *bat_priv, tt_response->src, tt_response->ttvn, tt_response->tt_data, (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); + /* we should have never asked a backbone gw */ + if (bla_is_backbone_gw_orig(bat_priv, tt_response->src)) + goto out; + orig_node = orig_hash_find(bat_priv, tt_response->src); if (!orig_node) goto out; @@ -2052,6 +2062,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); bool full_table = true; + /* don't care about a backbone gateways updates. */ + if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + return; + /* orig table not initialised AND first diff is in the OGM OR the ttvn * increased by one -> we can apply the attached changes */ if ((!orig_node->tt_initialised && ttvn == 1) || -- cgit v1.2.2 From fe2da6ff27c73c1d102ec2189f94e8bc729d1a9b Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:24 +0100 Subject: batman-adv: add broadcast duplicate check When multiple backbone gateways relay the same broadcast from the backbone into the mesh, other nodes in the mesh may receive this broadcast multiple times. To avoid this, the crc checksums of received broadcasts are recorded and new broadcast packets with the same content may be dropped if received by another gateway. 
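The detection logic added in bla_check_bcast_duplist() below reduces to the following sketch (the helpers crc16(), compare_eth() and has_timed_out() and the DUPLIST_* constants are the ones used or introduced by this patch; the snippet assumes the bcast_duplist ring added to struct bat_priv and is meant only to summarise the algorithm, not to replace the function):

	/* CRC over the payload that follows the broadcast header */
	crc = crc16(0, (uint8_t *)bcast_packet + sizeof(*bcast_packet),
		    hdr_size - sizeof(*bcast_packet));

	for (i = 0; i < DUPLIST_SIZE; i++) {
		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
		entry = &bat_priv->bcast_duplist[curr];

		/* entries are ordered by age, so stop at the first stale one */
		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
			break;

		/* same payload, still fresh, and from a different gateway: drop */
		if (entry->crc == crc &&
		    !compare_eth(entry->orig, bcast_packet->orig))
			return 1;
	}
	/* first occurrence: record it (overwriting the oldest slot) and allow it */
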
Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 75 ++++++++++++++++++++++++++++++++++ net/batman-adv/bridge_loop_avoidance.h | 2 + net/batman-adv/main.h | 3 ++ net/batman-adv/routing.c | 4 ++ net/batman-adv/types.h | 7 ++++ 5 files changed, 91 insertions(+) (limited to 'net') diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 6186f6e92e33..4f6b44a5b128 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1041,8 +1041,16 @@ out: /* initialize all bla structures */ int bla_init(struct bat_priv *bat_priv) { + int i; + bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n"); + /* initialize the duplicate list */ + for (i = 0; i < DUPLIST_SIZE; i++) + bat_priv->bcast_duplist[i].entrytime = + jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT); + bat_priv->bcast_duplist_curr = 0; + if (bat_priv->claim_hash) return 1; @@ -1058,6 +1066,73 @@ int bla_init(struct bat_priv *bat_priv) return 1; } +/** + * @bat_priv: the bat priv with all the soft interface information + * @bcast_packet: originator mac address + * @hdr_size: maximum length of the frame + * + * check if it is on our broadcast list. Another gateway might + * have sent the same packet because it is connected to the same backbone, + * so we have to remove this duplicate. + * + * This is performed by checking the CRC, which will tell us + * with a good chance that it is the same packet. If it is furthermore + * sent by another host, drop it. We allow equal packets from + * the same host however as this might be intended. + * + **/ + +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) +{ + int i, length, curr; + uint8_t *content; + uint16_t crc; + struct bcast_duplist_entry *entry; + + length = hdr_size - sizeof(*bcast_packet); + content = (uint8_t *)bcast_packet; + content += sizeof(*bcast_packet); + + /* calculate the crc ... */ + crc = crc16(0, content, length); + + for (i = 0 ; i < DUPLIST_SIZE; i++) { + curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE; + entry = &bat_priv->bcast_duplist[curr]; + + /* we can stop searching if the entry is too old ; + * later entries will be even older + */ + if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT)) + break; + + if (entry->crc != crc) + continue; + + if (compare_eth(entry->orig, bcast_packet->orig)) + continue; + + /* this entry seems to match: same crc, not too old, + * and from another gw. therefore return 1 to forbid it. + */ + return 1; + } + /* not found, add a new entry (overwrite the oldest entry) */ + curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE; + entry = &bat_priv->bcast_duplist[curr]; + entry->crc = crc; + entry->entrytime = jiffies; + memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); + bat_priv->bcast_duplist_curr = curr; + + /* allow it, its the first occurence. 
*/ + return 0; +} + + + /** * @bat_priv: the bat priv with all the soft interface information * @orig: originator mac address diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index b74940f09baf..9468c245121c 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -28,6 +28,8 @@ int bla_is_backbone_gw(struct sk_buff *skb, struct orig_node *orig_node, int hdr_size); int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, int hdr_size); void bla_update_orig_address(struct bat_priv *bat_priv, struct hard_iface *primary_if, struct hard_iface *oldif); diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 82723b5dce61..d9832acf558d 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -83,6 +83,9 @@ #define BLA_PERIOD_LENGTH 10000 /* 10 seconds */ #define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) #define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) + +#define DUPLIST_SIZE 16 +#define DUPLIST_TIMEOUT 500 /* 500 ms */ /* don't reset again within 30 seconds */ #define RESET_PROTECTION_MS 30000 #define EXPECTED_SEQNO_RANGE 65536 diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 1d1fd04c9c3a..78eddc9067e6 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -1076,6 +1076,10 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_unlock_bh(&orig_node->bcast_seqno_lock); + /* check whether this has been sent by another originator before */ + if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) + goto out; + /* rebroadcast packet */ add_bcast_packet_to_list(bat_priv, skb, 1); diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 35cd831508a9..ad97e87a2e22 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -140,6 +140,11 @@ struct neigh_node { spinlock_t tq_lock; /* protects: tq_recv, tq_index */ }; +struct bcast_duplist_entry { + uint8_t orig[ETH_ALEN]; + uint16_t crc; + unsigned long entrytime; +}; struct bat_priv { atomic_t mesh_state; @@ -186,6 +191,8 @@ struct bat_priv { struct list_head tt_req_list; /* list of pending tt_requests */ struct list_head tt_roam_list; struct hashtable_t *vis_hash; + struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; + int bcast_duplist_curr; spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects */ spinlock_t tt_changes_list_lock; /* protects tt_changes */ -- cgit v1.2.2 From b1a8c04b8af74158e006b92a9e7e0f619e1a9409 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:25 +0100 Subject: batman-adv: drop STP over batman Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/soft-interface.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'net') diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 4d639b303bd7..10ab49aa568f 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -130,6 +130,8 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct hard_iface *primary_if = NULL; struct bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; + static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, + 0x00}; unsigned int header_len = 0; int data_len = skb->len, ret; 
short vid = -1; @@ -159,6 +161,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); + /* don't accept stp packets. STP does not help in meshes. + * better use the bridge loop avoidance ... + */ + if (compare_eth(ethhdr->h_dest, stp_addr)) + goto dropped; + if (is_multicast_ether_addr(ethhdr->h_dest)) { do_bcast = true; -- cgit v1.2.2 From 38ef3d1d919e6a47c3e0d38b3d788aa468a7ede8 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:26 +0100 Subject: batman-adv: form groups in the bridge loop avoidance backbone gateways may be part of the same LAN, but participate in different meshes. With this patch, backbone gateways form groups by applying the groupid of another backbone gateway if it is higher. After forming the group, they only accept messages from backbone gateways of the same group. Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 121 +++++++++++++++++++++++++++++++-- net/batman-adv/types.h | 1 + 2 files changed, 116 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 4f6b44a5b128..1cf18ac44ba9 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -33,7 +33,6 @@ #include #include -static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; static void bla_periodic_work(struct work_struct *work); @@ -265,7 +264,8 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, if (!primary_if) return; - memcpy(&local_claim_dest, claim_dest, sizeof(local_claim_dest)); + memcpy(&local_claim_dest, &bat_priv->claim_dest, + sizeof(local_claim_dest)); local_claim_dest.type = claimtype; soft_iface = primary_if->soft_iface; @@ -282,6 +282,7 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, primary_if->net_dev->dev_addr, /* HW DST: FF:43:05:XX:00:00 * with XX = claim type + * and YY:YY = group id */ (uint8_t *)&local_claim_dest); @@ -732,6 +733,86 @@ static int handle_claim(struct bat_priv *bat_priv, return 1; } +/** + * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the soft interface information + * @hw_src: the Hardware source in the ARP Header + * @hw_dst: the Hardware destination in the ARP Header + * @ethhdr: pointer to the Ethernet header of the claim frame + * + * checks if it is a claim packet and if its on the same group. + * This function also applies the group ID of the sender + * if it is in the same mesh. 
+ * + * returns: + * 2 - if it is a claim packet and on the same group + * 1 - if is a claim packet from another group + * 0 - if it is not a claim packet + */ +static int check_claim_group(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + uint8_t *hw_src, uint8_t *hw_dst, + struct ethhdr *ethhdr) +{ + uint8_t *backbone_addr; + struct orig_node *orig_node; + struct bla_claim_dst *bla_dst, *bla_dst_own; + + bla_dst = (struct bla_claim_dst *)hw_dst; + bla_dst_own = &bat_priv->claim_dest; + + /* check if it is a claim packet in general */ + if (memcmp(bla_dst->magic, bla_dst_own->magic, + sizeof(bla_dst->magic)) != 0) + return 0; + + /* if announcement packet, use the source, + * otherwise assume it is in the hw_src + */ + switch (bla_dst->type) { + case CLAIM_TYPE_ADD: + backbone_addr = hw_src; + break; + case CLAIM_TYPE_REQUEST: + case CLAIM_TYPE_ANNOUNCE: + case CLAIM_TYPE_DEL: + backbone_addr = ethhdr->h_source; + break; + default: + return 0; + } + + /* don't accept claim frames from ourselves */ + if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) + return 0; + + /* if its already the same group, it is fine. */ + if (bla_dst->group == bla_dst_own->group) + return 2; + + /* lets see if this originator is in our mesh */ + orig_node = orig_hash_find(bat_priv, backbone_addr); + + /* dont accept claims from gateways which are not in + * the same mesh or group. + */ + if (!orig_node) + return 1; + + /* if our mesh friends mac is bigger, use it for ourselves. */ + if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) { + bat_dbg(DBG_BLA, bat_priv, + "taking other backbones claim group: %04x\n", + ntohs(bla_dst->group)); + bla_dst_own->group = bla_dst->group; + } + + orig_node_free_ref(orig_node); + + return 2; +} + + /** * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked @@ -753,6 +834,7 @@ static int bla_process_claim(struct bat_priv *bat_priv, uint16_t proto; int headlen; short vid = -1; + int ret; ethhdr = (struct ethhdr *)skb_mac_header(skb); @@ -796,8 +878,14 @@ static int bla_process_claim(struct bat_priv *bat_priv, bla_dst = (struct bla_claim_dst *)hw_dst; /* check if it is a claim frame. */ - if (memcmp(hw_dst, claim_dest, 3) != 0) - return 0; + ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr); + if (ret == 1) + bat_dbg(DBG_BLA, bat_priv, + "bla_process_claim(): received a claim frame from another group. 
From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", + ethhdr->h_source, vid, hw_src, hw_dst); + + if (ret < 2) + return ret; /* become a backbone gw ourselves on this vlan if not happened yet */ bla_update_own_backbone_gw(bat_priv, primary_if, vid); @@ -944,6 +1032,10 @@ void bla_update_orig_address(struct bat_priv *bat_priv, struct hashtable_t *hash; int i; + /* reset bridge loop avoidance group id */ + bat_priv->claim_dest.group = + htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); + if (!oldif) { bla_purge_claims(bat_priv, NULL, 1); bla_purge_backbone_gw(bat_priv, 1); @@ -1042,9 +1134,24 @@ out: int bla_init(struct bat_priv *bat_priv) { int i; + uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; + struct hard_iface *primary_if; bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n"); + /* setting claim destination address */ + memcpy(&bat_priv->claim_dest.magic, claim_dest, 3); + bat_priv->claim_dest.type = 0; + primary_if = primary_if_get_selected(bat_priv); + if (primary_if) { + bat_priv->claim_dest.group = + htons(crc16(0, primary_if->net_dev->dev_addr, + ETH_ALEN)); + hardif_free_ref(primary_if); + } else { + bat_priv->claim_dest.group = 0; /* will be set later */ + } + /* initialize the duplicate list */ for (i = 0; i < DUPLIST_SIZE; i++) bat_priv->bcast_duplist[i].entrytime = @@ -1448,8 +1555,10 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) goto out; } - seq_printf(seq, "Claims announced for the mesh %s (orig %pM)\n", - net_dev->name, primary_if->net_dev->dev_addr); + seq_printf(seq, + "Claims announced for the mesh %s (orig %pM, group id %04x)\n", + net_dev->name, primary_if->net_dev->dev_addr, + ntohs(bat_priv->claim_dest.group)); seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", "Client", "VID", "Originator", "CRC"); for (i = 0; i < hash->size; i++) { diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index ad97e87a2e22..7f7f610ea1d1 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -193,6 +193,7 @@ struct bat_priv { struct hashtable_t *vis_hash; struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; int bcast_duplist_curr; + struct bla_claim_dst claim_dest; spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects */ spinlock_t tt_changes_list_lock; /* protects tt_changes */ -- cgit v1.2.2 From 7a5cc24277b57ce38eb0afa6634b71d4d5cc671e Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Sun, 22 Jan 2012 20:00:27 +0100 Subject: batman-adv: add bridge loop avoidance compile option The define CONFIG_BATMAN_ADV_BLA switches the bridge loop avoidance on - skip it, and the bridge loop avoidance is not compiled in. This is useful if binary size should be saved or the feature is not needed. Signed-off-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/Kconfig | 12 ++++++- net/batman-adv/Makefile | 2 +- net/batman-adv/bat_debugfs.c | 7 ++++- net/batman-adv/bat_sysfs.c | 4 +++ net/batman-adv/bridge_loop_avoidance.h | 57 ++++++++++++++++++++++++++++++++++ net/batman-adv/soft-interface.c | 4 +-- net/batman-adv/types.h | 8 +++++ 7 files changed, 89 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 6ff977c1f3bc..53f5244e28f8 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@ -4,7 +4,7 @@ config BATMAN_ADV tristate "B.A.T.M.A.N. 
Advanced Meshing Protocol" - depends on NET && INET + depends on NET select CRC16 default n help @@ -14,6 +14,16 @@ config BATMAN_ADV http://www.open-mesh.org/ for more information and user space tools. +config BATMAN_ADV_BLA + bool "Bridge Loop Avoidance" + depends on BATMAN_ADV && INET + default y + help + This option enables BLA (Bridge Loop Avoidance), a mechanism + to avoid Ethernet frames looping when mesh nodes are connected + to both the same LAN and the same mesh. If you will never use + more than one mesh node in the same LAN, you can safely remove + this feature and save some space. config BATMAN_ADV_DEBUG bool "B.A.T.M.A.N. debugging" diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index 94b67fd81bf7..6d5c1940667d 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -23,7 +23,7 @@ batman-adv-y += bat_debugfs.o batman-adv-y += bat_iv_ogm.o batman-adv-y += bat_sysfs.o batman-adv-y += bitarray.o -batman-adv-y += bridge_loop_avoidance.o +batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c index 0e3517773a8b..916380c73ab7 100644 --- a/net/batman-adv/bat_debugfs.c +++ b/net/batman-adv/bat_debugfs.c @@ -245,12 +245,13 @@ static int transtable_global_open(struct inode *inode, struct file *file) return single_open(file, tt_global_seq_print_text, net_dev); } +#ifdef CONFIG_BATMAN_ADV_BLA static int bla_claim_table_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; return single_open(file, bla_claim_table_seq_print_text, net_dev); } - +#endif static int transtable_local_open(struct inode *inode, struct file *file) { @@ -285,7 +286,9 @@ static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); +#ifdef CONFIG_BATMAN_ADV_BLA static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open); +#endif static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); @@ -293,7 +296,9 @@ static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_originators, &bat_debuginfo_gateways, &bat_debuginfo_transtable_global, +#ifdef CONFIG_BATMAN_ADV_BLA &bat_debuginfo_bla_claim_table, +#endif &bat_debuginfo_transtable_local, &bat_debuginfo_vis_data, NULL, diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 824bfe710687..c6efd687ca75 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -386,7 +386,9 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); +#ifdef CONFIG_BATMAN_ADV_BLA BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); +#endif BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); @@ -405,7 +407,9 @@ BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); static struct bat_attribute *mesh_attrs[] = { &bat_attr_aggregated_ogms, &bat_attr_bonding, +#ifdef CONFIG_BATMAN_ADV_BLA &bat_attr_bridge_loop_avoidance, +#endif 
&bat_attr_fragmentation, &bat_attr_ap_isolation, &bat_attr_vis_mode, diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 9468c245121c..4a8e4fc766bc 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -22,6 +22,7 @@ #ifndef _NET_BATMAN_ADV_BLA_H_ #define _NET_BATMAN_ADV_BLA_H_ +#ifdef CONFIG_BATMAN_ADV_BLA int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_is_backbone_gw(struct sk_buff *skb, @@ -37,5 +38,61 @@ int bla_init(struct bat_priv *bat_priv); void bla_free(struct bat_priv *bat_priv); #define BLA_CRC_INIT 0 +#else /* ifdef CONFIG_BATMAN_ADV_BLA */ + +static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) +{ + return 0; +} + +static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) +{ + return 0; +} + +static inline int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, + int hdr_size) +{ + return 0; +} + +static inline int bla_claim_table_seq_print_text(struct seq_file *seq, + void *offset) +{ + return 0; +} + +static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, + uint8_t *orig) +{ + return 0; +} + +static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) +{ + return 0; +} + +static inline void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) +{ +} + +static inline int bla_init(struct bat_priv *bat_priv) +{ + return 1; +} + +static inline void bla_free(struct bat_priv *bat_priv) +{ +} + +#endif /* ifdef CONFIG_BATMAN_ADV_BLA */ #endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 10ab49aa568f..efe0fbaadcd6 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -134,7 +134,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) 0x00}; unsigned int header_len = 0; int data_len = skb->len, ret; - short vid = -1; + short vid __maybe_unused = -1; bool do_bcast = false; if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) @@ -256,7 +256,7 @@ void interface_rx(struct net_device *soft_iface, struct bat_priv *bat_priv = netdev_priv(soft_iface); struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; - short vid = -1; + short vid __maybe_unused = -1; /* check if enough space is available for pulling, and pull */ if (!pskb_may_pull(skb, hdr_size)) diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 7f7f610ea1d1..a5b1a6333def 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -140,11 +140,13 @@ struct neigh_node { spinlock_t tq_lock; /* protects: tq_recv, tq_index */ }; +#ifdef CONFIG_BATMAN_ADV_BLA struct bcast_duplist_entry { uint8_t orig[ETH_ALEN]; uint16_t crc; unsigned long entrytime; }; +#endif struct bat_priv { atomic_t mesh_state; @@ -186,14 +188,18 @@ struct bat_priv { struct hashtable_t *orig_hash; struct hashtable_t *tt_local_hash; struct hashtable_t *tt_global_hash; +#ifdef CONFIG_BATMAN_ADV_BLA struct hashtable_t *claim_hash; struct hashtable_t *backbone_hash; +#endif struct list_head tt_req_list; /* list of pending tt_requests */ struct list_head tt_roam_list; struct hashtable_t *vis_hash; +#ifdef CONFIG_BATMAN_ADV_BLA struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE]; int bcast_duplist_curr; struct 
bla_claim_dst claim_dest; +#endif spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects */ spinlock_t tt_changes_list_lock; /* protects tt_changes */ @@ -261,6 +267,7 @@ struct tt_orig_list_entry { struct hlist_node list; }; +#ifdef CONFIG_BATMAN_ADV_BLA struct backbone_gw { uint8_t orig[ETH_ALEN]; short vid; /* used VLAN ID */ @@ -282,6 +289,7 @@ struct claim { atomic_t refcount; struct hlist_node hash_entry; }; +#endif struct tt_change_node { struct list_head list; -- cgit v1.2.2 From d934f7d0d6a3f8aa3049ca0692948ec59d738928 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 2 Apr 2012 21:21:19 -0700 Subject: mac80211: Use mandatory rates as basic rates when starting mesh Signed-off-by: Ashok Nagarajan Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/mesh.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 386dbca1eab3..f1d9685d959c 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -581,8 +581,12 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ieee80211_mesh_root_setup(ifmsh); ieee80211_queue_work(&local->hw, &sdata->work); sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; + sdata->vif.bss_conf.basic_rates = + ieee80211_mandatory_rates(sdata->local, + sdata->local->hw.conf.channel->band); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); } -- cgit v1.2.2 From 657c3e0c4147bb3d3fdd338e32b83b968b0f9d02 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 2 Apr 2012 21:21:20 -0700 Subject: mac80211: Indicate basic rates when adding rate IEs Basic rates are added with supported rates IE and extended supported rates IE. Signed-off-by: Ashok Nagarajan Signed-off-by: Thomas Pedersen Signed-off-by: John W. 
Linville --- net/mac80211/cfg.c | 12 ++++++------ net/mac80211/mesh_plink.c | 4 ++-- net/mac80211/tx.c | 4 ++-- net/mac80211/util.c | 18 ++++++++++++++---- 4 files changed, 24 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 83e08dcb2f5d..42e1fb2e700f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2358,8 +2358,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, tf->u.setup_req.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); - ieee80211_add_srates_ie(&sdata->vif, skb); - ieee80211_add_ext_srates_ie(&sdata->vif, skb); + ieee80211_add_srates_ie(&sdata->vif, skb, false); + ieee80211_add_ext_srates_ie(&sdata->vif, skb, false); ieee80211_tdls_add_ext_capab(skb); break; case WLAN_TDLS_SETUP_RESPONSE: @@ -2372,8 +2372,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, tf->u.setup_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); - ieee80211_add_srates_ie(&sdata->vif, skb); - ieee80211_add_ext_srates_ie(&sdata->vif, skb); + ieee80211_add_srates_ie(&sdata->vif, skb, false); + ieee80211_add_ext_srates_ie(&sdata->vif, skb, false); ieee80211_tdls_add_ext_capab(skb); break; case WLAN_TDLS_SETUP_CONFIRM: @@ -2433,8 +2433,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); - ieee80211_add_srates_ie(&sdata->vif, skb); - ieee80211_add_ext_srates_ie(&sdata->vif, skb); + ieee80211_add_srates_ie(&sdata->vif, skb, false); + ieee80211_add_ext_srates_ie(&sdata->vif, skb, false); ieee80211_tdls_add_ext_capab(skb); break; default: diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 923269b62b43..73fa687edc7c 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -212,8 +212,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, 2); memcpy(pos + 2, &plid, 2); } - if (ieee80211_add_srates_ie(&sdata->vif, skb) || - ieee80211_add_ext_srates_ie(&sdata->vif, skb) || + if (ieee80211_add_srates_ie(&sdata->vif, skb, true) || + ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) || mesh_add_rsn_ie(skb, sdata) || mesh_add_meshid_ie(skb, sdata) || mesh_add_meshconf_ie(skb, sdata)) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index daab5adeb93c..4109ec7999a3 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2418,9 +2418,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, *pos++ = WLAN_EID_SSID; *pos++ = 0x0; - if (ieee80211_add_srates_ie(&sdata->vif, skb) || + if (ieee80211_add_srates_ie(&sdata->vif, skb, true) || mesh_add_ds_params_ie(skb, sdata) || - ieee80211_add_ext_srates_ie(&sdata->vif, skb) || + ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) || mesh_add_rsn_ie(skb, sdata) || mesh_add_ht_cap_ie(skb, sdata) || mesh_add_ht_oper_ie(skb, sdata) || diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 471a831066dd..468a18ea1f1b 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1683,13 +1683,15 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper) return channel_type; } -int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) +int ieee80211_add_srates_ie(struct ieee80211_vif *vif, + struct sk_buff *skb, bool need_basic) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int 
rate; u8 i, rates, *pos; + u32 basic_rates = vif->bss_conf.basic_rates; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; rates = sband->n_bitrates; @@ -1703,20 +1705,25 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) *pos++ = WLAN_EID_SUPP_RATES; *pos++ = rates; for (i = 0; i < rates; i++) { + u8 basic = 0; + if (need_basic && basic_rates & BIT(i)) + basic = 0x80; rate = sband->bitrates[i].bitrate; - *pos++ = (u8) (rate / 5); + *pos++ = basic | (u8) (rate / 5); } return 0; } -int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) +int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, + struct sk_buff *skb, bool need_basic) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; int rate; u8 i, exrates, *pos; + u32 basic_rates = vif->bss_conf.basic_rates; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; exrates = sband->n_bitrates; @@ -1733,8 +1740,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = exrates; for (i = 8; i < sband->n_bitrates; i++) { + u8 basic = 0; + if (need_basic && basic_rates & BIT(i)) + basic = 0x80; rate = sband->bitrates[i].bitrate; - *pos++ = (u8) (rate / 5); + *pos++ = basic | (u8) (rate / 5); } } return 0; -- cgit v1.2.2 From 9ebb61a23d90703344fc609fbee8da67b1e7456c Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 2 Apr 2012 21:21:21 -0700 Subject: mac80211: Modify sta_get_rates to give basic rates Signed-off-by: Ashok Nagarajan Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/ibss.c | 4 ++-- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/mesh.c | 5 +++-- net/mac80211/mesh_plink.c | 7 ++++--- net/mac80211/util.c | 16 +++++++++++++--- 5 files changed, 23 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index e910449dead0..49a207980338 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -408,7 +408,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, if (elems->supp_rates) { supp_rates = ieee80211_sta_get_rates(local, elems, - band); + band, NULL); if (sta) { u32 prev_rates; @@ -558,7 +558,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, sdata->name, mgmt->bssid); #endif ieee80211_sta_join_ibss(sdata, bss); - supp_rates = ieee80211_sta_get_rates(local, elems, band); + supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates, true); rcu_read_unlock(); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ea9623cbd969..41f7295cd891 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1480,7 +1480,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, const u8 *supp_rates); u32 ieee80211_sta_get_rates(struct ieee80211_local *local, struct ieee802_11_elems *elems, - enum ieee80211_band band); + enum ieee80211_band band, u32 *basic_rates); int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode); void ieee80211_recalc_smps(struct ieee80211_local *local); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index f1d9685d959c..7b22822d4208 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -625,7 +625,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data 
*sdata, struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems elems; struct ieee80211_channel *channel; - u32 supp_rates = 0; + u32 supp_rates = 0, basic_rates = 0; size_t baselen; int freq; enum ieee80211_band band = rx_status->band; @@ -658,7 +658,8 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, if (elems.mesh_id && elems.mesh_config && mesh_matches_local(&elems, sdata)) { - supp_rates = ieee80211_sta_get_rates(local, &elems, band); + supp_rates = ieee80211_sta_get_rates(local, &elems, + band, &basic_rates); mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); } diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 73fa687edc7c..91e2043bc9b2 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -465,6 +465,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m bool deactivated, matches_local = true; u8 ie_len; u8 *baseaddr; + u32 rates, basic_rates = 0; __le16 plid, llid, reason; #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG static const char *mplstates[] = { @@ -559,6 +560,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m /* Now we will figure out the appropriate event... */ event = PLINK_UNDEFINED; + rates = ieee80211_sta_get_rates(local, &elems, + rx_status->band, &basic_rates); + if (ftype != WLAN_SP_MESH_PEERING_CLOSE && (!mesh_matches_local(&elems, sdata))) { matches_local = false; @@ -583,7 +587,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m return; } else if (!sta) { /* ftype == WLAN_SP_MESH_PEERING_OPEN */ - u32 rates; rcu_read_unlock(); @@ -591,8 +594,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m mpl_dbg("Mesh plink error: no more free plinks\n"); return; } - - rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems); if (!sta) { mpl_dbg("Mesh plink error: plink table full\n"); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 468a18ea1f1b..a18b693042b2 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1109,7 +1109,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, u32 ieee80211_sta_get_rates(struct ieee80211_local *local, struct ieee802_11_elems *elems, - enum ieee80211_band band) + enum ieee80211_band band, u32 *basic_rates) { struct ieee80211_supported_band *sband; struct ieee80211_rate *bitrates; @@ -1130,15 +1130,25 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, elems->ext_supp_rates_len; i++) { u8 rate = 0; int own_rate; + bool is_basic; if (i < elems->supp_rates_len) rate = elems->supp_rates[i]; else if (elems->ext_supp_rates) rate = elems->ext_supp_rates [i - elems->supp_rates_len]; own_rate = 5 * (rate & 0x7f); - for (j = 0; j < num_rates; j++) - if (bitrates[j].bitrate == own_rate) + is_basic = !!(rate & 0x80); + + if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) + continue; + + for (j = 0; j < num_rates; j++) { + if (bitrates[j].bitrate == own_rate) { supp_rates |= BIT(j); + if (basic_rates && is_basic) + *basic_rates |= BIT(j); + } + } } return supp_rates; } -- cgit v1.2.2 From fe40cb6274988852aa5a84440d8f81c00cea4028 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 2 Apr 2012 21:21:22 -0700 Subject: mac80211: Check basic rates when peering Section 13.2.3 of IEEE 80211s standard requires BSSBasicRateSet of mesh nodes to be identical to establish peer link. 
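The enforcement itself is a single comparison inside mesh_matches_local(), pulled out of the hunk below for clarity (basic_rates is the candidate peer's BSSBasicRateSet as recovered by ieee80211_sta_get_rates()):

	if (sdata->vif.bss_conf.basic_rates != basic_rates)
		goto mismatch;	/* BSSBasicRateSets differ: do not establish the peer link */
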
Signed-off-by: Ashok Nagarajan Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/mesh.c | 15 ++++++++++----- net/mac80211/mesh.h | 2 +- net/mac80211/mesh_plink.c | 2 +- 3 files changed, 12 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 7b22822d4208..133c118526fb 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -66,11 +66,13 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) * * @ie: information elements of a management frame from the mesh peer * @sdata: local mesh subif + * @basic_rates: BSSBasicRateSet of the peer candidate * * This function checks if the mesh configuration of a mesh point matches the * local mesh configuration, i.e. if both nodes belong to the same mesh network. */ -bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) +bool mesh_matches_local(struct ieee802_11_elems *ie, + struct ieee80211_sub_if_data *sdata, u32 basic_rates) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; @@ -94,6 +96,9 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) goto mismatch; + if (sdata->vif.bss_conf.basic_rates != basic_rates) + goto mismatch; + /* disallow peering with mismatched channel types for now */ if (ie->ht_operation && (local->_oper_channel_type != @@ -656,12 +661,12 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) return; + supp_rates = ieee80211_sta_get_rates(local, &elems, + band, &basic_rates); + if (elems.mesh_id && elems.mesh_config && - mesh_matches_local(&elems, sdata)) { - supp_rates = ieee80211_sta_get_rates(local, &elems, - band, &basic_rates); + mesh_matches_local(&elems, sdata, basic_rates)) mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); - } if (ifmsh->sync_ops) ifmsh->sync_ops->rx_bcn_presp(sdata, diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index fa7d9704c175..4ad738988801 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -223,7 +223,7 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, struct ieee80211_sub_if_data *sdata); bool mesh_matches_local(struct ieee802_11_elems *ie, - struct ieee80211_sub_if_data *sdata); + struct ieee80211_sub_if_data *sdata, u32 basic_rates); void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 91e2043bc9b2..9c836e774fbd 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -564,7 +564,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m rx_status->band, &basic_rates); if (ftype != WLAN_SP_MESH_PEERING_CLOSE && - (!mesh_matches_local(&elems, sdata))) { + (!mesh_matches_local(&elems, sdata, basic_rates))) { matches_local = false; switch (ftype) { case WLAN_SP_MESH_PEERING_OPEN: -- cgit v1.2.2 From 3edaf3e61fda3aa9ff8d38445bf92f2bec23bf63 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 Apr 2012 10:24:00 +0200 Subject: mac80211: manage AP netdev carrier state The AP netdev is really only active when beaconing, so manage the carrier state accordingly. Also do that for VLAN interfaces enslaved to a given AP interface. 
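The pattern, in isolation: the AP netdev's carrier (and that of any AP_VLAN netdevs enslaved to it) is brought up when beaconing starts and taken down when the beacon is removed. A simplified, hypothetical sketch of that pattern (the helper and container types below are illustrative, not the real mac80211 structures):

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/types.h>

/* Hypothetical container types, for illustration only. */
struct ap_vlan {
	struct net_device *dev;
	struct list_head list;
};

struct ap_iface {
	struct net_device *dev;
	struct list_head vlans;		/* enslaved AP_VLAN interfaces */
};

/* Carrier follows the beacon: on when beaconing starts, off when it stops. */
static void ap_set_carrier(struct ap_iface *ap, bool beaconing)
{
	struct ap_vlan *vlan;

	if (beaconing)
		netif_carrier_on(ap->dev);
	else
		netif_carrier_off(ap->dev);

	list_for_each_entry(vlan, &ap->vlans, list) {
		if (beaconing)
			netif_carrier_on(vlan->dev);
		else
			netif_carrier_off(vlan->dev);
	}
}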
Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/cfg.c | 10 +++++++++- net/mac80211/iface.c | 9 +++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 42e1fb2e700f..667d93943399 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -644,6 +644,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, ieee80211_bss_info_change_notify(sdata, changed); + netif_carrier_on(dev); + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + netif_carrier_on(vlan->dev); + return 0; } @@ -669,7 +673,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) { - struct ieee80211_sub_if_data *sdata; + struct ieee80211_sub_if_data *sdata, *vlan; struct beacon_data *old; sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -678,6 +682,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) if (!old) return -ENOENT; + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + netif_carrier_off(vlan->dev); + netif_carrier_off(dev); + RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); kfree_rcu(old, rcu_head); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index efb433d3dc25..56a38a3088d4 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -254,7 +254,11 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: - /* no need to tell driver */ + /* no need to tell driver, but set carrier */ + if (rtnl_dereference(sdata->bss->beacon)) + netif_carrier_on(dev); + else + netif_carrier_off(dev); break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { @@ -294,7 +298,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) ieee80211_bss_info_change_notify(sdata, changed); if (sdata->vif.type == NL80211_IFTYPE_STATION || - sdata->vif.type == NL80211_IFTYPE_ADHOC) + sdata->vif.type == NL80211_IFTYPE_ADHOC || + sdata->vif.type == NL80211_IFTYPE_AP) netif_carrier_off(dev); else netif_carrier_on(dev); -- cgit v1.2.2 From 4b6f1dd6a6faf4ed8d209bbd548e78b15e55aee8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 Apr 2012 14:35:57 +0200 Subject: mac80211: add explicit monitor interface if needed The queue mapping redesign that I'm planning to do will break pure injection unless we handle monitor interfaces explicitly. One possible option would be to have the driver tell mac80211 about monitor mode queues etc., but that would duplicate the API since we already need to have queue assignments handled per virtual interface. So in order to solve this, have a virtual monitor interface that is added whenever all active vifs are monitors. We could also use the state of one of the monitor interfaces, but managing that would be complicated, so allocate separate state. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/driver-ops.h | 3 ++- net/mac80211/ieee80211_i.h | 3 +++ net/mac80211/iface.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++ net/mac80211/pm.c | 4 +++ net/mac80211/tx.c | 7 +++-- net/mac80211/util.c | 10 +++++++ 6 files changed, 89 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 8ad40f68f2c3..492c08c27c5f 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -101,7 +101,8 @@ static inline int drv_add_interface(struct ieee80211_local *local, might_sleep(); if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sdata->vif.type == NL80211_IFTYPE_MONITOR)) + (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)))) return -EINVAL; trace_drv_add_interface(local, sdata); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 41f7295cd891..8ed074f7e6cd 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1100,6 +1100,9 @@ struct ieee80211_local { struct net_device napi_dev; struct napi_struct napi; + + /* virtual monitor interface */ + struct ieee80211_sub_if_data __rcu *monitor_sdata; }; static inline struct ieee80211_sub_if_data * diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 56a38a3088d4..2b88cb278fc4 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -169,6 +169,59 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, #undef ADJUST } +static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + int ret; + + if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) + return 0; + + if (local->monitor_sdata) + return 0; + + sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); + if (!sdata) + return -ENOMEM; + + /* set up data */ + sdata->local = local; + sdata->vif.type = NL80211_IFTYPE_MONITOR; + snprintf(sdata->name, IFNAMSIZ, "%s-monitor", + wiphy_name(local->hw.wiphy)); + + ret = drv_add_interface(local, sdata); + if (WARN_ON(ret)) { + /* ok .. stupid driver, it asked for this! 
*/ + kfree(sdata); + return ret; + } + + rcu_assign_pointer(local->monitor_sdata, sdata); + + return 0; +} + +static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) + return; + + sdata = rtnl_dereference(local->monitor_sdata); + + if (!sdata) + return; + + rcu_assign_pointer(local->monitor_sdata, NULL); + synchronize_net(); + + drv_remove_interface(local, sdata); + + kfree(sdata); +} + /* * NOTE: Be very careful when changing this function, it must NOT return * an error on interface type changes that have been pre-checked, so most @@ -266,6 +319,12 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) break; } + if (local->monitors == 0 && local->open_count == 0) { + res = ieee80211_add_virtual_monitor(local); + if (res) + goto err_stop; + } + /* must be before the call to ieee80211_configure_filter */ local->monitors++; if (local->monitors == 1) { @@ -280,6 +339,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) break; default: if (coming_up) { + ieee80211_del_virtual_monitor(local); + res = drv_add_interface(local, sdata); if (res) goto err_stop; @@ -511,6 +572,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; + ieee80211_del_virtual_monitor(local); } ieee80211_adjust_monitor_flags(sdata, -1); @@ -584,6 +646,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + if (local->monitors == local->open_count && local->monitors > 0) + ieee80211_add_virtual_monitor(local); } static int ieee80211_stop(struct net_device *dev) diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index ef8eba1d736d..af1c4e26e965 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -127,6 +127,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) drv_remove_interface(local, sdata); } + sdata = rtnl_dereference(local->monitor_sdata); + if (sdata) + drv_remove_interface(local, sdata); + /* stop hardware - this must stop RX */ if (local->open_count) ieee80211_stop_device(local); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4109ec7999a3..a8d0188ab408 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1283,8 +1283,11 @@ static bool __ieee80211_tx(struct ieee80211_local *local, switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: - sdata = NULL; - vif = NULL; + sdata = rcu_dereference(local->monitor_sdata); + if (sdata) + vif = &sdata->vif; + else + vif = NULL; break; case NL80211_IFTYPE_AP_VLAN: sdata = container_of(sdata->bss, diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a18b693042b2..9e8f4b892555 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1223,6 +1223,16 @@ int ieee80211_reconfig(struct ieee80211_local *local) IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); /* add interfaces */ + sdata = rtnl_dereference(local->monitor_sdata); + if (sdata) { + res = drv_add_interface(local, sdata); + if (WARN_ON(res)) { + rcu_assign_pointer(local->monitor_sdata, NULL); + synchronize_net(); + kfree(sdata); + } + } + list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && -- cgit v1.2.2 From 3a25a8c8b75b430c4f4022918e26fa51d557ecde Mon Sep 17 00:00:00 2001 From: Johannes Berg 
Date: Tue, 3 Apr 2012 16:28:50 +0200 Subject: mac80211: add improved HW queue control mac80211 currently only supports one hardware queue per AC. This is already problematic for off-channel uses since if we go off channel while the BE queue is full and then try to send an off-channel frame the frame will never go out. This will become worse when we support multi-channel since then a queue on one channel might be full, but we have to stop the software queue for all channels. That is obviously not desirable. To address this problem allow drivers to register more hardware queues, and allow them to map them to virtual interfaces. When they stop a hardware queue the corresponding AC software queues on the correct interfaces will be stopped as well. Additionally, there's an off-channel queue to solve that problem and a per-interface after-DTIM beacon queue. This allows drivers to manage software queues closer to how the hardware works. Currently, there's a limit of 16 hardware queues. This may or may not be sufficient, we can adjust it as needed. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/agg-tx.c | 39 +++++++++++++++-------------- net/mac80211/cfg.c | 6 +++++ net/mac80211/ieee80211_i.h | 1 + net/mac80211/iface.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++ net/mac80211/main.c | 6 +++++ net/mac80211/tx.c | 38 +++++++++++++++++++++------- net/mac80211/util.c | 48 +++++++++++++++++++++++++++-------- 7 files changed, 162 insertions(+), 38 deletions(-) (limited to 'net') diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 9628a1892441..5b7053c58732 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -286,25 +286,25 @@ static inline int ieee80211_ac_from_tid(int tid) * a global "agg_queue_stop" refcount. 
*/ static void __acquires(agg_queue) -ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) +ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) { - int queue = ieee80211_ac_from_tid(tid); + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; - if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) + if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1) ieee80211_stop_queue_by_reason( - &local->hw, queue, + &sdata->local->hw, queue, IEEE80211_QUEUE_STOP_REASON_AGGREGATION); __acquire(agg_queue); } static void __releases(agg_queue) -ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) +ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid) { - int queue = ieee80211_ac_from_tid(tid); + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; - if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) + if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0) ieee80211_wake_queue_by_reason( - &local->hw, queue, + &sdata->local->hw, queue, IEEE80211_QUEUE_STOP_REASON_AGGREGATION); __release(agg_queue); } @@ -314,13 +314,14 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) * requires a call to ieee80211_agg_splice_finish later */ static void __acquires(agg_queue) -ieee80211_agg_splice_packets(struct ieee80211_local *local, +ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata, struct tid_ampdu_tx *tid_tx, u16 tid) { - int queue = ieee80211_ac_from_tid(tid); + struct ieee80211_local *local = sdata->local; + int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; unsigned long flags; - ieee80211_stop_queue_agg(local, tid); + ieee80211_stop_queue_agg(sdata, tid); if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" " from the pending queue\n", tid)) @@ -336,9 +337,9 @@ ieee80211_agg_splice_packets(struct ieee80211_local *local, } static void __releases(agg_queue) -ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) +ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid) { - ieee80211_wake_queue_agg(local, tid); + ieee80211_wake_queue_agg(sdata, tid); } void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) @@ -376,9 +377,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) " tid %d\n", tid); #endif spin_lock_bh(&sta->lock); - ieee80211_agg_splice_packets(local, tid_tx, tid); + ieee80211_agg_splice_packets(sdata, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); - ieee80211_agg_splice_finish(local, tid); + ieee80211_agg_splice_finish(sdata, tid); spin_unlock_bh(&sta->lock); kfree_rcu(tid_tx, rcu_head); @@ -598,14 +599,14 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, */ spin_lock_bh(&sta->lock); - ieee80211_agg_splice_packets(local, tid_tx, tid); + ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); /* * Now mark as operational. This will be visible * in the TX path, and lets it go lock-free in * the common case. */ set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); - ieee80211_agg_splice_finish(local, tid); + ieee80211_agg_splice_finish(sta->sdata, tid); spin_unlock_bh(&sta->lock); } @@ -790,12 +791,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) * more. 
*/ - ieee80211_agg_splice_packets(local, tid_tx, tid); + ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid); /* future packets must not find the tid_tx struct any more */ ieee80211_assign_tid_tx(sta, tid, NULL); - ieee80211_agg_splice_finish(local, tid); + ieee80211_agg_splice_finish(sta->sdata, tid); kfree_rcu(tid_tx, rcu_head); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 667d93943399..d6163b98f7b7 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2106,6 +2106,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, IEEE80211_SKB_CB(skb)->flags = flags; + if (flags & IEEE80211_TX_CTL_TX_OFFCHAN) + IEEE80211_SKB_CB(skb)->hw_queue = + local->hw.offchannel_tx_hw_queue; + skb->dev = sdata->dev; *cookie = (unsigned long) skb; @@ -2147,6 +2151,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, /* modify cookie to prevent API mismatches */ *cookie ^= 2; IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; + IEEE80211_SKB_CB(skb)->hw_queue = + local->hw.offchannel_tx_hw_queue; local->hw_roc_skb = skb; local->hw_roc_skb_for_status = skb; mutex_unlock(&local->mtx); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8ed074f7e6cd..4be11ea3dfc4 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1449,6 +1449,7 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason); void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason); +void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb); void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 2b88cb278fc4..ed297649c577 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -149,6 +149,34 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, return 0; } +static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata) +{ + int n_queues = sdata->local->hw.queues; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == + IEEE80211_INVAL_HW_QUEUE)) + return -EINVAL; + if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >= + n_queues)) + return -EINVAL; + } + + if (sdata->vif.type != NL80211_IFTYPE_AP) { + sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; + return 0; + } + + if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE)) + return -EINVAL; + + if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues)) + return -EINVAL; + + return 0; +} + void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, const int offset) { @@ -169,6 +197,20 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, #undef ADJUST } +static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) + sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; + else + sdata->vif.hw_queue[i] = i; + } + sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; +} + static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; @@ -190,6 +232,8 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) snprintf(sdata->name, IFNAMSIZ, "%s-monitor", 
wiphy_name(local->hw.wiphy)); + ieee80211_set_default_queues(sdata); + ret = drv_add_interface(local, sdata); if (WARN_ON(ret)) { /* ok .. stupid driver, it asked for this! */ @@ -197,6 +241,12 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) return ret; } + ret = ieee80211_check_queues(sdata); + if (ret) { + kfree(sdata); + return ret; + } + rcu_assign_pointer(local->monitor_sdata, sdata); return 0; @@ -344,6 +394,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) res = drv_add_interface(local, sdata); if (res) goto err_stop; + res = ieee80211_check_queues(sdata); + if (res) + goto err_del_interface; } if (sdata->vif.type == NL80211_IFTYPE_AP) { @@ -1040,6 +1093,13 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, if (ret) type = sdata->vif.type; + /* + * Ignore return value here, there's not much we can do since + * the driver changed the interface type internally already. + * The warnings will hopefully make driver authors fix it :-) + */ + ieee80211_check_queues(sdata); + ieee80211_setup_sdata(sdata, type); err = ieee80211_do_open(sdata->dev, false); @@ -1266,6 +1326,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, sizeof(sdata->rc_rateidx_mcs_mask[i])); } + ieee80211_set_default_queues(sdata); + /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index d019f0d3a0fe..ac79d5e8e0d0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -591,6 +591,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->hw.max_report_rates = 0; local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->user_power_level = -1; @@ -687,6 +688,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) WLAN_CIPHER_SUITE_AES_CMAC }; + if (hw->flags & IEEE80211_HW_QUEUE_CONTROL && + (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || + local->hw.offchannel_tx_hw_queue >= local->hw.queues)) + return -EINVAL; + if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) #ifdef CONFIG_PM && (!local->ops->suspend || !local->ops->resume) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a8d0188ab408..4f6aac16ac3a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -400,6 +400,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) return TX_CONTINUE; info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; + if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) + info->hw_queue = tx->sdata->vif.cab_queue; /* device releases frame after DTIM beacon */ if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) @@ -1214,11 +1216,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, bool txpending) { struct sk_buff *skb, *tmp; - struct ieee80211_tx_info *info; unsigned long flags; skb_queue_walk_safe(skbs, skb, tmp) { - int q = skb_get_queue_mapping(skb); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int q = info->hw_queue; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (WARN_ON_ONCE(q >= local->hw.queues)) { + __skb_unlink(skb, skbs); + dev_kfree_skb(skb); + continue; + } +#endif spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (local->queue_stop_reasons[q] || @@ -1240,7 +1250,6 @@ 
static bool ieee80211_tx_frags(struct ieee80211_local *local, } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); - info = IEEE80211_SKB_CB(skb); info->control.vif = vif; info->control.sta = sta; @@ -1284,9 +1293,14 @@ static bool __ieee80211_tx(struct ieee80211_local *local, switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: sdata = rcu_dereference(local->monitor_sdata); - if (sdata) + if (sdata) { vif = &sdata->vif; - else + info->hw_queue = + vif->hw_queue[skb_get_queue_mapping(skb)]; + } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) { + dev_kfree_skb(skb); + return true; + } else vif = NULL; break; case NL80211_IFTYPE_AP_VLAN: @@ -1402,6 +1416,12 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, tx.channel = local->hw.conf.channel; info->band = tx.channel->band; + /* set up hw_queue value early */ + if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || + !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) + info->hw_queue = + sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; + if (!invoke_tx_handlers(&tx)) result = __ieee80211_tx(local, &tx.skbs, led_len, tx.sta, txpending); @@ -2172,7 +2192,6 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; - struct ieee80211_sub_if_data *sdata; unsigned long flags; int i; bool txok; @@ -2209,8 +2228,7 @@ void ieee80211_tx_pending(unsigned long data) } if (skb_queue_empty(&local->pending[i])) - list_for_each_entry_rcu(sdata, &local->interfaces, list) - netif_wake_subqueue(sdata->dev, i); + ieee80211_propagate_queue_wake(local, i); } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); @@ -2717,11 +2735,13 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc); void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid) { + int ac = ieee802_1d_to_ac[tid]; + skb_set_mac_header(skb, 0); skb_set_network_header(skb, 0); skb_set_transport_header(skb, 0); - skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); + skb_set_queue_mapping(skb, ac); skb->priority = tid; /* diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 9e8f4b892555..e67fe5c1def9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -265,11 +265,36 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_ctstoself_duration); +void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) +{ + struct ieee80211_sub_if_data *sdata; + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int ac; + + if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) + continue; + + if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE && + local->queue_stop_reasons[sdata->vif.cab_queue] != 0) + continue; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + int ac_queue = sdata->vif.hw_queue[ac]; + + if (ac_queue == queue || + (sdata->vif.cab_queue == queue && + local->queue_stop_reasons[ac_queue] == 0 && + skb_queue_empty(&local->pending[ac_queue]))) + netif_wake_subqueue(sdata->dev, ac); + } + } +} + static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, enum queue_stop_reason reason) { struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_sub_if_data *sdata; trace_wake_queue(local, queue, reason); @@ -287,11 +312,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, if (skb_queue_empty(&local->pending[queue])) { rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) { - if 
(test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) - continue; - netif_wake_subqueue(sdata->dev, queue); - } + ieee80211_propagate_queue_wake(local, queue); rcu_read_unlock(); } else tasklet_schedule(&local->tx_pending_tasklet); @@ -332,8 +353,15 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, __set_bit(reason, &local->queue_stop_reasons[queue]); rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) - netif_stop_subqueue(sdata->dev, queue); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (sdata->vif.hw_queue[ac] == queue || + sdata->vif.cab_queue == queue) + netif_stop_subqueue(sdata->dev, ac); + } + } rcu_read_unlock(); } @@ -360,8 +388,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, { struct ieee80211_hw *hw = &local->hw; unsigned long flags; - int queue = skb_get_queue_mapping(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int queue = info->hw_queue; if (WARN_ON(!info->control.vif)) { kfree_skb(skb); @@ -393,7 +421,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, continue; } - queue = skb_get_queue_mapping(skb); + queue = info->hw_queue; __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); -- cgit v1.2.2 From 4d6c36fa227afc7b76b85ee48e3ef3972fe0ca23 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 3 Apr 2012 14:45:54 +0200 Subject: mac80211: clean up an ieee80211_do_open error path Eliad's comment prompted me to look closer at the error paths in ieee80211_do_open() and I found one that should use the error labels. Also add a comment about the clear_bit since in many error cases the bit hasn't been set. Cc: Eliad Peller Signed-off-by: Johannes Berg Signed-off-by: John W. Linville --- net/mac80211/iface.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index ed297649c577..6e85faed053d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -349,9 +349,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); if (!is_valid_ether_addr(dev->dev_addr)) { - if (!local->open_count) - drv_stop(local); - return -EADDRNOTAVAIL; + res = -EADDRNOTAVAIL; + goto err_stop; } } @@ -485,6 +484,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) sdata->bss = NULL; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) list_del(&sdata->u.vlan.list); + /* might already be clear but that doesn't matter */ clear_bit(SDATA_STATE_RUNNING, &sdata->state); return res; } -- cgit v1.2.2 From 6d52563f2bc217cbdccb97068f5b6176352f01f2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 4 Apr 2012 15:05:25 +0200 Subject: cfg80211/mac80211: enable proper device_set_wakeup_enable handling In WoWLAN, we only get the triggers when we actually get to suspend. As a consequence, drivers currently don't know that the device should enable wakeup. However, the device_set_wakeup_enable() API is intended to be called when the wakeup is enabled, not later when needed. Add a new set_wakeup() call to cfg80211 and mac80211 to allow drivers to properly call device_set_wakeup_enable. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/cfg.c | 10 ++++++++++ net/mac80211/driver-ops.h | 13 +++++++++++++ net/mac80211/driver-trace.h | 14 ++++++++++++++ net/wireless/core.c | 5 ++++- net/wireless/nl80211.c | 4 ++++ 5 files changed, 45 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d6163b98f7b7..355735491252 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2695,6 +2695,13 @@ ieee80211_wiphy_get_channel(struct wiphy *wiphy) return local->oper_channel; } +#ifdef CONFIG_PM +static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled) +{ + drv_set_wakeup(wiphy_priv(wiphy), enabled); +} +#endif + struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -2763,4 +2770,7 @@ struct cfg80211_ops mac80211_config_ops = { .probe_client = ieee80211_probe_client, .get_channel = ieee80211_wiphy_get_channel, .set_noack_map = ieee80211_set_noack_map, +#ifdef CONFIG_PM + .set_wakeup = ieee80211_set_wakeup, +#endif }; diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 492c08c27c5f..4a0e559cb26b 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -91,6 +91,19 @@ static inline int drv_resume(struct ieee80211_local *local) trace_drv_return_int(local, ret); return ret; } + +static inline void drv_set_wakeup(struct ieee80211_local *local, + bool enabled) +{ + might_sleep(); + + if (!local->ops->set_wakeup) + return; + + trace_drv_set_wakeup(local, enabled); + local->ops->set_wakeup(&local->hw, enabled); + trace_drv_return_void(local); +} #endif static inline int drv_add_interface(struct ieee80211_local *local, diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index d1f017a11988..7c0754bed61b 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -171,6 +171,20 @@ DEFINE_EVENT(local_only_evt, drv_resume, TP_ARGS(local) ); +TRACE_EVENT(drv_set_wakeup, + TP_PROTO(struct ieee80211_local *local, bool enabled), + TP_ARGS(local, enabled), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, enabled) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->enabled = enabled; + ), + TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled) +); + DEFINE_EVENT(local_only_evt, drv_stop, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) diff --git a/net/wireless/core.c b/net/wireless/core.c index ccdfed897651..59f4a7e7c092 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -708,6 +708,10 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); + + if (rdev->wowlan && rdev->ops->set_wakeup) + rdev->ops->set_wakeup(&rdev->wiphy, false); + cfg80211_rdev_free_wowlan(rdev); } EXPORT_SYMBOL(wiphy_unregister); @@ -720,7 +724,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) mutex_destroy(&rdev->sched_scan_mtx); list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&scan->pub); - cfg80211_rdev_free_wowlan(rdev); kfree(rdev); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b12a05243d71..40e5620e5fde 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6014,6 +6014,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) struct cfg80211_wowlan new_triggers = {}; struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; int err, i; + bool prev_enabled = rdev->wowlan; if (!rdev->wiphy.wowlan.flags && 
!rdev->wiphy.wowlan.n_patterns) return -EOPNOTSUPP; @@ -6146,6 +6147,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) rdev->wowlan = NULL; } + if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) + rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan); + return 0; error: for (i = 0; i < new_triggers.n_patterns; i++) -- cgit v1.2.2 From 5314526b1743e8e8614293db7d86e480b4fe9824 Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Fri, 6 Apr 2012 13:35:47 -0700 Subject: cfg80211: add channel switch notify event The firmware may decide to switch channels while already beaconing, e.g. in response to a cfg80211 connect request on a different vif. Add this event to notify userspace when an AP or GO interface has successfully migrated to a new channel, so it can update its configuration accordingly. Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/wireless/mlme.c | 27 +++++++++++++++++++++++++++ net/wireless/nl80211.c | 32 ++++++++++++++++++++++++++++++++ net/wireless/nl80211.h | 4 ++++ 3 files changed, 63 insertions(+) (limited to 'net') diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index e14fdcc1d7cd..6801d96bc224 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -930,6 +930,33 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); +void cfg80211_ch_switch_notify(struct net_device *dev, int freq, + enum nl80211_channel_type type) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_channel *chan; + + wdev_lock(wdev); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && + wdev->iftype != NL80211_IFTYPE_P2P_GO)) + goto out; + + chan = rdev_freq_to_chan(rdev, freq, type); + if (WARN_ON(!chan)) + goto out; + + wdev->channel = chan; + + nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL); +out: + wdev_unlock(wdev); + return; +} +EXPORT_SYMBOL(cfg80211_ch_switch_notify); + bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 40e5620e5fde..c3e82af72bf5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7922,6 +7922,38 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, nlmsg_free(msg); } +void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, + struct net_device *netdev, int freq, + enum nl80211_channel_type type, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type); + + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + void nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *peer, diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 4ffe50df9f31..01a1122c3b33 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -118,6 +118,10 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device 
*rdev, struct net_device *netdev, int index, const u8 *bssid, bool preauth, gfp_t gfp); +void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, + struct net_device *dev, int freq, + enum nl80211_channel_type type, gfp_t gfp); + bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp); bool nl80211_unexpected_4addr_frame(struct net_device *dev, -- cgit v1.2.2 From 35bcd591132c2d4d2a31843063c0f9e64e5be751 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Tue, 10 Apr 2012 12:31:56 +0800 Subject: mac80211: fix the assignment of PREQ's MAC address for Proactive RANN Record the RANN sender's address only for RANNs that meet the acceptance criteria (per sections 13.10.12.4.2). Signed-off-by: Chun-Yeow Yeoh Reviewed-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/mesh_hwmp.c | 5 +++-- net/mac80211/mesh_pathtbl.c | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index a80da3784a25..503016f58631 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -800,10 +800,11 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, 0, sdata); mpath->sn = orig_sn; mpath->rann_metric = metric + metric_txsta; + /* Recording RANNs sender address to send individually + * addressed PREQs destined for root mesh STA */ + memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); } - /* Using individually addressed PREQ for root node */ - memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN); mpath->is_root = true; if (root_is_gate) diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 49aaefd99635..baa6096c66b4 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -538,6 +538,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) read_lock_bh(&pathtbl_resize_lock); memcpy(new_mpath->dst, dst, ETH_ALEN); + memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN); + new_mpath->is_root = false; new_mpath->sdata = sdata; new_mpath->flags = 0; skb_queue_head_init(&new_mpath->frame_queue); -- cgit v1.2.2 From 8112a5c91d781a22d2b631f3295386b0b70de7c8 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:04 +0200 Subject: NFC: Add a target lost netlink event Some chips are capable of detecting when a tag is out of the field, so they could send a netlink event about it to userspace. Signed-off-by: Samuel Ortiz Signed-off-by: John W. 
Linville --- net/nfc/netlink.c | 30 ++++++++++++++++++++++++++++++ net/nfc/nfc.h | 1 + 2 files changed, 31 insertions(+) (limited to 'net') diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 6404052d6c07..ebdb605f8dbd 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -183,6 +183,36 @@ free_msg: return -EMSGSIZE; } +int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, + NFC_EVENT_TARGET_LOST); + if (!hdr) + goto free_msg; + + NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); + NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx); + + genlmsg_end(msg, hdr); + + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); +free_msg: + nlmsg_free(msg); + return -EMSGSIZE; +} + int nfc_genl_device_added(struct nfc_dev *dev) { struct sk_buff *msg; diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index ec8794c1099c..2c868d2b3c57 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -119,6 +119,7 @@ void nfc_genl_data_init(struct nfc_genl_data *genl_data); void nfc_genl_data_exit(struct nfc_genl_data *genl_data); int nfc_genl_targets_found(struct nfc_dev *dev); +int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx); int nfc_genl_device_added(struct nfc_dev *dev); int nfc_genl_device_removed(struct nfc_dev *dev); -- cgit v1.2.2 From e1da0efa2ee71df957b280bcfa41f82ce6986a1d Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Tue, 10 Apr 2012 19:43:05 +0200 Subject: NFC: Export target lost function NFC drivers will call this routine when they detect that a tag leaves the RF field. This will eventually lead to the corresponding netlink event to be sent. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/core.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'net') diff --git a/net/nfc/core.c b/net/nfc/core.c index 295d129864d2..deb4721ce8a1 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -455,6 +455,45 @@ int nfc_targets_found(struct nfc_dev *dev, } EXPORT_SYMBOL(nfc_targets_found); +int nfc_target_lost(struct nfc_dev *dev, u32 target_idx) +{ + struct nfc_target *tg; + int i; + + pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx); + + spin_lock_bh(&dev->targets_lock); + + for (i = 0; i < dev->n_targets; i++) { + tg = &dev->targets[i]; + if (tg->idx == target_idx) + break; + } + + if (i == dev->n_targets) { + spin_unlock_bh(&dev->targets_lock); + return -EINVAL; + } + + dev->targets_generation++; + dev->n_targets--; + + if (dev->n_targets) { + memcpy(&dev->targets[i], &dev->targets[i + 1], + (dev->n_targets - i) * sizeof(struct nfc_target)); + } else { + kfree(dev->targets); + dev->targets = NULL; + } + + spin_unlock_bh(&dev->targets_lock); + + nfc_genl_target_lost(dev, target_idx); + + return 0; +} +EXPORT_SYMBOL(nfc_target_lost); + static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); -- cgit v1.2.2 From 8b8d2e08bf0d50193931afd27482a59376b66b2b Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Tue, 10 Apr 2012 19:43:06 +0200 Subject: NFC: HCI support This is an implementation of ETSI TS 102 622 specification. Many NFC chipsets use HCI as the host <-> target protocol on top of a serial link like i2c. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz Signed-off-by: John W. 
Linville --- net/nfc/Kconfig | 1 + net/nfc/Makefile | 1 + net/nfc/hci/Kconfig | 8 + net/nfc/hci/Makefile | 7 + net/nfc/hci/command.c | 354 +++++++++++++++++++++ net/nfc/hci/core.c | 830 ++++++++++++++++++++++++++++++++++++++++++++++++++ net/nfc/hci/hci.h | 139 +++++++++ net/nfc/hci/hcp.c | 156 ++++++++++ 8 files changed, 1496 insertions(+) create mode 100644 net/nfc/hci/Kconfig create mode 100644 net/nfc/hci/Makefile create mode 100644 net/nfc/hci/command.c create mode 100644 net/nfc/hci/core.c create mode 100644 net/nfc/hci/hci.h create mode 100644 net/nfc/hci/hcp.c (limited to 'net') diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig index 44c865b86d6f..8d8d9bc4b6ff 100644 --- a/net/nfc/Kconfig +++ b/net/nfc/Kconfig @@ -14,6 +14,7 @@ menuconfig NFC be called nfc. source "net/nfc/nci/Kconfig" +source "net/nfc/hci/Kconfig" source "net/nfc/llcp/Kconfig" source "drivers/nfc/Kconfig" diff --git a/net/nfc/Makefile b/net/nfc/Makefile index 7b4a6dcfa566..d1a117c2c401 100644 --- a/net/nfc/Makefile +++ b/net/nfc/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_NFC) += nfc.o obj-$(CONFIG_NFC_NCI) += nci/ +obj-$(CONFIG_NFC_HCI) += hci/ nfc-objs := core.o netlink.o af_nfc.o rawsock.o nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig new file mode 100644 index 000000000000..c32d2d4a9635 --- /dev/null +++ b/net/nfc/hci/Kconfig @@ -0,0 +1,8 @@ +config NFC_HCI + depends on NFC + tristate "NFC HCI implementation" + default n + help + Say Y here if you want to build support for a kernel NFC HCI + implementation. This is mostly needed for devices that only process + HCI frames, like for example the NXP pn544. diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile new file mode 100644 index 000000000000..af17c7be051a --- /dev/null +++ b/net/nfc/hci/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Linux NFC HCI layer. +# + +obj-$(CONFIG_NFC_HCI) += hci.o + +hci-y := core.o hcp.o command.o diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c new file mode 100644 index 000000000000..8729abf5f18b --- /dev/null +++ b/net/nfc/hci/command.c @@ -0,0 +1,354 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#define pr_fmt(fmt) "hci: %s: " fmt, __func__ + +#include +#include +#include +#include + +#include + +#include "hci.h" + +static int nfc_hci_result_to_errno(u8 result) +{ + switch (result) { + case NFC_HCI_ANY_OK: + return 0; + case NFC_HCI_ANY_E_TIMEOUT: + return -ETIMEDOUT; + default: + return -1; + } +} + +static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb, void *cb_data) +{ + struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data; + + pr_debug("HCI Cmd completed with HCI result=%d\n", result); + + hcp_ew->exec_result = nfc_hci_result_to_errno(result); + if (hcp_ew->exec_result == 0) + hcp_ew->result_skb = skb; + else + kfree_skb(skb); + hcp_ew->exec_complete = true; + + wake_up(hcp_ew->wq); +} + +static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + const u8 *param, size_t param_len, + struct sk_buff **skb) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq); + struct hcp_exec_waiter hcp_ew; + hcp_ew.wq = &ew_wq; + hcp_ew.exec_complete = false; + hcp_ew.result_skb = NULL; + + pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); + + /* TODO: Define hci cmd execution delay. Should it be the same + * for all commands? + */ + hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe, + NFC_HCI_HCP_COMMAND, cmd, + param, param_len, + nfc_hci_execute_cb, &hcp_ew, + 3000); + if (hcp_ew.exec_result < 0) + return hcp_ew.exec_result; + + wait_event(ew_wq, hcp_ew.exec_complete == true); + + if (hcp_ew.exec_result == 0) { + if (skb) + *skb = hcp_ew.result_skb; + else + kfree_skb(hcp_ew.result_skb); + } + + return hcp_ew.exec_result; +} + +int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, + const u8 *param, size_t param_len) +{ + u8 pipe; + + pr_debug("%d to gate %d\n", event, gate); + + pipe = hdev->gate2pipe[gate]; + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event, + param, param_len, NULL, NULL, 0); +} +EXPORT_SYMBOL(nfc_hci_send_event); + +int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, + const u8 *param, size_t param_len) +{ + u8 pipe; + + pr_debug("\n"); + + pipe = hdev->gate2pipe[gate]; + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE, + response, param, param_len, NULL, NULL, + 0); +} +EXPORT_SYMBOL(nfc_hci_send_response); + +/* + * Execute an hci command sent to gate. + * skb will contain response data if success. skb can be NULL if you are not + * interested by the response. + */ +int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, struct sk_buff **skb) +{ + u8 pipe; + + pr_debug("\n"); + + pipe = hdev->gate2pipe[gate]; + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb); +} +EXPORT_SYMBOL(nfc_hci_send_cmd); + +int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + const u8 *param, size_t param_len) +{ + int r; + u8 *tmp; + + /* TODO ELa: reg idx must be inserted before param, but we don't want + * to ask the caller to do it to keep a simpler API. + * For now, just create a new temporary param buffer. This is far from + * optimal though, and the plan is to modify APIs to pass idx down to + * nfc_hci_hcp_message_tx where the frame is actually built, thereby + * eliminating the need for the temp allocation-copy here. 
+ */ + + pr_debug("idx=%d to gate %d\n", idx, gate); + + tmp = kmalloc(1 + param_len, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + *tmp = idx; + memcpy(tmp + 1, param, param_len); + + r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER, + tmp, param_len + 1, NULL); + + kfree(tmp); + + return r; +} +EXPORT_SYMBOL(nfc_hci_set_param); + +int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + struct sk_buff **skb) +{ + pr_debug("gate=%d regidx=%d\n", gate, idx); + + return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER, + &idx, 1, skb); +} +EXPORT_SYMBOL(nfc_hci_get_param); + +static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe) +{ + struct sk_buff *skb; + int r; + + pr_debug("pipe=%d\n", pipe); + + r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE, + NULL, 0, &skb); + if (r == 0) { + /* dest host other than host controller will send + * number of pipes already open on this gate before + * execution. The number can be found in skb->data[0] + */ + kfree_skb(skb); + } + + return r; +} + +static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe) +{ + pr_debug("\n"); + + return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE, + NULL, 0, NULL); +} + +static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host, + u8 dest_gate, int *result) +{ + struct sk_buff *skb; + struct hci_create_pipe_params params; + struct hci_create_pipe_resp *resp; + u8 pipe; + + pr_debug("gate=%d\n", dest_gate); + + params.src_gate = NFC_HCI_ADMIN_GATE; + params.dest_host = dest_host; + params.dest_gate = dest_gate; + + *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, + NFC_HCI_ADM_CREATE_PIPE, + (u8 *) ¶ms, sizeof(params), &skb); + if (*result == 0) { + resp = (struct hci_create_pipe_resp *)skb->data; + pipe = resp->pipe; + kfree_skb(skb); + + pr_debug("pipe created=%d\n", pipe); + + return pipe; + } else + return NFC_HCI_INVALID_PIPE; +} + +static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) +{ + pr_debug("\n"); + + return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, + NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); +} + +static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) +{ + int r; + + u8 param[2]; + + /* TODO: Find out what the identity reference data is + * and fill param with it. 
HCI spec 6.1.3.5 */ + + pr_debug("\n"); + + r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, + NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); + + return 0; +} + +int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) +{ + int r; + u8 pipe = hdev->gate2pipe[gate]; + + pr_debug("\n"); + + if (pipe == NFC_HCI_INVALID_PIPE) + return -EADDRNOTAVAIL; + + r = nfc_hci_close_pipe(hdev, pipe); + if (r < 0) + return r; + + if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) { + r = nfc_hci_delete_pipe(hdev, pipe); + if (r < 0) + return r; + } + + hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE; + + return 0; +} +EXPORT_SYMBOL(nfc_hci_disconnect_gate); + +int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev) +{ + int r; + + pr_debug("\n"); + + r = nfc_hci_clear_all_pipes(hdev); + if (r < 0) + return r; + + memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); + + return 0; +} +EXPORT_SYMBOL(nfc_hci_disconnect_all_gates); + +int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate) +{ + u8 pipe = NFC_HCI_INVALID_PIPE; + bool pipe_created = false; + int r; + + pr_debug("\n"); + + if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE) + return -EADDRINUSE; + + switch (dest_gate) { + case NFC_HCI_LINK_MGMT_GATE: + pipe = NFC_HCI_LINK_MGMT_PIPE; + break; + case NFC_HCI_ADMIN_GATE: + pipe = NFC_HCI_ADMIN_PIPE; + break; + default: + pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r); + if (pipe == NFC_HCI_INVALID_PIPE) + return r; + pipe_created = true; + break; + } + + r = nfc_hci_open_pipe(hdev, pipe); + if (r < 0) { + if (pipe_created) + if (nfc_hci_delete_pipe(hdev, pipe) < 0) { + /* TODO: Cannot clean by deleting pipe... + * -> inconsistent state */ + } + return r; + } + + hdev->gate2pipe[dest_gate] = pipe; + + return 0; +} +EXPORT_SYMBOL(nfc_hci_connect_gate); diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c new file mode 100644 index 000000000000..86fd00d5a099 --- /dev/null +++ b/net/nfc/hci/core.c @@ -0,0 +1,830 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#define pr_fmt(fmt) "hci: %s: " fmt, __func__ + +#include +#include +#include +#include + +#include +#include + +#include "hci.h" + +/* Largest headroom needed for outgoing HCI commands */ +#define HCI_CMDS_HEADROOM 1 + +static void nfc_hci_msg_tx_work(struct work_struct *work) +{ + struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, + msg_tx_work); + struct hci_msg *msg; + struct sk_buff *skb; + int r = 0; + + mutex_lock(&hdev->msg_tx_mutex); + + if (hdev->cmd_pending_msg) { + if (timer_pending(&hdev->cmd_timer) == 0) { + if (hdev->cmd_pending_msg->cb) + hdev->cmd_pending_msg->cb(hdev, + NFC_HCI_ANY_E_TIMEOUT, + NULL, + hdev-> + cmd_pending_msg-> + cb_context); + kfree(hdev->cmd_pending_msg); + hdev->cmd_pending_msg = NULL; + } else + goto exit; + } + +next_msg: + if (list_empty(&hdev->msg_tx_queue)) + goto exit; + + msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l); + list_del(&msg->msg_l); + + pr_debug("msg_tx_queue has a cmd to send\n"); + while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) { + r = hdev->ops->xmit(hdev, skb); + if (r < 0) { + kfree_skb(skb); + skb_queue_purge(&msg->msg_frags); + if (msg->cb) + msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL, + msg->cb_context); + kfree(msg); + break; + } + } + + if (r) + goto next_msg; + + if (msg->wait_response == false) { + kfree(msg); + goto next_msg; + } + + hdev->cmd_pending_msg = msg; + mod_timer(&hdev->cmd_timer, jiffies + + msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay)); + +exit: + mutex_unlock(&hdev->msg_tx_mutex); +} + +static void nfc_hci_msg_rx_work(struct work_struct *work) +{ + struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, + msg_rx_work); + struct sk_buff *skb; + struct hcp_message *message; + u8 pipe; + u8 type; + u8 instruction; + + while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) { + pipe = skb->data[0]; + skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN); + message = (struct hcp_message *)skb->data; + type = HCP_MSG_GET_TYPE(message->header); + instruction = HCP_MSG_GET_CMD(message->header); + skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN); + + nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb); + } +} + +void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb) +{ + mutex_lock(&hdev->msg_tx_mutex); + + if (hdev->cmd_pending_msg == NULL) { + kfree_skb(skb); + goto exit; + } + + del_timer_sync(&hdev->cmd_timer); + + if (hdev->cmd_pending_msg->cb) + hdev->cmd_pending_msg->cb(hdev, result, skb, + hdev->cmd_pending_msg->cb_context); + else + kfree_skb(skb); + + kfree(hdev->cmd_pending_msg); + hdev->cmd_pending_msg = NULL; + + queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); + +exit: + mutex_unlock(&hdev->msg_tx_mutex); +} + +void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static u32 nfc_hci_sak_to_protocol(u8 sak) +{ + switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) { + case NFC_HCI_TYPE_A_SEL_PROT_MIFARE: + return NFC_PROTO_MIFARE_MASK; + case NFC_HCI_TYPE_A_SEL_PROT_ISO14443: + return NFC_PROTO_ISO14443_MASK; + case NFC_HCI_TYPE_A_SEL_PROT_DEP: + return NFC_PROTO_NFC_DEP_MASK; + case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP: + return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK; + default: + return 0xffffffff; + } +} + +static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) +{ + struct nfc_target *targets; + struct sk_buff *atqa_skb = NULL; + struct sk_buff *sak_skb = NULL; + int r; + + pr_debug("from gate %d\n", gate); + + targets = 
kzalloc(sizeof(struct nfc_target), GFP_KERNEL); + if (targets == NULL) + return -ENOMEM; + + switch (gate) { + case NFC_HCI_RF_READER_A_GATE: + r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_RF_READER_A_ATQA, &atqa_skb); + if (r < 0) + goto exit; + + r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_RF_READER_A_SAK, &sak_skb); + if (r < 0) + goto exit; + + if (atqa_skb->len != 2 || sak_skb->len != 1) { + r = -EPROTO; + goto exit; + } + + targets->supported_protocols = + nfc_hci_sak_to_protocol(sak_skb->data[0]); + if (targets->supported_protocols == 0xffffffff) { + r = -EPROTO; + goto exit; + } + + targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data); + targets->sel_res = sak_skb->data[0]; + + if (hdev->ops->complete_target_discovered) { + r = hdev->ops->complete_target_discovered(hdev, gate, + targets); + if (r < 0) + goto exit; + } + break; + case NFC_HCI_RF_READER_B_GATE: + targets->supported_protocols = NFC_PROTO_ISO14443_MASK; + break; + default: + if (hdev->ops->target_from_gate) + r = hdev->ops->target_from_gate(hdev, gate, targets); + else + r = -EPROTO; + if (r < 0) + goto exit; + + if (hdev->ops->complete_target_discovered) { + r = hdev->ops->complete_target_discovered(hdev, gate, + targets); + if (r < 0) + goto exit; + } + break; + } + + targets->hci_reader_gate = gate; + + r = nfc_targets_found(hdev->ndev, targets, 1); + if (r < 0) + goto exit; + + kfree(hdev->targets); + hdev->targets = targets; + targets = NULL; + hdev->target_count = 1; + +exit: + kfree(targets); + kfree_skb(atqa_skb); + kfree_skb(sak_skb); + + return r; +} + +void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, + struct sk_buff *skb) +{ + int r = 0; + + switch (event) { + case NFC_HCI_EVT_TARGET_DISCOVERED: + if (hdev->poll_started == false) { + r = -EPROTO; + goto exit; + } + + if (skb->len < 1) { /* no status data? */ + r = -EPROTO; + goto exit; + } + + if (skb->data[0] == 3) { + /* TODO: Multiple targets in field, none activated + * poll is supposedly stopped, but there is no + * single target to activate, so nothing to report + * up. + * if we need to restart poll, we must save the + * protocols from the initial poll and reuse here. + */ + } + + if (skb->data[0] != 0) { + r = -EPROTO; + goto exit; + } + + r = nfc_hci_target_discovered(hdev, + nfc_hci_pipe2gate(hdev, pipe)); + break; + default: + /* TODO: Unknown events are hardware specific + * pass them to the driver (needs a new hci_ops) */ + break; + } + +exit: + kfree_skb(skb); + + if (r) { + /* TODO: There was an error dispatching the event, + * how to propagate up to nfc core? 
+ */ + } +} + +static void nfc_hci_cmd_timeout(unsigned long data) +{ + struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data; + + queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); +} + +static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count, + u8 gates[]) +{ + int r; + u8 *p = gates; + while (gate_count--) { + r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p); + if (r < 0) + return r; + p++; + } + + return 0; +} + +static int hci_dev_session_init(struct nfc_hci_dev *hdev) +{ + struct sk_buff *skb = NULL; + int r; + u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */ + NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE, + NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE, + NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE + }; + + r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, + NFC_HCI_ADMIN_GATE); + if (r < 0) + goto exit; + + r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE, + NFC_HCI_ADMIN_SESSION_IDENTITY, &skb); + if (r < 0) + goto disconnect_all; + + if (skb->len && skb->len == strlen(hdev->init_data.session_id)) + if (memcmp(hdev->init_data.session_id, skb->data, + skb->len) == 0) { + /* TODO ELa: restore gate<->pipe table from + * some TBD location. + * note: it doesn't seem possible to get the chip + * currently open gate/pipe table. + * It is only possible to obtain the supported + * gate list. + */ + + /* goto exit + * For now, always do a full initialization */ + } + + r = nfc_hci_disconnect_all_gates(hdev); + if (r < 0) + goto exit; + + r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates); + if (r < 0) + goto disconnect_all; + + r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count, + hdev->init_data.gates); + if (r < 0) + goto disconnect_all; + + r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, + NFC_HCI_ADMIN_SESSION_IDENTITY, + hdev->init_data.session_id, + strlen(hdev->init_data.session_id)); + if (r == 0) + goto exit; + +disconnect_all: + nfc_hci_disconnect_all_gates(hdev); + +exit: + if (skb) + kfree_skb(skb); + + return r; +} + +static int hci_dev_version(struct nfc_hci_dev *hdev) +{ + int r; + struct sk_buff *skb; + + r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, + NFC_HCI_ID_MGMT_VERSION_SW, &skb); + if (r < 0) + return r; + + if (skb->len != 3) { + kfree_skb(skb); + return -EINVAL; + } + + hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4; + hdev->sw_patch = skb->data[0] & 0x0f; + hdev->sw_flashlib_major = skb->data[1]; + hdev->sw_flashlib_minor = skb->data[2]; + + kfree_skb(skb); + + r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, + NFC_HCI_ID_MGMT_VERSION_HW, &skb); + if (r < 0) + return r; + + if (skb->len != 3) { + kfree_skb(skb); + return -EINVAL; + } + + hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5; + hdev->hw_version = skb->data[0] & 0x1f; + hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6; + hdev->hw_software = skb->data[1] & 0x3f; + hdev->hw_bsid = skb->data[2]; + + kfree_skb(skb); + + pr_info("SOFTWARE INFO:\n"); + pr_info("RomLib : %d\n", hdev->sw_romlib); + pr_info("Patch : %d\n", hdev->sw_patch); + pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major); + pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor); + pr_info("HARDWARE INFO:\n"); + pr_info("Derivative : %d\n", hdev->hw_derivative); + pr_info("HW Version : %d\n", hdev->hw_version); + pr_info("#MPW : %d\n", hdev->hw_mpw); + pr_info("Software : %d\n", hdev->hw_software); + pr_info("BSID Version : %d\n", hdev->hw_bsid); + + return 0; +} + +static int hci_dev_up(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + 
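	/*
	 * Bring-up sequence: open the driver transport, establish the HCI
	 * session (admin gate, session identity check, then the static and
	 * driver-supplied gates), send EVT_END_OPERATION on the reader A
	 * gate, give the driver a chance to finish its own setup through
	 * hci_ready, and finally read out the chip version. Any failure
	 * after open closes the transport again before returning.
	 */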
int r = 0; + + if (hdev->ops->open) { + r = hdev->ops->open(hdev); + if (r < 0) + return r; + } + + r = hci_dev_session_init(hdev); + if (r < 0) + goto exit; + + r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_EVT_END_OPERATION, NULL, 0); + if (r < 0) + goto exit; + + if (hdev->ops->hci_ready) { + r = hdev->ops->hci_ready(hdev); + if (r < 0) + goto exit; + } + + r = hci_dev_version(hdev); + if (r < 0) + goto exit; + +exit: + if (r < 0) + if (hdev->ops->close) + hdev->ops->close(hdev); + return r; +} + +static int hci_dev_down(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->close) + hdev->ops->close(hdev); + + memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); + + return 0; +} + +static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + int r; + + if (hdev->ops->start_poll) + r = hdev->ops->start_poll(hdev, protocols); + else + r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_EVT_READER_REQUESTED, NULL, 0); + if (r == 0) + hdev->poll_started = true; + + return r; +} + +static void hci_stop_poll(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->poll_started) { + nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, + NFC_HCI_EVT_END_OPERATION, NULL, 0); + hdev->poll_started = false; + } +} + +static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev, + u32 target_idx) +{ + int i; + if (hdev->poll_started == false || hdev->targets == NULL) + return NULL; + + for (i = 0; i < hdev->target_count; i++) { + if (hdev->targets[i].idx == target_idx) + return &hdev->targets[i]; + } + + return NULL; +} + +static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx, + u32 protocol) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hci_find_target(hdev, target_idx) == NULL) + return -ENOMEDIUM; + + return 0; +} + +static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) +{ +} + +static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, + struct sk_buff *skb, data_exchange_cb_t cb, + void *cb_context) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + int r; + struct nfc_target *target; + struct sk_buff *res_skb = NULL; + + pr_debug("target_idx=%d\n", target_idx); + + target = hci_find_target(hdev, target_idx); + if (target == NULL) + return -ENOMEDIUM; + + switch (target->hci_reader_gate) { + case NFC_HCI_RF_READER_A_GATE: + case NFC_HCI_RF_READER_B_GATE: + if (hdev->ops->data_exchange) { + r = hdev->ops->data_exchange(hdev, target, skb, + &res_skb); + if (r <= 0) /* handled */ + break; + } + + *skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */ + r = nfc_hci_send_cmd(hdev, target->hci_reader_gate, + NFC_HCI_WR_XCHG_DATA, + skb->data, skb->len, &res_skb); + /* + * TODO: Check RF Error indicator to make sure data is valid. + * It seems that HCI cmd can complete without error, but data + * can be invalid if an RF error occured? Ignore for now. 
+ */ + if (r == 0) + skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */ + break; + default: + if (hdev->ops->data_exchange) { + r = hdev->ops->data_exchange(hdev, target, skb, + &res_skb); + if (r == 1) + r = -ENOTSUPP; + } + else + r = -ENOTSUPP; + } + + kfree_skb(skb); + + cb(cb_context, res_skb, r); + + return 0; +} + +struct nfc_ops hci_nfc_ops = { + .dev_up = hci_dev_up, + .dev_down = hci_dev_down, + .start_poll = hci_start_poll, + .stop_poll = hci_stop_poll, + .activate_target = hci_activate_target, + .deactivate_target = hci_deactivate_target, + .data_exchange = hci_data_exchange, +}; + +struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, + int tx_tailroom, + int max_link_payload) +{ + struct nfc_hci_dev *hdev; + + if (ops->xmit == NULL) + return NULL; + + if (protocols == 0) + return NULL; + + hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL); + if (hdev == NULL) + return NULL; + + hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, + tx_headroom + HCI_CMDS_HEADROOM, + tx_tailroom); + if (!hdev->ndev) { + kfree(hdev); + return NULL; + } + + hdev->ops = ops; + hdev->max_data_link_payload = max_link_payload; + hdev->init_data = *init_data; + + nfc_set_drvdata(hdev->ndev, hdev); + + memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); + + return hdev; +} +EXPORT_SYMBOL(nfc_hci_allocate_device); + +void nfc_hci_free_device(struct nfc_hci_dev *hdev) +{ + nfc_free_device(hdev->ndev); + kfree(hdev); +} +EXPORT_SYMBOL(nfc_hci_free_device); + +int nfc_hci_register_device(struct nfc_hci_dev *hdev) +{ + struct device *dev = &hdev->ndev->dev; + const char *devname = dev_name(dev); + char name[32]; + int r = 0; + + mutex_init(&hdev->msg_tx_mutex); + + INIT_LIST_HEAD(&hdev->msg_tx_queue); + + INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work); + snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname); + hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (hdev->msg_tx_wq == NULL) { + r = -ENOMEM; + goto exit; + } + + init_timer(&hdev->cmd_timer); + hdev->cmd_timer.data = (unsigned long)hdev; + hdev->cmd_timer.function = nfc_hci_cmd_timeout; + + skb_queue_head_init(&hdev->rx_hcp_frags); + + INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work); + snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname); + hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (hdev->msg_rx_wq == NULL) { + r = -ENOMEM; + goto exit; + } + + skb_queue_head_init(&hdev->msg_rx_queue); + + r = nfc_register_device(hdev->ndev); + +exit: + if (r < 0) { + if (hdev->msg_tx_wq) + destroy_workqueue(hdev->msg_tx_wq); + if (hdev->msg_rx_wq) + destroy_workqueue(hdev->msg_rx_wq); + } + + return r; +} +EXPORT_SYMBOL(nfc_hci_register_device); + +void nfc_hci_unregister_device(struct nfc_hci_dev *hdev) +{ + struct hci_msg *msg; + + skb_queue_purge(&hdev->rx_hcp_frags); + skb_queue_purge(&hdev->msg_rx_queue); + + while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, + msg_l)) != NULL) { + list_del(&msg->msg_l); + skb_queue_purge(&msg->msg_frags); + kfree(msg); + } + + del_timer_sync(&hdev->cmd_timer); + + nfc_unregister_device(hdev->ndev); + + destroy_workqueue(hdev->msg_tx_wq); + + destroy_workqueue(hdev->msg_rx_wq); +} +EXPORT_SYMBOL(nfc_hci_unregister_device); + +void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata) +{ + hdev->clientdata = clientdata; +} 
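/*
 * A minimal usage sketch of the registration path above; my_hci_ops,
 * my_init_data, my_dev and the MY_* constants are hypothetical and a
 * real driver will adapt them to its transport:
 *
 *	hdev = nfc_hci_allocate_device(&my_hci_ops, &my_init_data,
 *				       NFC_PROTO_ISO14443_MASK,
 *				       MY_TX_HEADROOM, MY_TX_TAILROOM,
 *				       MY_MAX_LINK_PAYLOAD);
 *	if (hdev == NULL)
 *		return -ENOMEM;
 *
 *	nfc_hci_set_clientdata(hdev, my_dev);
 *
 *	r = nfc_hci_register_device(hdev);
 *	if (r < 0)
 *		nfc_hci_free_device(hdev);
 *
 * Frames received from the transport are then fed to
 * nfc_hci_recv_frame(hdev, skb); teardown is nfc_hci_unregister_device()
 * followed by nfc_hci_free_device().
 */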
+EXPORT_SYMBOL(nfc_hci_set_clientdata); + +void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev) +{ + return hdev->clientdata; +} +EXPORT_SYMBOL(nfc_hci_get_clientdata); + +void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) +{ + struct hcp_packet *packet; + u8 type; + u8 instruction; + struct sk_buff *hcp_skb; + u8 pipe; + struct sk_buff *frag_skb; + int msg_len; + + if (skb == NULL) { + /* TODO ELa: lower layer had permanent failure, need to + * propagate that up + */ + + skb_queue_purge(&hdev->rx_hcp_frags); + + return; + } + + packet = (struct hcp_packet *)skb->data; + if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) { + skb_queue_tail(&hdev->rx_hcp_frags, skb); + return; + } + + /* it's the last fragment. Does it need re-aggregation? */ + if (skb_queue_len(&hdev->rx_hcp_frags)) { + pipe = packet->header & NFC_HCI_FRAGMENT; + skb_queue_tail(&hdev->rx_hcp_frags, skb); + + msg_len = 0; + skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) { + msg_len += (frag_skb->len - + NFC_HCI_HCP_PACKET_HEADER_LEN); + } + + hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN + + msg_len, GFP_KERNEL); + if (hcp_skb == NULL) { + /* TODO ELa: cannot deliver HCP message. How to + * propagate error up? + */ + } + + *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe; + + skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) { + msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN; + memcpy(skb_put(hcp_skb, msg_len), + frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN, + msg_len); + } + + skb_queue_purge(&hdev->rx_hcp_frags); + } else { + packet->header &= NFC_HCI_FRAGMENT; + hcp_skb = skb; + } + + /* if this is a response, dispatch immediately to + * unblock waiting cmd context. Otherwise, enqueue to dispatch + * in separate context where handler can also execute command. + */ + packet = (struct hcp_packet *)hcp_skb->data; + type = HCP_MSG_GET_TYPE(packet->message.header); + if (type == NFC_HCI_HCP_RESPONSE) { + pipe = packet->header; + instruction = HCP_MSG_GET_CMD(packet->message.header); + skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN + + NFC_HCI_HCP_MESSAGE_HEADER_LEN); + nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb); + } else { + skb_queue_tail(&hdev->msg_rx_queue, hcp_skb); + queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work); + } +} +EXPORT_SYMBOL(nfc_hci_recv_frame); + +MODULE_LICENSE("GPL"); diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h new file mode 100644 index 000000000000..45f2fe4fd486 --- /dev/null +++ b/net/nfc/hci/hci.h @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __LOCAL_HCI_H +#define __LOCAL_HCI_H + +struct gate_pipe_map { + u8 gate; + u8 pipe; +}; + +struct hcp_message { + u8 header; /* type -cmd,evt,rsp- + instruction */ + u8 data[]; +} __packed; + +struct hcp_packet { + u8 header; /* cbit+pipe */ + struct hcp_message message; +} __packed; + +/* + * HCI command execution completion callback. + * result will be one of the HCI response codes. + * skb contains the response data and must be disposed. + */ +typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb, void *cb_data); + +struct hcp_exec_waiter { + wait_queue_head_t *wq; + bool exec_complete; + int exec_result; + struct sk_buff *result_skb; +}; + +struct hci_msg { + struct list_head msg_l; + struct sk_buff_head msg_frags; + bool wait_response; + hci_cmd_cb_t cb; + void *cb_context; + unsigned long completion_delay; +}; + +struct hci_create_pipe_params { + u8 src_gate; + u8 dest_host; + u8 dest_gate; +} __packed; + +struct hci_create_pipe_resp { + u8 src_host; + u8 src_gate; + u8 dest_host; + u8 dest_gate; + u8 pipe; +} __packed; + +#define NFC_HCI_FRAGMENT 0x7f + +#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f)) +#define HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6) +#define HCP_MSG_GET_CMD(header) (header & 0x3f) + +int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, + u8 type, u8 instruction, + const u8 *payload, size_t payload_len, + hci_cmd_cb_t cb, void *cb_data, + unsigned long completion_delay); + +u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe); + +void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type, + u8 instruction, struct sk_buff *skb); + +/* HCP headers */ +#define NFC_HCI_HCP_PACKET_HEADER_LEN 1 +#define NFC_HCI_HCP_MESSAGE_HEADER_LEN 1 +#define NFC_HCI_HCP_HEADER_LEN 2 + +/* HCP types */ +#define NFC_HCI_HCP_COMMAND 0x00 +#define NFC_HCI_HCP_EVENT 0x01 +#define NFC_HCI_HCP_RESPONSE 0x02 + +/* Generic commands */ +#define NFC_HCI_ANY_SET_PARAMETER 0x01 +#define NFC_HCI_ANY_GET_PARAMETER 0x02 +#define NFC_HCI_ANY_OPEN_PIPE 0x03 +#define NFC_HCI_ANY_CLOSE_PIPE 0x04 + +/* Reader RF commands */ +#define NFC_HCI_WR_XCHG_DATA 0x10 + +/* Admin commands */ +#define NFC_HCI_ADM_CREATE_PIPE 0x10 +#define NFC_HCI_ADM_DELETE_PIPE 0x11 +#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED 0x12 +#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED 0x13 +#define NFC_HCI_ADM_CLEAR_ALL_PIPE 0x14 +#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15 + +/* Generic responses */ +#define NFC_HCI_ANY_OK 0x00 +#define NFC_HCI_ANY_E_NOT_CONNECTED 0x01 +#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02 +#define NFC_HCI_ANY_E_NOK 0x03 +#define NFC_HCI_ANY_E_PIPES_FULL 0x04 +#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN 0x05 +#define NFC_HCI_ANY_E_PIPE_NOT_OPENED 0x06 +#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07 +#define NFC_HCI_ANY_E_INHIBITED 0x08 +#define NFC_HCI_ANY_E_TIMEOUT 0x09 +#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a +#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b + +/* Pipes */ +#define NFC_HCI_INVALID_PIPE 0x80 +#define NFC_HCI_LINK_MGMT_PIPE 0x00 +#define NFC_HCI_ADMIN_PIPE 0x01 + +#endif /* __LOCAL_HCI_H */ diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c new file mode 100644 index 000000000000..7212cf2c5785 --- /dev/null +++ b/net/nfc/hci/hcp.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#define pr_fmt(fmt) "hci: %s: " fmt, __func__ + +#include +#include +#include + +#include + +#include "hci.h" + +/* + * Payload is the HCP message data only. Instruction will be prepended. + * Guarantees that cb will be called upon completion or timeout delay + * counted from the moment the cmd is sent to the transport. + */ +int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, + u8 type, u8 instruction, + const u8 *payload, size_t payload_len, + hci_cmd_cb_t cb, void *cb_data, + unsigned long completion_delay) +{ + struct nfc_dev *ndev = hdev->ndev; + struct hci_msg *cmd; + const u8 *ptr = payload; + int hci_len, err; + bool firstfrag = true; + + cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + INIT_LIST_HEAD(&cmd->msg_l); + skb_queue_head_init(&cmd->msg_frags); + cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false; + cmd->cb = cb; + cmd->cb_context = cb_data; + cmd->completion_delay = completion_delay; + + hci_len = payload_len + 1; + while (hci_len > 0) { + struct sk_buff *skb; + int skb_len, data_link_len; + struct hcp_packet *packet; + + if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <= + hdev->max_data_link_payload) + data_link_len = hci_len; + else + data_link_len = hdev->max_data_link_payload - + NFC_HCI_HCP_PACKET_HEADER_LEN; + + skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN + + data_link_len + ndev->tx_tailroom; + hci_len -= data_link_len; + + skb = alloc_skb(skb_len, GFP_KERNEL); + if (skb == NULL) { + err = -ENOMEM; + goto out_skb_err; + } + skb_reserve(skb, ndev->tx_headroom); + + skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len); + + /* Only the last fragment will have the cb bit set to 1 */ + packet = (struct hcp_packet *)skb->data; + packet->header = pipe; + if (firstfrag) { + firstfrag = false; + packet->message.header = HCP_HEADER(type, instruction); + if (ptr) { + memcpy(packet->message.data, ptr, + data_link_len - 1); + ptr += data_link_len - 1; + } + } else { + memcpy(&packet->message, ptr, data_link_len); + ptr += data_link_len; + } + + /* This is the last fragment, set the cb bit */ + if (hci_len == 0) + packet->header |= ~NFC_HCI_FRAGMENT; + + skb_queue_tail(&cmd->msg_frags, skb); + } + + mutex_lock(&hdev->msg_tx_mutex); + list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l); + mutex_unlock(&hdev->msg_tx_mutex); + + queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); + + return 0; + +out_skb_err: + skb_queue_purge(&cmd->msg_frags); + kfree(cmd); + + return err; +} + +u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe) +{ + int gate; + + for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++) + if (hdev->gate2pipe[gate] == pipe) + return gate; + + return 0xff; +} + +/* + * Receive hcp message for pipe, with type and cmd. + * skb contains optional message data only. 
+ */ +void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type, + u8 instruction, struct sk_buff *skb) +{ + switch (type) { + case NFC_HCI_HCP_RESPONSE: + nfc_hci_resp_received(hdev, instruction, skb); + break; + case NFC_HCI_HCP_COMMAND: + nfc_hci_cmd_received(hdev, pipe, instruction, skb); + break; + case NFC_HCI_HCP_EVENT: + nfc_hci_event_received(hdev, pipe, instruction, skb); + break; + default: + pr_err("UNKNOWN MSG Type %d, instruction=%d\n", + type, instruction); + kfree_skb(skb); + break; + } +} -- cgit v1.2.2 From eb738fe535ae8e44402c372ecc1321eee0552a09 Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Tue, 10 Apr 2012 19:43:07 +0200 Subject: NFC: SHDLC implementation Most NFC HCI chipsets actually use a simplified HDLC link layer to carry HCI payloads. This implementation registers itself as an HCI device on behalf of the NFC driver. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/hci/Kconfig | 8 + net/nfc/hci/Makefile | 1 + net/nfc/hci/shdlc.c | 945 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 954 insertions(+) create mode 100644 net/nfc/hci/shdlc.c (limited to 'net') diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig index c32d2d4a9635..17213a6362b4 100644 --- a/net/nfc/hci/Kconfig +++ b/net/nfc/hci/Kconfig @@ -6,3 +6,11 @@ config NFC_HCI Say Y here if you want to build support for a kernel NFC HCI implementation. This is mostly needed for devices that only process HCI frames, like for example the NXP pn544. + +config NFC_SHDLC + depends on NFC_HCI + bool "SHDLC link layer for HCI based NFC drivers" + default n + ---help--- + Say yes if you use an NFC HCI driver that requires SHDLC link layer. + If unsure, say N here. diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile index af17c7be051a..f9c44b2fb065 100644 --- a/net/nfc/hci/Makefile +++ b/net/nfc/hci/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_NFC_HCI) += hci.o hci-y := core.o hcp.o command.o +hci-$(CONFIG_NFC_SHDLC) += shdlc.o diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c new file mode 100644 index 000000000000..923bdf7c26d6 --- /dev/null +++ b/net/nfc/hci/shdlc.c @@ -0,0 +1,945 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define SHDLC_LLC_HEAD_ROOM 2 +#define SHDLC_LLC_TAIL_ROOM 2 + +#define SHDLC_MAX_WINDOW 4 +#define SHDLC_SREJ_SUPPORT false + +#define SHDLC_CONTROL_HEAD_MASK 0xe0 +#define SHDLC_CONTROL_HEAD_I 0x80 +#define SHDLC_CONTROL_HEAD_I2 0xa0 +#define SHDLC_CONTROL_HEAD_S 0xc0 +#define SHDLC_CONTROL_HEAD_U 0xe0 + +#define SHDLC_CONTROL_NS_MASK 0x38 +#define SHDLC_CONTROL_NR_MASK 0x07 +#define SHDLC_CONTROL_TYPE_MASK 0x18 + +#define SHDLC_CONTROL_M_MASK 0x1f + +enum sframe_type { + S_FRAME_RR = 0x00, + S_FRAME_REJ = 0x01, + S_FRAME_RNR = 0x02, + S_FRAME_SREJ = 0x03 +}; + +enum uframe_modifier { + U_FRAME_UA = 0x06, + U_FRAME_RSET = 0x19 +}; + +#define SHDLC_CONNECT_VALUE_MS 5 +#define SHDLC_T1_VALUE_MS(w) ((5 * w) / 4) +#define SHDLC_T2_VALUE_MS 300 + +#define SHDLC_DUMP_SKB(info, skb) \ +do { \ + pr_debug("%s:\n", info); \ + print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \ + 16, 1, skb->data, skb->len, 0); \ +} while (0) + +/* checks x < y <= z modulo 8 */ +static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z) +{ + if (x < z) + return ((x < y) && (y <= z)) ? true : false; + else + return ((y > x) || (y <= z)) ? true : false; +} + +/* checks x <= y < z modulo 8 */ +static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z) +{ + if (x <= z) + return ((x <= y) && (y < z)) ? true : false; + else /* x > z -> z+8 > x */ + return ((y >= x) || (y < z)) ? true : false; +} + +static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc, + int payload_len) +{ + struct sk_buff *skb; + + skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM + + shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM + + payload_len, GFP_KERNEL); + if (skb) + skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM); + + return skb; +} + +static void nfc_shdlc_add_len_crc(struct sk_buff *skb) +{ + u16 crc; + int len; + + len = skb->len + 2; + *skb_push(skb, 1) = len; + + crc = crc_ccitt(0xffff, skb->data, skb->len); + crc = ~crc; + *skb_put(skb, 1) = crc & 0xff; + *skb_put(skb, 1) = crc >> 8; +} + +/* immediately sends an S frame. */ +static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc, + enum sframe_type sframe_type, int nr) +{ + int r; + struct sk_buff *skb; + + pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr); + + skb = nfc_shdlc_alloc_skb(shdlc, 0); + if (skb == NULL) + return -ENOMEM; + + *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr; + + nfc_shdlc_add_len_crc(skb); + + r = shdlc->ops->xmit(shdlc, skb); + + kfree_skb(skb); + + return r; +} + +/* immediately sends an U frame. 
skb may contain optional payload */ +static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc, + struct sk_buff *skb, + enum uframe_modifier uframe_modifier) +{ + int r; + + pr_debug("uframe_modifier=%d\n", uframe_modifier); + + *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier; + + nfc_shdlc_add_len_crc(skb); + + r = shdlc->ops->xmit(shdlc, skb); + + kfree_skb(skb); + + return r; +} + +/* + * Free ack_pending frames until y_nr - 1, and reset t2 according to + * the remaining oldest ack_pending frame sent time + */ +static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr) +{ + struct sk_buff *skb; + int dnr = shdlc->dnr; /* MUST initially be < y_nr */ + + pr_debug("release ack pending up to frame %d excluded\n", y_nr); + + while (dnr != y_nr) { + pr_debug("release ack pending frame %d\n", dnr); + + skb = skb_dequeue(&shdlc->ack_pending_q); + kfree_skb(skb); + + dnr = (dnr + 1) % 8; + } + + if (skb_queue_empty(&shdlc->ack_pending_q)) { + if (shdlc->t2_active) { + del_timer_sync(&shdlc->t2_timer); + shdlc->t2_active = false; + + pr_debug + ("All sent frames acked. Stopped T2(retransmit)\n"); + } + } else { + skb = skb_peek(&shdlc->ack_pending_q); + + mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb + + msecs_to_jiffies(SHDLC_T2_VALUE_MS)); + shdlc->t2_active = true; + + pr_debug + ("Start T2(retransmit) for remaining unacked sent frames\n"); + } +} + +/* + * Receive validated frames from lower layer. skb contains HCI payload only. + * Handle according to algorithm at spec:10.8.2 + */ +static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc, + struct sk_buff *skb, int ns, int nr) +{ + int x_ns = ns; + int y_nr = nr; + + pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr); + + if (shdlc->state != SHDLC_CONNECTED) + goto exit; + + if (x_ns != shdlc->nr) { + nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr); + goto exit; + } + + if (shdlc->t1_active == false) { + shdlc->t1_active = true; + mod_timer(&shdlc->t1_timer, + msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w))); + pr_debug("(re)Start T1(send ack)\n"); + } + + if (skb->len) { + nfc_hci_recv_frame(shdlc->hdev, skb); + skb = NULL; + } + + shdlc->nr = (shdlc->nr + 1) % 8; + + if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { + nfc_shdlc_reset_t2(shdlc, y_nr); + + shdlc->dnr = y_nr; + } + +exit: + if (skb) + kfree_skb(skb); +} + +static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr) +{ + pr_debug("remote acked up to frame %d excluded\n", y_nr); + + if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { + nfc_shdlc_reset_t2(shdlc, y_nr); + shdlc->dnr = y_nr; + } +} + +static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + + pr_debug("ns reset to %d\n", shdlc->dnr); + + while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) { + skb_pull(skb, 2); /* remove len+control */ + skb_trim(skb, skb->len - 2); /* remove crc */ + skb_queue_head(&shdlc->send_q, skb); + } + shdlc->ns = shdlc->dnr; +} + +static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr) +{ + struct sk_buff *skb; + + pr_debug("remote asks retransmition from frame %d\n", y_nr); + + if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) { + if (shdlc->t2_active) { + del_timer_sync(&shdlc->t2_timer); + shdlc->t2_active = false; + pr_debug("Stopped T2(retransmit)\n"); + } + + if (shdlc->dnr != y_nr) { + while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) { + skb = skb_dequeue(&shdlc->ack_pending_q); + kfree_skb(skb); + } + } + + nfc_shdlc_requeue_ack_pending(shdlc); + } +} 
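/*
 * Worked example of the modulo-8 ack window handling above, with a
 * window of w = 4: if dnr = 6 (oldest unacked) and ns = 2 (next to
 * send), frames 6, 7, 0 and 1 sit in ack_pending_q. An RR or I-frame
 * carrying nr = 0 passes nfc_shdlc_x_lt_y_lteq_z(6, 0, 2) through its
 * wrapped branch, so nfc_shdlc_reset_t2() frees frames 6 and 7, dnr
 * becomes 0, and T2 is re-armed from the send time of frame 0, the
 * oldest frame still waiting for an ack.
 */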
+ +/* See spec RR:10.8.3 REJ:10.8.4 */ +static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc, + enum sframe_type s_frame_type, int nr) +{ + struct sk_buff *skb; + + if (shdlc->state != SHDLC_CONNECTED) + return; + + switch (s_frame_type) { + case S_FRAME_RR: + nfc_shdlc_rcv_ack(shdlc, nr); + if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */ + shdlc->rnr = false; + if (shdlc->send_q.qlen == 0) { + skb = nfc_shdlc_alloc_skb(shdlc, 0); + if (skb) + skb_queue_tail(&shdlc->send_q, skb); + } + } + break; + case S_FRAME_REJ: + nfc_shdlc_rcv_rej(shdlc, nr); + break; + case S_FRAME_RNR: + nfc_shdlc_rcv_ack(shdlc, nr); + shdlc->rnr = true; + break; + default: + break; + } +} + +static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r) +{ + pr_debug("result=%d\n", r); + + del_timer_sync(&shdlc->connect_timer); + + if (r == 0) { + shdlc->ns = 0; + shdlc->nr = 0; + shdlc->dnr = 0; + + shdlc->state = SHDLC_CONNECTED; + } else { + shdlc->state = SHDLC_DISCONNECTED; + + /* + * TODO: Could it be possible that there are pending + * executing commands that are waiting for connect to complete + * before they can be carried? As connect is a blocking + * operation, it would require that the userspace process can + * send commands on the same device from a second thread before + * the device is up. I don't think that is possible, is it? + */ + } + + shdlc->connect_result = r; + + wake_up(shdlc->connect_wq); +} + +static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + + pr_debug("\n"); + + skb = nfc_shdlc_alloc_skb(shdlc, 2); + if (skb == NULL) + return -ENOMEM; + + *skb_put(skb, 1) = SHDLC_MAX_WINDOW; + *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0; + + return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET); +} + +static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + + pr_debug("\n"); + + skb = nfc_shdlc_alloc_skb(shdlc, 0); + if (skb == NULL) + return -ENOMEM; + + return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA); +} + +static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc, + struct sk_buff *skb, + enum uframe_modifier u_frame_modifier) +{ + u8 w = SHDLC_MAX_WINDOW; + bool srej_support = SHDLC_SREJ_SUPPORT; + int r; + + pr_debug("u_frame_modifier=%d\n", u_frame_modifier); + + switch (u_frame_modifier) { + case U_FRAME_RSET: + if (shdlc->state == SHDLC_NEGOCIATING) { + /* we sent RSET, but chip wants to negociate */ + if (skb->len > 0) + w = skb->data[0]; + + if (skb->len > 1) + srej_support = skb->data[1] & 0x01 ? 
true : + false; + + if ((w <= SHDLC_MAX_WINDOW) && + (SHDLC_SREJ_SUPPORT || (srej_support == false))) { + shdlc->w = w; + shdlc->srej_support = srej_support; + r = nfc_shdlc_connect_send_ua(shdlc); + nfc_shdlc_connect_complete(shdlc, r); + } + } else if (shdlc->state > SHDLC_NEGOCIATING) { + /* + * TODO: Chip wants to reset link + * send ua, empty skb lists, reset counters + * propagate info to HCI layer + */ + } + break; + case U_FRAME_UA: + if ((shdlc->state == SHDLC_CONNECTING && + shdlc->connect_tries > 0) || + (shdlc->state == SHDLC_NEGOCIATING)) + nfc_shdlc_connect_complete(shdlc, 0); + break; + default: + break; + } + + kfree_skb(skb); +} + +static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + u8 control; + int nr; + int ns; + enum sframe_type s_frame_type; + enum uframe_modifier u_frame_modifier; + + if (shdlc->rcv_q.qlen) + pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen); + + while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) { + control = skb->data[0]; + skb_pull(skb, 1); + switch (control & SHDLC_CONTROL_HEAD_MASK) { + case SHDLC_CONTROL_HEAD_I: + case SHDLC_CONTROL_HEAD_I2: + ns = (control & SHDLC_CONTROL_NS_MASK) >> 3; + nr = control & SHDLC_CONTROL_NR_MASK; + nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr); + break; + case SHDLC_CONTROL_HEAD_S: + s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3; + nr = control & SHDLC_CONTROL_NR_MASK; + nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr); + kfree_skb(skb); + break; + case SHDLC_CONTROL_HEAD_U: + u_frame_modifier = control & SHDLC_CONTROL_M_MASK; + nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier); + break; + default: + pr_err("UNKNOWN Control=%d\n", control); + kfree_skb(skb); + break; + } + } +} + +static int nfc_shdlc_w_used(int ns, int dnr) +{ + int unack_count; + + if (dnr <= ns) + unack_count = ns - dnr; + else + unack_count = 8 - dnr + ns; + + return unack_count; +} + +/* Send frames according to algorithm at spec:10.8.1 */ +static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc) +{ + struct sk_buff *skb; + int r; + unsigned long time_sent; + + if (shdlc->send_q.qlen) + pr_debug + ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n", + shdlc->send_q.qlen, shdlc->ns, shdlc->dnr, + shdlc->rnr == false ? "false" : "true", + shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr), + shdlc->ack_pending_q.qlen); + + while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w && + (shdlc->rnr == false)) { + + if (shdlc->t1_active) { + del_timer_sync(&shdlc->t1_timer); + shdlc->t1_active = false; + pr_debug("Stopped T1(send ack)\n"); + } + + skb = skb_dequeue(&shdlc->send_q); + + *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) | + shdlc->nr; + + pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns, + shdlc->nr); + /* SHDLC_DUMP_SKB("shdlc frame written", skb); */ + + nfc_shdlc_add_len_crc(skb); + + r = shdlc->ops->xmit(shdlc, skb); + if (r < 0) { + /* + * TODO: Cannot send, shdlc machine is dead, we + * must propagate the information up to HCI. 
+ */ + shdlc->hard_fault = r; + break; + } + + shdlc->ns = (shdlc->ns + 1) % 8; + + time_sent = jiffies; + *(unsigned long *)skb->cb = time_sent; + + skb_queue_tail(&shdlc->ack_pending_q, skb); + + if (shdlc->t2_active == false) { + shdlc->t2_active = true; + mod_timer(&shdlc->t2_timer, time_sent + + msecs_to_jiffies(SHDLC_T2_VALUE_MS)); + pr_debug("Started T2 (retransmit)\n"); + } + } +} + +static void nfc_shdlc_connect_timeout(unsigned long data) +{ + struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; + + pr_debug("\n"); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +static void nfc_shdlc_t1_timeout(unsigned long data) +{ + struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; + + pr_debug("SoftIRQ: need to send ack\n"); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +static void nfc_shdlc_t2_timeout(unsigned long data) +{ + struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; + + pr_debug("SoftIRQ: need to retransmit\n"); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +static void nfc_shdlc_sm_work(struct work_struct *work) +{ + struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work); + int r; + + pr_debug("\n"); + + mutex_lock(&shdlc->state_mutex); + + switch (shdlc->state) { + case SHDLC_DISCONNECTED: + skb_queue_purge(&shdlc->rcv_q); + skb_queue_purge(&shdlc->send_q); + skb_queue_purge(&shdlc->ack_pending_q); + break; + case SHDLC_CONNECTING: + if (shdlc->connect_tries++ < 5) + r = nfc_shdlc_connect_initiate(shdlc); + else + r = -ETIME; + if (r < 0) + nfc_shdlc_connect_complete(shdlc, r); + else { + mod_timer(&shdlc->connect_timer, jiffies + + msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); + + shdlc->state = SHDLC_NEGOCIATING; + } + break; + case SHDLC_NEGOCIATING: + if (timer_pending(&shdlc->connect_timer) == 0) { + shdlc->state = SHDLC_CONNECTING; + queue_work(shdlc->sm_wq, &shdlc->sm_work); + } + + nfc_shdlc_handle_rcv_queue(shdlc); + break; + case SHDLC_CONNECTED: + nfc_shdlc_handle_rcv_queue(shdlc); + nfc_shdlc_handle_send_queue(shdlc); + + if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) { + pr_debug + ("Handle T1(send ack) elapsed (T1 now inactive)\n"); + + shdlc->t1_active = false; + r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR, + shdlc->nr); + if (r < 0) + shdlc->hard_fault = r; + } + + if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) { + pr_debug + ("Handle T2(retransmit) elapsed (T2 inactive)\n"); + + shdlc->t2_active = false; + + nfc_shdlc_requeue_ack_pending(shdlc); + nfc_shdlc_handle_send_queue(shdlc); + } + + if (shdlc->hard_fault) { + /* + * TODO: Handle hard_fault that occured during + * this invocation of the shdlc worker + */ + } + break; + default: + break; + } + mutex_unlock(&shdlc->state_mutex); +} + +/* + * Called from syscall context to establish shdlc link. Sleeps until + * link is ready or failure. 
+ */ +static int nfc_shdlc_connect(struct nfc_shdlc *shdlc) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq); + + pr_debug("\n"); + + mutex_lock(&shdlc->state_mutex); + + shdlc->state = SHDLC_CONNECTING; + shdlc->connect_wq = &connect_wq; + shdlc->connect_tries = 0; + shdlc->connect_result = 1; + + mutex_unlock(&shdlc->state_mutex); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); + + wait_event(connect_wq, shdlc->connect_result != 1); + + return shdlc->connect_result; +} + +static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc) +{ + pr_debug("\n"); + + mutex_lock(&shdlc->state_mutex); + + shdlc->state = SHDLC_DISCONNECTED; + + mutex_unlock(&shdlc->state_mutex); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} + +/* + * Receive an incoming shdlc frame. Frame has already been crc-validated. + * skb contains only LLC header and payload. + * If skb == NULL, it is a notification that the link below is dead. + */ +void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb) +{ + if (skb == NULL) { + pr_err("NULL Frame -> link is dead\n"); + shdlc->hard_fault = -EREMOTEIO; + } else { + SHDLC_DUMP_SKB("incoming frame", skb); + skb_queue_tail(&shdlc->rcv_q, skb); + } + + queue_work(shdlc->sm_wq, &shdlc->sm_work); +} +EXPORT_SYMBOL(nfc_shdlc_recv_frame); + +static int nfc_shdlc_open(struct nfc_hci_dev *hdev) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + int r; + + pr_debug("\n"); + + if (shdlc->ops->open) { + r = shdlc->ops->open(shdlc); + if (r < 0) + return r; + } + + r = nfc_shdlc_connect(shdlc); + if (r < 0 && shdlc->ops->close) + shdlc->ops->close(shdlc); + + return r; +} + +static void nfc_shdlc_close(struct nfc_hci_dev *hdev) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + pr_debug("\n"); + + nfc_shdlc_disconnect(shdlc); + + if (shdlc->ops->close) + shdlc->ops->close(shdlc); +} + +static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + int r = 0; + + pr_debug("\n"); + + if (shdlc->ops->hci_ready) + r = shdlc->ops->hci_ready(shdlc); + + return r; +} + +static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb); + + skb_queue_tail(&shdlc->send_q, skb); + + queue_work(shdlc->sm_wq, &shdlc->sm_work); + + return 0; +} + +static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + pr_debug("\n"); + + if (shdlc->ops->start_poll) + return shdlc->ops->start_poll(shdlc, protocols); + + return 0; +} + +static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + if (shdlc->ops->target_from_gate) + return shdlc->ops->target_from_gate(shdlc, gate, target); + + return -EPERM; +} + +static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev, + u8 gate, + struct nfc_target *target) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + pr_debug("\n"); + + if (shdlc->ops->complete_target_discovered) + return shdlc->ops->complete_target_discovered(shdlc, gate, + target); + + return 0; +} + +static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev, + struct nfc_target *target, + struct sk_buff *skb, + struct sk_buff **res_skb) +{ + struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); + + if (shdlc->ops->data_exchange) + return shdlc->ops->data_exchange(shdlc, 
target, skb, res_skb); + + return -EPERM; +} + +static struct nfc_hci_ops shdlc_ops = { + .open = nfc_shdlc_open, + .close = nfc_shdlc_close, + .hci_ready = nfc_shdlc_hci_ready, + .xmit = nfc_shdlc_xmit, + .start_poll = nfc_shdlc_start_poll, + .target_from_gate = nfc_shdlc_target_from_gate, + .complete_target_discovered = nfc_shdlc_complete_target_discovered, + .data_exchange = nfc_shdlc_data_exchange, +}; + +struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, int tx_tailroom, + int max_link_payload, const char *devname) +{ + struct nfc_shdlc *shdlc; + int r; + char name[32]; + + if (ops->xmit == NULL) + return NULL; + + shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL); + if (shdlc == NULL) + return NULL; + + mutex_init(&shdlc->state_mutex); + shdlc->ops = ops; + shdlc->state = SHDLC_DISCONNECTED; + + init_timer(&shdlc->connect_timer); + shdlc->connect_timer.data = (unsigned long)shdlc; + shdlc->connect_timer.function = nfc_shdlc_connect_timeout; + + init_timer(&shdlc->t1_timer); + shdlc->t1_timer.data = (unsigned long)shdlc; + shdlc->t1_timer.function = nfc_shdlc_t1_timeout; + + init_timer(&shdlc->t2_timer); + shdlc->t2_timer.data = (unsigned long)shdlc; + shdlc->t2_timer.function = nfc_shdlc_t2_timeout; + + shdlc->w = SHDLC_MAX_WINDOW; + shdlc->srej_support = SHDLC_SREJ_SUPPORT; + + skb_queue_head_init(&shdlc->rcv_q); + skb_queue_head_init(&shdlc->send_q); + skb_queue_head_init(&shdlc->ack_pending_q); + + INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work); + snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname); + shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (shdlc->sm_wq == NULL) + goto err_allocwq; + + shdlc->client_headroom = tx_headroom; + shdlc->client_tailroom = tx_tailroom; + + shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols, + tx_headroom + SHDLC_LLC_HEAD_ROOM, + tx_tailroom + SHDLC_LLC_TAIL_ROOM, + max_link_payload); + if (shdlc->hdev == NULL) + goto err_allocdev; + + nfc_hci_set_clientdata(shdlc->hdev, shdlc); + + r = nfc_hci_register_device(shdlc->hdev); + if (r < 0) + goto err_regdev; + + return shdlc; + +err_regdev: + nfc_hci_free_device(shdlc->hdev); + +err_allocdev: + destroy_workqueue(shdlc->sm_wq); + +err_allocwq: + kfree(shdlc); + + return NULL; +} +EXPORT_SYMBOL(nfc_shdlc_allocate); + +void nfc_shdlc_free(struct nfc_shdlc *shdlc) +{ + pr_debug("\n"); + + /* TODO: Check that this cannot be called while still in use */ + + nfc_hci_unregister_device(shdlc->hdev); + nfc_hci_free_device(shdlc->hdev); + + destroy_workqueue(shdlc->sm_wq); + + skb_queue_purge(&shdlc->rcv_q); + skb_queue_purge(&shdlc->send_q); + skb_queue_purge(&shdlc->ack_pending_q); + + kfree(shdlc); +} +EXPORT_SYMBOL(nfc_shdlc_free); + +void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata) +{ + pr_debug("\n"); + + shdlc->clientdata = clientdata; +} +EXPORT_SYMBOL(nfc_shdlc_set_clientdata); + +void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc) +{ + return shdlc->clientdata; +} +EXPORT_SYMBOL(nfc_shdlc_get_clientdata); + +struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc) +{ + return shdlc->hdev; +} +EXPORT_SYMBOL(nfc_shdlc_get_hci_dev); -- cgit v1.2.2 From c4fbb6515a4dcec83d340247639b5644c4745528 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:09 +0200 Subject: NFC: The core part should generate the target index The target index can be used by userspace to uniquely identify a target 
and thus should be kept unique, per NFC adapter. Moreover, some protocols do not provide a logical index when discovering new targets, so we have to generate one for them. For NCI or pn533 to fetch their logical index, we added a logical_idx field to the target structure. Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/core.c | 5 +++++ net/nfc/nci/core.c | 2 +- net/nfc/nci/ntf.c | 11 ++++++----- net/nfc/rawsock.c | 6 ++++++ 4 files changed, 18 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/nfc/core.c b/net/nfc/core.c index deb4721ce8a1..d92400087b61 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -428,10 +428,15 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb); int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int n_targets) { + int i; + pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); dev->polling = false; + for (i = 0; i < n_targets; i++) + targets[i].idx = dev->target_idx++; + spin_lock_bh(&dev->targets_lock); dev->targets_generation++; diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 9ec065bb9ee1..8737c2089fdd 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -477,7 +477,7 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, } if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { - param.rf_discovery_id = target->idx; + param.rf_discovery_id = target->logical_idx; if (protocol == NFC_PROTO_JEWEL) param.rf_protocol = NCI_RF_PROTOCOL_T1T; diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 2e3dee42196d..99e1632e6aac 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -227,7 +227,7 @@ static void nci_add_new_target(struct nci_dev *ndev, for (i = 0; i < ndev->n_targets; i++) { target = &ndev->targets[i]; - if (target->idx == ntf->rf_discovery_id) { + if (target->logical_idx == ntf->rf_discovery_id) { /* This target already exists, add the new protocol */ nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, @@ -248,10 +248,10 @@ static void nci_add_new_target(struct nci_dev *ndev, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); if (!rc) { - target->idx = ntf->rf_discovery_id; + target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; - pr_debug("target_idx %d, n_targets %d\n", target->idx, + pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, ndev->n_targets); } } @@ -372,10 +372,11 @@ static void nci_target_auto_activated(struct nci_dev *ndev, if (rc) return; - target->idx = ntf->rf_discovery_id; + target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; - pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets); + pr_debug("logical idx %d, n_targets %d\n", + target->logical_idx, ndev->n_targets); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 5a839ceb2e82..b2825aa85f64 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -92,6 +92,12 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, goto error; } + if (addr->target_idx > dev->target_idx - 1 || + addr->target_idx < dev->target_idx - dev->n_targets) { + rc = -EINVAL; + goto error; + } + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; -- cgit v1.2.2 From 01ae0eea9bed132a9c4a2c207dbf8e05b0051071 Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Tue, 10 Apr 2012 19:43:10 +0200 Subject: NFC: Fix next target_idx type and rename for clarity Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz 
Signed-off-by: John W. Linville --- net/nfc/core.c | 2 +- net/nfc/rawsock.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/nfc/core.c b/net/nfc/core.c index d92400087b61..db88429cfc1a 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -435,7 +435,7 @@ int nfc_targets_found(struct nfc_dev *dev, dev->polling = false; for (i = 0; i < n_targets; i++) - targets[i].idx = dev->target_idx++; + targets[i].idx = dev->target_next_idx++; spin_lock_bh(&dev->targets_lock); diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index b2825aa85f64..ec1134c9e07f 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -92,8 +92,8 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, goto error; } - if (addr->target_idx > dev->target_idx - 1 || - addr->target_idx < dev->target_idx - dev->n_targets) { + if (addr->target_idx > dev->target_next_idx - 1 || + addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; goto error; } -- cgit v1.2.2 From 144612cacc0b5c230f0b3aebc3a3a53854c332ee Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Tue, 10 Apr 2012 19:43:11 +0200 Subject: NFC: Changed target activated state logic Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/core.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/nfc/core.c b/net/nfc/core.c index db88429cfc1a..44a701806ba5 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -95,7 +95,7 @@ int nfc_dev_down(struct nfc_dev *dev) goto error; } - if (dev->polling || dev->remote_activated) { + if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) { rc = -EBUSY; goto error; } @@ -211,6 +211,8 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) } rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len); + if (!rc) + dev->activated_target_idx = target_index; error: device_unlock(&dev->dev); @@ -246,6 +248,7 @@ int nfc_dep_link_down(struct nfc_dev *dev) rc = dev->ops->dep_link_down(dev); if (!rc) { dev->dep_link_up = false; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; nfc_llcp_mac_is_down(dev); nfc_genl_dep_link_down_event(dev); } @@ -290,7 +293,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) rc = dev->ops->activate_target(dev, target_idx, protocol); if (!rc) - dev->remote_activated = true; + dev->activated_target_idx = target_idx; error: device_unlock(&dev->dev); @@ -318,7 +321,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx) } dev->ops->deactivate_target(dev, target_idx); - dev->remote_activated = false; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; error: device_unlock(&dev->dev); @@ -352,6 +355,18 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, goto error; } + if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) { + rc = -ENOTCONN; + kfree_skb(skb); + goto error; + } + + if (target_idx != dev->activated_target_idx) { + rc = -EADDRNOTAVAIL; + kfree_skb(skb); + goto error; + } + rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context); error: @@ -482,6 +497,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx) dev->targets_generation++; dev->n_targets--; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; if (dev->n_targets) { memcpy(&dev->targets[i], &dev->targets[i + 1], @@ -575,6 +591,8 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, /* first generation must not be 0 */ 
dev->targets_generation = 1; + dev->activated_target_idx = NFC_TARGET_IDX_NONE; + return dev; } EXPORT_SYMBOL(nfc_allocate_device); -- cgit v1.2.2 From c8d56ae78653c02fc6e6f304a18f860302481c2d Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Tue, 10 Apr 2012 19:43:12 +0200 Subject: NFC: Add Core support to generate tag lost event Some HW/drivers get notifications when a tag moves out of the radio field. This notification is now forwarded to user space through netlink. Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/core.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/nfc/core.c b/net/nfc/core.c index 44a701806ba5..da353275fbc6 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -33,6 +33,8 @@ #define VERSION "0.1" +#define NFC_CHECK_PRES_FREQ_MS 2000 + int nfc_devlist_generation; DEFINE_MUTEX(nfc_devlist_mutex); @@ -292,9 +294,14 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) } rc = dev->ops->activate_target(dev, target_idx, protocol); - if (!rc) + if (!rc) { dev->activated_target_idx = target_idx; + if (dev->ops->check_presence) + mod_timer(&dev->check_pres_timer, jiffies + + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); + } + error: device_unlock(&dev->dev); return rc; @@ -320,6 +327,9 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx) goto error; } + if (dev->ops->check_presence) + del_timer_sync(&dev->check_pres_timer); + dev->ops->deactivate_target(dev, target_idx); dev->activated_target_idx = NFC_TARGET_IDX_NONE; @@ -367,8 +377,15 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, goto error; } + if (dev->ops->check_presence) + del_timer_sync(&dev->check_pres_timer); + rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context); + if (!rc && dev->ops->check_presence) + mod_timer(&dev->check_pres_timer, jiffies + + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); + error: device_unlock(&dev->dev); return rc; @@ -521,11 +538,46 @@ static void nfc_release(struct device *d) pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + if (dev->ops->check_presence) { + del_timer_sync(&dev->check_pres_timer); + destroy_workqueue(dev->check_pres_wq); + } + nfc_genl_data_exit(&dev->genl_data); kfree(dev->targets); kfree(dev); } +static void nfc_check_pres_work(struct work_struct *work) +{ + struct nfc_dev *dev = container_of(work, struct nfc_dev, + check_pres_work); + int rc; + + device_lock(&dev->dev); + + if (dev->activated_target_idx != NFC_TARGET_IDX_NONE && + timer_pending(&dev->check_pres_timer) == 0) { + rc = dev->ops->check_presence(dev, dev->activated_target_idx); + if (!rc) { + mod_timer(&dev->check_pres_timer, jiffies + + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); + } else { + nfc_target_lost(dev, dev->activated_target_idx); + dev->activated_target_idx = NFC_TARGET_IDX_NONE; + } + } + + device_unlock(&dev->dev); +} + +static void nfc_check_pres_timeout(unsigned long data) +{ + struct nfc_dev *dev = (struct nfc_dev *)data; + + queue_work(dev->check_pres_wq, &dev->check_pres_work); +} + struct class nfc_class = { .name = "nfc", .dev_release = nfc_release, @@ -593,6 +645,24 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, dev->activated_target_idx = NFC_TARGET_IDX_NONE; + if (ops->check_presence) { + char name[32]; + init_timer(&dev->check_pres_timer); + dev->check_pres_timer.data = (unsigned long)dev; + dev->check_pres_timer.function = 
nfc_check_pres_timeout; + + INIT_WORK(&dev->check_pres_work, nfc_check_pres_work); + snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx); + dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT | + WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (dev->check_pres_wq == NULL) { + kfree(dev); + return NULL; + } + } + + return dev; } EXPORT_SYMBOL(nfc_allocate_device); -- cgit v1.2.2 From 4be646ecc94b34acec41aba628b57cfc02ab7ae0 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:13 +0200 Subject: NFC: Dump LLCP frames At KERN_DEBUG level. Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/llcp/llcp.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'net') diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 17a578f641f1..d5e87c35002a 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -388,6 +388,9 @@ static void nfc_llcp_tx_work(struct work_struct *work) skb = skb_dequeue(&local->tx_queue); if (skb != NULL) { pr_debug("Sending pending skb\n"); + print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); + nfc_data_exchange(local->dev, local->target_idx, skb, nfc_llcp_recv, local); } else { @@ -814,6 +817,10 @@ static void nfc_llcp_rx_work(struct work_struct *work) pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); + if (ptype != LLCP_PDU_SYMM) + print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, skb->len, true); + switch (ptype) { case LLCP_PDU_SYMM: pr_debug("SYMM\n"); -- cgit v1.2.2 From 279cf174aea84202c5fef4675ff3f1265f071c8e Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:14 +0200 Subject: NFC: No need to apply twice the modulo op to LLCP's recv_n recv_n is set properly when receiving an HDLC frame. Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/llcp/commands.c | 2 +- net/nfc/llcp/llcp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index ef10ffcb4b6f..4aa52b8b6c0c 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c @@ -522,7 +522,7 @@ int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) skb_put(skb, LLCP_SEQUENCE_SIZE); - skb->data[2] = sock->recv_n % 16; + skb->data[2] = sock->recv_n; skb_queue_head(&local->tx_queue, skb); diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index d5e87c35002a..2cf01e642566 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -428,7 +428,7 @@ static u8 nfc_llcp_nr(struct sk_buff *pdu) static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) { - pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16); + pdu->data[2] = (sock->send_n << 4) | (sock->recv_n); sock->send_n = (sock->send_n + 1) % 16; sock->recv_ack_n = (sock->recv_n - 1) % 16; } -- cgit v1.2.2 From 324b0af6f5a48dc38dac016eed14d019cac5903f Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:15 +0200 Subject: NFC: Fix LLCP TLV building routine The if logic could lead to zero length TLVs. Signed-off-by: Samuel Ortiz Signed-off-by: John W. 
Linville --- net/nfc/llcp/commands.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index 4aa52b8b6c0c..34ee6847806a 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c @@ -102,7 +102,7 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length) length = llcp_tlv_length[type]; if (length == 0 && value_length == 0) return NULL; - else + else if (length == 0) length = value_length; *tlv_length = 2 + length; -- cgit v1.2.2 From ffc29315e5b665d3e7e17d6156ac82f85a6d0205 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:16 +0200 Subject: NFC: Call llcp_add_header properly when sending LLCP DM or DISC dsap and ssap were swapped when sending DN or DISC. Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/llcp/commands.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index 34ee6847806a..11a3b7d98dc5 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c @@ -248,7 +248,7 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock) skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC); + skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC); skb_queue_tail(&local->tx_queue, skb); @@ -416,7 +416,7 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); - skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM); + skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM); memcpy(skb_put(skb, 1), &reason, 1); -- cgit v1.2.2 From 56d5876a22e79b0bb82eb3dc5f2134fa429daa2e Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:19 +0200 Subject: NFC: Add MIUX to the local LLCP general bytes Signed-off-by: Samuel Ortiz Signed-off-by: John W. Linville --- net/nfc/llcp/llcp.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'net') diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 2cf01e642566..31a05e55619f 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -307,6 +307,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) u8 *gb_cur, *version_tlv, version, version_length; u8 *lto_tlv, lto, lto_length; u8 *wks_tlv, wks_length; + u8 *miux_tlv, miux_length; + __be16 miux; u8 gb_len = 0; version = LLCP_VERSION_11; @@ -324,6 +326,11 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) &wks_length); gb_len += wks_length; + miux = cpu_to_be16(LLCP_MAX_MIUX); + miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, + &miux_length); + gb_len += miux_length; + gb_len += ARRAY_SIZE(llcp_magic); if (gb_len > NFC_MAX_GT_LEN) { @@ -345,6 +352,9 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) memcpy(gb_cur, wks_tlv, wks_length); gb_cur += wks_length; + memcpy(gb_cur, miux_tlv, miux_length); + gb_cur += miux_length; + kfree(version_tlv); kfree(lto_tlv); -- cgit v1.2.2 From 91b0ade112136a1504677defa91cf137d9a53994 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 10 Apr 2012 19:43:20 +0200 Subject: NFC: Fix LLCP link timeout typo We were sending the LTO TLV as a version TLV instead of the actual link timeout one. Signed-off-by: Samuel Ortiz Signed-off-by: John W. 
Linville --- net/nfc/llcp/llcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 31a05e55619f..92988aa620dc 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -318,7 +318,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) /* 1500 ms */ lto = 150; - lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, <o, 1, <o_length); + lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, <o, 1, <o_length); gb_len += lto_length; pr_debug("Local wks 0x%lx\n", local->local_wks); -- cgit v1.2.2 From e35f30c131a562bafd069820a6983fd4023e606e Mon Sep 17 00:00:00 2001 From: "Alexey I. Froloff" Date: Fri, 6 Apr 2012 05:50:58 +0000 Subject: Treat ND option 31 as userland (DNSSL support) As specified in RFC6106, DNSSL option contains one or more domain names of DNS suffixes. 8-bit identifier of the DNSSL option type as assigned by the IANA is 31. This option should also be treated as userland. Signed-off-by: Alexey I. Froloff Signed-off-by: David S. Miller --- net/ipv6/ndisc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 1d6fb0c94da1..7cb236e8e261 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -15,6 +15,7 @@ /* * Changes: * + * Alexey I. Froloff : RFC6106 (DNSSL) support * Pierre Ynard : export userland ND options * through netlink (RDNSS support) * Lars Fenneberg : fixed MTU setting on receipt @@ -228,7 +229,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) { - return opt->nd_opt_type == ND_OPT_RDNSS; + return opt->nd_opt_type == ND_OPT_RDNSS || + opt->nd_opt_type == ND_OPT_DNSSL; } static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, -- cgit v1.2.2 From e1e420c71b53829c661123a21b14a42d821e5e7f Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 12 Apr 2012 09:28:13 +0000 Subject: net/core: simple_strtoul cleanup Changed net/core/net-sysfs.c: netdev_store() to use kstrtoul() instead of obsolete simple_strtoul(). Signed-off-by: Shuah Khan Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 495586232aa1..97d0f2453a0e 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -74,15 +74,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, int (*set)(struct net_device *, unsigned long)) { struct net_device *net = to_net_dev(dev); - char *endp; unsigned long new; int ret = -EINVAL; if (!capable(CAP_NET_ADMIN)) return -EPERM; - new = simple_strtoul(buf, &endp, 0); - if (endp == buf) + ret = kstrtoul(buf, 0, &new); + if (ret) goto err; if (!rtnl_trylock()) -- cgit v1.2.2 From 6fdbd1648be5db20d172bcd013c8874bb2000700 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Tue, 10 Apr 2012 08:51:27 +0000 Subject: net/ipv6/ipv6_sockglue.c: Removed redundant extern extern int sysctl_mld_max_msf is already defined in linux/ipv6.h. Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/ipv6/ipv6_sockglue.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 63dd1f89ed7d..ca1af0760c4c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -678,7 +678,6 @@ done: } case MCAST_MSFILTER: { - extern int sysctl_mld_max_msf; struct group_filter *gsf; if (optlen < GROUP_FILTER_SIZE(0)) -- cgit v1.2.2 From 46ba5b23c32667821b748b4e0b74667e750621cd Mon Sep 17 00:00:00 2001 From: Jeffrin Jose Date: Sun, 8 Apr 2012 06:07:42 +0000 Subject: net: Fixed coding style issues relating to braces. Fixed coding style issues in net/core/utils.c in relation with braces placement. Signed-off-by: Jeffrin Jose Signed-off-by: David S. Miller --- net/core/utils.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/core/utils.c b/net/core/utils.c index dc3c3faff2f4..39895a65e54a 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -58,14 +58,11 @@ __be32 in_aton(const char *str) int i; l = 0; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { l <<= 8; - if (*str != '\0') - { + if (*str != '\0') { val = 0; - while (*str != '\0' && *str != '.' && *str != '\n') - { + while (*str != '\0' && *str != '.' && *str != '\n') { val *= 10; val += *str - '0'; str++; -- cgit v1.2.2 From c1412fce7eccae62b4de22494f6ab3ff8a90c0c6 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Thu, 12 Apr 2012 17:36:17 -0400 Subject: net/ipv6/exthdrs.c: Strict PadN option checking Added strict checking of PadN, as PadN can be used to increase header size and thus push the protocol header into the 2nd fragment. PadN is used to align the options within the Hop-by-Hop or Destination Options header to 64-bit boundaries. The maximum valid size is thus 7 bytes. RFC 4942 recommends to actively check the "payload" itself and ensure that it contains only zeroes. See also RFC 4942 section 2.1.9.5. Signed-off-by: Eldad Zack Signed-off-by: David S. Miller --- net/ipv6/exthdrs.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index c486b8e1817f..aa0a51e64682 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -153,6 +153,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) while (len > 0) { int optlen = nh[off + 1] + 2; + int i; switch (nh[off]) { case IPV6_TLV_PAD0: @@ -160,6 +161,21 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) break; case IPV6_TLV_PADN: + /* RFC 2460 states that the purpose of PadN is + * to align the containing header to multiples + * of 8. 7 is therefore the highest valid value. + * See also RFC 4942, Section 2.1.9.5. + */ + if (optlen > 7) + goto bad; + /* RFC 4942 recommends receiving hosts to + * actively check PadN payload to contain + * only zeroes. + */ + for (i = 2; i < optlen; i++) { + if (nh[off + i] != 0) + goto bad; + } break; default: /* Other TLV code so scan list */ -- cgit v1.2.2 From 447648128ec22e294604674ffe1064aa3ec3b767 Mon Sep 17 00:00:00 2001 From: Dmitry Tarnyagin Date: Thu, 12 Apr 2012 08:27:24 +0000 Subject: caif: set traffic class for caif packets Set traffic class for CAIF packets, based on socket priority, CAIF protocol type, or type of message. Traffic class mapping for different packet types: - control: TC_PRIO_CONTROL; - flow control: TC_PRIO_CONTROL; - at: TC_PRIO_CONTROL; - rfm: TC_PRIO_INTERACTIVE_BULK; - other sockets: equals to socket's TC; - network data: no change. 
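A minimal userspace sketch (an assumption for illustration, not part of this patch): the protocol-based defaults introduced below only seed sk->sk_priority in caif_create(); an application may replace that value with the standard SO_PRIORITY socket option, and transmit_skb() then copies whatever sk_priority holds into each CAIF packet via the new cfpkt_set_prio() helper. The function name open_caif_at_socket_interactive() and the TC_PRIO_INTERACTIVE choice are illustrative assumptions.

	/*
	 * Hypothetical override of the CAIFPROTO_AT default priority
	 * (TC_PRIO_CONTROL) with TC_PRIO_INTERACTIVE via SO_PRIORITY.
	 */
	#include <sys/socket.h>
	#include <unistd.h>
	#include <linux/caif/caif_socket.h>
	#include <linux/pkt_sched.h>

	static int open_caif_at_socket_interactive(void)
	{
		int prio = TC_PRIO_INTERACTIVE;
		int s = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

		if (s < 0)
			return -1;
		/* Replaces the per-protocol default set in caif_create(). */
		if (setsockopt(s, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
			close(s);
			return -1;
		}
		return s;
	}
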
Signed-off-by: Dmitry Tarnyagin Signed-off-by: David S. Miller --- net/caif/caif_socket.c | 16 ++++++++++++++-- net/caif/cfctrl.c | 4 ++++ net/caif/cfpkt_skbuff.c | 7 +++++++ net/caif/cfsrvl.c | 3 +++ 4 files changed, 28 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 5016fa57b623..ce47ee9f48c8 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include @@ -505,6 +505,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); memset(skb->cb, 0, sizeof(struct caif_payload_info)); + cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); if (cf_sk->layer.dn == NULL) { kfree_skb(skb); @@ -1062,6 +1063,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; + /* Initialize default priority for well-known cases */ + switch (protocol) { + case CAIFPROTO_AT: + sk->sk_priority = TC_PRIO_CONTROL; + break; + case CAIFPROTO_RFM: + sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; + break; + default: + sk->sk_priority = TC_PRIO_BESTEFFORT; + } + /* * Lock in order to try to stop someone from opening the socket * too early. @@ -1081,7 +1094,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, set_rx_flow_on(cf_sk); /* Set default options on configuration */ - cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 5cf52225692e..047cd0eec022 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) cfctrl->serv.dev_info.id = physlinkid; cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); cfpkt_addbdy(pkt, physlinkid); + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); dn->transmit(dn, pkt); } @@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer, * might arrive with the newly allocated channel ID. 
*/ cfpkt_info(pkt)->dev_info->id = param->phyid; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); ret = dn->transmit(dn, pkt); if (ret < 0) { @@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); cfpkt_addbdy(pkt, channelid); init_info(cfpkt_info(pkt), cfctrl); + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); ret = dn->transmit(dn, pkt); #ifndef CAIF_NO_LOOP diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index e335ba859b97..863dedd91bb6 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) memcpy(skb2->data, split, len2nd); skb2->tail += len2nd; skb2->len += len2nd; + skb2->priority = skb->priority; return skb_to_pkt(skb2); } @@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info); + +void cfpkt_set_prio(struct cfpkt *pkt, int prio) +{ + pkt_to_skb(pkt)->priority = prio; +} +EXPORT_SYMBOL(cfpkt_set_prio); diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index 4aa33d4496b6..dd485f6128e8 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); return layr->dn->transmit(layr->dn, pkt); } case CAIF_MODEMCMD_FLOW_OFF_REQ: @@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; + cfpkt_set_prio(pkt, TC_PRIO_CONTROL); return layr->dn->transmit(layr->dn, pkt); } default: -- cgit v1.2.2 From 9d02daf754238adac48fa075ee79e7edd3d79ed3 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 8 Apr 2012 09:55:43 +0000 Subject: pppoatm: Fix excessive queue bloat We discovered that PPPoATM has an excessively deep transmit queue. A queue the size of the default socket send buffer (wmem_default) is maintained between the PPP generic core and the ATM device. Fix it to queue a maximum of *two* packets. The one the ATM device is currently working on, and one more for the ATM driver to process immediately in its TX done interrupt handler. The PPP core is designed to feed packets to the channel with minimal latency, so that really ought to be enough to keep the ATM device busy. While we're at it, fix the fact that we were triggering the wakeup tasklet on *every* pppoatm_pop() call. The comment saying "this is inefficient, but doing it right is too hard" turns out to be overly pessimistic... I think :) On machines like the Traverse Geos, with a slow Geode CPU and two high-speed ADSL2+ interfaces, there were reports of extremely high CPU usage which could partly be attributed to the extra wakeups. (The wakeup handling could actually be made a whole lot easier if we stop checking sk->sk_sndbuf altogether. Given that we now only queue *two* packets ever, one wonders what the point is. As it is, you could already deadlock the thing by setting the sk_sndbuf to a value lower than the MTU of the device, and it'd just block for ever.) Signed-off-by: David Woodhouse Signed-off-by: David S. 
Miller --- net/atm/pppoatm.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 85 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 614d3fc47ede..ce1e59fdae7b 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -62,11 +62,24 @@ struct pppoatm_vcc { void (*old_pop)(struct atm_vcc *, struct sk_buff *); /* keep old push/pop for detaching */ enum pppoatm_encaps encaps; + atomic_t inflight; + unsigned long blocked; int flags; /* SC_COMP_PROT - compress protocol */ struct ppp_channel chan; /* interface to generic ppp layer */ struct tasklet_struct wakeup_tasklet; }; +/* + * We want to allow two packets in the queue. The one that's currently in + * flight, and *one* queued up ready for the ATM device to send immediately + * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so + * inflight == -2 represents an empty queue, -1 one packet, and zero means + * there are two packets in the queue. + */ +#define NONE_INFLIGHT -2 + +#define BLOCKED 0 + /* * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol * ID (0xC021) used in autodetection @@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsigned long arg) static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb) { struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); + pvcc->old_pop(atmvcc, skb); + atomic_dec(&pvcc->inflight); + /* - * We don't really always want to do this since it's - * really inefficient - it would be much better if we could - * test if we had actually throttled the generic layer. - * Unfortunately then there would be a nasty SMP race where - * we could clear that flag just as we refuse another packet. - * For now we do the safe thing. + * We always used to run the wakeup tasklet unconditionally here, for + * fear of race conditions where we clear the BLOCKED flag just as we + * refuse another packet in pppoatm_send(). This was quite inefficient. + * + * In fact it's OK. The PPP core will only ever call pppoatm_send() + * while holding the channel->downl lock. And ppp_output_wakeup() as + * called by the tasklet will *also* grab that lock. So even if another + * CPU is in pppoatm_send() right now, the tasklet isn't going to race + * with it. The wakeup *will* happen after the other CPU is safely out + * of pppoatm_send() again. + * + * So if the CPU in pppoatm_send() has already set the BLOCKED bit and + * it about to return, that's fine. We trigger a wakeup which will + * happen later. And if the CPU in pppoatm_send() *hasn't* set the + * BLOCKED bit yet, that's fine too because of the double check in + * pppoatm_may_send() which is commented there. */ - tasklet_schedule(&pvcc->wakeup_tasklet); + if (test_and_clear_bit(BLOCKED, &pvcc->blocked)) + tasklet_schedule(&pvcc->wakeup_tasklet); } /* @@ -184,6 +211,51 @@ error: ppp_input_error(&pvcc->chan, 0); } +static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size) +{ + /* + * It's not clear that we need to bother with using atm_may_send() + * to check we don't exceed sk->sk_sndbuf. If userspace sets a + * value of sk_sndbuf which is lower than the MTU, we're going to + * block for ever. But the code always did that before we introduced + * the packet count limit, so... + */ + if (atm_may_send(pvcc->atmvcc, size) && + atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT)) + return 1; + + /* + * We use test_and_set_bit() rather than set_bit() here because + * we need to ensure there's a memory barrier after it. 
The bit + * *must* be set before we do the atomic_inc() on pvcc->inflight. + * There's no smp_mb__after_set_bit(), so it's this or abuse + * smp_mb__after_clear_bit(). + */ + test_and_set_bit(BLOCKED, &pvcc->blocked); + + /* + * We may have raced with pppoatm_pop(). If it ran for the + * last packet in the queue, *just* before we set the BLOCKED + * bit, then it might never run again and the channel could + * remain permanently blocked. Cope with that race by checking + * *again*. If it did run in that window, we'll have space on + * the queue now and can return success. It's harmless to leave + * the BLOCKED flag set, since it's only used as a trigger to + * run the wakeup tasklet. Another wakeup will never hurt. + * If pppoatm_pop() is running but hasn't got as far as making + * space on the queue yet, then it hasn't checked the BLOCKED + * flag yet either, so we're safe in that case too. It'll issue + * an "immediate" wakeup... where "immediate" actually involves + * taking the PPP channel's ->downl lock, which is held by the + * code path that calls pppoatm_send(), and is thus going to + * wait for us to finish. + */ + if (atm_may_send(pvcc->atmvcc, size) && + atomic_inc_not_zero(&pvcc->inflight)) + return 1; + + return 0; +} /* * Called by the ppp_generic.c to send a packet - returns true if packet * was accepted. If we return false, then it's our job to call @@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) struct sk_buff *n; n = skb_realloc_headroom(skb, LLC_LEN); if (n != NULL && - !atm_may_send(pvcc->atmvcc, n->truesize)) { + !pppoatm_may_send(pvcc, n->truesize)) { kfree_skb(n); goto nospace; } @@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) skb = n; if (skb == NULL) return DROP_PACKET; - } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) + } else if (!pppoatm_may_send(pvcc, skb->truesize)) goto nospace; memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN); break; case e_vc: - if (!atm_may_send(pvcc->atmvcc, skb->truesize)) + if (!pppoatm_may_send(pvcc, skb->truesize)) goto nospace; break; case e_autodetect: @@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) if (pvcc == NULL) return -ENOMEM; pvcc->atmvcc = atmvcc; + + /* Maximum is zero, so that we can use atomic_inc_not_zero() */ + atomic_set(&pvcc->inflight, NONE_INFLIGHT); pvcc->old_push = atmvcc->push; pvcc->old_pop = atmvcc->pop; pvcc->encaps = (enum pppoatm_encaps) be.encaps; -- cgit v1.2.2 From 6f66cdc3e5d3d5ccbb7ee9265b8211cdc24aa401 Mon Sep 17 00:00:00 2001 From: David Ward Date: Mon, 9 Apr 2012 04:13:53 +0000 Subject: net/garp: fix GID rbtree ordering The comparison operators were backwards in both garp_attr_lookup and garp_attr_create, so the entire GID rbtree was in reverse order. (There was no practical side effect to this though, except that PDUs were sent with attributes listed in reverse order, which is still valid by the protocol. This change is only for clarity.) Signed-off-by: David Ward Signed-off-by: David S. 
Miller --- net/802/garp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/802/garp.c b/net/802/garp.c index a5c224830439..8456f5d98b85 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -157,9 +157,9 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, while (parent) { attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); - if (d < 0) + if (d > 0) parent = parent->rb_left; - else if (d > 0) + else if (d < 0) parent = parent->rb_right; else return attr; @@ -178,9 +178,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app, parent = *p; attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); - if (d < 0) + if (d > 0) p = &parent->rb_left; - else if (d > 0) + else if (d < 0) p = &parent->rb_right; else { /* The attribute already exists; re-use it. */ -- cgit v1.2.2 From efacb309b50073a79ae604949a31509cd8b507ab Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 10 Apr 2012 18:34:43 +0000 Subject: rtnetlink & bonding: change args got get_tx_queues Change get_tx_queues, drop unsused arg/return value real_tx_queues, and use return by value (with error) rather than call by reference. Probably bonding should just change to LLTX and the whole get_tx_queues API could disappear! Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 545a969672ab..4a0d8cfff2a0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1640,14 +1640,14 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net, int err; struct net_device *dev; unsigned int num_queues = 1; - unsigned int real_num_queues = 1; if (ops->get_tx_queues) { - err = ops->get_tx_queues(src_net, tb, &num_queues, - &real_num_queues); - if (err) + err = ops->get_tx_queues(src_net, tb); + if (err < 0) goto err; + num_queues = err; } + err = -ENOMEM; dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); if (!dev) -- cgit v1.2.2 From 447167bf565a474ff0cfb0f41d54936937479e97 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 11 Apr 2012 23:05:28 +0000 Subject: udp: intoduce udp_encap_needed static_key Most machines dont use UDP encapsulation (L2TP) Adds a static_key so that udp_queue_rcv_skb() doesnt have to perform a test if L2TP never setup the encap_rcv on a socket. Idea of this patch came after Simon Horman proposal to add a hook on TCP as well. If static_key is not yet enabled, the fast path does a single JMP . When static_key is enabled, JMP destination is patched to reach the real encap_type/encap_rcv logic, possibly adding cache misses. Signed-off-by: Eric Dumazet Cc: Simon Horman Cc: dev@openvswitch.org Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 12 +++++++++++- net/l2tp/l2tp_core.c | 1 + 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index fe141052a1be..ad1e0dd4da48 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -107,6 +107,7 @@ #include #include #include +#include #include "udp_impl.h" struct udp_table udp_table __read_mostly; @@ -1379,6 +1380,14 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } +static struct static_key udp_encap_needed __read_mostly; +void udp_encap_enable(void) +{ + if (!static_key_enabled(&udp_encap_needed)) + static_key_slow_inc(&udp_encap_needed); +} +EXPORT_SYMBOL(udp_encap_enable); + /* returns: * -1: error * 0: success @@ -1400,7 +1409,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto drop; nf_reset(skb); - if (up->encap_type) { + if (static_key_false(&udp_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* @@ -1760,6 +1769,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, /* FALLTHROUGH */ case UDP_ENCAP_L2TPINUDP: up->encap_type = val; + udp_encap_enable(); break; default: err = -ENOPROTOOPT; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 89ff8c67943e..f6732b6c758b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1424,6 +1424,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; + udp_encap_enable(); } sk->sk_user_data = tunnel; -- cgit v1.2.2 From dcd2ba92e842eec0d0372415fa26f1c411f5530d Mon Sep 17 00:00:00 2001 From: Hiroaki SHIMODA Date: Fri, 13 Apr 2012 07:34:44 +0000 Subject: neighbour: Make neigh_table_init_no_netlink() static. neigh_table_init_no_netlink() is only used in net/core/neighbour.c file. Signed-off-by: Hiroaki SHIMODA Signed-off-by: David S. Miller --- net/core/neighbour.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ac71765d6fd0..2d3e6fcdc9c9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1500,7 +1500,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms) static struct lock_class_key neigh_table_proxy_queue_class; -void neigh_table_init_no_netlink(struct neigh_table *tbl) +static void neigh_table_init_no_netlink(struct neigh_table *tbl) { unsigned long now = jiffies; unsigned long phsize; @@ -1538,7 +1538,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) tbl->last_flush = now; tbl->last_rand = now + tbl->parms.reachable_time * 20; } -EXPORT_SYMBOL(neigh_table_init_no_netlink); void neigh_table_init(struct neigh_table *tbl) { -- cgit v1.2.2 From 3c490a87f6f6ce5b002c42f9c00c9136ddbd7248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?= Date: Thu, 12 Apr 2012 03:39:16 +0000 Subject: Phonet: phonet_net_id can be static (sparse) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rémi Denis-Courmont Signed-off-by: David S. 
Miller --- net/phonet/pn_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index bf5cf69c820a..da564efe4f5b 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -44,7 +44,7 @@ struct phonet_net { struct phonet_routes routes; }; -int phonet_net_id __read_mostly; +static int phonet_net_id __read_mostly; static struct phonet_net *phonet_pernet(struct net *net) { -- cgit v1.2.2 From 8e052e52896e5d665e1380d5524c9c21f52c0bfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?= Date: Thu, 12 Apr 2012 03:39:17 +0000 Subject: Phonet: missing headers (sparse) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rémi Denis-Courmont Signed-off-by: David S. Miller --- net/phonet/sysctl.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c index cea1c7dbdae2..740bf209dd97 100644 --- a/net/phonet/sysctl.c +++ b/net/phonet/sysctl.c @@ -27,6 +27,10 @@ #include #include +#include +#include +#include + #define DYNAMIC_PORT_MIN 0x40 #define DYNAMIC_PORT_MAX 0x7f -- cgit v1.2.2 From 133d40f9a22bdfd2617a446f1e3209537c5415ec Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 28 Mar 2012 16:01:19 +0200 Subject: mac80211: do not scan and monitor connection in parallel Before we send probes in connection monitoring we check if scan is not pending. But we do that check without locking. Fix that and also do not start scan if connection monitoring is in progress. Signed-off-by: Stanislaw Gruszka Signed-off-by: John W. Linville --- net/mac80211/ieee80211_i.h | 1 + net/mac80211/mlme.c | 35 +++++++++++++++++++++++------------ net/mac80211/scan.c | 29 ++++++++++++++++++++++++++++- net/mac80211/work.c | 7 +------ 4 files changed, 53 insertions(+), 19 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4be11ea3dfc4..1fd8cb26b3ff 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1260,6 +1260,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req); void ieee80211_scan_cancel(struct ieee80211_local *local); +void ieee80211_run_deferred_scan(struct ieee80211_local *local); ieee80211_rx_result ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index bbf1100d90d6..c8836fa7d627 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1447,19 +1447,24 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + mutex_lock(&local->mtx); if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL | - IEEE80211_STA_CONNECTION_POLL))) - return; + IEEE80211_STA_CONNECTION_POLL))) { + mutex_unlock(&local->mtx); + return; + } ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | IEEE80211_STA_BEACON_POLL); - mutex_lock(&sdata->local->iflist_mtx); - ieee80211_recalc_ps(sdata->local, -1); - mutex_unlock(&sdata->local->iflist_mtx); + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, -1); + mutex_unlock(&local->iflist_mtx); if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) - return; + goto out; /* * We've received a probe response, but 
are not sure whether @@ -1471,6 +1476,9 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); +out: + ieee80211_run_deferred_scan(local); + mutex_unlock(&local->mtx); } void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, @@ -1546,17 +1554,18 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, if (!ieee80211_sdata_running(sdata)) return; - if (sdata->local->scanning) - return; - - if (sdata->local->tmp_channel) - return; - mutex_lock(&ifmgd->mtx); if (!ifmgd->associated) goto out; + mutex_lock(&sdata->local->mtx); + + if (sdata->local->tmp_channel || sdata->local->scanning) { + mutex_unlock(&sdata->local->mtx); + goto out; + } + #ifdef CONFIG_MAC80211_VERBOSE_DEBUG if (beacon && net_ratelimit()) printk(KERN_DEBUG "%s: detected beacon loss from AP " @@ -1583,6 +1592,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, else ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; + mutex_unlock(&sdata->local->mtx); + if (already) goto out; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index c70e17677135..56175933c28f 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -387,6 +387,33 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) return 0; } +static bool ieee80211_can_scan(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + if (!list_empty(&local->work_list)) + return false; + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL)) + return false; + + return true; +} + +void ieee80211_run_deferred_scan(struct ieee80211_local *local) +{ + lockdep_assert_held(&local->mtx); + + if (!local->scan_req || local->scanning) + return; + + if (!ieee80211_can_scan(local, local->scan_sdata)) + return; + + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, + round_jiffies_relative(0)); +} static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) @@ -399,7 +426,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, if (local->scan_req) return -EBUSY; - if (!list_empty(&local->work_list)) { + if (!ieee80211_can_scan(local, sdata)) { /* wait for the work to finish/time out */ local->scan_req = req; local->scan_sdata = sdata; diff --git a/net/mac80211/work.c b/net/mac80211/work.c index c6e230efa049..1f74af33901b 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -226,13 +226,8 @@ static void ieee80211_work_work(struct work_struct *work) run_again(local, jiffies + HZ/2); } - if (list_empty(&local->work_list) && local->scan_req && - !local->scanning) - ieee80211_queue_delayed_work(&local->hw, - &local->scan_work, - round_jiffies_relative(0)); - ieee80211_recalc_idle(local); + ieee80211_run_deferred_scan(local); mutex_unlock(&local->mtx); -- cgit v1.2.2 From 5f5460706e5feaef286aacce37647f7e57d026e4 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 28 Mar 2012 16:01:20 +0200 Subject: mac80211: protect ->scanning by mutex in ieee80211_work_work() Signed-off-by: Stanislaw Gruszka Signed-off-by: John W. 
Linville --- net/mac80211/work.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 1f74af33901b..b2650a9d45ff 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -122,9 +122,6 @@ static void ieee80211_work_work(struct work_struct *work) enum work_action rma; bool remain_off_channel = false; - if (local->scanning) - return; - /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. @@ -134,6 +131,11 @@ static void ieee80211_work_work(struct work_struct *work) mutex_lock(&local->mtx); + if (local->scanning) { + mutex_unlock(&local->mtx); + return; + } + ieee80211_recalc_idle(local); list_for_each_entry_safe(wk, tmp, &local->work_list, list) { -- cgit v1.2.2 From d91df0e3a1b9a7427785cb8d28be073df9b18b78 Mon Sep 17 00:00:00 2001 From: Pontus Fuchs Date: Tue, 3 Apr 2012 16:39:58 +0200 Subject: cfg80211: Add channel information to NL80211_CMD_GET_INTERFACE If the current channel is known, add frequency and channel type to NL80211_CMD_GET_INTERFACE. Signed-off-by: Pontus Fuchs Signed-off-by: John W. Linville --- net/mac80211/cfg.c | 4 +++- net/wireless/nl80211.c | 13 +++++++++++++ net/wireless/wext-compat.c | 3 ++- 3 files changed, 18 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 355735491252..510a745c3108 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2688,10 +2688,12 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, } static struct ieee80211_channel * -ieee80211_wiphy_get_channel(struct wiphy *wiphy) +ieee80211_wiphy_get_channel(struct wiphy *wiphy, + enum nl80211_channel_type *type) { struct ieee80211_local *local = wiphy_priv(wiphy); + *type = local->_oper_channel_type; return local->oper_channel; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5e261234d271..bcf6f70e518d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1501,6 +1501,19 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, rdev->devlist_generation ^ (cfg80211_rdev_list_generation << 2)); + if (rdev->ops->get_channel) { + struct ieee80211_channel *chan; + enum nl80211_channel_type channel_type; + + chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type); + if (chan) { + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, + chan->center_freq); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, + channel_type); + } + } + return genlmsg_end(msg, hdr); nla_put_failure: diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3c24eb97e9d7..6a6181a673ca 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -821,6 +821,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_channel *chan; + enum nl80211_channel_type channel_type; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: @@ -831,7 +832,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, if (!rdev->ops->get_channel) return -EINVAL; - chan = rdev->ops->get_channel(wdev->wiphy); + chan = rdev->ops->get_channel(wdev->wiphy, &channel_type); if (!chan) return -EINVAL; freq->m = chan->center_freq; -- cgit v1.2.2 From d01b31604c55c52e08fbc6fc160137a12983df64 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 6 Apr 2012 20:48:15 +0200 Subject: mac80211: fix an issue in ieee80211_tx_info 
count field management I noticed a possible issue in the status count field management of the ieee80211_tx_info data structure. In particular, when the AGGR processing is employed, status.rates[].count is set just for the first frame and not for others belonging to the same burst, leading to wrong statistic data in the mac80211 debug file system. Signed-off-by: Lorenzo Bianconi Signed-off-by: John W. Linville --- net/mac80211/status.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5f8f89e89d6b..05f257aa2e08 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -355,7 +355,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) int rtap_len; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (info->status.rates[i].idx < 0) { + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) { + /* just the first aggr frame carry status info */ + info->status.rates[i].idx = -1; + info->status.rates[i].count = 0; + break; + } else if (info->status.rates[i].idx < 0) { break; } else if (i >= hw->max_report_rates) { /* the HW cannot have attempted that rate */ -- cgit v1.2.2 From 4ee73f338a528f44fd90496adfbfd9c119401850 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 11 Apr 2012 08:47:56 +0200 Subject: mac80211: remove hw.conf.channel usage where possible Removes hw.conf.channel usage from the following functions: * ieee80211_mandatory_rates * ieee80211_sta_get_rates * ieee80211_frame_duration * ieee80211_rts_duration * ieee80211_ctstoself_duration This is in preparation for multi-channel operation. Signed-off-by: Michal Kazior Signed-off-by: John W. Linville --- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/rc80211_minstrel.c | 13 +++++++------ net/mac80211/rc80211_minstrel_ht.c | 5 ++--- net/mac80211/tx.c | 4 ++-- net/mac80211/util.c | 34 +++++++++++++++------------------- 5 files changed, 27 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1fd8cb26b3ff..8cc4bc101409 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1404,7 +1404,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw) extern void *mac80211_wiphy_privid; /* for wiphy privid */ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, enum nl80211_iftype type); -int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, +int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble); void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, struct ieee80211_hdr *hdr, const u8 *tsc, diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index b39dda523f39..79633ae06fd6 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -334,14 +334,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, static void -calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d, +calc_rate_durations(enum ieee80211_band band, + struct minstrel_rate *d, struct ieee80211_rate *rate) { int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); - d->perfect_tx_time = ieee80211_frame_duration(local, 1200, + d->perfect_tx_time = ieee80211_frame_duration(band, 1200, rate->bitrate, erp, 1); - d->ack_time = ieee80211_frame_duration(local, 10, + d->ack_time = ieee80211_frame_duration(band, 10, rate->bitrate, erp, 1); } @@ -379,14 
+380,14 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, { struct minstrel_sta_info *mi = priv_sta; struct minstrel_priv *mp = priv; - struct ieee80211_local *local = hw_to_local(mp->hw); struct ieee80211_rate *ctl_rate; unsigned int i, n = 0; unsigned int t_slot = 9; /* FIXME: get real slot time */ mi->lowest_rix = rate_lowest_index(sband, sta); ctl_rate = &sband->bitrates[mi->lowest_rix]; - mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate, + mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10, + ctl_rate->bitrate, !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1); for (i = 0; i < sband->n_bitrates; i++) { @@ -402,7 +403,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, mr->rix = i; mr->bitrate = sband->bitrates[i].bitrate / 5; - calc_rate_durations(local, mr, &sband->bitrates[i]); + calc_rate_durations(sband->band, mr, &sband->bitrates[i]); /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 3b3dcae13bbc..2d1acc6c5445 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -692,7 +692,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct minstrel_ht_sta_priv *msp = priv_sta; struct minstrel_ht_sta *mi = &msp->ht; struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; - struct ieee80211_local *local = hw_to_local(mp->hw); u16 sta_cap = sta->ht_cap.cap; int n_supported = 0; int ack_dur; @@ -711,8 +710,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, memset(mi, 0, sizeof(*mi)); mi->stats_update = jiffies; - ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); - mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; + ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1); + mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur; mi->overhead_rtscts = mi->overhead + 2 * ack_dur; mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4f6aac16ac3a..0abbef952c14 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -159,7 +159,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, /* Time needed to transmit ACK * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up * to closest integer */ - dur = ieee80211_frame_duration(local, 10, rate, erp, + dur = ieee80211_frame_duration(sband->band, 10, rate, erp, tx->sdata->vif.bss_conf.use_short_preamble); if (next_frag_len) { @@ -167,7 +167,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, * transmit next fragment plus ACK and 2 x SIFS. */ dur *= 2; /* ACK + SIFS */ /* next fragment */ - dur += ieee80211_frame_duration(local, next_frag_len, + dur += ieee80211_frame_duration(sband->band, next_frag_len, txrate->bitrate, erp, tx->sdata->vif.bss_conf.use_short_preamble); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index e67fe5c1def9..9d255a2e37ee 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -106,7 +106,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) } } -int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, +int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble) { int dur; @@ -120,7 +120,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, * DIV_ROUND_UP() operations. 
*/ - if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) { + if (band == IEEE80211_BAND_5GHZ || erp) { /* * OFDM: * @@ -162,10 +162,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, /* Exported duration function for driver use */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_band band, size_t frame_len, struct ieee80211_rate *rate) { - struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; u16 dur; int erp; @@ -179,7 +179,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, erp = rate->flags & IEEE80211_RATE_ERP_G; } - dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, + dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp, short_preamble); return cpu_to_le16(dur); @@ -198,7 +198,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, u16 dur; struct ieee80211_supported_band *sband; - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; @@ -213,13 +213,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, } /* CTS duration */ - dur = ieee80211_frame_duration(local, 10, rate->bitrate, + dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); /* Data frame duration */ - dur += ieee80211_frame_duration(local, frame_len, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, erp, short_preamble); /* ACK duration */ - dur += ieee80211_frame_duration(local, 10, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); return cpu_to_le16(dur); @@ -239,7 +239,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, u16 dur; struct ieee80211_supported_band *sband; - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + sband = local->hw.wiphy->bands[frame_txctl->band]; short_preamble = false; @@ -253,11 +253,11 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, } /* Data frame duration */ - dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, + dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate, erp, short_preamble); if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { /* ACK duration */ - dur += ieee80211_frame_duration(local, 10, rate->bitrate, + dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate, erp, short_preamble); } @@ -909,10 +909,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local, int i; sband = local->hw.wiphy->bands[band]; - if (!sband) { - WARN_ON(1); - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - } + if (WARN_ON(!sband)) + return 1; if (band == IEEE80211_BAND_2GHZ) mandatory_flag = IEEE80211_RATE_MANDATORY_B; @@ -1146,10 +1144,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, int i, j; sband = local->hw.wiphy->bands[band]; - if (!sband) { - WARN_ON(1); - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - } + if (WARN_ON(!sband)) + return 1; bitrates = sband->bitrates; num_rates = sband->n_bitrates; -- cgit v1.2.2 From 0446b49c3350ad1fdbc5a3f8f8223fc8af7d853b Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 11 Apr 2012 15:16:12 +0530 Subject: mac80211: remove ieee80211_rx_bss_get its not used where, while we directly obtain ieee80211_bss's pointer in ibss.c by calling cfg80211_get_bss Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: John W. 
Linville --- net/mac80211/ieee80211_i.h | 3 --- net/mac80211/scan.c | 14 -------------- 2 files changed, 17 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8cc4bc101409..bd7a451b0849 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1273,9 +1273,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local, struct ieee802_11_elems *elems, struct ieee80211_channel *channel, bool beacon); -struct ieee80211_bss * -ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, - u8 *ssid, u8 ssid_len); void ieee80211_rx_bss_put(struct ieee80211_local *local, struct ieee80211_bss *bss); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 56175933c28f..45f5aa229efd 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -29,20 +29,6 @@ #define IEEE80211_CHANNEL_TIME (HZ / 33) #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) -struct ieee80211_bss * -ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, - u8 *ssid, u8 ssid_len) -{ - struct cfg80211_bss *cbss; - - cbss = cfg80211_get_bss(local->hw.wiphy, - ieee80211_get_channel(local->hw.wiphy, freq), - bssid, ssid, ssid_len, 0, 0); - if (!cbss) - return NULL; - return (void *)cbss->priv; -} - static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) { struct ieee80211_bss *bss = (void *)cbss->priv; -- cgit v1.2.2 From 87b6d218f3adb00e6b58c7f96f8b5a74ff91abb4 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Thu, 12 Apr 2012 06:31:16 +0000 Subject: tunnel: implement 64 bits statistics Convert the per-cpu statistics kept for GRE, IPIP, and SIT tunnels to use 64 bit statistics. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 59 +++++++++++++++++++++++++++++++++++++------------------ net/ipv4/ipip.c | 53 ++++++++++++++++++++++++++++++++----------------- net/ipv6/sit.c | 52 +++++++++++++++++++++++++++++++----------------- 3 files changed, 109 insertions(+), 55 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 02d07c6f630f..b9abf265c2f8 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -169,30 +169,49 @@ struct ipgre_net { /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; -static struct net_device_stats *ipgre_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { - struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; } - dev->stats.rx_packets = sum.rx_packets; - 
dev->stats.rx_bytes = sum.rx_bytes; - dev->stats.tx_packets = sum.tx_packets; - dev->stats.tx_bytes = sum.tx_bytes; - return &dev->stats; + + tot->multicast = dev->stats.multicast; + tot->rx_crc_errors = dev->stats.rx_crc_errors; + tot->rx_fifo_errors = dev->stats.rx_fifo_errors; + tot->rx_length_errors = dev->stats.rx_length_errors; + tot->rx_errors = dev->stats.rx_errors; + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = dev->stats.tx_errors; + + return tot; } /* Given src, dst and key, find appropriate for input tunnel. */ @@ -672,8 +691,10 @@ static int ipgre_rcv(struct sk_buff *skb) } tstats = this_cpu_ptr(tunnel->dev->tstats); + u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); __skb_tunnel_rx(skb, tunnel->dev); @@ -1253,7 +1274,7 @@ static const struct net_device_ops ipgre_netdev_ops = { .ndo_start_xmit = ipgre_tunnel_xmit, .ndo_do_ioctl = ipgre_tunnel_ioctl, .ndo_change_mtu = ipgre_tunnel_change_mtu, - .ndo_get_stats = ipgre_get_stats, + .ndo_get_stats64 = ipgre_get_stats64, }; static void ipgre_dev_free(struct net_device *dev) @@ -1507,7 +1528,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ipgre_tunnel_change_mtu, - .ndo_get_stats = ipgre_get_stats, + .ndo_get_stats64 = ipgre_get_stats64, }; static void ipgre_tap_setup(struct net_device *dev) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index ae1413e3f2f8..b5a1849b83d5 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -144,30 +144,45 @@ static void ipip_dev_free(struct net_device *dev); /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; -static struct net_device_stats *ipip_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { - struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; } - dev->stats.rx_packets = sum.rx_packets; - dev->stats.rx_bytes = sum.rx_bytes; - dev->stats.tx_packets = sum.tx_packets; - dev->stats.tx_bytes = sum.tx_bytes; - return &dev->stats; + + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = 
dev->stats.tx_errors; + tot->collisions = dev->stats.collisions; + + return tot; } static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, @@ -404,8 +419,10 @@ static int ipip_rcv(struct sk_buff *skb) skb->pkt_type = PACKET_HOST; tstats = this_cpu_ptr(tunnel->dev->tstats); + u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; + u64_stats_update_end(&tstats->syncp); __skb_tunnel_rx(skb, tunnel->dev); @@ -730,7 +747,7 @@ static const struct net_device_ops ipip_netdev_ops = { .ndo_start_xmit = ipip_tunnel_xmit, .ndo_do_ioctl = ipip_tunnel_ioctl, .ndo_change_mtu = ipip_tunnel_change_mtu, - .ndo_get_stats = ipip_get_stats, + .ndo_get_stats64 = ipip_get_stats64, }; static void ipip_dev_free(struct net_device *dev) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index f9608db9dcfb..e5fef943e30a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -87,31 +87,47 @@ struct sit_net { /* often modified stats are per cpu, other are shared (netdev->stats) */ struct pcpu_tstats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; -} __attribute__((aligned(4*sizeof(unsigned long)))); + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; -static struct net_device_stats *ipip6_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) { - struct pcpu_tstats sum = { 0 }; int i; for_each_possible_cpu(i) { const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); - - sum.rx_packets += tstats->rx_packets; - sum.rx_bytes += tstats->rx_bytes; - sum.tx_packets += tstats->tx_packets; - sum.tx_bytes += tstats->tx_bytes; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; } - dev->stats.rx_packets = sum.rx_packets; - dev->stats.rx_bytes = sum.rx_bytes; - dev->stats.tx_packets = sum.tx_packets; - dev->stats.tx_bytes = sum.tx_bytes; - return &dev->stats; + + tot->rx_errors = dev->stats.rx_errors; + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = dev->stats.tx_errors; + + return tot; } + /* * Must be invoked with rcu_read_lock */ @@ -1126,7 +1142,7 @@ static const struct net_device_ops ipip6_netdev_ops = { .ndo_start_xmit = ipip6_tunnel_xmit, .ndo_do_ioctl = ipip6_tunnel_ioctl, .ndo_change_mtu = ipip6_tunnel_change_mtu, - .ndo_get_stats = ipip6_get_stats, + .ndo_get_stats64= ipip6_get_stats64, }; static void ipip6_dev_free(struct net_device *dev) -- cgit v1.2.2 From fd4f2cead6983735a4e6283126b9276873d7ff09 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 12 Apr 2012 19:48:40 +0000 Subject: tcp: RFC6298 supersedes RFC2988bis Updates some comments to track RFC6298 Signed-off-by: Eric Dumazet Cc: H.K. Jerry Chu Cc: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv4/inet_connection_sock.c | 2 +- net/ipv4/tcp_input.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 19d66cefd7d3..c12396f2785f 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -514,7 +514,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, /* Normally all the openreqs are young and become mature * (i.e. converted to established socket) for first timeout. - * If synack was not acknowledged for 3 seconds, it means + * If synack was not acknowledged for 1 second, it means * one of the following things: synack was lost, ack was lost, * rtt is high or nobody planned to ack (i.e. synflood). * When server is a bit loaded, queue is populated with old diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9944c1d9a218..dc1e0be30b77 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -936,7 +936,7 @@ static void tcp_init_metrics(struct sock *sk) tcp_set_rto(sk); reset: if (tp->srtt == 0) { - /* RFC2988bis: We've failed to get a valid RTT sample from + /* RFC6298: 5.7 We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs * from the more aggressive 1sec to avoid more spurious @@ -946,7 +946,7 @@ reset: inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; } /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been - * retransmitted. In light of RFC2988bis' more aggressive 1sec + * retransmitted. In light of RFC6298 more aggressive 1sec * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK * retransmission has occurred. */ -- cgit v1.2.2 From c72e118334a2590f4f07d9e51490b902c33f5280 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 12 Apr 2012 22:16:05 +0000 Subject: inet: makes syn_ack_timeout mandatory There are two struct request_sock_ops providers, tcp and dccp. inet_csk_reqsk_queue_prune() can avoid testing syn_ack_timeout being NULL if we make it non NULL, like rtx_syn_ack. Signed-off-by: Eric Dumazet Cc: Gerrit Renker Cc: dccp@vger.kernel.org Signed-off-by: David S.
Miller --- net/dccp/ipv4.c | 6 ++++++ net/dccp/ipv6.c | 1 + net/ipv4/inet_connection_sock.c | 3 +-- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index caf6e1734b62..07f5579ca756 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -574,6 +574,11 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req) kfree(inet_rsk(req)->opt); } +void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req) +{ +} +EXPORT_SYMBOL(dccp_syn_ack_timeout); + static struct request_sock_ops dccp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct dccp_request_sock), @@ -581,6 +586,7 @@ static struct request_sock_ops dccp_request_sock_ops __read_mostly = { .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v4_reqsk_destructor, .send_reset = dccp_v4_ctl_send_reset, + .syn_ack_timeout = dccp_syn_ack_timeout, }; int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 4dc588f520e0..e923ac95bb04 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -343,6 +343,7 @@ static struct request_sock_ops dccp6_request_sock_ops = { .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v6_reqsk_destructor, .send_reset = dccp_v6_ctl_send_reset, + .syn_ack_timeout = dccp_syn_ack_timeout, }; static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c12396f2785f..d19f32aca6ca 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -555,8 +555,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept, &expire, &resend); - if (req->rsk_ops->syn_ack_timeout) - req->rsk_ops->syn_ack_timeout(parent, req); + req->rsk_ops->syn_ack_timeout(parent, req); if (!expire && (!resend || !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || -- cgit v1.2.2 From aacd9289af8b82f5fb01bcdd53d0e3406d1333c7 Mon Sep 17 00:00:00 2001 From: Alex Copot Date: Thu, 12 Apr 2012 22:21:45 +0000 Subject: tcp: bind() use stronger condition for bind_conflict We must try harder to get unique (addr, port) pairs when doing port autoselection for sockets with SO_REUSEADDR option set. We achieve this by adding a relaxation parameter to inet_csk_bind_conflict. When the 'relax' parameter is off, we return a conflict whenever the currently searched (addr, port) pair is not unique. This tries to address the problems reported in patch: 8d238b25b1ec22a73b1c2206f111df2faaff8285 Revert "tcp: bind() fix when many ports are bound" Tests were run for creating and binding(0) many sockets on 100 IPs. The results are, on average: * 60000 sockets, 600 ports / IP: * 0.210 s, 620 (IP, port) duplicates without patch * 0.219 s, no duplicates with patch * 100000 sockets, 1000 ports / IP: * 0.371 s, 1720 duplicates without patch * 0.373 s, no duplicates with patch * 200000 sockets, 2000 ports / IP: * 0.766 s, 6900 duplicates without patch * 0.768 s, no duplicates with patch * 500000 sockets, 5000 ports / IP: * 2.227 s, 41500 duplicates without patch * 2.284 s, no duplicates with patch Signed-off-by: Alex Copot Signed-off-by: Daniel Baluta Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/ipv4/inet_connection_sock.c | 18 ++++++++++++++---- net/ipv6/inet6_connection_sock.c | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index d19f32aca6ca..14409f111bc2 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -53,7 +53,7 @@ void inet_get_local_port_range(int *low, int *high) EXPORT_SYMBOL(inet_get_local_port_range); int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb) + const struct inet_bind_bucket *tb, bool relax) { struct sock *sk2; struct hlist_node *node; @@ -79,6 +79,14 @@ int inet_csk_bind_conflict(const struct sock *sk, sk2_rcv_saddr == sk_rcv_saddr(sk)) break; } + if (!relax && reuse && sk2->sk_reuse && + sk2->sk_state != TCP_LISTEN) { + const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); + + if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || + sk2_rcv_saddr == sk_rcv_saddr(sk)) + break; + } } } return node != NULL; @@ -122,12 +130,13 @@ again: (tb->num_owners < smallest_size || smallest_size == -1)) { smallest_size = tb->num_owners; smallest_rover = rover; - if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && + !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { snum = smallest_rover; goto tb_found; } } - if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { snum = rover; goto tb_found; } @@ -178,12 +187,13 @@ tb_found: goto success; } else { ret = 1; - if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size != -1 && --attempts >= 0) { spin_unlock(&head->lock); goto again; } + goto fail_unlock; } } diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 02dd203d9eac..e6cee5292a0b 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -28,7 +28,7 @@ #include int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb) + const struct inet_bind_bucket *tb, bool relax) { const struct sock *sk2; const struct hlist_node *node; -- cgit v1.2.2 From a8cb05b238e0edfd7c63f496d9ce3e238ea68a16 Mon Sep 17 00:00:00 2001 From: Vijay Subramanian Date: Fri, 13 Apr 2012 13:23:59 +0000 Subject: tcp: Remove redundant code entering quickack mode tcp_enter_quickack_mode() already calls tcp_incr_quickack() and sets icsk->icsk_ack.ato to TCP_ATO_MIN. This patch removes the duplication. Signed-off-by: Vijay Subramanian Reviewed-by: Flavio Leitner Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index dc1e0be30b77..3dc94febc9ee 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5734,8 +5734,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; - icsk->icsk_ack.ato = TCP_ATO_MIN; - tcp_incr_quickack(sk); tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); -- cgit v1.2.2 From cf22f9a2b83e72082c43ef5c942d5daf9301da8e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 14 Apr 2012 21:37:40 -0400 Subject: ipv6: Remove unused argument to addrconf_dad_start(). 
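A minimal sketch of the interface change, shown here only as an illustration (the prototypes and the flag constants are taken from the diff that follows; this is not part of the original patch):

    /* before: every caller passed a flags value (0 or RTF_ADDRCONF|RTF_PREFIX_RT) that the function never read */
    static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
    /* after: the unused argument is dropped, so every call site simply becomes addrconf_dad_start(ifp) */
    static void addrconf_dad_start(struct inet6_ifaddr *ifp);
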
Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index fcd230a6235a..5c031c07776b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -149,7 +149,7 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event); static int addrconf_ifdown(struct net_device *dev, int how); -static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); +static void addrconf_dad_start(struct inet6_ifaddr *ifp); static void addrconf_dad_timer(unsigned long data); static void addrconf_dad_completed(struct inet6_ifaddr *ifp); static void addrconf_dad_run(struct inet6_dev *idev); @@ -929,7 +929,7 @@ retry: ift->tstamp = tmp_tstamp; spin_unlock_bh(&ift->lock); - addrconf_dad_start(ift, 0); + addrconf_dad_start(ift); in6_ifa_put(ift); in6_dev_put(idev); out: @@ -1959,7 +1959,7 @@ ok: update_lft = create = 1; ifp->cstamp = jiffies; - addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT); + addrconf_dad_start(ifp); } if (ifp) { @@ -2238,7 +2238,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p * that the Optimistic flag should not be set for * manually configured addresses */ - addrconf_dad_start(ifp, 0); + addrconf_dad_start(ifp); in6_ifa_put(ifp); addrconf_verify(0); return 0; @@ -2425,7 +2425,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags); if (!IS_ERR(ifp)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); - addrconf_dad_start(ifp, 0); + addrconf_dad_start(ifp); in6_ifa_put(ifp); } } @@ -2920,7 +2920,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) addrconf_mod_timer(ifp, AC_DAD, rand_num); } -static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) +static void addrconf_dad_start(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; struct net_device *dev = idev->dev; -- cgit v1.2.2 From 5e73ea1a31c3612aa6dfe44f864ca5b7b6a4cff9 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Sun, 15 Apr 2012 01:34:41 +0000 Subject: ipv4: fix checkpatch errors Fix checkpatch errors of the following type: * ERROR: "foo * bar" should be "foo *bar" * ERROR: "(foo*)" should be "(foo *)" Signed-off-by: Daniel Baluta Signed-off-by: David S. 
Miller --- net/ipv4/af_inet.c | 2 +- net/ipv4/ah4.c | 2 +- net/ipv4/igmp.c | 16 ++++++++-------- net/ipv4/inet_hashtables.c | 2 +- net/ipv4/ip_forward.c | 4 ++-- net/ipv4/ip_gre.c | 20 ++++++++++---------- net/ipv4/ip_options.c | 24 ++++++++++++------------ net/ipv4/ip_sockglue.c | 2 +- net/ipv4/ipip.c | 4 ++-- net/ipv4/ping.c | 2 +- net/ipv4/raw.c | 2 +- net/ipv4/route.c | 10 +++++----- net/ipv4/tcp_probe.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv4/udp_impl.h | 2 +- net/ipv4/xfrm4_policy.c | 2 +- 16 files changed, 49 insertions(+), 49 deletions(-) (limited to 'net') diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 10e3751466b5..3744c1c0af5a 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -541,7 +541,7 @@ out: } EXPORT_SYMBOL(inet_bind); -int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, +int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index fd508b526014..3a280756dd73 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -77,7 +77,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr) { - unsigned char * optptr = (unsigned char*)(iph+1); + unsigned char *optptr = (unsigned char *)(iph+1); int l = iph->ihl*4 - sizeof(struct iphdr); int optlen; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ceaac24ecdca..6699f23e6f55 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -344,10 +344,10 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ ip_select_ident(pip, &rt->dst, NULL); - ((u8*)&pip[1])[0] = IPOPT_RA; - ((u8*)&pip[1])[1] = 4; - ((u8*)&pip[1])[2] = 0; - ((u8*)&pip[1])[3] = 0; + ((u8 *)&pip[1])[0] = IPOPT_RA; + ((u8 *)&pip[1])[1] = 4; + ((u8 *)&pip[1])[2] = 0; + ((u8 *)&pip[1])[3] = 0; skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; skb_put(skb, sizeof(*pig)); @@ -688,10 +688,10 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, iph->saddr = fl4.saddr; iph->protocol = IPPROTO_IGMP; ip_select_ident(iph, &rt->dst, NULL); - ((u8*)&iph[1])[0] = IPOPT_RA; - ((u8*)&iph[1])[1] = 4; - ((u8*)&iph[1])[2] = 0; - ((u8*)&iph[1])[3] = 0; + ((u8 *)&iph[1])[0] = IPOPT_RA; + ((u8 *)&iph[1])[1] = 4; + ((u8 *)&iph[1])[2] = 0; + ((u8 *)&iph[1])[3] = 0; ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); ih->type = type; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 984ec656b03b..7880af970208 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -217,7 +217,7 @@ begin: } EXPORT_SYMBOL_GPL(__inet_lookup_listener); -struct sock * __inet_lookup_established(struct net *net, +struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 hnum, diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 29a07b6c7168..e5c44fc586ab 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -41,7 +41,7 @@ static int ip_forward_finish(struct sk_buff *skb) { - struct ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); @@ -55,7 +55,7 @@ int ip_forward(struct sk_buff *skb) { struct iphdr *iph; /* Our header */ struct rtable *rt; /* Route we use */ - struct 
ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); if (skb_warn_if_lro(skb)) goto drop; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index b9abf265c2f8..f49047b79609 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -216,9 +216,9 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, /* Given src, dst and key, find appropriate for input tunnel. */ -static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, - __be32 remote, __be32 local, - __be32 key, __be16 gre_proto) +static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, + __be32 remote, __be32 local, + __be32 key, __be16 gre_proto) { struct net *net = dev_net(dev); int link = dev->ifindex; @@ -483,7 +483,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) */ const struct iphdr *iph = (const struct iphdr *)skb->data; - __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); + __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2)); int grehlen = (iph->ihl<<2) + 4; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; @@ -593,7 +593,7 @@ static int ipgre_rcv(struct sk_buff *skb) iph = ip_hdr(skb); h = skb->data; - flags = *(__be16*)h; + flags = *(__be16 *)h; if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { /* - Version must be 0. @@ -617,11 +617,11 @@ static int ipgre_rcv(struct sk_buff *skb) offset += 4; } if (flags&GRE_KEY) { - key = *(__be32*)(h + offset); + key = *(__be32 *)(h + offset); offset += 4; } if (flags&GRE_SEQ) { - seqno = ntohl(*(__be32*)(h + offset)); + seqno = ntohl(*(__be32 *)(h + offset)); offset += 4; } } @@ -921,7 +921,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev htons(ETH_P_TEB) : skb->protocol; if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { - __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4); + __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4); if (tunnel->parms.o_flags&GRE_SEQ) { ++tunnel->o_seqno; @@ -934,7 +934,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev } if (tunnel->parms.o_flags&GRE_CSUM) { *ptr = 0; - *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr)); + *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr)); } } @@ -1190,7 +1190,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, { struct ip_tunnel *t = netdev_priv(dev); struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); - __be16 *p = (__be16*)(iph+1); + __be16 *p = (__be16 *)(iph+1); memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); p[0] = t->parms.o_flags; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index a0d0d9d9b870..1372c4586edc 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -210,10 +210,10 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) * Simple and stupid 8), but the most efficient way. 
*/ -void ip_options_fragment(struct sk_buff * skb) +void ip_options_fragment(struct sk_buff *skb) { unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); - struct ip_options * opt = &(IPCB(skb)->opt); + struct ip_options *opt = &(IPCB(skb)->opt); int l = opt->optlen; int optlen; @@ -248,13 +248,13 @@ void ip_options_fragment(struct sk_buff * skb) */ int ip_options_compile(struct net *net, - struct ip_options * opt, struct sk_buff * skb) + struct ip_options *opt, struct sk_buff *skb) { int l; - unsigned char * iph; - unsigned char * optptr; + unsigned char *iph; + unsigned char *optptr; int optlen; - unsigned char * pp_ptr = NULL; + unsigned char *pp_ptr = NULL; struct rtable *rt = NULL; if (skb != NULL) { @@ -473,20 +473,20 @@ EXPORT_SYMBOL(ip_options_compile); * Undo all the changes done by ip_options_compile(). */ -void ip_options_undo(struct ip_options * opt) +void ip_options_undo(struct ip_options *opt) { if (opt->srr) { - unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr); + unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr); memmove(optptr+7, optptr+3, optptr[1]-7); memcpy(optptr+3, &opt->faddr, 4); } if (opt->rr_needaddr) { - unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr); + unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr); optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); } if (opt->ts) { - unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr); + unsigned char *optptr = opt->__data+opt->ts-sizeof(struct iphdr); if (opt->ts_needtime) { optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); @@ -549,8 +549,8 @@ int ip_options_get(struct net *net, struct ip_options_rcu **optp, void ip_forward_options(struct sk_buff *skb) { - struct ip_options * opt = &(IPCB(skb)->opt); - unsigned char * optptr; + struct ip_options *opt = &(IPCB(skb)->opt); + unsigned char *optptr; struct rtable *rt = skb_rtable(skb); unsigned char *raw = skb_network_header(skb); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 2fd0fba77124..0a87e1fc0ce5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -90,7 +90,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) { unsigned char optbuf[sizeof(struct ip_options) + 40]; - struct ip_options * opt = (struct ip_options *)optbuf; + struct ip_options *opt = (struct ip_options *)optbuf; if (IPCB(skb)->opt.optlen == 0) return; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index b5a1849b83d5..2d0f99bf61b3 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -185,7 +185,7 @@ static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev, return tot; } -static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, +static struct ip_tunnel *ipip_tunnel_lookup(struct net *net, __be32 remote, __be32 local) { unsigned int h0 = HASH(remote); @@ -260,7 +260,7 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) rcu_assign_pointer(*tp, t); } -static struct ip_tunnel * ipip_tunnel_locate(struct net *net, +static struct ip_tunnel *ipip_tunnel_locate(struct net *net, struct ip_tunnel_parm *parms, int create) { __be32 remote = parms->iph.daddr; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 50009c787bcd..9f380ace22ee 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -410,7 +410,7 @@ struct pingfakehdr { __wsum wcheck; }; -static int ping_getfrag(void *from, char * to, +static int ping_getfrag(void *from, char *to, int 
offset, int fraglen, int odd, struct sk_buff *skb) { struct pingfakehdr *pfh = (struct pingfakehdr *)from; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bbd604c68e68..4032b818f3e4 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -288,7 +288,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) read_unlock(&raw_v4_hashinfo.lock); } -static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) +static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) { /* Charge it to the socket. */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e4d18f2a305d..a13ce2364ed2 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2215,7 +2215,7 @@ static int ip_mkroute_input(struct sk_buff *skb, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { - struct rtable* rth = NULL; + struct rtable *rth = NULL; int err; unsigned hash; @@ -2257,11 +2257,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct flowi4 fl4; unsigned flags = 0; u32 itag = 0; - struct rtable * rth; + struct rtable *rth; unsigned hash; __be32 spec_dst; int err = -EINVAL; - struct net * net = dev_net(dev); + struct net *net = dev_net(dev); /* IP on this device is disabled. */ @@ -2433,7 +2433,7 @@ martian_source_keep_err: int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, bool noref) { - struct rtable * rth; + struct rtable *rth; unsigned hash; int iif = dev->ifindex; struct net *net; @@ -3068,7 +3068,7 @@ nla_put_failure: return -EMSGSIZE; } -static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(in_skb->sk); struct rtmsg *rtm; diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index a981cdc0a6e9..a8df7052e0b6 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -138,7 +138,7 @@ static struct jprobe tcp_jprobe = { .entry = jtcp_rcv_established, }; -static int tcpprobe_open(struct inode * inode, struct file * file) +static int tcpprobe_open(struct inode *inode, struct file *file) { /* Reset (empty) log */ spin_lock_bh(&tcp_probe.lock); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ad1e0dd4da48..381ea5115142 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -847,7 +847,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * Get and verify the address. 
*/ if (msg->msg_name) { - struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name; + struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) { diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index aaad650d47d9..5a681e298b90 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -25,7 +25,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); extern int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); -extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); +extern int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); extern void udp_destroy_sock(struct sock *sk); #ifdef CONFIG_PROC_FS diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index a0b4c5da8d43..8ef24e16afce 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -152,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_AH: if (pskb_may_pull(skb, xprth + 8 - skb->data)) { - __be32 *ah_hdr = (__be32*)xprth; + __be32 *ah_hdr = (__be32 *)xprth; fl4->fl4_ipsec_spi = ah_hdr[1]; } -- cgit v1.2.2 From 95c961747284a6b83a5e2d81240e214b0fa3464d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 15 Apr 2012 05:58:06 +0000 Subject: net: cleanup unsigned to unsigned int Use of "unsigned int" is preferred to bare "unsigned" in net tree. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/802/fc.c | 2 +- net/802/fddi.c | 2 +- net/802/hippi.c | 2 +- net/802/tr.c | 4 ++-- net/9p/client.c | 6 +++--- net/9p/trans_fd.c | 2 +- net/atm/br2684.c | 2 +- net/atm/mpoa_proc.c | 2 +- net/atm/signaling.c | 2 +- net/ax25/ax25_ip.c | 4 ++-- net/batman-adv/bat_sysfs.c | 6 +++--- net/bridge/br_forward.c | 2 +- net/bridge/br_multicast.c | 10 +++++----- net/bridge/br_netlink.c | 2 +- net/bridge/br_private_stp.h | 7 +++---- net/bridge/br_stp.c | 4 ++-- net/bridge/br_stp_timer.c | 6 +++--- net/bridge/br_sysfs_br.c | 2 +- net/caif/caif_socket.c | 2 +- net/ceph/auth_x.h | 6 +++--- net/ceph/ceph_common.c | 4 ++-- net/ceph/ceph_hash.c | 6 +++--- net/ceph/crush/mapper.c | 8 ++++---- net/ceph/debugfs.c | 6 +++--- net/ceph/messenger.c | 16 ++++++++-------- net/ceph/mon_client.c | 10 +++++----- net/ceph/osd_client.c | 2 +- net/ceph/osdmap.c | 14 +++++++------- net/compat.c | 10 +++++----- net/core/datagram.c | 6 +++--- net/core/dev.c | 7 ++++--- net/core/filter.c | 2 +- net/core/neighbour.c | 2 +- net/core/net-sysfs.c | 8 ++++---- net/core/pktgen.c | 20 ++++++++++---------- net/core/rtnetlink.c | 6 +++--- net/core/skbuff.c | 2 +- net/core/sock.c | 2 +- net/dccp/ccids/ccid3.c | 12 ++++++------ net/dccp/dccp.h | 8 ++++---- net/dccp/input.c | 10 +++++----- net/decnet/af_decnet.c | 6 +++--- net/decnet/dn_fib.c | 5 +++-- net/decnet/dn_nsp_in.c | 2 +- net/decnet/dn_nsp_out.c | 4 ++-- net/decnet/dn_route.c | 18 +++++++++--------- net/decnet/dn_rules.c | 4 ++-- net/dns_resolver/dns_key.c | 2 +- net/dns_resolver/internal.h | 2 +- net/ethernet/eth.c | 2 +- net/ieee802154/6lowpan.c | 2 +- net/ieee802154/dgram.c | 6 +++--- net/ieee802154/raw.c | 2 +- net/ipv4/arp.c | 4 ++-- net/ipv4/devinet.c | 2 +- net/ipv4/fib_frontend.c | 10 +++++----- net/ipv4/inet_connection_sock.c | 3 ++- net/ipv4/inet_timewait_sock.c | 2 +- net/ipv4/ip_options.c | 2 +- net/ipv4/ip_sockglue.c | 4 ++-- net/ipv4/ipconfig.c | 2 +- net/ipv4/netfilter.c | 2 +- 
net/ipv4/netfilter/arp_tables.c | 2 +- net/ipv4/netfilter/ip_tables.c | 2 +- net/ipv4/netfilter/nf_nat_sip.c | 2 +- net/ipv4/ping.c | 8 +++++--- net/ipv4/route.c | 20 ++++++++++---------- net/ipv4/sysctl_net_ipv4.c | 2 +- net/ipv4/tcp.c | 8 ++++---- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_output.c | 24 ++++++++++++------------ net/ipv4/tcp_probe.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv6/addrconf_core.c | 2 +- net/ipv6/datagram.c | 2 +- net/ipv6/ip6_flowlabel.c | 4 ++-- net/ipv6/ip6_tunnel.c | 2 +- net/ipv6/ipv6_sockglue.c | 2 +- net/ipv6/netfilter/ip6_tables.c | 2 +- net/ipv6/route.c | 11 +++++------ net/ipv6/xfrm6_tunnel.c | 6 +++--- net/irda/ircomm/ircomm_tty_ioctl.c | 2 +- net/key/af_key.c | 4 ++-- net/l2tp/l2tp_core.h | 16 ++++++++-------- net/llc/af_llc.c | 2 +- net/netfilter/ipset/ip_set_core.c | 6 +++--- net/netfilter/ipvs/ip_vs_app.c | 2 +- net/netfilter/ipvs/ip_vs_conn.c | 34 +++++++++++++++++----------------- net/netfilter/ipvs/ip_vs_core.c | 2 +- net/netfilter/ipvs/ip_vs_ctl.c | 26 +++++++++++++------------- net/netfilter/ipvs/ip_vs_dh.c | 2 +- net/netfilter/ipvs/ip_vs_ftp.c | 2 +- net/netfilter/ipvs/ip_vs_lblc.c | 6 +++--- net/netfilter/ipvs/ip_vs_lblcr.c | 6 +++--- net/netfilter/ipvs/ip_vs_proto.c | 12 ++++++------ net/netfilter/ipvs/ip_vs_sh.c | 2 +- net/netfilter/ipvs/ip_vs_sync.c | 4 ++-- net/netfilter/nf_conntrack_h323_main.c | 2 +- net/netfilter/nfnetlink.c | 2 +- net/netfilter/xt_recent.c | 2 +- net/netrom/nr_dev.c | 2 +- net/nfc/core.c | 4 ++-- net/nfc/nfc.h | 2 +- net/openvswitch/vport-netdev.c | 4 ++-- net/packet/af_packet.c | 2 +- net/phonet/af_phonet.c | 2 +- net/phonet/pep.c | 8 ++++---- net/phonet/pn_dev.c | 2 +- net/phonet/socket.c | 12 ++++++------ net/phonet/sysctl.c | 3 ++- net/rose/rose_dev.c | 2 +- net/rose/rose_subr.c | 2 +- net/rxrpc/af_rxrpc.c | 8 ++++---- net/rxrpc/ar-ack.c | 6 +++--- net/rxrpc/ar-call.c | 4 ++-- net/rxrpc/ar-input.c | 2 +- net/rxrpc/ar-internal.h | 16 ++++++++-------- net/rxrpc/ar-key.c | 22 +++++++++++----------- net/rxrpc/rxkad.c | 6 +++--- net/sctp/associola.c | 4 ++-- net/sctp/output.c | 4 ++-- net/sctp/outqueue.c | 2 +- net/sctp/sm_sideeffect.c | 4 ++-- net/sctp/sm_statefuns.c | 4 ++-- net/socket.c | 18 +++++++++--------- net/sunrpc/auth_gss/gss_krb5_mech.c | 2 +- net/sunrpc/cache.c | 2 +- net/sunrpc/timer.c | 6 +++--- net/sunrpc/xdr.c | 2 +- net/sunrpc/xprt.c | 2 +- net/tipc/link.c | 2 +- net/tipc/socket.c | 8 ++++---- net/unix/af_unix.c | 21 +++++++++++---------- net/wimax/stack.c | 5 +++-- net/wireless/wext-core.c | 6 +++--- net/x25/x25_facilities.c | 4 ++-- net/xfrm/xfrm_hash.h | 8 ++++---- 137 files changed, 390 insertions(+), 384 deletions(-) (limited to 'net') diff --git a/net/802/fc.c b/net/802/fc.c index b324e31401a9..05eea6b98bb8 100644 --- a/net/802/fc.c +++ b/net/802/fc.c @@ -35,7 +35,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct fch_hdr *fch; int hdr_len; diff --git a/net/802/fddi.c b/net/802/fddi.c index 5ab25cd4314b..9cda40661e0d 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -51,7 +51,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { int hl = FDDI_K_SNAP_HLEN; struct fddihdr *fddi; diff --git a/net/802/hippi.c b/net/802/hippi.c index 056794e66375..51a1f530417d 100644 --- a/net/802/hippi.c +++ 
b/net/802/hippi.c @@ -45,7 +45,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; diff --git a/net/802/tr.c b/net/802/tr.c index b9a3a145e348..e65f0b8de0c4 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -101,7 +101,7 @@ static inline unsigned long rif_hash(const unsigned char *addr) static int tr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct trh_hdr *trh; int hdr_len; @@ -193,7 +193,7 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) struct trh_hdr *trh; struct trllc *trllc; - unsigned riflen=0; + unsigned int riflen=0; skb->dev = dev; skb_reset_mac_header(skb); diff --git a/net/9p/client.c b/net/9p/client.c index b23a17c431c8..a170893d70e0 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1530,7 +1530,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; @@ -1605,7 +1605,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, struct p9_req_t *req; p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; @@ -2040,7 +2040,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) char *dataptr; p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", - fid->fid, (long long unsigned) offset, count); + fid->fid, (unsigned long long) offset, count); err = 0; clnt = fid->clnt; diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index fccae26fa674..6449bae15702 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -513,7 +513,7 @@ error: clear_bit(Wworksched, &m->wsched); } -static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) +static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) { struct p9_poll_wait *pwait = container_of(wait, struct p9_poll_wait, wait); diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 353fccf1cde3..4819d31533e0 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -73,7 +73,7 @@ struct br2684_vcc { #ifdef CONFIG_ATM_BR2684_IPFILTER struct br2684_filter filter; #endif /* CONFIG_ATM_BR2684_IPFILTER */ - unsigned copies_needed, copies_failed; + unsigned int copies_needed, copies_failed; }; struct br2684_dev { diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 53e500292271..5bdd300db0f7 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -207,7 +207,7 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, size_t nbytes, loff_t *ppos) { char *page, *p; - unsigned len; + unsigned int len; if (nbytes == 0) return 0; diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 509c8ac02b63..86767ca908a3 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -166,7 +166,7 @@ void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type, { struct sk_buff *skb; struct atmsvc_msg *msg; - static unsigned session = 0; + static 
unsigned int session = 0; pr_debug("%d (0x%p)\n", (int)type, vcc); while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL))) diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 846ae4e2b115..67de6b33f2c3 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -48,7 +48,7 @@ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { unsigned char *buff; @@ -219,7 +219,7 @@ put: int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { return -AX25_HEADER_LEN; } diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index c6efd687ca75..2c816883ca13 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -149,7 +149,7 @@ static int store_bool_attr(char *buff, size_t count, atomic_read(attr) == 1 ? "enabled" : "disabled", enabled == 1 ? "enabled" : "disabled"); - atomic_set(attr, (unsigned)enabled); + atomic_set(attr, (unsigned int)enabled); return count; } @@ -268,7 +268,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr, "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ? "client" : "server"); - atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp); + atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp); return count; } @@ -354,7 +354,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr, curr_gw_mode_str, buff); gw_deselect(bat_priv); - atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp); + atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp); return count; } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 61f65344e711..aab8470707c4 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -34,7 +34,7 @@ static inline int should_deliver(const struct net_bridge_port *p, p->state == BR_STATE_FORWARDING); } -static inline unsigned packet_length(const struct sk_buff *skb) +static inline unsigned int packet_length(const struct sk_buff *skb) { return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0); } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 27ca25ed7021..020e463f2225 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -512,8 +512,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; struct hlist_node *p; - unsigned count = 0; - unsigned max; + unsigned int count = 0; + unsigned int max; int elasticity; int err; @@ -1281,8 +1281,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, struct sk_buff *skb2 = skb; const struct iphdr *iph; struct igmphdr *ih; - unsigned len; - unsigned offset; + unsigned int len; + unsigned int offset; int err; /* We treat OOM as packet loss for now. 
*/ @@ -1382,7 +1382,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, u8 icmp6_type; u8 nexthdr; __be16 frag_off; - unsigned len; + unsigned int len; int offset; int err; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 346b368d8698..df38108f6973 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -88,7 +88,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) int err = -ENOBUFS; br_debug(port->br, "port %u(%s) event %d\n", - (unsigned)port->port_no, port->dev->name, event); + (unsigned int)port->port_no, port->dev->name, event); skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); if (skb == NULL) diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h index 05ed9bc7e426..0c0fe36e7aa9 100644 --- a/net/bridge/br_private_stp.h +++ b/net/bridge/br_private_stp.h @@ -29,10 +29,9 @@ #define BR_MIN_PATH_COST 1 #define BR_MAX_PATH_COST 65535 -struct br_config_bpdu -{ - unsigned topology_change:1; - unsigned topology_change_ack:1; +struct br_config_bpdu { + unsigned int topology_change:1; + unsigned int topology_change_ack:1; bridge_id root; int root_path_cost; bridge_id bridge_id; diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 8c836d96ba76..af9a12099ba4 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -32,7 +32,7 @@ static const char *const br_port_state_names[] = { void br_log_state(const struct net_bridge_port *p) { br_info(p->br, "port %u(%s) entered %s state\n", - (unsigned) p->port_no, p->dev->name, + (unsigned int) p->port_no, p->dev->name, br_port_state_names[p->state]); } @@ -478,7 +478,7 @@ void br_received_tcn_bpdu(struct net_bridge_port *p) { if (br_is_designated_port(p)) { br_info(p->br, "port %u(%s) received tcn bpdu\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); br_topology_change_detection(p->br); br_topology_change_acknowledge(p); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 58de2a0f9975..a6747e673426 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -56,7 +56,7 @@ static void br_message_age_timer_expired(unsigned long arg) return; br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", - (unsigned) p->port_no, p->dev->name, + (unsigned int) p->port_no, p->dev->name, id->prio[0], id->prio[1], &id->addr); /* @@ -84,7 +84,7 @@ static void br_forward_delay_timer_expired(unsigned long arg) struct net_bridge *br = p->br; br_debug(br, "port %u(%s) forward delay timer\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); spin_lock(&br->lock); if (p->state == BR_STATE_LISTENING) { p->state = BR_STATE_LEARNING; @@ -131,7 +131,7 @@ static void br_hold_timer_expired(unsigned long arg) struct net_bridge_port *p = (struct net_bridge_port *) arg; br_debug(p->br, "port %u(%s) hold timer expired\n", - (unsigned) p->port_no, p->dev->name); + (unsigned int) p->port_no, p->dev->name); spin_lock(&p->br->lock); if (p->config_pending) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index c236c0e43984..766fd7fb0295 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -297,7 +297,7 @@ static ssize_t store_group_addr(struct device *d, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); - unsigned new_addr[6]; + unsigned int new_addr[6]; int i; if (!capable(CAP_NET_ADMIN)) diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index ce47ee9f48c8..0dccdb3c7d26 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ 
-130,7 +130,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { + (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { if (net_ratelimit()) pr_debug("sending flow OFF (queue len = %d %d)\n", atomic_read(&cf_sk->sk.sk_rmem_alloc), diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h index e02da7a5c5a1..f459e93b774f 100644 --- a/net/ceph/auth_x.h +++ b/net/ceph/auth_x.h @@ -13,7 +13,7 @@ */ struct ceph_x_ticket_handler { struct rb_node node; - unsigned service; + unsigned int service; struct ceph_crypto_key session_key; struct ceph_timespec validity; @@ -27,7 +27,7 @@ struct ceph_x_ticket_handler { struct ceph_x_authorizer { struct ceph_buffer *buf; - unsigned service; + unsigned int service; u64 nonce; char reply_buf[128]; /* big enough for encrypted blob */ }; @@ -38,7 +38,7 @@ struct ceph_x_info { bool starting; u64 server_challenge; - unsigned have_keys; + unsigned int have_keys; struct rb_root ticket_handlers; struct ceph_x_authorizer auth_authorizer; diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index cc913193d992..a776f751edbf 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -441,8 +441,8 @@ EXPORT_SYMBOL(ceph_client_id); * create a fresh client instance */ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, - unsigned supported_features, - unsigned required_features) + unsigned int supported_features, + unsigned int required_features) { struct ceph_client *client; struct ceph_entity_addr *myaddr = NULL; diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 0a1b53bce76d..67bb1f11e613 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -20,7 +20,7 @@ c = c - a; c = c - b; c = c ^ (b >> 15); \ } while (0) -unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) +unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) { const unsigned char *k = (const unsigned char *)str; __u32 a, b, c; /* the internal state */ @@ -81,7 +81,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) /* * linux dcache hash */ -unsigned ceph_str_hash_linux(const char *str, unsigned length) +unsigned int ceph_str_hash_linux(const char *str, unsigned int length) { unsigned long hash = 0; unsigned char c; @@ -94,7 +94,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length) } -unsigned ceph_str_hash(int type, const char *s, unsigned len) +unsigned int ceph_str_hash(int type, const char *s, unsigned int len) { switch (type) { case CEPH_STR_HASH_LINUX: diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index b79747c4b645..854ac53f56cd 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -68,8 +68,8 @@ int crush_find_rule(struct crush_map *map, int ruleset, int type, int size) static int bucket_perm_choose(struct crush_bucket *bucket, int x, int r) { - unsigned pr = r % bucket->size; - unsigned i, s; + unsigned int pr = r % bucket->size; + unsigned int i, s; /* start a new permutation if @x has changed */ if (bucket->perm_x != x || bucket->perm_n == 0) { @@ -100,13 +100,13 @@ static int bucket_perm_choose(struct crush_bucket *bucket, for (i = 0; i < bucket->perm_n; i++) dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); while (bucket->perm_n <= pr) { - unsigned p = bucket->perm_n; + unsigned int p = bucket->perm_n; /* no point in swapping the final entry 
*/ if (p < bucket->size - 1) { i = crush_hash32_3(bucket->hash, x, bucket->id, p) % (bucket->size - p); if (i) { - unsigned t = bucket->perm[p + i]; + unsigned int t = bucket->perm[p + i]; bucket->perm[p + i] = bucket->perm[p]; bucket->perm[p] = t; } diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 27d4ea315d12..54b531a01121 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -94,9 +94,9 @@ static int monc_show(struct seq_file *s, void *p) mutex_lock(&monc->mutex); if (monc->have_mdsmap) - seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); + seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap); if (monc->have_osdmap) - seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); + seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap); if (monc->want_next_osdmap) seq_printf(s, "want next osdmap\n"); @@ -146,7 +146,7 @@ static int osdc_show(struct seq_file *s, void *pp) if (req->r_reassert_version.epoch) seq_printf(s, "\t%u'%llu", - (unsigned)le32_to_cpu(req->r_reassert_version.epoch), + (unsigned int)le32_to_cpu(req->r_reassert_version.epoch), le64_to_cpu(req->r_reassert_version.version)); else seq_printf(s, "\t"); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f0993af2ae4d..36fa6bf68498 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -699,7 +699,7 @@ static int prepare_write_connect(struct ceph_messenger *msgr, struct ceph_connection *con, int include_banner) { - unsigned global_seq = get_global_seq(con->msgr, 0); + unsigned int global_seq = get_global_seq(con->msgr, 0); int proto; switch (con->peer_name.type) { @@ -816,7 +816,7 @@ static void iter_bio_next(struct bio **bio_iter, int *seg) static int write_partial_msg_pages(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; - unsigned data_len = le32_to_cpu(msg->hdr.data_len); + unsigned int data_len = le32_to_cpu(msg->hdr.data_len); size_t len; bool do_datacrc = !con->msgr->nocrc; int ret; @@ -1554,7 +1554,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, static int read_partial_message_pages(struct ceph_connection *con, struct page **pages, - unsigned data_len, bool do_datacrc) + unsigned int data_len, bool do_datacrc) { void *p; int ret; @@ -1587,7 +1587,7 @@ static int read_partial_message_pages(struct ceph_connection *con, #ifdef CONFIG_BLOCK static int read_partial_message_bio(struct ceph_connection *con, struct bio **bio_iter, int *bio_seg, - unsigned data_len, bool do_datacrc) + unsigned int data_len, bool do_datacrc) { struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); void *p; @@ -1629,7 +1629,7 @@ static int read_partial_message(struct ceph_connection *con) struct ceph_msg *m = con->in_msg; int ret; int to, left; - unsigned front_len, middle_len, data_len; + unsigned int front_len, middle_len, data_len; bool do_datacrc = !con->msgr->nocrc; int skip; u64 seq; @@ -2345,9 +2345,9 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) { mutex_lock(&con->mutex); if (con->in_msg && con->in_msg == msg) { - unsigned front_len = le32_to_cpu(con->in_hdr.front_len); - unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); - unsigned data_len = le32_to_cpu(con->in_hdr.data_len); + unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); + unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); + unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); /* skip rest of message */ dout("con_revoke_pages %p msg %p revoked\n", con, msg); diff --git 
a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 1845cde26227..10d6008d31f2 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -168,7 +168,7 @@ static bool __sub_expired(struct ceph_mon_client *monc) */ static void __schedule_delayed(struct ceph_mon_client *monc) { - unsigned delay; + unsigned int delay; if (monc->cur_mon < 0 || __sub_expired(monc)) delay = 10 * HZ; @@ -184,7 +184,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc) static void __send_subscribe(struct ceph_mon_client *monc) { dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", - (unsigned)monc->sub_sent, __sub_expired(monc), + (unsigned int)monc->sub_sent, __sub_expired(monc), monc->want_next_osdmap); if ((__sub_expired(monc) && !monc->sub_sent) || monc->want_next_osdmap == 1) { @@ -201,7 +201,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) if (monc->want_next_osdmap) { dout("__send_subscribe to 'osdmap' %u\n", - (unsigned)monc->have_osdmap); + (unsigned int)monc->have_osdmap); ceph_encode_string(&p, end, "osdmap", 6); i = p; i->have = cpu_to_le64(monc->have_osdmap); @@ -211,7 +211,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) } if (monc->want_mdsmap) { dout("__send_subscribe to 'mdsmap' %u+\n", - (unsigned)monc->have_mdsmap); + (unsigned int)monc->have_mdsmap); ceph_encode_string(&p, end, "mdsmap", 6); i = p; i->have = cpu_to_le64(monc->have_mdsmap); @@ -236,7 +236,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) static void handle_subscribe_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) { - unsigned seconds; + unsigned int seconds; struct ceph_mon_subscribe_ack *h = msg->front.iov_base; if (msg->front.iov_len < sizeof(*h)) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 5e254055c910..1b0ef3c4d393 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1214,7 +1214,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, } if (!req->r_got_reply) { - unsigned bytes; + unsigned int bytes; req->r_result = le32_to_cpu(rhead->result); bytes = le32_to_cpu(msg->hdr.data_len); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 29ad46ec9dcf..56e561a69004 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -38,7 +38,7 @@ done: /* maps */ -static int calc_bits_of(unsigned t) +static int calc_bits_of(unsigned int t) { int b = 0; while (t) { @@ -154,7 +154,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) magic = ceph_decode_32(p); if (magic != CRUSH_MAGIC) { pr_err("crush_decode magic %x != current %x\n", - (unsigned)magic, (unsigned)CRUSH_MAGIC); + (unsigned int)magic, (unsigned int)CRUSH_MAGIC); goto bad; } c->max_buckets = ceph_decode_32(p); @@ -460,7 +460,7 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) { - unsigned n, m; + unsigned int n, m; ceph_decode_copy(p, &pi->v, sizeof(pi->v)); calc_pg_masks(pi); @@ -970,7 +970,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, objsetno = stripeno / su_per_object; *ono = objsetno * sc + stripepos; - dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono); + dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ t = off; @@ -998,12 +998,12 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, struct ceph_file_layout *fl, struct ceph_osdmap *osdmap) { - unsigned num, num_mask; + 
unsigned int num, num_mask; struct ceph_pg pgid; s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred); int poolid = le32_to_cpu(fl->fl_pg_pool); struct ceph_pg_pool_info *pool; - unsigned ps; + unsigned int ps; BUG_ON(!osdmap); @@ -1045,7 +1045,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, struct ceph_pg_mapping *pg; struct ceph_pg_pool_info *pool; int ruleno; - unsigned poolid, ps, pps, t; + unsigned int poolid, ps, pps, t; int preferred; poolid = le32_to_cpu(pgid.pool); diff --git a/net/compat.c b/net/compat.c index e055708b8ec9..e240441a2317 100644 --- a/net/compat.c +++ b/net/compat.c @@ -741,13 +741,13 @@ static unsigned char nas[21] = { }; #undef AL -asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) +asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags) + unsigned int vlen, unsigned int flags) { return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT); @@ -758,20 +758,20 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } -asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) +asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags) { return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, - unsigned flags, struct sockaddr __user *addr, + unsigned int flags, struct sockaddr __user *addr, int __user *addrlen) { return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); } asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, - unsigned vlen, unsigned int flags, + unsigned int vlen, unsigned int flags, struct compat_timespec __user *timeout) { int datagrams; diff --git a/net/core/datagram.c b/net/core/datagram.c index e4fbfd6e2bd4..ae6acf6a3dea 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -65,7 +65,7 @@ static inline int connection_based(struct sock *sk) return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; } -static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, +static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync, void *key) { unsigned long bits = (unsigned long)key; @@ -158,7 +158,7 @@ out_noerr: * quite explicitly by POSIX 1003.1g, don't change them without having * the standard around please. 
*/ -struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, +struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, int *peeked, int *off, int *err) { struct sk_buff *skb; @@ -216,7 +216,7 @@ no_packet: } EXPORT_SYMBOL(__skb_recv_datagram); -struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int noblock, int *err) { int peeked, off = 0; diff --git a/net/core/dev.c b/net/core/dev.c index 9bb8f87c4cda..c93812733f1d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -208,7 +208,8 @@ static inline void dev_base_seq_inc(struct net *net) static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { - unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); + unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); + return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; } @@ -4618,9 +4619,9 @@ void dev_set_rx_mode(struct net_device *dev) * * Get the combination of flag bits exported through APIs to userspace. */ -unsigned dev_get_flags(const struct net_device *dev) +unsigned int dev_get_flags(const struct net_device *dev) { - unsigned flags; + unsigned int flags; flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | diff --git a/net/core/filter.c b/net/core/filter.c index 95d05a6012d1..47a5f055e7f3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -531,7 +531,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) * Compare this with conditional jumps below, * where offsets are limited. --ANK (981016) */ - if (ftest->k >= (unsigned)(flen-pc-1)) + if (ftest->k >= (unsigned int)(flen-pc-1)) return -EINVAL; break; case BPF_S_JMP_JEQ_K: diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 2d3e6fcdc9c9..434eed8c6185 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -890,7 +890,7 @@ static void neigh_timer_handler(unsigned long arg) { unsigned long now, next; struct neighbour *neigh = (struct neighbour *)arg; - unsigned state; + unsigned int state; int notify = 0; write_lock(&neigh->lock); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 97d0f2453a0e..fdf9e61d0651 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -231,7 +231,7 @@ NETDEVICE_SHOW(flags, fmt_hex); static int change_flags(struct net_device *net, unsigned long new_flags) { - return dev_change_flags(net, (unsigned) new_flags); + return dev_change_flags(net, (unsigned int) new_flags); } static ssize_t store_flags(struct device *dev, struct device_attribute *attr, @@ -581,7 +581,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, return err; } - map = kzalloc(max_t(unsigned, + map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL); if (!map) { @@ -902,7 +902,7 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue, const char *buf, size_t len) { struct dql *dql = &queue->dql; - unsigned value; + unsigned int value; int err; err = kstrtouint(buf, 10, &value); @@ -1106,7 +1106,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, return err; } - new_dev_maps = kzalloc(max_t(unsigned, + new_dev_maps = kzalloc(max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL); if (!new_dev_maps) { free_cpumask_var(mask); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 4d8ce93cd503..ffb5d382f241 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -320,7 +320,7 @@ struct pktgen_dev { (see RFC 3260, sec. 
4) */ /* MPLS */ - unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ + unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */ __be32 labels[MAX_MPLS_LABELS]; /* VLAN/SVLAN (802.1Q/Q-in-Q) */ @@ -373,10 +373,10 @@ struct pktgen_dev { */ char odevname[32]; struct flow_state *flows; - unsigned cflows; /* Concurrent flows (config) */ - unsigned lflow; /* Flow length (config) */ - unsigned nflows; /* accumulated flows (stats) */ - unsigned curfl; /* current sequenced flow (state)*/ + unsigned int cflows; /* Concurrent flows (config) */ + unsigned int lflow; /* Flow length (config) */ + unsigned int nflows; /* accumulated flows (stats) */ + unsigned int curfl; /* current sequenced flow (state)*/ u16 queue_map_min; u16 queue_map_max; @@ -592,7 +592,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v) pkt_dev->src_mac_count, pkt_dev->dst_mac_count); if (pkt_dev->nr_labels) { - unsigned i; + unsigned int i; seq_printf(seq, " mpls: "); for (i = 0; i < pkt_dev->nr_labels; i++) seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), @@ -812,7 +812,7 @@ done_str: static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) { - unsigned n = 0; + unsigned int n = 0; char c; ssize_t i = 0; int len; @@ -1510,7 +1510,7 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "mpls")) { - unsigned n, cnt; + unsigned int n, cnt; len = get_labels(&user_buffer[i], pkt_dev); if (len < 0) @@ -2324,7 +2324,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) } if (pkt_dev->flags & F_MPLS_RND) { - unsigned i; + unsigned int i; for (i = 0; i < pkt_dev->nr_labels; i++) if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) pkt_dev->labels[i] = MPLS_STACK_BOTTOM | @@ -2550,7 +2550,7 @@ err: static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) { - unsigned i; + unsigned int i; for (i = 0; i < pkt_dev->nr_labels; i++) *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4a0d8cfff2a0..2ff6fe4bada4 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -552,7 +552,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data } EXPORT_SYMBOL(__rta_fill); -int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) +int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) { struct sock *rtnl = net->rtnl; int err = 0; @@ -1953,7 +1953,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change) { struct net *net = dev_net(dev); struct sk_buff *skb; @@ -2048,7 +2048,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); while (RTA_OK(attr, attrlen)) { - unsigned flavor = attr->rta_type; + unsigned int flavor = attr->rta_type; if (flavor) { if (flavor > rta_max[sz_idx]) return -EINVAL; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 35b3a685e342..8f0d68d14360 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3166,7 +3166,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) int len = skb->len; if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) + (unsigned int)sk->sk_rcvbuf) return -ENOMEM; skb_orphan(skb); diff --git a/net/core/sock.c b/net/core/sock.c index b2e14c07d920..c7e60eac639b 100644 
--- a/net/core/sock.c +++ b/net/core/sock.c @@ -1534,7 +1534,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, */ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { - if ((unsigned)size <= sysctl_optmem_max && + if ((unsigned int)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 70bfaf2d1965..8c67bedf85b0 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -100,7 +100,7 @@ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc) DCCP_BUG_ON(hc->tx_t_ipi == 0); ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi, - hc->tx_s, (unsigned)(hc->tx_x >> 6)); + hc->tx_s, (unsigned int)(hc->tx_x >> 6)); } static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) @@ -153,9 +153,9 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) if (hc->tx_x != old_x) { ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " - "X_recv=%u\n", (unsigned)(old_x >> 6), - (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, - (unsigned)(hc->tx_x_recv >> 6)); + "X_recv=%u\n", (unsigned int)(old_x >> 6), + (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc, + (unsigned int)(hc->tx_x_recv >> 6)); ccid3_update_send_interval(hc); } @@ -425,8 +425,8 @@ done_computing_x: "p=%u, X_calc=%u, X_recv=%u, X=%u\n", dccp_role(sk), sk, hc->tx_rtt, r_sample, hc->tx_s, hc->tx_p, hc->tx_x_calc, - (unsigned)(hc->tx_x_recv >> 6), - (unsigned)(hc->tx_x >> 6)); + (unsigned int)(hc->tx_x_recv >> 6), + (unsigned int)(hc->tx_x >> 6)); /* unschedule no feedback timer */ sk_stop_timer(sk, &hc->tx_no_feedback_timer); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 29d6bb629a6c..9040be049d8c 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -75,7 +75,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); * state, about 60 seconds */ /* RFC 1122, 4.2.3.1 initial RTO value */ -#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ)) +#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ)) /* * The maximum back-off value for retransmissions. This is needed for @@ -84,7 +84,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); * - feature-negotiation retransmission (sec. 6.6.3), * - Acks in client-PARTOPEN state (sec. 8.1.5). 
*/ -#define DCCP_RTO_MAX ((unsigned)(64 * HZ)) +#define DCCP_RTO_MAX ((unsigned int)(64 * HZ)) /* * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4 @@ -287,9 +287,9 @@ extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, extern int dccp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, - struct dccp_hdr *dh, unsigned len); + struct dccp_hdr *dh, unsigned int len); extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct dccp_hdr *dh, const unsigned len); + const struct dccp_hdr *dh, const unsigned int len); extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); extern void dccp_destroy_sock(struct sock *sk); diff --git a/net/dccp/input.c b/net/dccp/input.c index 51d5fe5fffba..bc93a333931e 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -285,7 +285,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) } static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct dccp_hdr *dh, const unsigned len) + const struct dccp_hdr *dh, const unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); @@ -366,7 +366,7 @@ discard: } int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct dccp_hdr *dh, const unsigned len) + const struct dccp_hdr *dh, const unsigned int len) { if (dccp_check_seqno(sk, skb)) goto discard; @@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_established); static int dccp_rcv_request_sent_state_process(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, - const unsigned len) + const unsigned int len) { /* * Step 4: Prepare sequence numbers in REQUEST @@ -521,7 +521,7 @@ unable_to_proceed: static int dccp_rcv_respond_partopen_state_process(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, - const unsigned len) + const unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; @@ -572,7 +572,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, } int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, - struct dccp_hdr *dh, unsigned len) + struct dccp_hdr *dh, unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 4136987d94da..2ba1a2814c24 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -250,7 +250,7 @@ static void dn_unhash_sock_bh(struct sock *sk) static struct hlist_head *listen_hash(struct sockaddr_dn *addr) { int i; - unsigned hash = addr->sdn_objnum; + unsigned int hash = addr->sdn_objnum; if (hash == 0) { hash = addr->sdn_objnamel; @@ -1844,9 +1844,9 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't * make much practical difference. 
*/ -unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu) +unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu) { - unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; + unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER; if (dev) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); mtu -= LL_RESERVED_SPACE(dev); diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 9e885f180b60..65a8cd7891fe 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -302,11 +302,12 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta struct rtattr *attr = RTA_DATA(rta->rta_mx); while(RTA_OK(attr, attrlen)) { - unsigned flavour = attr->rta_type; + unsigned int flavour = attr->rta_type; + if (flavour) { if (flavour > RTAX_MAX) goto err_inval; - fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr); + fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr); } attr = RTA_NEXT(attr, attrlen); } diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index f6544b2c91b0..58084f37151e 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -588,7 +588,7 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig number of warnings when compiling with -W --ANK */ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) { + (unsigned int)sk->sk_rcvbuf) { err = -ENOMEM; goto out; } diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index e446e85e64a6..b952f88d9c1f 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -209,7 +209,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt) * * Returns: The number of times the packet has been sent previously */ -static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, +static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb, gfp_t gfp) { struct dn_skb_cb *cb = DN_SKB_CB(skb); @@ -240,7 +240,7 @@ void dn_nsp_output(struct sock *sk) { struct dn_scp *scp = DN_SK(sk); struct sk_buff *skb; - unsigned reduce_win = 0; + unsigned int reduce_win = 0; /* * First we check for otherdata/linkservice messages diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 80a3de4906d3..7e1f8788da19 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -122,7 +122,7 @@ static int dn_route_input(struct sk_buff *); static void dn_run_flush(unsigned long dummy); static struct dn_rt_hash_bucket *dn_rt_hash_table; -static unsigned dn_rt_hash_mask; +static unsigned int dn_rt_hash_mask; static struct timer_list dn_route_timer; static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0); @@ -149,13 +149,13 @@ static void dn_dst_destroy(struct dst_entry *dst) dst_destroy_metrics_generic(dst); } -static __inline__ unsigned dn_hash(__le16 src, __le16 dst) +static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) { __u16 tmp = (__u16 __force)(src ^ dst); tmp ^= (tmp >> 3); tmp ^= (tmp >> 5); tmp ^= (tmp >> 10); - return dn_rt_hash_mask & (unsigned)tmp; + return dn_rt_hash_mask & (unsigned int)tmp; } static inline void dnrt_free(struct dn_route *rt) @@ -297,7 +297,7 @@ static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2) (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0; } -static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) +static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp) { struct dn_route *rth; struct dn_route __rcu **rthp; @@ -934,8 +934,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o 
struct dn_route *rt = NULL; struct net_device *dev_out = NULL, *dev; struct neighbour *neigh = NULL; - unsigned hash; - unsigned flags = 0; + unsigned int hash; + unsigned int flags = 0; struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; int err; int free_res = 0; @@ -1209,7 +1209,7 @@ e_neighbour: */ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags) { - unsigned hash = dn_hash(flp->saddr, flp->daddr); + unsigned int hash = dn_hash(flp->saddr, flp->daddr); struct dn_route *rt = NULL; if (!(flags & MSG_TRYHARD)) { @@ -1275,7 +1275,7 @@ static int dn_route_input_slow(struct sk_buff *skb) struct net_device *out_dev = NULL; struct dn_dev *dn_db; struct neighbour *neigh = NULL; - unsigned hash; + unsigned int hash; int flags = 0; __le16 gateway = 0; __le16 local_src = 0; @@ -1490,7 +1490,7 @@ static int dn_route_input(struct sk_buff *skb) { struct dn_route *rt; struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned hash = dn_hash(cb->src, cb->dst); + unsigned int hash = dn_hash(cb->src, cb->dst); if (skb_dst(skb)) return 0; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 7399e3d51922..e65f2c856e06 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -177,11 +177,11 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, return 1; } -unsigned dnet_addr_type(__le16 addr) +unsigned int dnet_addr_type(__le16 addr) { struct flowidn fld = { .daddr = addr }; struct dn_fib_res res; - unsigned ret = RTN_UNICAST; + unsigned int ret = RTN_UNICAST; struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); res.r = NULL; diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index c73bba326d70..6f70ea935b0b 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -38,7 +38,7 @@ MODULE_DESCRIPTION("DNS Resolver"); MODULE_AUTHOR("Wang Lei"); MODULE_LICENSE("GPL"); -unsigned dns_resolver_debug; +unsigned int dns_resolver_debug; module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h index 189ca9e9b785..17c7886b5b3a 100644 --- a/net/dns_resolver/internal.h +++ b/net/dns_resolver/internal.h @@ -31,7 +31,7 @@ extern const struct cred *dns_resolver_cache; /* * debug tracing */ -extern unsigned dns_resolver_debug; +extern unsigned int dns_resolver_debug; #define kdebug(FMT, ...) 
\ do { \ diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index bf10a311cf1c..5889a6c38a10 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -77,7 +77,7 @@ __setup("ether=", netdev_boot_setup); */ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 368515885368..58c8895716ff 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -371,7 +371,7 @@ err: static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, - const void *_saddr, unsigned len) + const void *_saddr, unsigned int len) { u8 tmp, iphc0, iphc1, *hc06_ptr; struct ipv6hdr *hdr; diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 1b09eaabaac1..6fbb2ad7bb6d 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -44,8 +44,8 @@ struct dgram_sock { struct ieee802154_addr src_addr; struct ieee802154_addr dst_addr; - unsigned bound:1; - unsigned want_ack:1; + unsigned int bound:1; + unsigned int want_ack:1; }; static inline struct dgram_sock *dgram_sk(const struct sock *sk) @@ -206,7 +206,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; - unsigned mtu; + unsigned int mtu; struct sk_buff *skb; struct dgram_sock *ro = dgram_sk(sk); int hlen, tlen; diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index f96bae8fd330..50e823927d49 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c @@ -106,7 +106,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; - unsigned mtu; + unsigned int mtu; struct sk_buff *skb; int hlen, tlen; int err; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 18d9b81ecb1a..373b56bf8f49 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1059,7 +1059,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev); err = PTR_ERR(neigh); if (!IS_ERR(neigh)) { - unsigned state = NUD_STALE; + unsigned int state = NUD_STALE; if (r->arp_flags & ATF_PERM) state = NUD_PERMANENT; err = neigh_update(neigh, (r->arp_flags & ATF_COM) ? @@ -1071,7 +1071,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, return err; } -static unsigned arp_state_to_flags(struct neighbour *neigh) +static unsigned int arp_state_to_flags(struct neighbour *neigh) { if (neigh->nud_state&NUD_PERMANENT) return ATF_PERM | ATF_COM; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7ba2196e4377..8a01bfb7c873 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1125,7 +1125,7 @@ skip: } } -static inline bool inetdev_valid_mtu(unsigned mtu) +static inline bool inetdev_valid_mtu(unsigned int mtu) { return mtu >= 68; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index cbe3a68507cf..3854411fa37c 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -136,13 +136,13 @@ static void fib_flush(struct net *net) * Find address type as if only "dev" was present in the system. If * on_dev is NULL then all interfaces are taken into consideration. 
*/ -static inline unsigned __inet_dev_addr_type(struct net *net, - const struct net_device *dev, - __be32 addr) +static inline unsigned int __inet_dev_addr_type(struct net *net, + const struct net_device *dev, + __be32 addr) { struct flowi4 fl4 = { .daddr = addr }; struct fib_result res; - unsigned ret = RTN_BROADCAST; + unsigned int ret = RTN_BROADCAST; struct fib_table *local_table; if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) @@ -740,7 +740,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) #define BRD_OK 2 #define BRD0_OK 4 #define BRD1_OK 8 - unsigned ok = 0; + unsigned int ok = 0; int subnet = 0; /* Primary network */ int gone = 1; /* Address is missing */ int same_prefsrc = 0; /* Another primary with same IP */ diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 14409f111bc2..7d972f650a61 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -42,7 +42,8 @@ EXPORT_SYMBOL(sysctl_local_reserved_ports); void inet_get_local_port_range(int *low, int *high) { - unsigned seq; + unsigned int seq; + do { seq = read_seqbegin(&sysctl_local_ports.lock); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 89168c6351ff..543ef6225458 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -263,7 +263,7 @@ rescan: void inet_twdr_hangman(unsigned long data) { struct inet_timewait_death_row *twdr; - int unsigned need_timer; + unsigned int need_timer; twdr = (struct inet_timewait_death_row *)data; spin_lock(&twdr->death_lock); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 1372c4586edc..95722ed0e5bb 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -413,7 +413,7 @@ int ip_options_compile(struct net *net, opt->is_changed = 1; } } else { - unsigned overflow = optptr[3]>>4; + unsigned int overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 0a87e1fc0ce5..51c6c672c8aa 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -147,7 +147,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) { struct inet_sock *inet = inet_sk(skb->sk); - unsigned flags = inet->cmsg_flags; + unsigned int flags = inet->cmsg_flags; /* Ordered by supposed usage frequency */ if (flags & 1) @@ -1094,7 +1094,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt); */ static int do_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen, unsigned flags) + char __user *optval, int __user *optlen, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); int val; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 92ac7e7363a0..f267280d8709 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1198,7 +1198,7 @@ static int __init ic_dynamic(void) d = ic_first_dev; retries = CONF_SEND_RETRIES; get_random_bytes(&timeout, sizeof(timeout)); - timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM); + timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM); for (;;) { /* Track the device we are configuring */ ic_dev_xid = d->xid; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 4f47e064e262..3cd8c586741a 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -12,7 +12,7 @@ #include /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ -int 
ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) +int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type) { struct net *net = dev_net(skb_dst(skb)->dev); const struct iphdr *iph = ip_hdr(skb); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index fd7a3f68917f..a3935273869f 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -303,7 +303,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { - verdict = (unsigned)(-v) - 1; + verdict = (unsigned int)(-v) - 1; break; } e = back; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 24e556e83a3b..585b80f3cc68 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -377,7 +377,7 @@ ipt_do_table(struct sk_buff *skb, if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { - verdict = (unsigned)(-v) - 1; + verdict = (unsigned int)(-v) - 1; break; } if (*stackptr <= origptr) { diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 57932c43960e..ea4a23813d26 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -283,7 +283,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff, __be32 newip; u_int16_t port; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; - unsigned buflen; + unsigned int buflen; /* Connection will come from reply */ if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 9f380ace22ee..6e930c7174dd 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -51,15 +51,16 @@ static struct ping_table ping_table; static u16 ping_port_rover; -static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask) +static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask) { int res = (num + net_hash_mix(net)) & mask; + pr_debug("hash(%d) = %d\n", num, res); return res; } static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, - struct net *net, unsigned num) + struct net *net, unsigned int num) { return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } @@ -188,7 +189,8 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low, gid_t *high) { gid_t *data = net->ipv4.sysctl_ping_group_range; - unsigned seq; + unsigned int seq; + do { seq = read_seqbegin(&sysctl_local_ports.lock); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a13ce2364ed2..ac4d6b3fa9c9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -296,7 +296,7 @@ static inline void rt_hash_lock_init(void) #endif static struct rt_hash_bucket *rt_hash_table __read_mostly; -static unsigned rt_hash_mask __read_mostly; +static unsigned int rt_hash_mask __read_mostly; static unsigned int rt_hash_log __read_mostly; static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); @@ -1143,7 +1143,7 @@ static int rt_bind_neighbour(struct rtable *rt) return 0; } -static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt, +static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt, struct sk_buff *skb, int ifindex) { struct rtable *rth, *cand; @@ -1384,7 +1384,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) } EXPORT_SYMBOL(__ip_select_ident); -static void rt_del(unsigned hash, struct rtable *rt) +static void rt_del(unsigned int hash, struct rtable *rt) { struct rtable __rcu **rthp; struct rtable *aux; 
@@ -1538,7 +1538,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) ip_rt_put(rt); ret = NULL; } else if (rt->rt_flags & RTCF_REDIRECTED) { - unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, + unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, rt->rt_oif, rt_genid(dev_net(dst->dev))); rt_del(hash, rt); @@ -2217,7 +2217,7 @@ static int ip_mkroute_input(struct sk_buff *skb, { struct rtable *rth = NULL; int err; - unsigned hash; + unsigned int hash; #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && res->fi->fib_nhs > 1) @@ -2255,10 +2255,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct fib_result res; struct in_device *in_dev = __in_dev_get_rcu(dev); struct flowi4 fl4; - unsigned flags = 0; + unsigned int flags = 0; u32 itag = 0; - struct rtable *rth; - unsigned hash; + struct rtable *rth; + unsigned int hash; __be32 spec_dst; int err = -EINVAL; struct net *net = dev_net(dev); @@ -2433,8 +2433,8 @@ martian_source_keep_err: int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, u8 tos, struct net_device *dev, bool noref) { - struct rtable *rth; - unsigned hash; + struct rtable *rth; + unsigned int hash; int iif = dev->ifindex; struct net *net; int res; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 7a7724da9bff..34a628625d9c 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -78,7 +78,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) { gid_t *data = table->data; - unsigned seq; + unsigned int seq; do { seq = read_seqbegin(&sysctl_local_ports.lock); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8bb6adeb62c0..c53e8a827f55 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2675,7 +2675,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, { struct sk_buff *segs = ERR_PTR(-EINVAL); struct tcphdr *th; - unsigned thlen; + unsigned int thlen; unsigned int seq; __be32 delta; unsigned int oldlen; @@ -3033,9 +3033,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, struct scatterlist sg; const struct tcphdr *tp = tcp_hdr(skb); struct hash_desc *desc = &hp->md5_desc; - unsigned i; - const unsigned head_data_len = skb_headlen(skb) > header_len ? - skb_headlen(skb) - header_len : 0; + unsigned int i; + const unsigned int head_data_len = skb_headlen(skb) > header_len ? + skb_headlen(skb) - header_len : 0; const struct skb_shared_info *shi = skb_shinfo(skb); struct sk_buff *frag_iter; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3dc94febc9ee..99448f06a42a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -175,7 +175,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); + unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 376b2cfbb685..de8790ced946 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -563,13 +563,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, /* Compute TCP options for SYN packets. This is not the final * network wire format yet. 
*/ -static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, +static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; - unsigned remaining = MAX_TCP_OPTION_SPACE; + unsigned int remaining = MAX_TCP_OPTION_SPACE; u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? tcp_cookie_size_check(cvp->cookie_desired) : 0; @@ -663,15 +663,15 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, } /* Set up TCP options for SYN-ACKs. */ -static unsigned tcp_synack_options(struct sock *sk, +static unsigned int tcp_synack_options(struct sock *sk, struct request_sock *req, - unsigned mss, struct sk_buff *skb, + unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5, struct tcp_extend_values *xvp) { struct inet_request_sock *ireq = inet_rsk(req); - unsigned remaining = MAX_TCP_OPTION_SPACE; + unsigned int remaining = MAX_TCP_OPTION_SPACE; u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? xvp->cookie_plus : 0; @@ -742,13 +742,13 @@ static unsigned tcp_synack_options(struct sock *sk, /* Compute TCP options for ESTABLISHED sockets. This is not the * final wire format yet. */ -static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, +static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; struct tcp_sock *tp = tcp_sk(sk); - unsigned size = 0; + unsigned int size = 0; unsigned int eff_sacks; #ifdef CONFIG_TCP_MD5SIG @@ -770,9 +770,9 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; if (unlikely(eff_sacks)) { - const unsigned remaining = MAX_TCP_OPTION_SPACE - size; + const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; opts->num_sack_blocks = - min_t(unsigned, eff_sacks, + min_t(unsigned int, eff_sacks, (remaining - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK); size += TCPOLEN_SACK_BASE_ALIGNED + @@ -801,7 +801,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, struct tcp_sock *tp; struct tcp_skb_cb *tcb; struct tcp_out_options opts; - unsigned tcp_options_size, tcp_header_size; + unsigned int tcp_options_size, tcp_header_size; struct tcp_md5sig_key *md5; struct tcphdr *th; int err; @@ -1258,7 +1258,7 @@ unsigned int tcp_current_mss(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); const struct dst_entry *dst = __sk_dst_get(sk); u32 mss_now; - unsigned header_len; + unsigned int header_len; struct tcp_out_options opts; struct tcp_md5sig_key *md5; @@ -1389,7 +1389,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp) */ static inline int tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb, - unsigned mss_now, int nonagle) + unsigned int mss_now, int nonagle) { return skb->len < mss_now && ((nonagle & TCP_NAGLE_CORK) || diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index a8df7052e0b6..4526fe68e60e 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -91,7 +91,7 @@ static inline int tcp_probe_avail(void) * Note: arguments must match tcp_rcv_established()! 
*/ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, - struct tcphdr *th, unsigned len) + struct tcphdr *th, unsigned int len) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 381ea5115142..3430e8fc18de 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -207,7 +207,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, if (!snum) { int low, high, remaining; - unsigned rand; + unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 7981bde57575..d051e5f4bf34 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -8,7 +8,7 @@ #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) -static inline unsigned ipv6_addr_scope2type(unsigned scope) +static inline unsigned int ipv6_addr_scope2type(unsigned int scope) { switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index f6210d6fd7d8..7fba35aea06c 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -515,7 +515,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) u8 nexthdr = ipv6_hdr(skb)->nexthdr; while (off <= opt->lastopt) { - unsigned len; + unsigned int len; u8 *ptr = nh + off; switch (nexthdr) { diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index b7867a1215b1..1dd632971bce 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -705,9 +705,9 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v) struct ip6_flowlabel *fl = v; seq_printf(seq, "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", - (unsigned)ntohl(fl->label), + (unsigned int)ntohl(fl->label), fl->share, - (unsigned)fl->owner, + (int)fl->owner, atomic_read(&fl->users), fl->linger/HZ, (long)(fl->expires - jiffies)/HZ, diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index aa21da6a09cd..e25b0fdd935c 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -198,7 +198,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p) { const struct in6_addr *remote = &p->raddr; const struct in6_addr *local = &p->laddr; - unsigned h = 0; + unsigned int h = 0; int prio = 0; if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ca1af0760c4c..ba6d13d1f1e1 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -942,7 +942,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, } static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen, unsigned flags) + char __user *optval, int __user *optlen, unsigned int flags) { struct ipv6_pinfo *np = inet6_sk(sk); int len; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 9d4e15559319..d4e350f72bbb 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -396,7 +396,7 @@ ip6t_do_table(struct sk_buff *skb, if (v < 0) { /* Pop from stack? 
*/ if (v != XT_RETURN) { - verdict = (unsigned)(-v) - 1; + verdict = (unsigned int)(-v) - 1; break; } if (*stackptr <= origptr) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8c5df6f3a2de..f910cce69c9f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -82,7 +82,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, int ifindex, - unsigned pref); + unsigned int pref); static struct rt6_info *rt6_get_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, int ifindex); @@ -1870,7 +1870,7 @@ out: static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, int ifindex, - unsigned pref) + unsigned int pref) { struct fib6_config cfg = { .fc_table = RT6_TABLE_INFO, @@ -2200,10 +2200,9 @@ void rt6_ifdown(struct net *net, struct net_device *dev) icmp6_clean_all(fib6_ifdown, &adn); } -struct rt6_mtu_change_arg -{ +struct rt6_mtu_change_arg { struct net_device *dev; - unsigned mtu; + unsigned int mtu; }; static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) @@ -2245,7 +2244,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) return 0; } -void rt6_mtu_change(struct net_device *dev, unsigned mtu) +void rt6_mtu_change(struct net_device *dev, unsigned int mtu) { struct rt6_mtu_change_arg arg = { .dev = dev, diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 4fe1db12d2a3..ee5a7065aacc 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -68,9 +68,9 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; -static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) +static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) { - unsigned h; + unsigned int h; h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); h ^= h >> 16; @@ -80,7 +80,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) return h; } -static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi) +static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi) { return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; } diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c index 77c5e6499f8f..d0667d68351d 100644 --- a/net/irda/ircomm/ircomm_tty_ioctl.c +++ b/net/irda/ircomm/ircomm_tty_ioctl.c @@ -54,7 +54,7 @@ */ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) { - unsigned cflag, cval; + unsigned int cflag, cval; int baud; IRDA_DEBUG(2, "%s()\n", __func__ ); diff --git a/net/key/af_key.c b/net/key/af_key.c index 7e5d927b576f..34e418508a67 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1714,7 +1714,7 @@ static int key_notify_sa_flush(const struct km_event *c) static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); - unsigned proto; + unsigned int proto; struct km_event c; struct xfrm_audit audit_info; int err, err2; @@ -3547,7 +3547,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb, goto out; err = -EMSGSIZE; - if ((unsigned)len > sk->sk_sndbuf - 32) + if ((unsigned int)len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a16a48e79fab..09e4a38b4f43 
100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -54,15 +54,15 @@ struct l2tp_tunnel; */ struct l2tp_session_cfg { enum l2tp_pwtype pw_type; - unsigned data_seq:2; /* data sequencing level + unsigned int data_seq:2; /* data sequencing level * 0 => none, 1 => IP only, * 2 => all */ - unsigned recv_seq:1; /* expect receive packets with + unsigned int recv_seq:1; /* expect receive packets with * sequence numbers? */ - unsigned send_seq:1; /* send packets with sequence + unsigned int send_seq:1; /* send packets with sequence * numbers? */ - unsigned lns_mode:1; /* behave as LNS? LAC enables + unsigned int lns_mode:1; /* behave as LNS? LAC enables * sequence numbers under * control of LNS. */ int debug; /* bitmask of debug message @@ -107,15 +107,15 @@ struct l2tp_session { char name[32]; /* for logging */ char ifname[IFNAMSIZ]; - unsigned data_seq:2; /* data sequencing level + unsigned int data_seq:2; /* data sequencing level * 0 => none, 1 => IP only, * 2 => all */ - unsigned recv_seq:1; /* expect receive packets with + unsigned int recv_seq:1; /* expect receive packets with * sequence numbers? */ - unsigned send_seq:1; /* send packets with sequence + unsigned int send_seq:1; /* send packets with sequence * numbers? */ - unsigned lns_mode:1; /* behave as LNS? LAC enables + unsigned int lns_mode:1; /* behave as LNS? LAC enables * sequence numbers under * control of LNS. */ int debug; /* bitmask of debug message diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index b9bef2c75026..17bc85d5b7ba 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -518,7 +518,7 @@ static int llc_ui_listen(struct socket *sock, int backlog) if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 0; - if (!(unsigned)backlog) /* BSDism */ + if (!(unsigned int)backlog) /* BSDism */ backlog = 1; sk->sk_max_ack_backlog = backlog; if (sk->sk_state != TCP_LISTEN) { diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index eb66b9790a6f..819c342f5b30 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1618,7 +1618,7 @@ static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = { static int ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) { - unsigned *op; + unsigned int *op; void *data; int copylen = *len, ret = 0; @@ -1626,7 +1626,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) return -EPERM; if (optval != SO_IP_SET) return -EBADF; - if (*len < sizeof(unsigned)) + if (*len < sizeof(unsigned int)) return -EINVAL; data = vmalloc(*len); @@ -1636,7 +1636,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) ret = -EFAULT; goto done; } - op = (unsigned *) data; + op = (unsigned int *) data; if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 52856178c9d7..64f9e8f13207 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c @@ -313,7 +313,7 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) * Assumes already checked proto==IPPROTO_TCP and diff!=0. 
*/ static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, - unsigned flag, __u32 seq, int diff) + unsigned int flag, __u32 seq, int diff) { /* spinlock is to keep updating cp->flags atomic */ spin_lock(&cp->lock); diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 29fa5badde75..4a09b7873003 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -86,42 +86,42 @@ struct ip_vs_aligned_lock static struct ip_vs_aligned_lock __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned; -static inline void ct_read_lock(unsigned key) +static inline void ct_read_lock(unsigned int key) { read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_read_unlock(unsigned key) +static inline void ct_read_unlock(unsigned int key) { read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_lock(unsigned key) +static inline void ct_write_lock(unsigned int key) { write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_unlock(unsigned key) +static inline void ct_write_unlock(unsigned int key) { write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_read_lock_bh(unsigned key) +static inline void ct_read_lock_bh(unsigned int key) { read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_read_unlock_bh(unsigned key) +static inline void ct_read_unlock_bh(unsigned int key) { read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_lock_bh(unsigned key) +static inline void ct_write_lock_bh(unsigned int key) { write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } -static inline void ct_write_unlock_bh(unsigned key) +static inline void ct_write_unlock_bh(unsigned int key) { write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); } @@ -130,7 +130,7 @@ static inline void ct_write_unlock_bh(unsigned key) /* * Returns hash value for IPVS connection entry */ -static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto, +static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto, const union nf_inet_addr *addr, __be16 port) { @@ -188,7 +188,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp) */ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) { - unsigned hash; + unsigned int hash; int ret; if (cp->flags & IP_VS_CONN_F_ONE_PACKET) @@ -224,7 +224,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) */ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) { - unsigned hash; + unsigned int hash; int ret; /* unhash it and decrease its reference counter */ @@ -257,7 +257,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) static inline struct ip_vs_conn * __ip_vs_conn_in_get(const struct ip_vs_conn_param *p) { - unsigned hash; + unsigned int hash; struct ip_vs_conn *cp; struct hlist_node *n; @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto); /* Get reference to connection template */ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) { - unsigned hash; + unsigned int hash; struct ip_vs_conn *cp; struct hlist_node *n; @@ -394,7 +394,7 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) * p->vaddr, p->vport: pkt dest address (foreign host) */ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) { - unsigned 
hash; + unsigned int hash; struct ip_vs_conn *cp, *ret=NULL; struct hlist_node *n; @@ -824,7 +824,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) */ struct ip_vs_conn * ip_vs_conn_new(const struct ip_vs_conn_param *p, - const union nf_inet_addr *daddr, __be16 dport, unsigned flags, + const union nf_inet_addr *daddr, __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark) { struct ip_vs_conn *cp; @@ -1057,7 +1057,7 @@ static const struct file_operations ip_vs_conn_fops = { .release = seq_release_net, }; -static const char *ip_vs_origin_name(unsigned flags) +static const char *ip_vs_origin_name(unsigned int flags) { if (flags & IP_VS_CONN_F_SYNC) return "SYNC"; @@ -1169,7 +1169,7 @@ void ip_vs_random_dropentry(struct net *net) * Randomly scan 1/32 of the whole table every second */ for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { - unsigned hash = net_random() & ip_vs_conn_tab_mask; + unsigned int hash = net_random() & ip_vs_conn_tab_mask; struct hlist_node *n; /* diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 2555816e7788..b5a5c7363f83 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -80,7 +80,7 @@ static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0); #define icmp_id(icmph) (((icmph)->un).echo.id) #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) -const char *ip_vs_proto_name(unsigned proto) +const char *ip_vs_proto_name(unsigned int proto) { static char buf[20]; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 964d426d237f..b8d0df701227 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -265,11 +265,11 @@ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE]; /* * Returns hash value for virtual service */ -static inline unsigned -ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, +static inline unsigned int +ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto, const union nf_inet_addr *addr, __be16 port) { - register unsigned porth = ntohs(port); + register unsigned int porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 @@ -286,7 +286,7 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, /* * Returns hash value of fwmark for virtual service lookup */ -static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) +static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) { return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK; } @@ -298,7 +298,7 @@ static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) */ static int ip_vs_svc_hash(struct ip_vs_service *svc) { - unsigned hash; + unsigned int hash; if (svc->flags & IP_VS_SVC_F_HASHED) { pr_err("%s(): request for already hashed, called from %pF\n", @@ -361,7 +361,7 @@ static inline struct ip_vs_service * __ip_vs_service_find(struct net *net, int af, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport) { - unsigned hash; + unsigned int hash; struct ip_vs_service *svc; /* Check for "full" addressed entries */ @@ -388,7 +388,7 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol, static inline struct ip_vs_service * __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark) { - unsigned hash; + unsigned int hash; struct ip_vs_service *svc; /* Check for fwmark addressed entries */ @@ -489,11 +489,11 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest) /* * Returns hash value for real service */ -static inline unsigned 
ip_vs_rs_hashkey(int af, +static inline unsigned int ip_vs_rs_hashkey(int af, const union nf_inet_addr *addr, __be16 port) { - register unsigned porth = ntohs(port); + register unsigned int porth = ntohs(port); __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 @@ -512,7 +512,7 @@ static inline unsigned ip_vs_rs_hashkey(int af, */ static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) { - unsigned hash; + unsigned int hash; if (!list_empty(&dest->d_list)) { return 0; @@ -555,7 +555,7 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, __be16 dport) { struct netns_ipvs *ipvs = net_ipvs(net); - unsigned hash; + unsigned int hash; struct ip_vs_dest *dest; /* @@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, struct ip_vs_dest **dest_p) { struct ip_vs_dest *dest; - unsigned atype; + unsigned int atype; EnterFunction(2); @@ -1867,7 +1867,7 @@ struct ip_vs_iter { * Write the contents of the VS rule table to a PROCfs file. * (It is kept just for backward compatibility) */ -static inline const char *ip_vs_fwd_name(unsigned flags) +static inline const char *ip_vs_fwd_name(unsigned int flags) { switch (flags & IP_VS_CONN_F_FWD_MASK) { case IP_VS_CONN_F_LOCALNODE: diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index 1c269e56200a..1a53a7a2fff0 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -68,7 +68,7 @@ struct ip_vs_dh_bucket { /* * Returns hash value for IPVS DH entry */ -static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) +static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 538d74ee4f68..debb8c71422c 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -177,7 +177,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, __be16 port; struct ip_vs_conn *n_cp; char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ - unsigned buf_len; + unsigned int buf_len; int ret = 0; enum ip_conntrack_info ctinfo; struct nf_conn *ct; diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 0f16283fd058..27c24f156c28 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -142,7 +142,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) /* * Returns hash value for IPVS LBLC entry */ -static inline unsigned +static inline unsigned int ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; @@ -163,7 +163,7 @@ ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr) static void ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) { - unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr); + unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr); list_add(&en->list, &tbl->bucket[hash]); atomic_inc(&tbl->entries); @@ -178,7 +178,7 @@ static inline struct ip_vs_lblc_entry * ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, const union nf_inet_addr *addr) { - unsigned hash = ip_vs_lblc_hashkey(af, addr); + unsigned int hash = ip_vs_lblc_hashkey(af, addr); struct ip_vs_lblc_entry *en; list_for_each_entry(en, &tbl->bucket[hash], list) diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index eec797f8cce7..749875611ed6 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c 
@@ -311,7 +311,7 @@ static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) /* * Returns hash value for IPVS LBLCR entry */ -static inline unsigned +static inline unsigned int ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; @@ -332,7 +332,7 @@ ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr) static void ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) { - unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr); + unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr); list_add(&en->list, &tbl->bucket[hash]); atomic_inc(&tbl->entries); @@ -347,7 +347,7 @@ static inline struct ip_vs_lblcr_entry * ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *addr) { - unsigned hash = ip_vs_lblcr_hashkey(af, addr); + unsigned int hash = ip_vs_lblcr_hashkey(af, addr); struct ip_vs_lblcr_entry *en; list_for_each_entry(en, &tbl->bucket[hash], list) diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index f843a8833250..6eda11de8c05 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -48,7 +48,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; */ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) { - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); pp->next = ip_vs_proto_table[hash]; ip_vs_proto_table[hash] = pp; @@ -69,7 +69,7 @@ static int register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) { struct netns_ipvs *ipvs = net_ipvs(net); - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); struct ip_vs_proto_data *pd = kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); @@ -94,7 +94,7 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) { struct ip_vs_protocol **pp_p; - unsigned hash = IP_VS_PROTO_HASH(pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); pp_p = &ip_vs_proto_table[hash]; for (; *pp_p; pp_p = &(*pp_p)->next) { @@ -117,7 +117,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_proto_data **pd_p; - unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol); + unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol); pd_p = &ipvs->proto_data_table[hash]; for (; *pd_p; pd_p = &(*pd_p)->next) { @@ -139,7 +139,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd) struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) { struct ip_vs_protocol *pp; - unsigned hash = IP_VS_PROTO_HASH(proto); + unsigned int hash = IP_VS_PROTO_HASH(proto); for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { if (pp->protocol == proto) @@ -157,7 +157,7 @@ struct ip_vs_proto_data * __ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) { struct ip_vs_proto_data *pd; - unsigned hash = IP_VS_PROTO_HASH(proto); + unsigned int hash = IP_VS_PROTO_HASH(proto); for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { if (pd->pp->protocol == proto) diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 069e8d4d5c01..91e97ee049be 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -70,7 +70,7 @@ struct ip_vs_sh_bucket { /* * Returns hash value for IPVS SH entry */ -static inline unsigned 
ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) +static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 8a0d6d6889f0..f4e0b6cf8246 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -839,7 +839,7 @@ static void ip_vs_process_message_v0(struct net *net, const char *buffer, p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); for (i=0; inr_conns; i++) { - unsigned flags, state; + unsigned int flags, state; if (p + SIMPLE_CONN_SIZE > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); @@ -1109,7 +1109,7 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer, for (i=0; itimeout)) return 0; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index e6ddde165612..3e797d1fcb94 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -103,7 +103,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group) EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, - unsigned group, int echo, gfp_t flags) + unsigned int group, int echo, gfp_t flags) { return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); } diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index d2ff15a2412b..fc0d6dbe5d17 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -314,7 +314,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par) #ifdef CONFIG_PROC_FS struct proc_dir_entry *pde; #endif - unsigned i; + unsigned int i; int ret = -EINVAL; if (unlikely(!hash_rnd_inited)) { diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 1c51d7a58f0b..743262becd6e 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -97,7 +97,7 @@ static int nr_rebuild_header(struct sk_buff *skb) static int nr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); diff --git a/net/nfc/core.c b/net/nfc/core.c index 295d129864d2..32a7b615e65f 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -475,12 +475,12 @@ EXPORT_SYMBOL(nfc_class); static int match_idx(struct device *d, void *data) { struct nfc_dev *dev = to_nfc_dev(d); - unsigned *idx = data; + unsigned int *idx = data; return dev->idx == *idx; } -struct nfc_dev *nfc_get_device(unsigned idx) +struct nfc_dev *nfc_get_device(unsigned int idx) { struct device *d; diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index ec8794c1099c..061a830b0a88 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -127,7 +127,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); int nfc_genl_dep_link_down_event(struct nfc_dev *dev); -struct nfc_dev *nfc_get_device(unsigned idx); +struct nfc_dev *nfc_get_device(unsigned int idx); static inline void nfc_put_device(struct nfc_dev *dev) { diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index c1068aed03d1..5920bda4ab6b 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -140,9 +140,9 @@ int ovs_netdev_get_ifindex(const struct vport *vport) return netdev_vport->dev->ifindex; } -static unsigned packet_length(const struct sk_buff *skb) +static unsigned int packet_length(const struct sk_buff *skb) { - unsigned length = 
skb->len - ETH_HLEN; + unsigned int length = skb->len - ETH_HLEN; if (skb->protocol == htons(ETH_P_8021Q)) length -= VLAN_HLEN; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4f2c0df79563..d2b5f6591f1a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1764,7 +1764,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { - unsigned maclen = skb_network_offset(skb); + unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index d65f699fbf34..779ce4ff92ec 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -129,7 +129,7 @@ static const struct net_proto_family phonet_proto_family = { /* Phonet device header operations */ static int pn_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned len) + const void *saddr, unsigned int len) { u8 *media = skb_push(skb, 1); diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 9726fe684ab8..9dd4f926f7d1 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -273,7 +273,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) hdr = pnp_hdr(skb); if (hdr->data[0] != PN_PEP_TYPE_COMMON) { LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", - (unsigned)hdr->data[0]); + (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } @@ -305,7 +305,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) default: LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", - (unsigned)hdr->data[1]); + (unsigned int)hdr->data[1]); return -EOPNOTSUPP; } if (wake) @@ -478,9 +478,9 @@ static void pipe_destruct(struct sock *sk) skb_queue_purge(&pn->ctrlreq_queue); } -static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n) +static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n) { - unsigned i; + unsigned int i; u8 final_fc = PN_NO_FLOW_CONTROL; for (i = 0; i < n; i++) { diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index da564efe4f5b..36f75a9e2c3d 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -268,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev) static void phonet_route_autodel(struct net_device *dev) { struct phonet_net *pnn = phonet_pernet(dev_net(dev)); - unsigned i; + unsigned int i; DECLARE_BITMAP(deleted, 64); /* Remove left-over Phonet routes */ diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 4c7eff30dfa9..89cfa9ce4939 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -58,7 +58,7 @@ static struct { void __init pn_sock_init(void) { - unsigned i; + unsigned int i; for (i = 0; i < PN_HASHSIZE; i++) INIT_HLIST_HEAD(pnsocks.hlist + i); @@ -116,7 +116,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) { struct hlist_head *hlist = pnsocks.hlist; - unsigned h; + unsigned int h; rcu_read_lock(); for (h = 0; h < PN_HASHSIZE; h++) { @@ -545,7 +545,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) struct hlist_head *hlist = pnsocks.hlist; struct hlist_node *node; struct sock *sknode; - unsigned h; + unsigned int h; for (h = 0; h < PN_HASHSIZE; h++) { sk_for_each_rcu(sknode, node, hlist) { @@ -710,7 +710,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res) void pn_sock_unbind_all_res(struct 
sock *sk) { - unsigned res, match = 0; + unsigned int res, match = 0; mutex_lock(&resource_mutex); for (res = 0; res < 256; res++) { @@ -732,7 +732,7 @@ void pn_sock_unbind_all_res(struct sock *sk) static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) { struct net *net = seq_file_net(seq); - unsigned i; + unsigned int i; if (!net_eq(net, &init_net)) return NULL; @@ -750,7 +750,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk) { struct net *net = seq_file_net(seq); - unsigned i; + unsigned int i; BUG_ON(!net_eq(net, &init_net)); diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c index 740bf209dd97..8bed7675b3f4 100644 --- a/net/phonet/sysctl.c +++ b/net/phonet/sysctl.c @@ -50,7 +50,8 @@ static void set_local_port_range(int range[2]) void phonet_get_local_port_range(int *min, int *max) { - unsigned seq; + unsigned int seq; + do { seq = read_seqbegin(&local_port_range_lock); if (min) diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 906cc05bba63..28dbdb911b85 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -37,7 +37,7 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - const void *daddr, const void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned int len) { unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2); diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index 47f1fdb346b0..7ca57741b2fb 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c @@ -399,7 +399,7 @@ int rose_parse_facilities(unsigned char *p, unsigned packet_len, facilities_len = *p++; - if (facilities_len == 0 || (unsigned)facilities_len > packet_len) + if (facilities_len == 0 || (unsigned int)facilities_len > packet_len) return 0; while (facilities_len >= 3 && *p == 0x00) { diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 74c064c0dfdd..05996d0dd828 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -26,7 +26,7 @@ MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_RXRPC); -unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; +unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO; module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "RxRPC debugging mask"); @@ -513,7 +513,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); - unsigned min_sec_level; + unsigned int min_sec_level; int ret; _enter(",%d,%d,,%d", level, optname, optlen); @@ -555,13 +555,13 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, case RXRPC_MIN_SECURITY_LEVEL: ret = -EINVAL; - if (optlen != sizeof(unsigned)) + if (optlen != sizeof(unsigned int)) goto error; ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNCONNECTED) goto error; ret = get_user(min_sec_level, - (unsigned __user *) optval); + (unsigned int __user *) optval); if (ret < 0) goto error; ret = -EINVAL; diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index c3126e864f3c..e4d9cbcff402 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -19,7 +19,7 @@ #include #include "ar-internal.h" -static unsigned rxrpc_ack_defer = 1; +static unsigned int rxrpc_ack_defer = 1; static const char *const rxrpc_acks[] = { "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", @@ -548,11 +548,11 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call) * 
process the extra information that may be appended to an ACK packet */ static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, - unsigned latest, int nAcks) + unsigned int latest, int nAcks) { struct rxrpc_ackinfo ackinfo; struct rxrpc_peer *peer; - unsigned mtu; + unsigned int mtu; if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) { _leave(" [no ackinfo]"); diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index bf656c230ba9..a3bbb360a3f9 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c @@ -38,8 +38,8 @@ const char *const rxrpc_call_states[] = { struct kmem_cache *rxrpc_call_jar; LIST_HEAD(rxrpc_calls); DEFINE_RWLOCK(rxrpc_call_lock); -static unsigned rxrpc_call_max_lifetime = 60; -static unsigned rxrpc_dead_call_timeout = 2; +static unsigned int rxrpc_call_max_lifetime = 60; +static unsigned int rxrpc_dead_call_timeout = 2; static void rxrpc_destroy_call(struct work_struct *work); static void rxrpc_call_life_expired(unsigned long _call); diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 1a2b0633fece..529572f18d1f 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -76,7 +76,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, * --ANK */ // ret = -ENOBUFS; // if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= -// (unsigned) sk->sk_rcvbuf) +// (unsigned int) sk->sk_rcvbuf) // goto out; ret = sk_filter(sk, skb); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 8e22bd345e71..a693aca2ae2e 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -83,7 +83,7 @@ struct rxrpc_skb_priv { struct rxrpc_call *call; /* call with which associated */ unsigned long resend_at; /* time in jiffies at which to resend */ union { - unsigned offset; /* offset into buffer of next read */ + unsigned int offset; /* offset into buffer of next read */ int remain; /* amount of space remaining for next write */ u32 error; /* network error code */ bool need_resend; /* T if needs resending */ @@ -176,9 +176,9 @@ struct rxrpc_peer { struct list_head error_targets; /* targets for net error distribution */ spinlock_t lock; /* access lock */ atomic_t usage; - unsigned if_mtu; /* interface MTU for this peer */ - unsigned mtu; /* network MTU for this peer */ - unsigned maxdata; /* data size (MTU - hdrsize) */ + unsigned int if_mtu; /* interface MTU for this peer */ + unsigned int mtu; /* network MTU for this peer */ + unsigned int maxdata; /* data size (MTU - hdrsize) */ unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ int debug_id; /* debug ID for printks */ int net_error; /* network error distributed */ @@ -187,8 +187,8 @@ struct rxrpc_peer { /* calculated RTT cache */ #define RXRPC_RTT_CACHE_SIZE 32 suseconds_t rtt; /* current RTT estimate (in uS) */ - unsigned rtt_point; /* next entry at which to insert */ - unsigned rtt_usage; /* amount of cache actually used */ + unsigned int rtt_point; /* next entry at which to insert */ + unsigned int rtt_usage; /* amount of cache actually used */ suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ }; @@ -271,7 +271,7 @@ struct rxrpc_connection { } state; int error; /* error code for local abort */ int debug_id; /* debug ID for printks */ - unsigned call_counter; /* call ID counter */ + unsigned int call_counter; /* call ID counter */ atomic_t serial; /* packet serial number counter */ atomic_t hi_serial; /* highest serial number received */ u8 avail_calls; /* number of calls available */ @@ -592,7 +592,7 @@ extern 
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, /* * debug tracing */ -extern unsigned rxrpc_debug; +extern unsigned int rxrpc_debug; #define dbgprintk(FMT,...) \ printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index ae3a035f5390..8b1f9f49960f 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -82,7 +82,7 @@ static int rxrpc_vet_description_s(const char *desc) * - the caller guarantees we have at least 4 words */ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, - unsigned toklen) + unsigned int toklen) { struct rxrpc_key_token *token, **pptoken; size_t plen; @@ -210,10 +210,10 @@ static void rxrpc_rxk5_free(struct rxk5_key *rxk5) */ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, const __be32 **_xdr, - unsigned *_toklen) + unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, n_parts, loop, tmp; + unsigned int toklen = *_toklen, n_parts, loop, tmp; /* there must be at least one name, and at least #names+1 length * words */ @@ -286,10 +286,10 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, size_t max_data_size, const __be32 **_xdr, - unsigned *_toklen) + unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, len; + unsigned int toklen = *_toklen, len; /* there must be at least one tag and one length word */ if (toklen <= 8) @@ -330,11 +330,11 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td, u8 max_n_elem, size_t max_elem_size, const __be32 **_xdr, - unsigned *_toklen) + unsigned int *_toklen) { struct krb5_tagged_data *td; const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, n_elem, loop; + unsigned int toklen = *_toklen, n_elem, loop; int ret; /* there must be at least one count */ @@ -380,10 +380,10 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td, * extract a krb5 ticket */ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, - const __be32 **_xdr, unsigned *_toklen) + const __be32 **_xdr, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned toklen = *_toklen, len; + unsigned int toklen = *_toklen, len; /* there must be at least one length word */ if (toklen <= 4) @@ -419,7 +419,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, * - the caller guarantees we have at least 4 words */ static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr, - unsigned toklen) + unsigned int toklen) { struct rxrpc_key_token *token, **pptoken; struct rxk5_key *rxk5; @@ -549,7 +549,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal { const __be32 *xdr = data, *token; const char *cp; - unsigned len, tmp, loop, ntoken, toklen, sec_ix; + unsigned int len, tmp, loop, ntoken, toklen, sec_ix; int ret; _enter(",{%x,%x,%x,%x},%zu", diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 7635107726ce..f226709ebd8f 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -31,7 +31,7 @@ #define REALM_SZ 40 /* size of principal's auth domain */ #define SNAME_SZ 40 /* size of service name */ -unsigned rxrpc_debug; +unsigned int rxrpc_debug; module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "rxkad debugging mask"); @@ -207,7 +207,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct rxrpc_crypt iv; struct scatterlist sg[16]; 
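One place where the explicit spelling reads more naturally is the rxrpc debug mask above: the variable is exported with module_param_named() using the "uint" parameter type, which is the module-parameter name for unsigned int. A minimal sketch of that pairing (hypothetical module and identifiers, illustrative only, not taken from the patch):

#include <linux/module.h>

static unsigned int example_debug_mask;	/* hypothetical counterpart of rxrpc_debug */
module_param_named(debug, example_debug_mask, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "example debugging mask (illustrative only)");
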
struct sk_buff *trailer; - unsigned len; + unsigned int len; u16 check; int nsg; @@ -826,7 +826,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, struct rxrpc_crypt iv, key; struct scatterlist sg[1]; struct in_addr addr; - unsigned life; + unsigned int life; time_t issue, now; bool little_endian; int ret; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index acd2edbc073e..5bc9ab161b37 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1408,7 +1408,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc) } /* Increase asoc's rwnd by len and send any window update SACK if needed. */ -void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) +void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) { struct sctp_chunk *sack; struct timer_list *timer; @@ -1465,7 +1465,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) } /* Decrease asoc's rwnd by len. */ -void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) +void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) { int rx_count; int over = 0; diff --git a/net/sctp/output.c b/net/sctp/output.c index 817174eb5f41..69534c5f8afa 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -663,8 +663,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, */ if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) && inflight && sctp_state(asoc, ESTABLISHED)) { - unsigned max = transport->pathmtu - packet->overhead; - unsigned len = chunk->skb->len + q->out_qlen; + unsigned int max = transport->pathmtu - packet->overhead; + unsigned int len = chunk->skb->len + q->out_qlen; /* Check whether this chunk and all the rest of pending * data will fit or delay in hopes of bundling a full diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index cfeb1d4a1ee6..a0fa19f5650c 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1147,7 +1147,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) __u32 sack_ctsn, ctsn, tsn; __u32 highest_tsn, highest_new_tsn; __u32 sack_a_rwnd; - unsigned outstanding; + unsigned int outstanding; struct sctp_transport *primary = asoc->peer.primary_path; int count_of_newacks = 0; int gap_ack_blocks; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 1ff51c9d18d5..fbb374c65945 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -524,7 +524,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, /* Worker routine to handle INIT command failure. */ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, - unsigned error) + unsigned int error) { struct sctp_ulpevent *event; @@ -550,7 +550,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, sctp_event_t event_type, sctp_subtype_t subtype, struct sctp_chunk *chunk, - unsigned error) + unsigned int error) { struct sctp_ulpevent *event; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 891f5db8cc31..a147b4d307d2 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2410,7 +2410,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - unsigned len; + unsigned int len; __be16 error = SCTP_ERROR_NO_ERROR; /* See if we have an error cause code in the chunk. 
*/ @@ -2446,7 +2446,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - unsigned len; + unsigned int len; __be16 error = SCTP_ERROR_NO_ERROR; if (!sctp_vtag_verify_either(chunk, asoc)) diff --git a/net/socket.c b/net/socket.c index 851edcd6b098..d6c1af9ae7b2 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1479,7 +1479,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; - if ((unsigned)backlog > somaxconn) + if ((unsigned int)backlog > somaxconn) backlog = somaxconn; err = security_socket_listen(sock, backlog); @@ -1691,7 +1691,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, - unsigned, flags, struct sockaddr __user *, addr, + unsigned int, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; @@ -1738,7 +1738,7 @@ out: */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, - unsigned, flags) + unsigned int, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } @@ -1750,7 +1750,7 @@ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, */ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, - unsigned, flags, struct sockaddr __user *, addr, + unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; @@ -1795,7 +1795,7 @@ out: */ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, - unsigned flags) + unsigned int flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } @@ -1897,7 +1897,7 @@ struct used_address { }; static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, - struct msghdr *msg_sys, unsigned flags, + struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { struct compat_msghdr __user *msg_compat = @@ -2014,7 +2014,7 @@ out: * BSD sendmsg interface */ -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) +SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; @@ -2096,7 +2096,7 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, } static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, - struct msghdr *msg_sys, unsigned flags, int nosec) + struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; @@ -3223,7 +3223,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, return -ENOIOCTLCMD; } -static long compat_sock_ioctl(struct file *file, unsigned cmd, +static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 8eff8c32d1b9..d3611f11a8df 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -624,7 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, ctx->seq_send = ctx->seq_send64; if (ctx->seq_send64 != ctx->seq_send) { dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, - (long unsigned)ctx->seq_send64, ctx->seq_send); + (unsigned long)ctx->seq_send64, ctx->seq_send); p = ERR_PTR(-EINVAL); goto out_err; } diff --git 
a/net/sunrpc/cache.c b/net/sunrpc/cache.c index de0b0f39d9d8..47ad2666fdf6 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1273,7 +1273,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) __acquires(cd->hash_lock) { loff_t n = *pos; - unsigned hash, entry; + unsigned int hash, entry; struct cache_head *ch; struct cache_detail *cd = ((struct handle*)m->private)->cd; diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c index dd824341c349..08881d0c9672 100644 --- a/net/sunrpc/timer.c +++ b/net/sunrpc/timer.c @@ -34,7 +34,7 @@ void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo) { unsigned long init = 0; - unsigned i; + unsigned int i; rt->timeo = timeo; @@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(rpc_init_rtt); * NB: When computing the smoothed RTT and standard deviation, * be careful not to produce negative intermediate results. */ -void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m) +void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m) { long *srtt, *sdrtt; @@ -106,7 +106,7 @@ EXPORT_SYMBOL_GPL(rpc_update_rtt); * read, write, commit - A+4D * other - timeo */ -unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer) +unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer) { unsigned long res; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index b97a3dd9a60a..fddcccfcdf76 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1204,7 +1204,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data) { int i, ret = 0; - unsigned page_len, thislen, page_offset; + unsigned int page_len, thislen, page_offset; struct scatterlist sg[1]; sg_init_table(sg, 1); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 0cbcd1ab49ab..6fe2dcead150 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -783,7 +783,7 @@ static void xprt_update_rtt(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_rtt *rtt = task->tk_client->cl_rtt; - unsigned timer = task->tk_msg.rpc_proc->p_timer; + unsigned int timer = task->tk_msg.rpc_proc->p_timer; long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); if (timer) { diff --git a/net/tipc/link.c b/net/tipc/link.c index b4b9b30167a3..33cecd6fc37d 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -612,7 +612,7 @@ static void link_activate(struct tipc_link *l_ptr) * @event: state machine event to process */ -static void link_state_event(struct tipc_link *l_ptr, unsigned event) +static void link_state_event(struct tipc_link *l_ptr, unsigned int event) { struct tipc_link *other; u32 cont_intv = l_ptr->continuity_interval; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 29e957f64458..bcb3314ef164 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -535,7 +535,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, (dest->family != AF_TIPC))) return -EINVAL; if ((total_len > TIPC_MAX_USER_MSG_SIZE) || - (m->msg_iovlen > (unsigned)INT_MAX)) + (m->msg_iovlen > (unsigned int)INT_MAX)) return -EMSGSIZE; if (iocb) @@ -647,7 +647,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, return send_msg(iocb, sock, m, total_len); if ((total_len > TIPC_MAX_USER_MSG_SIZE) || - (m->msg_iovlen > (unsigned)INT_MAX)) + (m->msg_iovlen > (unsigned int)INT_MAX)) return -EMSGSIZE; if (iocb) @@ -734,8 +734,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock, goto exit; } - if ((total_len > (unsigned)INT_MAX) || - (m->msg_iovlen > (unsigned)INT_MAX)) { + if ((total_len > 
(unsigned int)INT_MAX) || + (m->msg_iovlen > (unsigned int)INT_MAX)) { res = -EMSGSIZE; goto exit; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index eadb9020cd64..641f2e47f165 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -149,9 +149,10 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) * each socket state is protected by separate spin lock. */ -static inline unsigned unix_hash_fold(__wsum n) +static inline unsigned int unix_hash_fold(__wsum n) { - unsigned hash = (__force unsigned)n; + unsigned int hash = (__force unsigned int)n; + hash ^= hash>>16; hash ^= hash>>8; return hash&(UNIX_HASH_SIZE-1); @@ -200,7 +201,7 @@ static inline void unix_release_addr(struct unix_address *addr) * - if started by zero, it is abstract name. */ -static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp) +static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) { if (len <= sizeof(short) || len > sizeof(*sunaddr)) return -EINVAL; @@ -250,7 +251,7 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk) static struct sock *__unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, - int len, int type, unsigned hash) + int len, int type, unsigned int hash) { struct sock *s; struct hlist_node *node; @@ -273,7 +274,7 @@ found: static inline struct sock *unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, int type, - unsigned hash) + unsigned int hash) { struct sock *s; @@ -760,7 +761,7 @@ out: mutex_unlock(&u->readlock); static struct sock *unix_find_other(struct net *net, struct sockaddr_un *sunname, int len, - int type, unsigned hash, int *error) + int type, unsigned int hash, int *error) { struct sock *u; struct path path; @@ -824,7 +825,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct dentry *dentry = NULL; struct path path; int err; - unsigned hash; + unsigned int hash; struct unix_address *addr; struct hlist_head *list; @@ -964,7 +965,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, struct net *net = sock_net(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; struct sock *other; - unsigned hash; + unsigned int hash; int err; if (addr->sa_family != AF_UNSPEC) { @@ -1062,7 +1063,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; - unsigned hash; + unsigned int hash; int st; int err; long timeo; @@ -1437,7 +1438,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; - unsigned hash; + unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 3c65eae701c4..a6470ac39498 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -187,7 +187,7 @@ out: static void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, - unsigned allowed_states_bm) + unsigned int allowed_states_bm) { if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { printk(KERN_ERR "SW BUG! 
Forbidden state change %u -> %u\n", @@ -425,7 +425,8 @@ static size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size, unsigned char *addr, size_t addr_len) { - unsigned cnt, total; + unsigned int cnt, total; + for (total = cnt = 0; cnt < addr_len; cnt++) total += scnprintf(addr_str + total, addr_str_size - total, "%02x%c", addr[cnt], diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 22adfebaad27..b0eb7aa49b60 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -256,7 +256,7 @@ static const struct iw_ioctl_description standard_ioctl[] = { .max_tokens = sizeof(struct iw_pmksa), }, }; -static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); +static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl); /* * Meta-data about all the additional standard Wireless Extension events @@ -306,7 +306,7 @@ static const struct iw_ioctl_description standard_event[] = { .max_tokens = sizeof(struct iw_pmkid_cand), }, }; -static const unsigned standard_event_num = ARRAY_SIZE(standard_event); +static const unsigned int standard_event_num = ARRAY_SIZE(standard_event); /* Size (in bytes) of various events */ static const int event_type_size[] = { @@ -429,7 +429,7 @@ void wireless_send_event(struct net_device * dev, int hdr_len; /* Size of the event header */ int wrqu_off = 0; /* Offset in wrqu */ /* Don't "optimise" the following variable, it will crash */ - unsigned cmd_index; /* *MUST* be unsigned */ + unsigned int cmd_index; /* *MUST* be unsigned */ struct sk_buff *skb; struct nlmsghdr *nlh; struct nlattr *nla; diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 36384a1fa9f2..66c638730c7a 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -231,7 +231,7 @@ int x25_create_facilities(unsigned char *buffer, } if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) { - unsigned bytecount = (dte_facs->calling_len + 1) >> 1; + unsigned int bytecount = (dte_facs->calling_len + 1) >> 1; *p++ = X25_FAC_CALLING_AE; *p++ = 1 + bytecount; *p++ = dte_facs->calling_len; @@ -240,7 +240,7 @@ int x25_create_facilities(unsigned char *buffer, } if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) { - unsigned bytecount = (dte_facs->called_len % 2) ? + unsigned int bytecount = (dte_facs->called_len % 2) ? dte_facs->called_len / 2 + 1 : dte_facs->called_len / 2; *p++ = X25_FAC_CALLED_AE; diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index 7199d78b2aa1..716502ada53b 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h @@ -45,10 +45,10 @@ static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr, return (h ^ (h >> 16)) & hmask; } -static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr, - const xfrm_address_t *saddr, - unsigned short family, - unsigned int hmask) +static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr, + const xfrm_address_t *saddr, + unsigned short family, + unsigned int hmask) { unsigned int h = family; switch (family) { -- cgit v1.2.2 From 748572162a2bc3ce6f0b215e25ad601c3ec33e77 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 13 Apr 2012 02:37:42 +0000 Subject: bridge: Add br_multicast_start_querier This patch adds the helper br_multicast_start_querier so that the code which starts the queriers in br_multicast_toggle can be reused elsewhere. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- net/bridge/br_multicast.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 020e463f2225..ecabf210d0d2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1689,9 +1689,23 @@ unlock: return err; } -int br_multicast_toggle(struct net_bridge *br, unsigned long val) +static void br_multicast_start_querier(struct net_bridge *br) { struct net_bridge_port *port; + + br_multicast_open(br); + + list_for_each_entry(port, &br->port_list, list) { + if (port->state == BR_STATE_DISABLED || + port->state == BR_STATE_BLOCKING) + continue; + + __br_multicast_enable_port(port); + } +} + +int br_multicast_toggle(struct net_bridge *br, unsigned long val) +{ int err = 0; struct net_bridge_mdb_htable *mdb; @@ -1721,14 +1735,7 @@ rollback: goto rollback; } - br_multicast_open(br); - list_for_each_entry(port, &br->port_list, list) { - if (port->state == BR_STATE_DISABLED || - port->state == BR_STATE_BLOCKING) - continue; - - __br_multicast_enable_port(port); - } + br_multicast_start_querier(br); unlock: spin_unlock_bh(&br->multicast_lock); -- cgit v1.2.2 From c83b8fab06fc8c80d6440649f117bb7541df5fd0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 13 Apr 2012 02:37:42 +0000 Subject: bridge: Restart queries when last querier expires As it stands when we discover that a real querier (one that queries with a non-zero source address) we stop querying. However, even after said querier has fallen off the edge of the earth, we will never restart querying (unless the bridge itself is restarted). This patch fixes this by kicking our own querier into gear when the timer for other queriers expire. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- net/bridge/br_multicast.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index ecabf210d0d2..b3647d090e1f 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -36,6 +36,8 @@ #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) +static void br_multicast_start_querier(struct net_bridge *br); + #if IS_ENABLED(CONFIG_IPV6) static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { @@ -740,6 +742,21 @@ static void br_multicast_local_router_expired(unsigned long data) { } +static void br_multicast_querier_expired(unsigned long data) +{ + struct net_bridge_port *port = (void *)data; + struct net_bridge *br = port->br; + + spin_lock(&br->multicast_lock); + if (!netif_running(br->dev) || br->multicast_disabled) + goto out; + + br_multicast_start_querier(br); + +out: + spin_unlock(&br->multicast_lock); +} + static void __br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *ip) @@ -1562,7 +1579,7 @@ void br_multicast_init(struct net_bridge *br) setup_timer(&br->multicast_router_timer, br_multicast_local_router_expired, 0); setup_timer(&br->multicast_querier_timer, - br_multicast_local_router_expired, 0); + br_multicast_querier_expired, 0); setup_timer(&br->multicast_query_timer, br_multicast_query_expired, (unsigned long)br); } -- cgit v1.2.2 From c5c23260594c5701af66ef754916775ba6a46bbc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 13 Apr 2012 02:37:42 +0000 Subject: bridge: Add multicast_querier toggle and disable queries by default Sending general queries was implemented as an optimisation to speed up convergence on start-up. In order to prevent interference with multicast routers a zero source address has to be used. Unfortunately these packets appear to cause some multicast-aware switches to misbehave, e.g., by disrupting multicast packets to us. Since the multicast snooping feature still functions without sending our own queries, this patch will change the default to not send queries. For those that need queries in order to speed up convergence on start-up, a toggle is provided to restore the previous behaviour. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- net/bridge/br_multicast.c | 20 ++++++++++++++++++++ net/bridge/br_private.h | 2 ++ net/bridge/br_sysfs_br.c | 18 ++++++++++++++++++ 3 files changed, 40 insertions(+) (limited to 'net') diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index b3647d090e1f..708e84f31888 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -783,6 +783,7 @@ static void br_multicast_send_query(struct net_bridge *br, struct br_ip br_group; if (!netif_running(br->dev) || br->multicast_disabled || + !br->multicast_querier || timer_pending(&br->multicast_querier_timer)) return; @@ -1565,6 +1566,7 @@ void br_multicast_init(struct net_bridge *br) br->hash_max = 512; br->multicast_router = 1; + br->multicast_querier = 0; br->multicast_last_member_count = 2; br->multicast_startup_query_count = 2; @@ -1760,6 +1762,24 @@ unlock: return err; } +int br_multicast_set_querier(struct net_bridge *br, unsigned long val) +{ + val = !!val; + + spin_lock_bh(&br->multicast_lock); + if (br->multicast_querier == val) + goto unlock; + + br->multicast_querier = val; + if (val) + br_multicast_start_querier(br); + +unlock: + spin_unlock_bh(&br->multicast_lock); + + return 0; +} + int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) { int err = -ENOENT; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e1d882257877..f8ffd8c49054 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -224,6 +224,7 @@ struct net_bridge unsigned char multicast_router; u8 multicast_disabled:1; + u8 multicast_querier:1; u32 hash_elasticity; u32 hash_max; @@ -417,6 +418,7 @@ extern int br_multicast_set_router(struct net_bridge *br, unsigned long val); extern int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val); extern int br_multicast_toggle(struct net_bridge *br, unsigned long val); +extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val); extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val); static inline bool br_multicast_is_router(struct net_bridge *br) diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 766fd7fb0295..c5c059333eab 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -379,6 +379,23 @@ static ssize_t store_multicast_snooping(struct device *d, static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, show_multicast_snooping, store_multicast_snooping); +static ssize_t show_multicast_querier(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%d\n", br->multicast_querier); +} + +static ssize_t store_multicast_querier(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_multicast_set_querier); +} +static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR, + show_multicast_querier, store_multicast_querier); + static ssize_t show_hash_elasticity(struct device *d, struct device_attribute *attr, char *buf) { @@ -702,6 +719,7 @@ static struct attribute *bridge_attrs[] = { #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &dev_attr_multicast_router.attr, &dev_attr_multicast_snooping.attr, + &dev_attr_multicast_querier.attr, &dev_attr_hash_elasticity.attr, &dev_attr_hash_max.attr, &dev_attr_multicast_last_member_count.attr, -- cgit v1.2.2 From 77162022ab26a1f99d3af30c03760a76f86e193d Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Sun, 15 Apr 2012 06:43:56 +0000 Subject: net: add generic PF_BRIDGE:RTM_ FDB 
hooks This adds two new flags NTF_MASTER and NTF_SELF that can now be used to specify where PF_BRIDGE netlink commands should be sent. NTF_MASTER sends the commands to the 'dev->master' device for parsing. Typically this will be the linux net/bridge, or open-vswitch devices. Also without any flags set the command will be handled by the master device as well so that current user space tools continue to work as expected. The NTF_SELF flag will push the PF_BRIDGE commands to the device. In the basic example below the commands are then parsed and programmed in the embedded bridge. Note if both NTF_SELF and NTF_MASTER bits are set then the command will be sent to both 'dev->master' and 'dev' this allows user space to easily keep the embedded bridge and software bridge in sync. There is a slight complication in the case with both flags set when an error occurs. To resolve this the rtnl handler clears the NTF_ flag in the netlink ack to indicate which sets completed successfully. The add/del handlers will abort as soon as any error occurs. To support this new net device ops were added to call into the device and the existing bridging code was refactored to use these. There should be no required changes in user space to support the current bridge behavior. A basic setup with a SR-IOV enabled NIC looks like this, veth0 veth2 | | ------------ | bridge0 | <---- software bridging ------------ / / ethx.y ethx VF PF \ \ <---- propagate FDB entries to HW \ \ -------------------- | Embedded Bridge | <---- hardware offloaded switching -------------------- In this case the embedded bridge must be managed to allow 'veth0' to communicate with 'ethx.y' correctly. At present drivers managing the embedded bridge either send frames onto the network which then get dropped by the switch OR the embedded bridge will flood these frames. With this patch we have a mechanism to manage the embedded bridge correctly from user space. This example is specific to SR-IOV but replacing the VF with another PF or dropping this into the DSA framework generates similar management issues. Examples session using the 'br'[1] tool to add, dump and then delete a mac address with a new "embedded" option and enabled ixgbe driver: # br fdb add 22:35:19:ac:60:59 dev eth3 # br fdb port mac addr flags veth0 22:35:19:ac:60:58 static veth0 9a:5f:81:f7:f6:ec local eth3 00:1b:21:55:23:59 local eth3 22:35:19:ac:60:59 static veth0 22:35:19:ac:60:57 static #br fdb add 22:35:19:ac:60:59 embedded dev eth3 #br fdb port mac addr flags veth0 22:35:19:ac:60:58 static veth0 9a:5f:81:f7:f6:ec local eth3 00:1b:21:55:23:59 local eth3 22:35:19:ac:60:59 static veth0 22:35:19:ac:60:57 static eth3 22:35:19:ac:60:59 local embedded #br fdb del 22:35:19:ac:60:59 embedded dev eth3 I added a couple lines to 'br' to set the flags correctly is all. It is my opinion that the merit of this patch is now embedded and SW bridges can both be modeled correctly in user space using very nearly the same message passing. [1] 'br' tool was published as an RFC here and will be renamed 'bridge' http://patchwork.ozlabs.org/patch/117664/ Thanks to Jamal Hadi Salim, Stephen Hemminger and Ben Hutchings for valuable feedback, suggestions, and review. v2: fixed api descriptions and error case with both NTF_SELF and NTF_MASTER set plus updated patch description. Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- net/bridge/br_device.c | 3 + net/bridge/br_fdb.c | 128 ++++++++++------------------------------ net/bridge/br_netlink.c | 12 ---- net/bridge/br_private.h | 15 ++++- net/core/rtnetlink.c | 152 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 198 insertions(+), 112 deletions(-) (limited to 'net') diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ba829de84423..d6e5929458b1 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -317,6 +317,9 @@ static const struct net_device_ops br_netdev_ops = { .ndo_add_slave = br_add_slave, .ndo_del_slave = br_del_slave, .ndo_fix_features = br_fix_features, + .ndo_fdb_add = br_fdb_add, + .ndo_fdb_del = br_fdb_delete, + .ndo_fdb_dump = br_fdb_dump, }; static void br_dev_free(struct net_device *dev) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 80dbce4974ce..5945c54bc2de 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -535,44 +535,38 @@ errout: } /* Dump information about entries, in response to GETNEIGH */ -int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +int br_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) { - struct net *net = sock_net(skb->sk); - struct net_device *dev; - int idx = 0; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - struct net_bridge *br = netdev_priv(dev); - int i; - - if (!(dev->priv_flags & IFF_EBRIDGE)) - continue; + struct net_bridge *br = netdev_priv(dev); + int i; - for (i = 0; i < BR_HASH_SIZE; i++) { - struct hlist_node *h; - struct net_bridge_fdb_entry *f; + if (!(dev->priv_flags & IFF_EBRIDGE)) + goto out; - hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { - if (idx < cb->args[0]) - goto skip; + for (i = 0; i < BR_HASH_SIZE; i++) { + struct hlist_node *h; + struct net_bridge_fdb_entry *f; - if (fdb_fill_info(skb, br, f, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, - NLM_F_MULTI) < 0) - break; + hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { + if (idx < cb->args[0]) + goto skip; + + if (fdb_fill_info(skb, br, f, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI) < 0) + break; skip: - ++idx; - } + ++idx; } } - rcu_read_unlock(); - - cb->args[0] = idx; - return skb->len; +out: + return idx; } /* Update (create or replace) forwarding database entry */ @@ -614,43 +608,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, } /* Add new permanent fdb entry with RTM_NEWNEIGH */ -int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +int br_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 nlh_flags) { - struct net *net = sock_net(skb->sk); - struct ndmsg *ndm; - struct nlattr *tb[NDA_MAX+1]; - struct net_device *dev; struct net_bridge_port *p; - const __u8 *addr; - int err; - - ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); - if (err < 0) - return err; - - ndm = nlmsg_data(nlh); - if (ndm->ndm_ifindex == 0) { - pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n"); - return -EINVAL; - } - - dev = __dev_get_by_index(net, ndm->ndm_ifindex); - if (dev == NULL) { - pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n"); - return -ENODEV; - } - - if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { - pr_info("bridge: RTM_NEWNEIGH with invalid address\n"); - return -EINVAL; - } - - addr = nla_data(tb[NDA_LLADDR]); - if (!is_valid_ether_addr(addr)) { - pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n"); - 
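Alongside the br_fdb.c refactor in this hunk, a sketch of the other side of the interface: how a userspace tool such as the 'br' utility described above might build an RTM_NEWNEIGH request and choose between NTF_MASTER and NTF_SELF. This is a hypothetical illustration, not code from iproute2 or from this patch set (the struct and function names below are invented for the sketch); only the message layout, a struct ndmsg followed by an NDA_LLADDR attribute, mirrors what the new rtnl_fdb_add() handler parses.

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <linux/if_ether.h>

/* Hypothetical request buffer: nlmsghdr + ndmsg + room for one attribute. */
struct fdb_req {
	struct nlmsghdr	n;
	struct ndmsg	ndm;
	char		buf[64];
};

/* Fill an RTM_NEWNEIGH request; ntf_flags is NTF_MASTER, NTF_SELF or both,
 * selecting whether dev->master, the device itself, or both handle it.
 */
static void fdb_req_build(struct fdb_req *req, int ifindex,
			  const unsigned char mac[ETH_ALEN], __u8 ntf_flags)
{
	struct rtattr *rta;

	memset(req, 0, sizeof(*req));
	req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req->n.nlmsg_type = RTM_NEWNEIGH;
	req->n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req->ndm.ndm_family = PF_BRIDGE;
	req->ndm.ndm_ifindex = ifindex;
	req->ndm.ndm_state = NUD_PERMANENT;
	req->ndm.ndm_flags = ntf_flags;

	/* Append the NDA_LLADDR attribute carrying the MAC address. */
	rta = (struct rtattr *)((char *)req + NLMSG_ALIGN(req->n.nlmsg_len));
	rta->rta_type = NDA_LLADDR;
	rta->rta_len = RTA_LENGTH(ETH_ALEN);
	memcpy(RTA_DATA(rta), mac, ETH_ALEN);
	req->n.nlmsg_len = NLMSG_ALIGN(req->n.nlmsg_len) +
			   RTA_ALIGN(rta->rta_len);
}

Sending such a request over a NETLINK_ROUTE socket and then inspecting which NTF_ bits the handler cleared, as the changelog above describes, tells the caller whether the software bridge, the embedded bridge, or both accepted the entry.
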
return -EINVAL; - } + int err = 0; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); @@ -670,14 +632,14 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) rcu_read_unlock(); } else { spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); + err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags); spin_unlock_bh(&p->br->hash_lock); } return err; } -static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) +static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr) { struct net_bridge *br = p->br; struct hlist_head *head = &br->hash[br_mac_hash(addr)]; @@ -692,40 +654,12 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) } /* Remove neighbor entry with RTM_DELNEIGH */ -int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr) { - struct net *net = sock_net(skb->sk); - struct ndmsg *ndm; struct net_bridge_port *p; - struct nlattr *llattr; - const __u8 *addr; - struct net_device *dev; int err; - ASSERT_RTNL(); - if (nlmsg_len(nlh) < sizeof(*ndm)) - return -EINVAL; - - ndm = nlmsg_data(nlh); - if (ndm->ndm_ifindex == 0) { - pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n"); - return -EINVAL; - } - - dev = __dev_get_by_index(net, ndm->ndm_ifindex); - if (dev == NULL) { - pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n"); - return -ENODEV; - } - - llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR); - if (llattr == NULL || nla_len(llattr) != ETH_ALEN) { - pr_info("bridge: RTM_DELNEIGH with invalid address\n"); - return -EINVAL; - } - - addr = nla_data(llattr); - p = br_port_get_rtnl(dev); if (p == NULL) { pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index df38108f6973..2080485515f1 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -232,18 +232,6 @@ int __init br_netlink_init(void) br_rtm_setlink, NULL, NULL); if (err) goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, - br_fdb_add, NULL, NULL); - if (err) - goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, - br_fdb_delete, NULL, NULL); - if (err) - goto err3; - err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, - NULL, br_fdb_dump, NULL); - if (err) - goto err3; return 0; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index f8ffd8c49054..1a8ad4fb9a6b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -360,9 +360,18 @@ extern int br_fdb_insert(struct net_bridge *br, extern void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr); -extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); + +extern int br_fdb_delete(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr); +extern int br_fdb_add(struct ndmsg *nlh, + struct net_device *dev, + unsigned char *addr, + u16 nlh_flags); +extern int br_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx); /* br_forward.c */ extern void br_deliver(const struct net_bridge_port *to, diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2ff6fe4bada4..b348b7fbf53a 100644 --- 
a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -35,7 +35,9 @@ #include #include #include +#include #include +#include #include @@ -1978,6 +1980,152 @@ errout: rtnl_set_sk_err(net, RTNLGRP_LINK, err); } +static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +{ + struct net *net = sock_net(skb->sk); + struct net_device *master = NULL; + struct ndmsg *ndm; + struct nlattr *tb[NDA_MAX+1]; + struct net_device *dev; + u8 *addr; + int err; + + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); + if (err < 0) + return err; + + ndm = nlmsg_data(nlh); + if (ndm->ndm_ifindex == 0) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n"); + return -EINVAL; + } + + dev = __dev_get_by_index(net, ndm->ndm_ifindex); + if (dev == NULL) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n"); + return -ENODEV; + } + + if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n"); + return -EINVAL; + } + + addr = nla_data(tb[NDA_LLADDR]); + if (!is_valid_ether_addr(addr)) { + pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n"); + return -EINVAL; + } + + err = -EOPNOTSUPP; + + /* Support fdb on master device the net/bridge default case */ + if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && + (dev->priv_flags & IFF_BRIDGE_PORT)) { + master = dev->master; + err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr, + nlh->nlmsg_flags); + if (err) + goto out; + else + ndm->ndm_flags &= ~NTF_MASTER; + } + + /* Embedded bridge, macvlan, and any other device support */ + if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { + err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr, + nlh->nlmsg_flags); + + if (!err) + ndm->ndm_flags &= ~NTF_SELF; + } +out: + return err; +} + +static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +{ + struct net *net = sock_net(skb->sk); + struct ndmsg *ndm; + struct nlattr *llattr; + struct net_device *dev; + int err = -EINVAL; + __u8 *addr; + + if (nlmsg_len(nlh) < sizeof(*ndm)) + return -EINVAL; + + ndm = nlmsg_data(nlh); + if (ndm->ndm_ifindex == 0) { + pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n"); + return -EINVAL; + } + + dev = __dev_get_by_index(net, ndm->ndm_ifindex); + if (dev == NULL) { + pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n"); + return -ENODEV; + } + + llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR); + if (llattr == NULL || nla_len(llattr) != ETH_ALEN) { + pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n"); + return -EINVAL; + } + + addr = nla_data(llattr); + err = -EOPNOTSUPP; + + /* Support fdb on master device the net/bridge default case */ + if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && + (dev->priv_flags & IFF_BRIDGE_PORT)) { + struct net_device *master = dev->master; + + if (master->netdev_ops->ndo_fdb_del) + err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr); + + if (err) + goto out; + else + ndm->ndm_flags &= ~NTF_MASTER; + } + + /* Embedded bridge, macvlan, and any other device support */ + if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { + err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr); + + if (!err) + ndm->ndm_flags &= ~NTF_SELF; + } +out: + return err; +} + +static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int idx = 0; + struct net *net = sock_net(skb->sk); + struct net_device *dev; + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->priv_flags & IFF_BRIDGE_PORT) { + struct net_device 
*master = dev->master; + const struct net_device_ops *ops = master->netdev_ops; + + if (ops->ndo_fdb_dump) + idx = ops->ndo_fdb_dump(skb, cb, dev, idx); + } + + if (dev->netdev_ops->ndo_fdb_dump) + idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); + } + rcu_read_unlock(); + + cb->args[0] = idx; + return skb->len; +} + /* Protected by RTNL sempahore. */ static struct rtattr **rta_buf; static int rtattr_max; @@ -2150,5 +2298,9 @@ void __init rtnetlink_init(void) rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); + + rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); + rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); + rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); } -- cgit v1.2.2 From 12a94634453c61fd9a11c4702002e3db6d4feb70 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Sun, 15 Apr 2012 06:44:02 +0000 Subject: net: addr_list: add exclusive dev_uc_add and dev_mc_add This adds a dev_uc_add_excl() and dev_mc_add_excl() calls similar to the original dev_{uc|mc}_add() except it sets the global bit and returns -EEXIST for duplicat entires. This is useful for drivers that support SR-IOV, macvlan devices and any other devices that need to manage the unicast and multicast lists. v2: fix typo UNICAST should be MULTICAST in dev_mc_add_excl() CC: Ben Hutchings Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- net/core/dev_addr_lists.c | 97 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 81 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 626698f0db8b..c4cc2bc49f06 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -21,12 +21,35 @@ * General list handling functions */ +static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, + unsigned char *addr, int addr_len, + unsigned char addr_type, bool global) +{ + struct netdev_hw_addr *ha; + int alloc_size; + + alloc_size = sizeof(*ha); + if (alloc_size < L1_CACHE_BYTES) + alloc_size = L1_CACHE_BYTES; + ha = kmalloc(alloc_size, GFP_ATOMIC); + if (!ha) + return -ENOMEM; + memcpy(ha->addr, addr, addr_len); + ha->type = addr_type; + ha->refcount = 1; + ha->global_use = global; + ha->synced = false; + list_add_tail_rcu(&ha->list, &list->list); + list->count++; + + return 0; +} + static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, unsigned char *addr, int addr_len, unsigned char addr_type, bool global) { struct netdev_hw_addr *ha; - int alloc_size; if (addr_len > MAX_ADDR_LEN) return -EINVAL; @@ -46,21 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, } } - - alloc_size = sizeof(*ha); - if (alloc_size < L1_CACHE_BYTES) - alloc_size = L1_CACHE_BYTES; - ha = kmalloc(alloc_size, GFP_ATOMIC); - if (!ha) - return -ENOMEM; - memcpy(ha->addr, addr, addr_len); - ha->type = addr_type; - ha->refcount = 1; - ha->global_use = global; - ha->synced = false; - list_add_tail_rcu(&ha->list, &list->list); - list->count++; - return 0; + return __hw_addr_create_ex(list, addr, addr_len, addr_type, global); } static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, @@ -376,6 +385,34 @@ EXPORT_SYMBOL(dev_addr_del_multiple); * Unicast list handling functions */ +/** + * dev_uc_add_excl - Add a global secondary unicast address + * @dev: device + * @addr: address to add + */ +int dev_uc_add_excl(struct net_device *dev, unsigned char *addr) +{ + struct 
netdev_hw_addr *ha; + int err; + + netif_addr_lock_bh(dev); + list_for_each_entry(ha, &dev->uc.list, list) { + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == NETDEV_HW_ADDR_T_UNICAST) { + err = -EEXIST; + goto out; + } + } + err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_UNICAST, true); + if (!err) + __dev_set_rx_mode(dev); +out: + netif_addr_unlock_bh(dev); + return err; +} +EXPORT_SYMBOL(dev_uc_add_excl); + /** * dev_uc_add - Add a secondary unicast address * @dev: device @@ -501,6 +538,34 @@ EXPORT_SYMBOL(dev_uc_init); * Multicast list handling functions */ +/** + * dev_mc_add_excl - Add a global secondary multicast address + * @dev: device + * @addr: address to add + */ +int dev_mc_add_excl(struct net_device *dev, unsigned char *addr) +{ + struct netdev_hw_addr *ha; + int err; + + netif_addr_lock_bh(dev); + list_for_each_entry(ha, &dev->mc.list, list) { + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == NETDEV_HW_ADDR_T_MULTICAST) { + err = -EEXIST; + goto out; + } + } + err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_MULTICAST, true); + if (!err) + __dev_set_rx_mode(dev); +out: + netif_addr_unlock_bh(dev); + return err; +} +EXPORT_SYMBOL(dev_mc_add_excl); + static int __dev_mc_add(struct net_device *dev, unsigned char *addr, bool global) { -- cgit v1.2.2 From d83b060360485454fcd6870340ec01d6f96f2295 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Sun, 15 Apr 2012 06:44:08 +0000 Subject: net: add fdb generic dump routine This adds a generic dump routine drivers can call. It should be sufficient to handle any bridging model that uses the unicast address list. This should be most SR-IOV enabled NICs. v2: return error on nlmsg_put and use -EMSGSIZE instead of -ENOMEM this is inline other usages Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b348b7fbf53a..e8488c7b092c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1980,6 +1980,37 @@ errout: rtnl_set_sk_err(net, RTNLGRP_LINK, err); } +static int nlmsg_populate_fdb_fill(struct sk_buff *skb, + struct net_device *dev, + u8 *addr, u32 pid, u32 seq, + int type, unsigned int flags) +{ + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = flags; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dev->ifindex; + ndm->ndm_state = NUD_PERMANENT; + + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -2101,6 +2132,59 @@ out: return err; } +static int nlmsg_populate_fdb(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int *idx, + struct netdev_hw_addr_list *list) +{ + struct netdev_hw_addr *ha; + int err; + u32 pid, seq; + + pid = NETLINK_CB(cb->skb).pid; + seq = cb->nlh->nlmsg_seq; + + list_for_each_entry(ha, &list->list, list) { + if (*idx < cb->args[0]) + goto skip; + + err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, + pid, seq, 0, NTF_SELF); + if (err < 0) + return err; +skip: + *idx += 1; + } + return 0; +} + +/** + * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table. + * @nlh: netlink message header + * @dev: netdevice + * + * Default netdevice operation to dump the existing unicast address list. + * Returns zero on success. + */ +int ndo_dflt_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) +{ + int err; + + netif_addr_lock_bh(dev); + err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc); + if (err) + goto out; + nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc); +out: + netif_addr_unlock_bh(dev); + return idx; +} +EXPORT_SYMBOL(ndo_dflt_fdb_dump); + static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0; -- cgit v1.2.2 From 3ff661c38c8492a2859e39e0ea1e3b6d30e89bf5 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Sun, 15 Apr 2012 06:44:14 +0000 Subject: net: rtnetlink notify events for FDB NTF_SELF adds and deletes It is useful to be able to monitor for FDB events in user space. This patch adds support to generate netlink events when a change is made to a device supporting the FDB ops. This brings embedded switches inline with the SW net/bridge which triggers events on FDB updates as well. Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e8488c7b092c..b442d35bbc8b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2011,6 +2011,33 @@ nla_put_failure: return -EMSGSIZE; } +static inline size_t rtnl_fdb_nlmsg_size(void) +{ + return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); +} + +static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) +{ + struct net *net = dev_net(dev); + struct sk_buff *skb; + int err = -ENOBUFS; + + skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); + if (!skb) + goto errout; + + err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); + if (err < 0) { + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); + return; +errout: + rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); +} + static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -2067,8 +2094,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr, nlh->nlmsg_flags); - if (!err) + if (!err) { + rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); ndm->ndm_flags &= ~NTF_SELF; + } } out: return err; @@ -2125,8 +2154,10 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr); - if (!err) + if (!err) { + rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); ndm->ndm_flags &= ~NTF_SELF; + } } out: return err; -- cgit v1.2.2 From 8e8b41f9d8c8e63fc92f899ace8da91a490ac573 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 15 Mar 2012 10:16:16 +0100 Subject: cfg80211: enforce lack of interface combinations My grand plan to allow drivers to gradually move over to advertising virtual interface combinations and only enforce with drivers that do want it enforced doesn't seem to be working out, only Christian ever added the advertising (to carl9170), nobody else did. Begin enforcing combinations in cfg80211 so that users can rely on the information reported about a device. Cc: "Luis R. Rodriguez" Cc: Jouni Malinen Cc: Vasanthakumar Thiagarajan Cc: Senthil Balasubramanian Cc: Kalle Valo Cc: Jiri Slaby Cc: Nick Kossifidis Cc: Bob Copeland Cc: Bing Zhao Cc: Lennert Buytenhek Cc: Ivo van Doorn Cc: Gertjan van Wingerde Cc: Helmut Schaa Cc: Luciano Coelho Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/wireless/core.c | 4 ---- net/wireless/util.c | 10 +++------- 2 files changed, 3 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index 59f4a7e7c092..39f2538a46fc 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -422,10 +422,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy) const struct ieee80211_iface_combination *c; int i, j; - /* If we have combinations enforce them */ - if (wiphy->n_iface_combinations) - wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS; - for (i = 0; i < wiphy->n_iface_combinations; i++) { u32 cnt = 0; u16 all_iftypes = 0; diff --git a/net/wireless/util.c b/net/wireless/util.c index 1b7a08df933c..ffced852284d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -946,13 +946,6 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, if (rdev->wiphy.software_iftypes & BIT(iftype)) return 0; - /* - * Drivers will gradually all set this flag, until all - * have it we only enforce for those that set it. - */ - if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS)) - return 0; - memset(num, 0, sizeof(num)); num[iftype] = 1; @@ -972,6 +965,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, } mutex_unlock(&rdev->devlist_mtx); + if (total == 1) + return 0; + for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct ieee80211_iface_limit *limits; -- cgit v1.2.2 From bb3e10fb585f1911fedf5fcc4411dcf8d8d63f54 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 12 Apr 2012 16:09:49 +0300 Subject: mac80211: check IEEE80211_HW_QUEUE_CONTROL in ieee80211_check_queues() Commit 3a25a8c8 (mac80211: add improved HW queue control) introduced a bug when running in AP mode without the IEEE80211_HW_QUEUE_CONTROL flag set. The ieee80211_check_queues() function always returns -EINVAL, preventing AP mode from starting. To fix this, check whether this flag is set before checking if cab_queue is set properly. Signed-off-by: Luciano Coelho Signed-off-by: John W. Linville --- net/mac80211/iface.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 6e85faed053d..23d1da376eb3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -163,7 +163,8 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata) return -EINVAL; } - if (sdata->vif.type != NL80211_IFTYPE_AP) { + if ((sdata->vif.type != NL80211_IFTYPE_AP) || + !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) { sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; return 0; } -- cgit v1.2.2 From a802a6eba13282ddd5718f8db9d476e42e84e2ba Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Thu, 12 Apr 2012 14:32:22 -0700 Subject: mac80211: Choose a new toffset setpoint if a big tsf jump is detected. Signed-off-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/mesh_sync.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index f78b0139856f..22a5f1e66996 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -22,7 +22,14 @@ /* This is not in the standard. It represents a tolerable tbtt drift below * which we do no TSF adjustment. */ -#define TBTT_MINIMUM_ADJUSTMENT 10 +#define TOFFSET_MINIMUM_ADJUSTMENT 10 + +/* This is not in the standard. 
It represents the maximum Toffset jump above + * which we'll invalidate the Toffset setpoint and choose a new setpoint. This + * could be, for instance, in case a neighbor is restarted and its TSF counter + * reset. + * */ +#define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */ struct sync_method { u8 method; @@ -156,15 +163,22 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset; - - msync_dbg("STA %pM : sta->t_offset=%lld," - " sta->t_offset_setpoint=%lld," - " t_clockdrift=%lld", + msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld", sta->sta.addr, (long long) sta->t_offset, (long long) sta->t_offset_setpoint, (long long) t_clockdrift); + + if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || + t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) { + msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset", + sta->sta.addr, + (long long) t_clockdrift); + clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); + goto no_sync; + } + rcu_read_unlock(); spin_lock_bh(&ifmsh->sync_offset_lock); @@ -200,7 +214,7 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata) spin_lock_bh(&ifmsh->sync_offset_lock); if (ifmsh->sync_offset_clockdrift_max > - TBTT_MINIMUM_ADJUSTMENT) { + TOFFSET_MINIMUM_ADJUSTMENT) { /* Since ajusting the tsf here would * require a possibly blocking call * to the driver tsf setter, we punt -- cgit v1.2.2 From ec14bcd20f5139a3dc42cfc34cdcebcbdc062c00 Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Thu, 12 Apr 2012 14:32:23 -0700 Subject: mac80211: Take into account TSF adjustment latency in Toffset setpoint When testing mesh synchronization we observed a global TSF slowdown that was dependent on the number of synchronized mesh stations. This seems to be caused by the TSF adjustment (read/write) latency. Adding a small margin to the Toffset setpoint solved the problem. Signed-off-by: Shinichi Hotori Signed-off-by: Yu Niiro Signed-off-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/mesh_sync.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index 22a5f1e66996..ff60d6bcc631 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -24,11 +24,17 @@ */ #define TOFFSET_MINIMUM_ADJUSTMENT 10 +/* This is not in the standard. It is a margin added to the + * Toffset setpoint to mitigate TSF overcorrection + * introduced by TSF adjustment latency. + */ +#define TOFFSET_SET_MARGIN 20 + /* This is not in the standard. It represents the maximum Toffset jump above * which we'll invalidate the Toffset setpoint and choose a new setpoint. This * could be, for instance, in case a neighbor is restarted and its TSF counter * reset. - * */ + */ #define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */ struct sync_method { -- cgit v1.2.2 From f9616e0f8828fba6c06d1feff1c26eaf049b1e8a Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Fri, 13 Apr 2012 16:38:40 +0530 Subject: cfg80211: increse bss expire time The background scan completion takes more time when the station is having heavy uplink traffic. The scan state machine decides to fall back to home channel on every off-channel visit when there are pending frames in tx queue. bgscan completion took ~30sec on dual band US regulatory card. 
scan period = (20 active channels * probe timeout) + (12 passive channels * passive probe timeout) + (32 * timeout on home channel) + (32 * flush timeout) Signed-off-by: Rajkumar Manoharan Signed-off-by: John W. Linville --- net/wireless/scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index fdbcfe692a36..1442bb68a3f3 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -18,7 +18,7 @@ #include "nl80211.h" #include "wext-compat.h" -#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) +#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) { -- cgit v1.2.2 From 1dae27f84baa37b76014b348985089d22d90cccc Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Fri, 13 Apr 2012 12:02:57 -0700 Subject: mac80211: add function retrieve average rssi Add utility function to provide the average rssi per vif Signed-off-by: Wey-Yi Guy Signed-off-by: John W. Linville --- net/mac80211/util.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 9d255a2e37ee..89c1e5b9ba94 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1793,3 +1793,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, } return 0; } + +int ieee80211_ave_rssi(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + return ifmgd->ave_beacon_signal; +} -- cgit v1.2.2 From a75afd4770d401186b130b2bbb58ad23d4d01ae3 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Mon, 26 Mar 2012 02:05:50 +0300 Subject: can: fix sparse warning for cgw_list Make cgw_list static to remove the following sparse warning: net/can/gw.c:69:1: warning: symbol 'cgw_list' was not declared. Should it be static? Signed-off-by: Daniel Baluta Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- net/can/gw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/can/gw.c b/net/can/gw.c index 3d79b127881e..b41acf25668f 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -66,7 +66,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp "); MODULE_ALIAS("can-gw"); -HLIST_HEAD(cgw_list); +static HLIST_HEAD(cgw_list); static struct notifier_block notifier; static struct kmem_cache *cgw_cache __read_mostly; -- cgit v1.2.2 From 7f593881089084640bffeb93c9cc49077afcec81 Mon Sep 17 00:00:00 2001 From: majianpeng Date: Mon, 16 Apr 2012 19:33:31 +0000 Subject: net/ipv4:Remove two memleak reports by kmemleak_not_leak. Signed-off-by: majianpeng Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ac4d6b3fa9c9..bcd4744e0e4e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3514,6 +3514,6 @@ int __init ip_rt_init(void) */ void __init ip_static_sysctl_init(void) { - register_sysctl_paths(ipv4_path, ipv4_skeleton); + kmemleak_not_leak(register_sysctl_paths(ipv4_path, ipv4_skeleton)); } #endif -- cgit v1.2.2 From 798ec84d45754403571d6387396236e877965c5a Mon Sep 17 00:00:00 2001 From: majianpeng Date: Mon, 16 Apr 2012 19:58:28 +0000 Subject: net/core:Remove memleak reports by kmemleak_not_leak. Signed-off-by: majianpeng Signed-off-by: David S. 
Miller --- net/core/sysctl_net_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 0c2850874254..cee599190261 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -256,7 +256,7 @@ static __init int sysctl_core_init(void) { static struct ctl_table empty[1]; - register_sysctl_paths(net_core_path, empty); + kmemleak_not_leak(register_sysctl_paths(net_core_path, empty)); register_net_sysctl_rotable(net_core_path, net_core_table); return register_pernet_subsys(&sysctl_core_ops); } -- cgit v1.2.2 From 6d2003fc26e280001273d0b9dcc02b90f4536708 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sat, 14 Apr 2012 13:15:27 +0200 Subject: batman-adv: convert the tt_crc to network order Before sending out a TT_Request packet we must convert the tt_crc field value to network order (since it is 16bits long). Reported-by: Al Viro Signed-off-by: Antonio Quartulli --- net/batman-adv/translation-table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index e16a3690bdb2..a38d315d3cd6 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1339,7 +1339,7 @@ static int send_tt_request(struct bat_priv *bat_priv, memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); tt_request->header.ttl = TTL; tt_request->ttvn = ttvn; - tt_request->tt_data = tt_crc; + tt_request->tt_data = htons(tt_crc); tt_request->flags = TT_REQUEST; if (full_table) -- cgit v1.2.2 From e88af9464f8ba6bec5a5213065ce8d98b2f2ac1a Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Wed, 18 Apr 2012 09:47:57 +0200 Subject: batman-adv: remove duplicated line in comment Remove an accidentally added duplicated line in a function comment Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 1cf18ac44ba9..c8642b586411 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -734,7 +734,6 @@ static int handle_claim(struct bat_priv *bat_priv, } /** - * @bat_priv: the bat priv with all the soft interface information * @bat_priv: the bat priv with all the soft interface information * @hw_src: the Hardware source in the ARP Header * @hw_dst: the Hardware destination in the ARP Header -- cgit v1.2.2 From 8140625e30523da3eb76cdab837ba7aa9509029c Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:19:58 +0800 Subject: batman-adv: move ogm initialization into the proper function Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/hard-interface.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 8c4b790b98be..f15200760d65 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -332,7 +332,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface, hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); - atomic_set(&hard_iface->seqno, 1); atomic_set(&hard_iface->frag_seqno, 1); bat_info(hard_iface->soft_iface, "Adding interface: %s\n", hard_iface->net_dev->name); @@ -451,6 +450,13 @@ static struct hard_iface *hardif_add_interface(struct 
net_device *net_dev) check_known_mac_addr(hard_iface->net_dev); list_add_tail_rcu(&hard_iface->list, &hardif_list); + /** + * This can't be called via a bat_priv callback because + * we have no bat_priv yet. + */ + atomic_set(&hard_iface->seqno, 1); + hard_iface->packet_buff = NULL; + return hard_iface; free_if: -- cgit v1.2.2 From c2aca02235c014de57b03aa725d9b094f7a87cac Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:45 +0800 Subject: batman-adv: refactoring API: find generalized name for bat_ogm_init callback Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 4 ++-- net/batman-adv/hard-interface.c | 2 +- net/batman-adv/main.c | 2 +- net/batman-adv/types.h | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index fab1071f601e..117b8314e8c7 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -30,7 +30,7 @@ #include "send.h" #include "bat_algo.h" -static void bat_iv_ogm_init(struct hard_iface *hard_iface) +static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; @@ -1169,7 +1169,7 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming, static struct bat_algo_ops batman_iv __read_mostly = { .name = "BATMAN IV", - .bat_ogm_init = bat_iv_ogm_init, + .bat_iface_enable = bat_iv_ogm_iface_enable, .bat_ogm_init_primary = bat_iv_ogm_init_primary, .bat_ogm_update_mac = bat_iv_ogm_update_mac, .bat_ogm_schedule = bat_iv_ogm_schedule, diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index f15200760d65..4d9b85ddd93a 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -312,7 +312,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface, hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_init(hard_iface); + bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); if (!hard_iface->packet_buff) { bat_err(hard_iface->soft_iface, diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index e67ca96285b3..ca8f39547105 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -208,7 +208,7 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops) } /* all algorithms must implement all ops (for now) */ - if (!bat_algo_ops->bat_ogm_init || + if (!bat_algo_ops->bat_iface_enable || !bat_algo_ops->bat_ogm_init_primary || !bat_algo_ops->bat_ogm_update_mac || !bat_algo_ops->bat_ogm_schedule || diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index a5b1a6333def..4b9224829a6c 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -377,8 +377,8 @@ struct recvlist_node { struct bat_algo_ops { struct hlist_node list; char *name; - /* init OGM when hard-interface is enabled */ - void (*bat_ogm_init)(struct hard_iface *hard_iface); + /* init routing info when hard-interface is enabled */ + void (*bat_iface_enable)(struct hard_iface *hard_iface); /* init primary OGM when primary interface is selected */ void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); /* init mac addresses of the OGM belonging to this hard-interface */ -- cgit v1.2.2 From d7d32ec0f199cc00a43434cdd920338763fab2e0 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:46 +0800 Subject: batman-adv: randomize initial seqno to avoid collision Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- 
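The change below amounts to seeding each interface's OGM sequence counter from the kernel RNG instead of always starting at 1, so that two nodes (re)starting at the same moment do not begin with colliding sequence-number windows. A minimal sketch of the pattern, assuming nothing more than an atomic_t counter like the hard_iface->seqno field the diff touches (illustrative only, not the batman-adv code itself):

    #include <linux/atomic.h>
    #include <linux/random.h>
    #include <linux/types.h>

    /* Seed a per-interface sequence counter with a random start value.
     * Mirrors what the diff does for hard_iface->seqno.
     */
    static void seed_iface_seqno(atomic_t *seqno)
    {
            uint32_t random_seqno;

            /* fill random_seqno from the kernel RNG */
            get_random_bytes(&random_seqno, sizeof(random_seqno));
            atomic_set(seqno, random_seqno);
    }
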
net/batman-adv/bat_iv_ogm.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 117b8314e8c7..95bfc5962e1a 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -33,6 +33,11 @@ static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; + uint32_t random_seqno; + + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); + atomic_set(&hard_iface->seqno, random_seqno); hard_iface->packet_len = BATMAN_OGM_LEN; hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); -- cgit v1.2.2 From 00a50076a3afa4014cdd57e87e31a00ce4c4b67e Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:47 +0800 Subject: batman-adv: add iface_disable() callback to routing API Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 7 +++++++ net/batman-adv/hard-interface.c | 3 +-- net/batman-adv/main.c | 1 + net/batman-adv/types.h | 2 ++ 4 files changed, 11 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 95bfc5962e1a..4cc66db699ed 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -52,6 +52,12 @@ static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) batman_ogm_packet->ttvn = 0; } +static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) +{ + kfree(hard_iface->packet_buff); + hard_iface->packet_buff = NULL; +} + static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; @@ -1175,6 +1181,7 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming, static struct bat_algo_ops batman_iv __read_mostly = { .name = "BATMAN IV", .bat_iface_enable = bat_iv_ogm_iface_enable, + .bat_iface_disable = bat_iv_ogm_iface_disable, .bat_ogm_init_primary = bat_iv_ogm_init_primary, .bat_ogm_update_mac = bat_iv_ogm_update_mac, .bat_ogm_schedule = bat_iv_ogm_schedule, diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 4d9b85ddd93a..fd9715ec6dbd 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -397,8 +397,7 @@ void hardif_disable_interface(struct hard_iface *hard_iface) hardif_free_ref(new_if); } - kfree(hard_iface->packet_buff); - hard_iface->packet_buff = NULL; + bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); hard_iface->if_status = IF_NOT_IN_USE; /* delete all references to this hard_iface */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index ca8f39547105..a47a6ced1cb7 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -209,6 +209,7 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops) /* all algorithms must implement all ops (for now) */ if (!bat_algo_ops->bat_iface_enable || + !bat_algo_ops->bat_iface_disable || !bat_algo_ops->bat_ogm_init_primary || !bat_algo_ops->bat_ogm_update_mac || !bat_algo_ops->bat_ogm_schedule || diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 4b9224829a6c..b034cf23bc9a 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -379,6 +379,8 @@ struct bat_algo_ops { char *name; /* init routing info when hard-interface is enabled */ void (*bat_iface_enable)(struct hard_iface *hard_iface); + /* de-init routing info when hard-interface is disabled */ + void (*bat_iface_disable)(struct hard_iface 
*hard_iface); /* init primary OGM when primary interface is selected */ void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); /* init mac addresses of the OGM belonging to this hard-interface */ -- cgit v1.2.2 From 77af7575c4b11ce7d27b4cb372abd358b2dcd850 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:48 +0800 Subject: batman-adv: handle routing code initialization properly Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 11 ++++++++++- net/batman-adv/hard-interface.c | 15 ++++++--------- net/batman-adv/types.h | 2 +- 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 4cc66db699ed..271467033a1c 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -30,10 +30,11 @@ #include "send.h" #include "bat_algo.h" -static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) +static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; uint32_t random_seqno; + int res = -1; /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); @@ -42,6 +43,9 @@ static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) hard_iface->packet_len = BATMAN_OGM_LEN; hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); + if (!hard_iface->packet_buff) + goto out; + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; batman_ogm_packet->header.packet_type = BAT_OGM; batman_ogm_packet->header.version = COMPAT_VERSION; @@ -50,6 +54,11 @@ static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) batman_ogm_packet->tq = TQ_MAX_VALUE; batman_ogm_packet->tt_num_changes = 0; batman_ogm_packet->ttvn = 0; + + res = 0; + +out: + return res; } static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index fd9715ec6dbd..3b391fd3b5b2 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -304,22 +304,17 @@ int hardif_enable_interface(struct hard_iface *hard_iface, if (!softif_is_valid(soft_iface)) { pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", soft_iface->name); - dev_put(soft_iface); ret = -EINVAL; - goto err; + goto err_dev; } hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); - - if (!hard_iface->packet_buff) { - bat_err(hard_iface->soft_iface, - "Can't add interface packet (%s): out of memory\n", - hard_iface->net_dev->name); + ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); + if (ret < 0) { ret = -ENOMEM; - goto err; + goto err_dev; } hard_iface->if_num = bat_priv->num_ifaces; @@ -363,6 +358,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface, out: return 0; +err_dev: + dev_put(soft_iface); err: hardif_free_ref(hard_iface); return ret; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index b034cf23bc9a..dd78023800bd 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -378,7 +378,7 @@ struct bat_algo_ops { struct hlist_node list; char *name; /* init routing info when hard-interface is enabled */ - void (*bat_iface_enable)(struct hard_iface *hard_iface); + int (*bat_iface_enable)(struct hard_iface *hard_iface); /* de-init routing info when hard-interface is disabled */ void 
(*bat_iface_disable)(struct hard_iface *hard_iface); /* init primary OGM when primary interface is selected */ -- cgit v1.2.2 From cd8b78e7e9d1d9625634dc1ec2bb4b5a14d1295a Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:49 +0800 Subject: batman-adv: refactoring API: find generalized name for bat_ogm_init_primary callback Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 4 ++-- net/batman-adv/hard-interface.c | 2 +- net/batman-adv/main.c | 2 +- net/batman-adv/types.h | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 271467033a1c..c0bdb90cc024 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -67,7 +67,7 @@ static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) hard_iface->packet_buff = NULL; } -static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface) +static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; @@ -1191,7 +1191,7 @@ static struct bat_algo_ops batman_iv __read_mostly = { .name = "BATMAN IV", .bat_iface_enable = bat_iv_ogm_iface_enable, .bat_iface_disable = bat_iv_ogm_iface_disable, - .bat_ogm_init_primary = bat_iv_ogm_init_primary, + .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, .bat_ogm_update_mac = bat_iv_ogm_update_mac, .bat_ogm_schedule = bat_iv_ogm_schedule, .bat_ogm_emit = bat_iv_ogm_emit, diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 3b391fd3b5b2..75a555b8587e 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -146,7 +146,7 @@ static void primary_if_select(struct bat_priv *bat_priv, if (!new_hard_iface) goto out; - bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); + bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface); primary_if_update_addr(bat_priv, curr_hard_iface); out: diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index a47a6ced1cb7..791327219531 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -210,7 +210,7 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops) /* all algorithms must implement all ops (for now) */ if (!bat_algo_ops->bat_iface_enable || !bat_algo_ops->bat_iface_disable || - !bat_algo_ops->bat_ogm_init_primary || + !bat_algo_ops->bat_primary_iface_set || !bat_algo_ops->bat_ogm_update_mac || !bat_algo_ops->bat_ogm_schedule || !bat_algo_ops->bat_ogm_emit || diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index dd78023800bd..4d93aad899f7 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -381,8 +381,8 @@ struct bat_algo_ops { int (*bat_iface_enable)(struct hard_iface *hard_iface); /* de-init routing info when hard-interface is disabled */ void (*bat_iface_disable)(struct hard_iface *hard_iface); - /* init primary OGM when primary interface is selected */ - void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); + /* called when primary interface is selected / changed */ + void (*bat_primary_iface_set)(struct hard_iface *hard_iface); /* init mac addresses of the OGM belonging to this hard-interface */ void (*bat_ogm_update_mac)(struct hard_iface *hard_iface); /* prepare a new outgoing OGM for the send queue */ -- cgit v1.2.2 From 76e3d7fc1a49ea8c377ddc91a4ec40f326404833 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:50 +0800 Subject: batman-adv: rename BATMAN_OGM_LEN to 
BATMAN_OGM_HLEN Using BATMAN_OGM_LEN leaves one with the impression that this is the full packet size which is not the case. Therefore the variable is renamed. Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 12 ++++++------ net/batman-adv/packet.h | 2 +- net/batman-adv/routing.c | 2 +- net/batman-adv/send.c | 12 ++++++------ 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index c0bdb90cc024..a2af2896769f 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -40,7 +40,7 @@ static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) get_random_bytes(&random_seqno, sizeof(random_seqno)); atomic_set(&hard_iface->seqno, random_seqno); - hard_iface->packet_len = BATMAN_OGM_LEN; + hard_iface->packet_len = BATMAN_OGM_HLEN; hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); if (!hard_iface->packet_buff) @@ -112,7 +112,7 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, int tt_num_changes) { - int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); + int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes); return (next_buff_pos <= packet_len) && (next_buff_pos <= MAX_AGGREGATION_BYTES); @@ -162,7 +162,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, batman_ogm_packet->ttvn, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); - buff_pos += BATMAN_OGM_LEN + + buff_pos += BATMAN_OGM_HLEN + tt_len(batman_ogm_packet->tt_num_changes); packet_num++; batman_ogm_packet = (struct batman_ogm_packet *) @@ -540,7 +540,7 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, batman_ogm_packet->flags &= ~DIRECTLINK; bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, - BATMAN_OGM_LEN + tt_len(tt_num_changes), + BATMAN_OGM_HLEN + tt_len(tt_num_changes), if_incoming, 0, bat_iv_ogm_fwd_send_time()); } @@ -1173,12 +1173,12 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming, batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); - tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; + tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN; bat_iv_ogm_process(ethhdr, batman_ogm_packet, tt_buff, if_incoming); - buff_pos += BATMAN_OGM_LEN + + buff_pos += BATMAN_OGM_HLEN + tt_len(batman_ogm_packet->tt_num_changes); batman_ogm_packet = (struct batman_ogm_packet *) diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 59800e82371a..59dec0a1979b 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -126,7 +126,7 @@ struct batman_ogm_packet { uint16_t tt_crc; } __packed; -#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) +#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet) struct icmp_packet { struct batman_header header; diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 78eddc9067e6..ac13a6a7409d 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -254,7 +254,7 @@ int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ - if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) + if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_HLEN))) return NET_RX_DROP; ethhdr = (struct ethhdr *)skb_mac_header(skb); diff --git 
a/net/batman-adv/send.c b/net/batman-adv/send.c index af7a6741a685..b5f078cacc09 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -87,7 +87,7 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface, /* keep old buffer if kmalloc should fail */ if (new_buff) { memcpy(new_buff, hard_iface->packet_buff, - BATMAN_OGM_LEN); + BATMAN_OGM_HLEN); kfree(hard_iface->packet_buff); hard_iface->packet_buff = new_buff; @@ -101,13 +101,13 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv, { int new_len; - new_len = BATMAN_OGM_LEN + + new_len = BATMAN_OGM_HLEN + tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); /* if we have too many changes for one packet don't send any * and wait for the tt table request which will be fragmented */ if (new_len > hard_iface->soft_iface->mtu) - new_len = BATMAN_OGM_LEN; + new_len = BATMAN_OGM_HLEN; realloc_packet_buffer(hard_iface, new_len); @@ -117,14 +117,14 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv, atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); return tt_changes_fill_buffer(bat_priv, - hard_iface->packet_buff + BATMAN_OGM_LEN, - hard_iface->packet_len - BATMAN_OGM_LEN); + hard_iface->packet_buff + BATMAN_OGM_HLEN, + hard_iface->packet_len - BATMAN_OGM_HLEN); } static int reset_packet_buffer(struct bat_priv *bat_priv, struct hard_iface *hard_iface) { - realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN); + realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN); return 0; } -- cgit v1.2.2 From 1eeb479fda2405269b3a85c86ba0eca41fcc4ea0 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Tue, 7 Feb 2012 17:20:51 +0800 Subject: batman-adv: mark existing ogm variables as batman iv The coming protocol changes also will have a part called "OGM". That makes it necessary to introduce a distinction in the code base. Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 4 ++-- net/batman-adv/hard-interface.c | 2 +- net/batman-adv/packet.h | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a2af2896769f..98f71827c7b3 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -47,7 +47,7 @@ static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) goto out; batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - batman_ogm_packet->header.packet_type = BAT_OGM; + batman_ogm_packet->header.packet_type = BAT_IV_OGM; batman_ogm_packet->header.version = COMPAT_VERSION; batman_ogm_packet->header.ttl = 2; batman_ogm_packet->flags = NO_FLAGS; @@ -934,7 +934,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, * packet in an aggregation. 
Here we expect that the padding * is always zero (or not 0x01) */ - if (batman_ogm_packet->header.packet_type != BAT_OGM) + if (batman_ogm_packet->header.packet_type != BAT_IV_OGM) return; /* could be changed by schedule_own_packet() */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 75a555b8587e..e8c5da379a80 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -604,7 +604,7 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, switch (batman_ogm_packet->header.packet_type) { /* batman originator packet */ - case BAT_OGM: + case BAT_IV_OGM: ret = recv_bat_ogm_packet(skb, hard_iface); break; diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 59dec0a1979b..f54969c61a1e 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -25,7 +25,7 @@ #define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ enum bat_packettype { - BAT_OGM = 0x01, + BAT_IV_OGM = 0x01, BAT_ICMP = 0x02, BAT_UNICAST = 0x03, BAT_BCAST = 0x04, @@ -38,7 +38,7 @@ enum bat_packettype { /* this file is included by batctl which needs these defines */ #define COMPAT_VERSION 14 -enum batman_flags { +enum batman_iv_flags { PRIMARIES_FIRST_HOP = 1 << 4, VIS_SERVER = 1 << 5, DIRECTLINK = 1 << 6 -- cgit v1.2.2 From 0d125074ebc8c971e939f8c2c8f90a80fa09aeb4 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sat, 18 Feb 2012 11:27:34 +0100 Subject: batman-adv: use ETH_HLEN instead of sizeof(struct ethhdr) Instead of using sizeof(struct ethhdr) it is strongly recommended to use the kernel macro ETH_HLEN. This patch substitute each occurrence of the former expressione with the latter one. Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 7 +++---- net/batman-adv/bridge_loop_avoidance.c | 10 ++++------ net/batman-adv/hard-interface.c | 3 +-- net/batman-adv/icmp_socket.c | 4 ++-- net/batman-adv/routing.c | 8 ++++---- net/batman-adv/send.c | 2 +- net/batman-adv/soft-interface.c | 2 +- net/batman-adv/types.h | 2 +- net/batman-adv/vis.c | 8 ++++---- 9 files changed, 21 insertions(+), 25 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 98f71827c7b3..d7fa8afd7ff8 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -355,10 +355,9 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, if ((atomic_read(&bat_priv->aggregated_ogms)) && (packet_len < MAX_AGGREGATION_BYTES)) forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + - sizeof(struct ethhdr)); + ETH_HLEN); else - forw_packet_aggr->skb = dev_alloc_skb(packet_len + - sizeof(struct ethhdr)); + forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN); if (!forw_packet_aggr->skb) { if (!own_packet) @@ -366,7 +365,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, kfree(forw_packet_aggr); goto out; } - skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); + skb_reserve(forw_packet_aggr->skb, ETH_HLEN); INIT_HLIST_NODE(&forw_packet_aggr->list); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index c8642b586411..ad394c6496cc 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -290,9 +290,7 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, goto out; ethhdr = (struct ethhdr *)skb->data; - hw_src = (uint8_t *)ethhdr + - sizeof(struct ethhdr) + - sizeof(struct arphdr); + hw_src = (uint8_t *)ethhdr 
+ ETH_HLEN + sizeof(struct arphdr); /* now we pretend that the client would have sent this ... */ switch (claimtype) { @@ -340,7 +338,7 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); + bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; soft_iface->last_rx = jiffies; netif_rx(skb); @@ -844,7 +842,7 @@ static int bla_process_claim(struct bat_priv *bat_priv, headlen = sizeof(*vhdr); } else { proto = ntohs(ethhdr->h_proto); - headlen = sizeof(*ethhdr); + headlen = ETH_HLEN; } if (proto != ETH_P_ARP) @@ -1302,7 +1300,7 @@ int bla_is_backbone_gw(struct sk_buff *skb, return 0; /* first, find out the vid. */ - if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr))) + if (!pskb_may_pull(skb, hdr_size + ETH_HLEN)) return 0; ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size); diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index e8c5da379a80..47c79d724ba3 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -574,8 +574,7 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, goto err_free; /* expect a valid ethernet header here. */ - if (unlikely(skb->mac_len != sizeof(struct ethhdr) || - !skb_mac_header(skb))) + if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) goto err_free; if (!hard_iface->soft_iface) diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index b87518edcef9..2e98a57f3407 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -175,13 +175,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (len >= sizeof(struct icmp_packet_rr)) packet_len = sizeof(struct icmp_packet_rr); - skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); + skb = dev_alloc_skb(packet_len + ETH_HLEN); if (!skb) { len = -ENOMEM; goto out; } - skb_reserve(skb, sizeof(struct ethhdr)); + skb_reserve(skb, ETH_HLEN); icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); if (copy_from_user(icmp_packet, buff, packet_len)) { diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index ac13a6a7409d..ff560863bc74 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -313,7 +313,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -368,7 +368,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet *)skb->data; @@ -454,7 +454,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* create a copy of the skb, if needed, to modify it. */ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -841,7 +841,7 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* create a copy of the skb, if needed, to modify it. 
*/ - if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + if (skb_cow(skb, ETH_HLEN) < 0) goto out; unicast_packet = (struct unicast_packet *)skb->data; diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index b5f078cacc09..7c66b6121fa6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, } /* push to the ethernet header. */ - if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0) + if (my_skb_head_push(skb, ETH_HLEN) < 0) goto send_skb_err; skb_reset_mac_header(skb); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index efe0fbaadcd6..6e2530b02043 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -292,7 +292,7 @@ void interface_rx(struct net_device *soft_iface, /* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); + bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; soft_iface->last_rx = jiffies; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 4d93aad899f7..2f4848b776a7 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -27,7 +27,7 @@ #include "packet.h" #include "bitarray.h" -#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \ +#define BAT_HEADER_LEN (ETH_HLEN + \ ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ sizeof(struct unicast_packet) : \ sizeof(struct bcast_packet)))) diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c4a5b8cafada..cec216fb77c7 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -434,12 +434,12 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, return NULL; info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + - sizeof(struct ethhdr)); + ETH_HLEN); if (!info->skb_packet) { kfree(info); return NULL; } - skb_reserve(info->skb_packet, sizeof(struct ethhdr)); + skb_reserve(info->skb_packet, ETH_HLEN); packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) + vis_info_len); @@ -894,11 +894,11 @@ int vis_init(struct bat_priv *bat_priv) bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + MAX_VIS_PACKET_SIZE + - sizeof(struct ethhdr)); + ETH_HLEN); if (!bat_priv->my_vis_info->skb_packet) goto free_info; - skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); + skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, sizeof(*packet)); -- cgit v1.2.2 From c97c72b493d7b450005f4054b15679e312c89caa Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sun, 26 Feb 2012 15:39:41 +0100 Subject: batman-adv: print OGM seq numbers as unsigned int OGM sequence numbers are declared as uint32_t and so they have to printed using %u instead of %d in order to avoid wrong representations. 
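The misprint is easy to see once the 32-bit counter exceeds INT_MAX: a %d conversion reinterprets the value as signed and logs a large negative number. A tiny stand-alone illustration (ordinary user-space C, not the bat_dbg() logging itself):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t seqno = 3000000000u;          /* above INT_MAX */

            /* signed conversion misrepresents the counter */
            printf("with %%d: %d\n", (int)seqno);  /* -1294967296 */
            /* unsigned conversion prints the intended value */
            printf("with %%u: %u\n", seqno);       /* 3000000000 */
            return 0;
    }
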
Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index d7fa8afd7ff8..1014210bb365 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -152,7 +152,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, "Sending own" : "Forwarding")); bat_dbg(DBG_BATMAN, bat_priv, - "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", + "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? "aggregated " : ""), batman_ogm_packet->orig, ntohl(batman_ogm_packet->seqno), @@ -211,7 +211,7 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet) /* FIXME: what about aggregated packets ? */ bat_dbg(DBG_BATMAN, bat_priv, - "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n", + "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n", (forw_packet->own ? "Sending own" : "Forwarding"), batman_ogm_packet->orig, ntohl(batman_ogm_packet->seqno), @@ -892,7 +892,7 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, if (need_update) { bat_dbg(DBG_BATMAN, bat_priv, - "updating last_seqno: old %d, new %d\n", + "updating last_seqno: old %u, new %u\n", orig_node->last_real_seqno, batman_ogm_packet->seqno); orig_node->last_real_seqno = batman_ogm_packet->seqno; } @@ -945,7 +945,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, batman_ogm_packet->orig) ? 1 : 0); bat_dbg(DBG_BATMAN, bat_priv, - "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", + "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, -- cgit v1.2.2 From 1e5cc266dbc401d11aefb966ad35e651c2f67414 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Sun, 26 Feb 2012 15:39:42 +0100 Subject: batman-adv: skip the window protection test when the originator has no neighbours When we receive an OGM from from a node for the first time, the last_real_seqno field of the orig_node structure has not been initialised yet. The value of this field is used to compute the current ogm-seqno window and therefore the protection mechanism will probably drop the packet due to an out-of-window error. To avoid this situation this patch adds a check to skip the window protection mechanism if no neighbour nodes have already been added. When the first neighbour node is added, the last_real_seqno field is initialised too. Reported-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 1014210bb365..8b2db2e76c7e 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -861,7 +861,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; /* signalize caller that the packet is to be dropped. 
*/ - if (window_protected(bat_priv, seq_diff, + if (!hlist_empty(&orig_node->neigh_list) && + window_protected(bat_priv, seq_diff, &orig_node->batman_seqno_reset)) goto out; -- cgit v1.2.2 From 3f8375fee30cbf7fb0bd67f044e3406daa16fa3e Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 17:57:52 -0400 Subject: tipc: introduce publication lists struct There is currently a single list that is containing both cluster-scope and zone-scope publications, and the list count is a separate free floating variable. Create a struct to bind the count to the list, and to pave the way for factoring out the publications into zone/cluster/node scope. The current "publ_root" most matches what will be the cluster scope list, so it is named accordingly in this commit. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_distr.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index d57da6159616..870a001131c6 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -68,12 +68,19 @@ struct distr_item { }; /** - * List of externally visible publications by this node -- - * that is, all publications having scope > TIPC_NODE_SCOPE. + * struct publ_list - list of publications made by this node + * @list: circular list of publications + * @list_size: number of entries in list */ +struct publ_list { + struct list_head list; + u32 size; +}; -static LIST_HEAD(publ_root); -static u32 publ_cnt; +static struct publ_list publ_cluster = { + .list = LIST_HEAD_INIT(publ_cluster.list), + .size = 0, +}; /** * publ_to_item - add publication info to a publication message @@ -132,8 +139,8 @@ void tipc_named_publish(struct publication *publ) struct sk_buff *buf; struct distr_item *item; - list_add_tail(&publ->local_list, &publ_root); - publ_cnt++; + list_add_tail(&publ->local_list, &publ_cluster.list); + publ_cluster.size++; buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); if (!buf) { @@ -156,7 +163,7 @@ void tipc_named_withdraw(struct publication *publ) struct distr_item *item; list_del(&publ->local_list); - publ_cnt--; + publ_cluster.size--; buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); if (!buf) { @@ -207,9 +214,9 @@ void tipc_named_node_up(unsigned long nodearg) INIT_LIST_HEAD(&message_list); read_lock_bh(&tipc_nametbl_lock); - rest = publ_cnt * ITEM_SIZE; + rest = publ_cluster.size * ITEM_SIZE; - list_for_each_entry(publ, &publ_root, local_list) { + list_for_each_entry(publ, &publ_cluster.list, local_list) { if (!buf) { left = (rest <= max_item_buf) ? rest : max_item_buf; rest -= left; @@ -329,7 +336,7 @@ void tipc_named_reinit(void) write_lock_bh(&tipc_nametbl_lock); - list_for_each_entry(publ, &publ_root, local_list) + list_for_each_entry(publ, &publ_cluster.list, local_list) publ->node = tipc_own_addr; write_unlock_bh(&tipc_nametbl_lock); -- cgit v1.2.2 From e11aa059715e2bacd4e62d57be5557dda697af8e Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 17:57:52 -0400 Subject: tipc: Factor out name publication code to a separate function This is done so that it can be reused with differing publication lists, instead of being hard coded to the cluster publicaton list. 
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_distr.c | 61 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 870a001131c6..3be0eb9509df 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -176,6 +176,39 @@ void tipc_named_withdraw(struct publication *publ) named_cluster_distribute(buf); } +/* + * named_distribute - prepare name info for bulk distribution to another node + */ +static void named_distribute(struct list_head *message_list, u32 node, + struct publ_list *pls, u32 max_item_buf) +{ + struct publication *publ; + struct sk_buff *buf = NULL; + struct distr_item *item = NULL; + u32 left = 0; + u32 rest = pls->size * ITEM_SIZE; + + list_for_each_entry(publ, &pls->list, local_list) { + if (!buf) { + left = (rest <= max_item_buf) ? rest : max_item_buf; + rest -= left; + buf = named_prepare_buf(PUBLICATION, left, node); + if (!buf) { + warn("Bulk publication failure\n"); + return; + } + item = (struct distr_item *)msg_data(buf_msg(buf)); + } + publ_to_item(item, publ); + item++; + left -= ITEM_SIZE; + if (!left) { + list_add_tail((struct list_head *)buf, message_list); + buf = NULL; + } + } +} + /** * tipc_named_node_up - tell specified node about all publications by this node */ @@ -184,13 +217,8 @@ void tipc_named_node_up(unsigned long nodearg) { struct tipc_node *n_ptr; struct tipc_link *l_ptr; - struct publication *publ; - struct distr_item *item = NULL; - struct sk_buff *buf = NULL; struct list_head message_list; u32 node = (u32)nodearg; - u32 left = 0; - u32 rest; u32 max_item_buf = 0; /* compute maximum amount of publication data to send per message */ @@ -214,28 +242,7 @@ void tipc_named_node_up(unsigned long nodearg) INIT_LIST_HEAD(&message_list); read_lock_bh(&tipc_nametbl_lock); - rest = publ_cluster.size * ITEM_SIZE; - - list_for_each_entry(publ, &publ_cluster.list, local_list) { - if (!buf) { - left = (rest <= max_item_buf) ? rest : max_item_buf; - rest -= left; - buf = named_prepare_buf(PUBLICATION, left, node); - if (!buf) { - warn("Bulk publication distribution failure\n"); - goto exit; - } - item = (struct distr_item *)msg_data(buf_msg(buf)); - } - publ_to_item(item, publ); - item++; - left -= ITEM_SIZE; - if (!left) { - list_add_tail((struct list_head *)buf, &message_list); - buf = NULL; - } - } -exit: + named_distribute(&message_list, node, &publ_cluster, max_item_buf); read_unlock_bh(&tipc_nametbl_lock); tipc_link_send_names(&message_list, (u32)node); -- cgit v1.2.2 From 7426a5645f3d18daec1f7d6a24b529ec7286b800 Mon Sep 17 00:00:00 2001 From: Shan Wei Date: Wed, 18 Apr 2012 18:05:46 +0000 Subject: net: fix compile error of leaking kmemleak.h header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit net/core/sysctl_net_core.c: In function ‘sysctl_core_init’: net/core/sysctl_net_core.c:259: error: implicit declaration of function ‘kmemleak_not_leak’ with same error in net/ipv4/route.c Signed-off-by: Shan Wei Signed-off-by: David S. 
Miller --- net/core/sysctl_net_core.c | 1 + net/ipv4/route.c | 1 + 2 files changed, 2 insertions(+) (limited to 'net') diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index cee599190261..247c69b7cfc2 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/route.c b/net/ipv4/route.c index bcd4744e0e4e..a1c115d750e9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -109,6 +109,7 @@ #include #ifdef CONFIG_SYSCTL #include +#include #endif #include -- cgit v1.2.2 From 9ff264492ffa2ac90698e1806bf27e716a99fee8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 02:24:17 +0000 Subject: ip6_tunnel: dont drop packet but consume it When we need to reallocate skb, we dont drop a packet. Call consume_skb() to not confuse dropwatch. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv6/ip6_tunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index e25b0fdd935c..5df487c81ed9 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -954,7 +954,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, if (skb->sk) skb_set_owner_w(new_skb, skb->sk); - kfree_skb(skb); + consume_skb(skb); skb = new_skb; } skb_dst_drop(skb); -- cgit v1.2.2 From 8460c00f6e2fb8e05421a277fee62fe09803f7a7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 02:24:28 +0000 Subject: netlink: dont drop packet but consume it When we need to clone skb, we dont drop a packet. Call consume_skb() to not confuse dropwatch. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/netlink/af_netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index faa48f70b7c9..da8721443503 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -866,7 +866,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) struct sk_buff *nskb = skb_clone(skb, allocation); if (!nskb) return skb; - kfree_skb(skb); + consume_skb(skb); skb = nskb; } -- cgit v1.2.2 From ab185d7b2551e9b8d946a657ddccd86d192bebd1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 02:24:36 +0000 Subject: ipv6: tcp: dont drop packet but consume it When we need to clone skb, we dont drop a packet. Call consume_skb() to not confuse dropwatch. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv6/tcp_ipv6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 86cfe6005f40..050c55186bc4 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1353,7 +1353,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newnp->pktoptions = NULL; if (treq->pktopts != NULL) { newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); - kfree_skb(treq->pktopts); + consume_skb(treq->pktopts); treq->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); -- cgit v1.2.2 From abc4e4fa29eb81f874d4ef3c6bafcf5ad6f90b07 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 02:24:42 +0000 Subject: packet: dont drop packet but consume it When we need to clone skb, we dont drop a packet. Call consume_skb() to not confuse dropwatch. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/packet/af_packet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d2b5f6591f1a..40053a08f009 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1654,7 +1654,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, skb->data = skb_head; skb->len = skb_len; } - kfree_skb(skb); + consume_skb(skb); skb = nskb; } -- cgit v1.2.2 From 7604adc2fffc275adc6674659e09479adf633e4a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 02:24:48 +0000 Subject: ipv6: dccp: dont drop packet but consume it When we need to clone skb, we dont drop a packet. Call consume_skb() to not confuse dropwatch. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/dccp/ipv6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index e923ac95bb04..fa9512d86f3b 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -580,7 +580,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newnp->pktoptions = NULL; if (ireq6->pktopts != NULL) { newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); - kfree_skb(ireq6->pktopts); + consume_skb(ireq6->pktopts); ireq6->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); -- cgit v1.2.2 From 85bb2a60fab28a338870faec222a1f2232b01caa Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 02:24:53 +0000 Subject: net: dont drop packet but consume it When we need to clone skb, we dont drop a packet. Call consume_skb() to not confuse dropwatch. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8f0d68d14360..43c490d52df6 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1274,7 +1274,7 @@ drop_pages: return -ENOMEM; nfrag->next = frag->next; - kfree_skb(frag); + consume_skb(frag); frag = nfrag; *fragp = frag; } -- cgit v1.2.2 From daa86548281ec9364eac2925bdf907f861204a5b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 07:07:40 +0000 Subject: net: gro: GRO_MERGED_FREE consumes packets As part of GRO processing, merged skbs should be consumed, not freed, to not confuse dropwatch/drop_monitor. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index c93812733f1d..501f3cc703dd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3542,10 +3542,13 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) break; case GRO_DROP: - case GRO_MERGED_FREE: kfree_skb(skb); break; + case GRO_MERGED_FREE: + consume_skb(skb); + break; + case GRO_HELD: case GRO_MERGED: break; -- cgit v1.2.2 From cbf8f7bb200f5dbdc9ce11243431440720db03dc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 06:10:26 +0000 Subject: ipv4: dont drop packet in defrag but consume it When defragmentation is finalized, we clone a packet and kfree_skb() it. Call consume_skb() to not confuse dropwatch, since its not a drop. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/ip_fragment.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 3727e234c884..631f596d5d70 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -569,7 +569,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, skb_morph(head, qp->q.fragments); head->next = qp->q.fragments->next; - kfree_skb(qp->q.fragments); + consume_skb(qp->q.fragments); qp->q.fragments = head; } -- cgit v1.2.2 From a909804f7c6cb83b7365ed23e9fd4c1267ee9ef0 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 17:57:52 -0400 Subject: tipc: Separate cluster-scope and zone-scope names into distinct lists Utilizes distinct lists to track zone-scope and cluster-scope names published by a node. For now, TIPC continues to process the entries in both lists in the same way; however, an upcoming patch will utilize the existence of the lists to prevent the sending of cluster-scope names to nodes that are not part of the local cluster. To achieve this, an array of publication lists is introduced, so that they can be iterated over and accessed via publ->scope as an index where convenient. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_distr.c | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 3be0eb9509df..8751ea53dfde 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -77,11 +77,29 @@ struct publ_list { u32 size; }; +static struct publ_list publ_zone = { + .list = LIST_HEAD_INIT(publ_zone.list), + .size = 0, +}; + static struct publ_list publ_cluster = { .list = LIST_HEAD_INIT(publ_cluster.list), .size = 0, }; +static struct publ_list publ_node = { + .list = LIST_HEAD_INIT(publ_node.list), + .size = 0, +}; + +static struct publ_list *publ_lists[] = { + NULL, + &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */ + &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */ + &publ_node /* publ_lists[TIPC_NODE_SCOPE] */ +}; + + /** * publ_to_item - add publication info to a publication message */ @@ -139,8 +157,8 @@ void tipc_named_publish(struct publication *publ) struct sk_buff *buf; struct distr_item *item; - list_add_tail(&publ->local_list, &publ_cluster.list); - publ_cluster.size++; + list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list); + publ_lists[publ->scope]->size++; buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); if (!buf) { @@ -163,7 +181,7 @@ void tipc_named_withdraw(struct publication *publ) struct distr_item *item; list_del(&publ->local_list); - publ_cluster.size--; + publ_lists[publ->scope]->size--; buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); if (!buf) { @@ -243,6 +261,7 @@ void tipc_named_node_up(unsigned long nodearg) read_lock_bh(&tipc_nametbl_lock); named_distribute(&message_list, node, &publ_cluster, max_item_buf); + named_distribute(&message_list, node, &publ_zone, max_item_buf); read_unlock_bh(&tipc_nametbl_lock); tipc_link_send_names(&message_list, (u32)node); @@ -340,11 +359,13 @@ void tipc_named_recv(struct sk_buff *buf) void tipc_named_reinit(void) { struct publication *publ; + int scope; write_lock_bh(&tipc_nametbl_lock); - list_for_each_entry(publ, &publ_cluster.list, local_list) - publ->node = tipc_own_addr; + for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_CLUSTER_SCOPE; scope++) + list_for_each_entry(publ, &publ_lists[scope]->list, local_list) + publ->node = tipc_own_addr; 
write_unlock_bh(&tipc_nametbl_lock); } -- cgit v1.2.2 From 1110b8d33a54d1b91131e2a70ef0c3c26425b800 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 17:57:52 -0400 Subject: tipc: Update node-scope publications when network address is assigned Ensures that node-scope name publications that exist prior to the configuration of a node's network address are properly re-initialized with that address when it is assigned. TIPC's node-scope publications are now tracked using a publications list like the lists used for cluster-scope and zone-scope publications so they can be easily updated when required. The inclusion of node scope name publications in a conventional publication list means that they must now also be withdrawn, just like cluster and zone scope publications are currently withdrawn. So some conditional tests on scope ==/!= TIPC_NODE_SCOPE are inserted/removed accordingly. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_distr.c | 14 ++++++++++---- net/tipc/name_table.c | 5 ++--- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 8751ea53dfde..211c1723a190 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -160,6 +160,9 @@ void tipc_named_publish(struct publication *publ) list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list); publ_lists[publ->scope]->size++; + if (publ->scope == TIPC_NODE_SCOPE) + return; + buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); if (!buf) { warn("Publication distribution failure\n"); @@ -183,6 +186,9 @@ void tipc_named_withdraw(struct publication *publ) list_del(&publ->local_list); publ_lists[publ->scope]->size--; + if (publ->scope == TIPC_NODE_SCOPE) + return; + buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); if (!buf) { warn("Withdrawal distribution failure\n"); @@ -349,11 +355,11 @@ void tipc_named_recv(struct sk_buff *buf) } /** - * tipc_named_reinit - re-initialize local publication list + * tipc_named_reinit - re-initialize local publications * * This routine is called whenever TIPC networking is enabled. - * All existing publications by this node that have "cluster" or "zone" scope - * are updated to reflect the node's new network address. + * All name table entries published by this node are updated to reflect + * the node's new network address. 
*/ void tipc_named_reinit(void) @@ -363,7 +369,7 @@ void tipc_named_reinit(void) write_lock_bh(&tipc_nametbl_lock); - for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_CLUSTER_SCOPE; scope++) + for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) list_for_each_entry(publ, &publ_lists[scope]->list, local_list) publ->node = tipc_own_addr; diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index c6a1ae36952e..bd80d80fb112 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -698,7 +698,7 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, table.local_publ_count++; publ = tipc_nametbl_insert_publ(type, lower, upper, scope, tipc_own_addr, port_ref, key); - if (publ && (scope != TIPC_NODE_SCOPE)) + if (likely(publ)) tipc_named_publish(publ); write_unlock_bh(&tipc_nametbl_lock); return publ; @@ -716,8 +716,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); if (likely(publ)) { table.local_publ_count--; - if (publ->scope != TIPC_NODE_SCOPE) - tipc_named_withdraw(publ); + tipc_named_withdraw(publ); write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); kfree(publ); -- cgit v1.2.2 From fd6eced8a482986784eb1f3aa0838dbdd725e71c Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 9 Nov 2011 14:22:52 -0500 Subject: tipc: Don't record failed publication attempt as a success No longer increments counter of number of publications by a node if an attempt to add a new publication fails. This prevents TIPC from incorrectly blocking future publications because the configured maximum number of publications has been reached. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_table.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index bd80d80fb112..5d7004262647 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -695,11 +695,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, } write_lock_bh(&tipc_nametbl_lock); - table.local_publ_count++; publ = tipc_nametbl_insert_publ(type, lower, upper, scope, tipc_own_addr, port_ref, key); - if (likely(publ)) + if (likely(publ)) { + table.local_publ_count++; tipc_named_publish(publ); + } write_unlock_bh(&tipc_nametbl_lock); return publ; } -- cgit v1.2.2 From 336ebf5bf524e447227cb1d785b22ca722e6afa7 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 18:02:01 -0400 Subject: tipc: Add routines for safe checking of node's network address Introduces routines that test whether a given network address is equal to a node's own network address or if it lies within the node's own network cluster, and which work properly regardless of whether the node is using the default network address <0.0.0> or a non-zero network address that is assigned later on. In essence, these routines ensure that address <0.0.0> is treated as an alias for "this node", regardless of which network address the node is actually using. Old users of the pre-existing more strict match in_own_cluster() have been accordingly redirected to what is now called in_own_cluster_exact() --- which does not extend matching to <0,0,0>. 
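The semantics can be exercised outside the kernel with a small standalone sketch; the three checks below copy the logic of the new helpers, while the address value and the test driver are invented for illustration.

#include <assert.h>
#include <stdio.h>

typedef unsigned int u32;

static u32 tipc_own_addr = 0x01001001;		/* e.g. <1.1.1> once configured */

static int in_own_node(u32 addr)
{
	return (addr == tipc_own_addr) || !addr;	/* <0.0.0> always matches */
}

static int in_own_cluster_exact(u32 addr)
{
	return !((addr ^ tipc_own_addr) >> 12);		/* strict cluster match */
}

static int in_own_cluster(u32 addr)
{
	return in_own_cluster_exact(addr) || !addr;	/* <0.0.0> always matches */
}

int main(void)
{
	assert(in_own_node(0));				/* alias for "this node" */
	assert(in_own_node(tipc_own_addr));
	assert(!in_own_node(0x01001002));		/* some other node */
	assert(in_own_cluster(0));
	assert(in_own_cluster(0x01001002));		/* same cluster, different node */
	assert(!in_own_cluster_exact(0));		/* exact variant stays strict */
	puts("address checks behave as described");
	return 0;
}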
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/addr.h | 20 +++++++++++++++++++- net/tipc/bearer.c | 2 +- net/tipc/name_table.c | 6 +++--- net/tipc/node.c | 2 +- 4 files changed, 24 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/tipc/addr.h b/net/tipc/addr.h index e4f35afe3207..d706a1d92be8 100644 --- a/net/tipc/addr.h +++ b/net/tipc/addr.h @@ -50,11 +50,29 @@ static inline u32 tipc_cluster_mask(u32 addr) return addr & TIPC_CLUSTER_MASK; } -static inline int in_own_cluster(u32 addr) +static inline int in_own_cluster_exact(u32 addr) { return !((addr ^ tipc_own_addr) >> 12); } +/** + * in_own_node - test for node inclusion; <0.0.0> always matches + */ + +static inline int in_own_node(u32 addr) +{ + return (addr == tipc_own_addr) || !addr; +} + +/** + * in_own_cluster - test for cluster inclusion; <0.0.0> always matches + */ + +static inline int in_own_cluster(u32 addr) +{ + return in_own_cluster_exact(addr) || !addr; +} + /** * addr_domain - convert 2-bit scope value to equivalent message lookup domain * diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 5dfd89c40429..0bfdeba91d51 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -449,7 +449,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) if (tipc_in_scope(disc_domain, tipc_own_addr)) { disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK; res = 0; /* accept any node in own cluster */ - } else if (in_own_cluster(disc_domain)) + } else if (in_own_cluster_exact(disc_domain)) res = 0; /* accept specified node in own cluster */ } if (res) { diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 5d7004262647..1e0518da19da 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -342,7 +342,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, list_add(&publ->zone_list, &info->zone_list); info->zone_list_size++; - if (in_own_cluster(node)) { + if (in_own_cluster_exact(node)) { list_add(&publ->cluster_list, &info->cluster_list); info->cluster_list_size++; } @@ -411,7 +411,7 @@ found: /* Remove publication from cluster scope list, if present */ - if (in_own_cluster(node)) { + if (in_own_cluster_exact(node)) { list_del(&publ->cluster_list); info->cluster_list_size--; } @@ -604,7 +604,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) publ = list_first_entry(&info->node_list, struct publication, node_list); list_move_tail(&publ->node_list, &info->node_list); - } else if (in_own_cluster(*destnode)) { + } else if (in_own_cluster_exact(*destnode)) { if (list_empty(&info->cluster_list)) goto no_match; publ = list_first_entry(&info->cluster_list, struct publication, diff --git a/net/tipc/node.c b/net/tipc/node.c index a34cabc2c43a..6a71bea91db0 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -72,7 +72,7 @@ struct tipc_node *tipc_node_find(u32 addr) struct tipc_node *node; struct hlist_node *pos; - if (unlikely(!in_own_cluster(addr))) + if (unlikely(!in_own_cluster_exact(addr))) return NULL; hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { -- cgit v1.2.2 From d4f5c12cdf43ce70731d5abfb6400bfb1be392d3 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 18:16:34 -0400 Subject: tipc: Ensure network address change doesn't impact name table updates Revises routines that add and remove an entry from a node's name table so that the publication scope lists are updated properly even if the node's network address is changed in mid-operation. 
The routines now recognize the default node address of <0.0.0> as an alias for "this node" even after a new network address has been assigned. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_table.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 1e0518da19da..25c2975d86ee 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -342,12 +342,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, list_add(&publ->zone_list, &info->zone_list); info->zone_list_size++; - if (in_own_cluster_exact(node)) { + if (in_own_cluster(node)) { list_add(&publ->cluster_list, &info->cluster_list); info->cluster_list_size++; } - if (node == tipc_own_addr) { + if (in_own_node(node)) { list_add(&publ->node_list, &info->node_list); info->node_list_size++; } @@ -411,14 +411,14 @@ found: /* Remove publication from cluster scope list, if present */ - if (in_own_cluster_exact(node)) { + if (in_own_cluster(node)) { list_del(&publ->cluster_list); info->cluster_list_size--; } /* Remove publication from node scope list, if present */ - if (node == tipc_own_addr) { + if (in_own_node(node)) { list_del(&publ->node_list); info->node_list_size--; } -- cgit v1.2.2 From 5eb0a291fbde1842b8e3f241183a0e2c1399c600 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 18:17:35 -0400 Subject: tipc: Optimize re-initialization of port message header templates Removes an unnecessary check in the logic that updates the message header template for existing ports when a node's network address is first assigned. There is no longer any need to check to see if the node's network address has actually changed since the calling routine has already verified that this is so. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/port.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'net') diff --git a/net/tipc/port.c b/net/tipc/port.c index 94d2904cce66..6156c6740f67 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -646,8 +646,6 @@ void tipc_port_reinit(void) spin_lock_bh(&tipc_port_list_lock); list_for_each_entry(p_ptr, &ports, port_list) { msg = &p_ptr->phdr; - if (msg_orignode(msg) == tipc_own_addr) - break; msg_set_prevnode(msg, tipc_own_addr); msg_set_orignode(msg, tipc_own_addr); } -- cgit v1.2.2 From f21536d1e73c36b37c50f71013c67f19db77d4b8 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 18:22:49 -0400 Subject: tipc: Ensure network address change doesn't impact new port Re-orders port creation logic so that the initialization of a new port's message header template occurs while the port list lock is held. This ensures that a change to the node's network address that occurs at the same time as the port is being created does not result in the template identifying the sender using the former network address. The new approach guarantees that the new port's template is using the current network address or that it will be updated when the address changes. 
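A compact userspace model of that ordering, with a plain mutex standing in for the port list lock and an array standing in for the port list (all names invented): because the template fill-in and the list insertion share one critical section with the address-change path, a port is either created with the current address or found on the list and rewritten when the address changes.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_list_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int own_addr;			/* starts at 0, i.e. <0.0.0> */

#define MAX_PORTS 16
static unsigned int port_templates[MAX_PORTS];
static int nr_ports;

static void create_port(void)
{
	pthread_mutex_lock(&port_list_lock);
	/* template init and list insertion form one critical section */
	port_templates[nr_ports++] = own_addr;
	pthread_mutex_unlock(&port_list_lock);
}

static void set_own_addr(unsigned int addr)
{
	pthread_mutex_lock(&port_list_lock);
	own_addr = addr;
	for (int i = 0; i < nr_ports; i++)	/* reinit every existing template */
		port_templates[i] = addr;
	pthread_mutex_unlock(&port_list_lock);
}

int main(void)					/* single-threaded driver, just the call pattern */
{
	create_port();
	set_own_addr(0x01001001);
	create_port();
	for (int i = 0; i < nr_ports; i++)
		printf("port %d template addr %#x\n", i, port_templates[i]);
	return 0;
}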
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/port.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/tipc/port.c b/net/tipc/port.c index 6156c6740f67..3b7162c1f074 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -221,18 +221,25 @@ struct tipc_port *tipc_createport_raw(void *usr_handle, p_ptr->usr_handle = usr_handle; p_ptr->max_pkt = MAX_PKT_DEFAULT; p_ptr->ref = ref; - msg = &p_ptr->phdr; - tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); - msg_set_origport(msg, ref); INIT_LIST_HEAD(&p_ptr->wait_list); INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); p_ptr->dispatcher = dispatcher; p_ptr->wakeup = wakeup; p_ptr->user_port = NULL; k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); - spin_lock_bh(&tipc_port_list_lock); INIT_LIST_HEAD(&p_ptr->publications); INIT_LIST_HEAD(&p_ptr->port_list); + + /* + * Must hold port list lock while initializing message header template + * to ensure a change to node's own network address doesn't result + * in template containing out-dated network address information + */ + + spin_lock_bh(&tipc_port_list_lock); + msg = &p_ptr->phdr; + tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); + msg_set_origport(msg, ref); list_add_tail(&p_ptr->port_list, &ports); spin_unlock_bh(&tipc_port_list_lock); return p_ptr; -- cgit v1.2.2 From d0e17fedc2aeb0c4db09434787ef6d432582e050 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 18:36:42 -0400 Subject: tipc: delete duplicate peerport/peernode helper functions Prior to commit 23dd4cce387124ec3ea06ca30d17854ae4d9b772 "tipc: Combine port structure with tipc_port structure" there was a need for the two sets of helper functions. But now they are just duplicates. Remove the globally visible ones, and mark the remaining ones as inline. 
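The surviving pattern is simply a pair of file-local static inline accessors over the header template, roughly as in this stand-alone sketch (structures reduced to invented stand-ins):

#include <stdio.h>

struct msg  { unsigned int destnode, destport; };
struct port { struct msg phdr; };

/* one local, inline copy instead of a second globally exported set */
static inline unsigned int port_peernode(struct port *p) { return p->phdr.destnode; }
static inline unsigned int port_peerport(struct port *p) { return p->phdr.destport; }

int main(void)
{
	struct port p = { { 0x01001001, 42 } };
	printf("peer %#x:%u\n", port_peernode(&p), port_peerport(&p));
	return 0;
}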
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/port.c | 8 ++++---- net/tipc/port.h | 10 ---------- 2 files changed, 4 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/tipc/port.c b/net/tipc/port.c index 3b7162c1f074..f1f6b33f761c 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -59,12 +59,12 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err); static void port_timeout(unsigned long ref); -static u32 port_peernode(struct tipc_port *p_ptr) +static inline u32 port_peernode(struct tipc_port *p_ptr) { return msg_destnode(&p_ptr->phdr); } -static u32 port_peerport(struct tipc_port *p_ptr) +static inline u32 port_peerport(struct tipc_port *p_ptr) { return msg_destport(&p_ptr->phdr); } @@ -1159,9 +1159,9 @@ int tipc_port_recv_msg(struct sk_buff *buf) if (likely(p_ptr)) { if (likely(p_ptr->connected)) { if ((unlikely(msg_origport(msg) != - tipc_peer_port(p_ptr))) || + port_peerport(p_ptr))) || (unlikely(msg_orignode(msg) != - tipc_peer_node(p_ptr))) || + port_peernode(p_ptr))) || (unlikely(!msg_connected(msg)))) { err = TIPC_ERR_NO_PORT; tipc_port_unlock(p_ptr); diff --git a/net/tipc/port.h b/net/tipc/port.h index 9b88531e5a61..0a632a6fc059 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -257,16 +257,6 @@ static inline struct tipc_port *tipc_port_deref(u32 ref) return (struct tipc_port *)tipc_ref_deref(ref); } -static inline u32 tipc_peer_port(struct tipc_port *p_ptr) -{ - return msg_destport(&p_ptr->phdr); -} - -static inline u32 tipc_peer_node(struct tipc_port *p_ptr) -{ - return msg_destnode(&p_ptr->phdr); -} - static inline int tipc_port_congested(struct tipc_port *p_ptr) { return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); -- cgit v1.2.2 From f0712e86b75f4839773abbc01d5baa7e36e378c2 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Tue, 17 Apr 2012 18:42:28 -0400 Subject: tipc: Ensure network address change doesn't impact local connections Revises routines that deal with connections between two ports on the same node to ensure the connection is not impacted if the node's network address is changed in mid-operation. The routines now treat the default node address of <0.0.0> as an alias for "this node" in the following situations: 1) Incoming messages destined to a connected port now handle the alias properly when validating that the message was sent by the expected peer port, ensuring that the message will be accepted regardless of whether it specifies the node's old network address or it's current one. 2) The code which completes connection establishment now handles the alias properly when determining if the peer port is on the same node as the connected port. An added benefit of addressing issue 1) is that some peer port validation code has been relocated to TIPC's socket subsystem, which means that validation is no longer done twice when a message is sent to a non-socket port (such as TIPC's configuration service or network topology service). 
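Item 1) boils down to the peer test below, shown as a standalone sketch over plain integers (the real tipc_port_peer_msg() also compares the origin port first); the driver values are invented for illustration.

#include <assert.h>

typedef unsigned int u32;

static u32 tipc_own_addr;	/* 0 until the network address is configured */

static int peer_node_matches(u32 orignode, u32 peernode)
{
	return (orignode == peernode) ||
	       (!orignode && peernode == tipc_own_addr) ||
	       (!peernode && orignode == tipc_own_addr);
}

int main(void)
{
	/* Connection set up while the node still used <0.0.0>... */
	u32 peernode_in_port = 0;

	/* ...then the node is assigned a real address. */
	tipc_own_addr = 0x01001001;

	/* A message stamped with the new address is still accepted. */
	assert(peer_node_matches(0x01001001, peernode_in_port));
	/* One stamped with the 0 alias matches a port that recorded the new address. */
	assert(peer_node_matches(0, 0x01001001));
	/* A genuinely different node is still rejected. */
	assert(!peer_node_matches(0x01001002, peernode_in_port));
	return 0;
}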
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/node_subscr.c | 2 +- net/tipc/port.c | 59 +++++++++++++++++++++++++------------------------- net/tipc/port.h | 1 + net/tipc/socket.c | 3 ++- 4 files changed, 34 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c index c3c2815ae630..327ffbbfee00 100644 --- a/net/tipc/node_subscr.c +++ b/net/tipc/node_subscr.c @@ -45,7 +45,7 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, void *usr_handle, net_ev_handler handle_down) { - if (addr == tipc_own_addr) { + if (in_own_node(addr)) { node_sub->node = NULL; return; } diff --git a/net/tipc/port.c b/net/tipc/port.c index f1f6b33f761c..616c72fb9234 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -69,6 +69,28 @@ static inline u32 port_peerport(struct tipc_port *p_ptr) return msg_destport(&p_ptr->phdr); } +/* + * tipc_port_peer_msg - verify message was sent by connected port's peer + * + * Handles cases where the node's network address has changed from + * the default of <0.0.0> to its configured setting. + */ + +int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg) +{ + u32 peernode; + u32 orignode; + + if (msg_origport(msg) != port_peerport(p_ptr)) + return 0; + + orignode = msg_orignode(msg); + peernode = port_peernode(p_ptr); + return (orignode == peernode) || + (!orignode && (peernode == tipc_own_addr)) || + (!peernode && (orignode == tipc_own_addr)); +} + /** * tipc_multicast - send a multicast message to local and remote destinations */ @@ -526,25 +548,21 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf) struct tipc_msg *msg = buf_msg(buf); struct tipc_port *p_ptr; struct sk_buff *r_buf = NULL; - u32 orignode = msg_orignode(msg); - u32 origport = msg_origport(msg); u32 destport = msg_destport(msg); int wakeable; /* Validate connection */ p_ptr = tipc_port_lock(destport); - if (!p_ptr || !p_ptr->connected || - (port_peernode(p_ptr) != orignode) || - (port_peerport(p_ptr) != origport)) { + if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) { r_buf = tipc_buf_acquire(BASIC_H_SIZE); if (r_buf) { msg = buf_msg(r_buf); tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG, - BASIC_H_SIZE, orignode); + BASIC_H_SIZE, msg_orignode(msg)); msg_set_errcode(msg, TIPC_ERR_NO_PORT); msg_set_origport(msg, destport); - msg_set_destport(msg, origport); + msg_set_destport(msg, msg_origport(msg)); } if (p_ptr) tipc_port_unlock(p_ptr); @@ -681,6 +699,7 @@ static void port_dispatcher_sigh(void *dummy) struct tipc_name_seq dseq; void *usr_handle; int connected; + int peer_invalid; int published; u32 message_type; @@ -701,6 +720,7 @@ static void port_dispatcher_sigh(void *dummy) up_ptr = p_ptr->user_port; usr_handle = up_ptr->usr_handle; connected = p_ptr->connected; + peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg); published = p_ptr->published; if (unlikely(msg_errcode(msg))) @@ -710,8 +730,6 @@ static void port_dispatcher_sigh(void *dummy) case TIPC_CONN_MSG:{ tipc_conn_msg_event cb = up_ptr->conn_msg_cb; - u32 peer_port = port_peerport(p_ptr); - u32 peer_node = port_peernode(p_ptr); u32 dsz; tipc_port_unlock(p_ptr); @@ -720,8 +738,7 @@ static void port_dispatcher_sigh(void *dummy) if (unlikely(!connected)) { if (tipc_connect2port(dref, &orig)) goto reject; - } else if ((msg_origport(msg) != peer_port) || - (msg_orignode(msg) != peer_node)) + } else if (peer_invalid) goto reject; dsz = msg_data_sz(msg); if (unlikely(dsz && @@ -773,14 +790,9 @@ err: case 
TIPC_CONN_MSG:{ tipc_conn_shutdown_event cb = up_ptr->conn_err_cb; - u32 peer_port = port_peerport(p_ptr); - u32 peer_node = port_peernode(p_ptr); tipc_port_unlock(p_ptr); - if (!cb || !connected) - break; - if ((msg_origport(msg) != peer_port) || - (msg_orignode(msg) != peer_node)) + if (!cb || !connected || peer_invalid) break; tipc_disconnect(dref); skb_pull(buf, msg_hdr_sz(msg)); @@ -1157,17 +1169,6 @@ int tipc_port_recv_msg(struct sk_buff *buf) /* validate destination & pass to port, otherwise reject message */ p_ptr = tipc_port_lock(destport); if (likely(p_ptr)) { - if (likely(p_ptr->connected)) { - if ((unlikely(msg_origport(msg) != - port_peerport(p_ptr))) || - (unlikely(msg_orignode(msg) != - port_peernode(p_ptr))) || - (unlikely(!msg_connected(msg)))) { - err = TIPC_ERR_NO_PORT; - tipc_port_unlock(p_ptr); - goto reject; - } - } err = p_ptr->dispatcher(p_ptr, buf); tipc_port_unlock(p_ptr); if (likely(!err)) @@ -1175,7 +1176,7 @@ int tipc_port_recv_msg(struct sk_buff *buf) } else { err = TIPC_ERR_NO_PORT; } -reject: + return tipc_reject_msg(buf, err); } diff --git a/net/tipc/port.h b/net/tipc/port.h index 0a632a6fc059..301e1bd840d1 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -201,6 +201,7 @@ int tipc_shutdown(u32 ref); * The following routines require that the port be locked on entry */ int tipc_disconnect_port(struct tipc_port *tp_ptr); +int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg); /* * TIPC messaging routines diff --git a/net/tipc/socket.c b/net/tipc/socket.c index bcb3314ef164..c19fc4a228a8 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1236,7 +1236,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) if (msg_mcast(msg)) return TIPC_ERR_NO_PORT; if (sock->state == SS_CONNECTED) { - if (!msg_connected(msg)) + if (!msg_connected(msg) || + !tipc_port_peer_msg(tipc_sk_port(sk), msg)) return TIPC_ERR_NO_PORT; } else if (sock->state == SS_CONNECTING) { if (!msg_connected(msg) && (msg_errcode(msg) == 0)) -- cgit v1.2.2 From 974a5a864bf959b7f3412a31ee8ce001c6628451 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 18 Apr 2012 09:12:09 -0400 Subject: tipc: take lock while updating node network address The routine that changes the node's network address now takes TIPC's network lock in write mode while the main address variable and associated data structures are being changed; this is needed to ensure that the link subsystem won't attempt to send a message off-node until the sending port's message header template has been updated with the node's new network address. 
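A minimal pthread rwlock sketch of that rule, with invented names: the address and the data derived from it change only under the write side, so a reader on the send path never sees the two disagree.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t net_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int own_addr;
static unsigned int header_template;	/* stands in for the per-port templates */

static void net_start(unsigned int addr)
{
	pthread_rwlock_wrlock(&net_lock);
	own_addr = addr;
	header_template = addr;		/* named/port reinit analogue */
	pthread_rwlock_unlock(&net_lock);
}

static void send_msg(void)
{
	pthread_rwlock_rdlock(&net_lock);
	/* under the read lock these two always agree */
	printf("sending from %#x with template %#x\n", own_addr, header_template);
	pthread_rwlock_unlock(&net_lock);
}

int main(void)
{
	send_msg();
	net_start(0x01001001);
	send_msg();
	return 0;
}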
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/net.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/tipc/net.c b/net/tipc/net.c index d4531b07076c..5fab4ff24a94 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -178,11 +178,12 @@ int tipc_net_start(u32 addr) tipc_subscr_stop(); tipc_cfg_stop(); + write_lock_bh(&tipc_net_lock); tipc_own_addr = addr; tipc_named_reinit(); tipc_port_reinit(); - tipc_bclink_init(); + write_unlock_bh(&tipc_net_lock); tipc_k_signal((Handler)tipc_subscr_start, 0); tipc_k_signal((Handler)tipc_cfg_init, 0); -- cgit v1.2.2 From b8f683d126c1cb757e794d6d904cbe7cf5954797 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 18 Apr 2012 09:22:56 -0400 Subject: tipc: properly handle off-node send requests with invalid addr There are two send routines that might conceivably be asked by an application to send a message off-node when the node is still using the default network address. These now have an added check that detects this and rejects the message gracefully. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/port.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/tipc/port.c b/net/tipc/port.c index 616c72fb9234..dc7f916b2e10 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -1270,10 +1270,14 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, if (likely(destnode == tipc_own_addr)) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); - else + else if (tipc_own_addr) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, destnode); + else + res = tipc_port_reject_sections(p_ptr, msg, msg_sect, + num_sect, total_len, + TIPC_ERR_NO_NODE); if (likely(res != -ELINKCONG)) { if (res > 0) p_ptr->sent++; @@ -1314,9 +1318,12 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest, if (dest->node == tipc_own_addr) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); - else + else if (tipc_own_addr) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, dest->node); + else + res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, + total_len, TIPC_ERR_NO_NODE); if (likely(res != -ELINKCONG)) { if (res > 0) p_ptr->sent++; -- cgit v1.2.2 From 8a55fe74b1a767cb00d6248a847068c9d886d710 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 18 Apr 2012 09:27:22 -0400 Subject: tipc: handle <0.0.0> as an alias for this node on outgoing msgs Revises handling of send routines for payload messages to ensure that they are processed properly even if the node's network address is changed in mid-operation. The routines now treat the default node address of <0.0.0> as an alias for "this node" when determining where to send an outgoing message. 
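Taken together with the previous patch, the send-side decision reduces to the small dispatch sketch below (the enum, helper and driver are illustrative only, not the TIPC API): local delivery for this node including the <0.0.0> alias, a link-layer send once a real address is configured, and a no-node style rejection otherwise.

#include <stdio.h>

typedef unsigned int u32;

static u32 tipc_own_addr;			/* 0 until configured */

enum route { DELIVER_LOCAL, SEND_OFF_NODE, REJECT_NO_NODE };

static int in_own_node(u32 addr) { return addr == tipc_own_addr || !addr; }

static enum route route_for(u32 destnode)
{
	if (in_own_node(destnode))
		return DELIVER_LOCAL;
	if (tipc_own_addr)			/* only route off-node once we have an address */
		return SEND_OFF_NODE;
	return REJECT_NO_NODE;
}

int main(void)
{
	printf("dest <0.0.0>, no addr yet: %d\n", route_for(0));		/* DELIVER_LOCAL */
	printf("dest 0x01001002, no addr yet: %d\n", route_for(0x01001002));	/* REJECT_NO_NODE */
	tipc_own_addr = 0x01001001;
	printf("dest 0x01001002, addr set: %d\n", route_for(0x01001002));	/* SEND_OFF_NODE */
	return 0;
}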
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/port.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/tipc/port.c b/net/tipc/port.c index dc7f916b2e10..c50819bc109a 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -1217,7 +1217,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, p_ptr->congested = 1; if (!tipc_port_congested(p_ptr)) { destnode = port_peernode(p_ptr); - if (likely(destnode != tipc_own_addr)) + if (likely(!in_own_node(destnode))) res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, total_len, destnode); else @@ -1267,7 +1267,7 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, msg_set_destport(msg, destport); if (likely(destport || destnode)) { - if (likely(destnode == tipc_own_addr)) + if (likely(in_own_node(destnode))) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); else if (tipc_own_addr) @@ -1315,7 +1315,7 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest, msg_set_destport(msg, dest->ref); msg_set_hdr_sz(msg, BASIC_H_SIZE); - if (dest->node == tipc_own_addr) + if (in_own_node(dest->node)) res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, total_len); else if (tipc_own_addr) @@ -1362,7 +1362,7 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, skb_push(buf, BASIC_H_SIZE); skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE); - if (dest->node == tipc_own_addr) + if (in_own_node(dest->node)) res = tipc_port_recv_msg(buf); else res = tipc_send_buf_fast(buf, dest->node); -- cgit v1.2.2 From 630d920dcae546c4e8ef6c01e7c49b2f42822c5f Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 18 Apr 2012 09:42:29 -0400 Subject: tipc: Ensure network address change doesn't impact rejected message Revises handling of a rejected message to ensure that a locally originated message is returned properly even if the node's network address is changed in mid-operation. The routine now treats the default node address of <0.0.0> as an alias for "this node" when determining where to send a returned message. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/port.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/tipc/port.c b/net/tipc/port.c index c50819bc109a..0f40b1055306 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -444,7 +444,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) /* send returned message & dispose of rejected message */ src_node = msg_prevnode(msg); - if (src_node == tipc_own_addr) + if (in_own_node(src_node)) tipc_port_recv_msg(rbuf); else tipc_link_send(rbuf, src_node, msg_link_selector(rmsg)); -- cgit v1.2.2 From 9d52ce4bd3fa9e0cf1658791f2c680e20e0598a1 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 18 Apr 2012 09:42:56 -0400 Subject: tipc: Ensure network address change doesn't impact configuration service Enhances command validation done by TIPC's configuration service so that it works properly even if the node's network address is changed in mid-operation. The default node address of <0.0.0> is now recognized as an alias for "this node" even after a new network address has been assigned. 
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/config.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/tipc/config.c b/net/tipc/config.c index f76d3b15e4e2..f5458eddd7bc 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -290,7 +290,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area /* Check command authorization */ - if (likely(orig_node == tipc_own_addr)) { + if (likely(in_own_node(orig_node))) { /* command is permitted */ } else if (cmd >= 0x8000) { rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED -- cgit v1.2.2 From ab41a2ca50d27ee2dc8b9eef07aeb251168271be Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:18:47 +0000 Subject: net: Implement register_net_sysctl. Right now all of the networking sysctl registrations are running in a compatibiity mode. The natvie sysctl registration api takes a cstring for a path and a simple ctl_table. Implement register_net_sysctl so that we can register network sysctls without needing to use compatiblity code in the sysctl core. Switching from a ctl_path to a cstring results in less boiler plate and denser code that is a little easier to read. I would simply have changed the arguments to register_net_sysctl_table instead of keeping two functions in parallel but gcc will allow a ctl_path pointer to be passed to a char * pointer with only issuing a warning resulting in completely incorrect code can be built. Since I have to change the function name I am taking advantage of the situation to let both register_net_sysctl and register_net_sysctl_table live for a short time in parallel which makes clean conversion patches a bit easier to read and write. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/sysctl_net.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'net') diff --git a/net/sysctl_net.c b/net/sysctl_net.c index c3e65aebecc0..3865c4f76b82 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -117,6 +117,13 @@ struct ctl_table_header *register_net_sysctl_rotable(const } EXPORT_SYMBOL_GPL(register_net_sysctl_rotable); +struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table) +{ + return __register_sysctl_table(&net->sysctls, path, table); +} +EXPORT_SYMBOL_GPL(register_net_sysctl); + void unregister_net_sysctl_table(struct ctl_table_header *header) { unregister_sysctl_table(header); -- cgit v1.2.2 From bc8a36942a5c05896d373a8e98d5e6b14514b4bc Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:19:46 +0000 Subject: net sysctl: Register an empty /proc/sys/net Implementation limitations of the sysctl core won't let /proc/sys/net reside in a network namespace. /proc/sys/net at least must be registered as a normal sysctl. So register /proc/sys/net early as an empty directory to guarantee we don't violate this constraint and hit bugs in the sysctl implementation. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/sysctl_net.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 3865c4f76b82..2b2986dd04ae 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -88,9 +88,18 @@ static struct pernet_operations sysctl_pernet_ops = { .exit = sysctl_net_exit, }; +static struct ctl_table_header *net_header; static __init int net_sysctl_init(void) { - int ret; + static struct ctl_table empty[1]; + int ret = -ENOMEM; + /* Avoid limitations in the sysctl implementation by + * registering "/proc/sys/net" as an empty directory not in a + * network namespace. + */ + net_header = register_sysctl("net", empty); + if (!net_header) + goto out; ret = register_pernet_subsys(&sysctl_pernet_ops); if (ret) goto out; -- cgit v1.2.2 From 2ca794e5e86c800d7f98c4ebb8bd325099c0afe8 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:20:32 +0000 Subject: net sysctl: Initialize the network sysctls sooner to avoid problems. If the netfilter code is modified to use register_net_sysctl_table the kernel fails to boot because the per net sysctl infrasturce is not setup soon enough. So to avoid races call net_sysctl_init from sock_init(). Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/socket.c | 6 ++++++ net/sysctl_net.c | 3 +-- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/socket.c b/net/socket.c index d6c1af9ae7b2..e34510928dcc 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2524,6 +2524,12 @@ EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; + /* + * Initialize the network sysctl infrastructure. + */ + err = net_sysctl_init(); + if (err) + goto out; /* * Initialize sock SLAB cache. diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 2b2986dd04ae..ce97237b653f 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -89,7 +89,7 @@ static struct pernet_operations sysctl_pernet_ops = { }; static struct ctl_table_header *net_header; -static __init int net_sysctl_init(void) +__init int net_sysctl_init(void) { static struct ctl_table empty[1]; int ret = -ENOMEM; @@ -109,7 +109,6 @@ static __init int net_sysctl_init(void) out: return ret; } -subsys_initcall(net_sysctl_init); struct ctl_table_header *register_net_sysctl_table(struct net *net, const struct ctl_path *path, struct ctl_table *table) -- cgit v1.2.2 From 4344475797a16ef948385780943f7a5cf09f0675 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:22:55 +0000 Subject: net: Kill register_sysctl_rotable register_sysctl_rotable never caught on as an interesting way to register sysctls. My take on the situation is that what we want are sysctls that we can only see in the initial network namespace. What we have implemented with register_sysctl_rotable are sysctls that we can see in all of the network namespaces and can only change in the initial network namespace. That is a very silly way to go. Just register the network sysctls in the initial network namespace and we don't have any weird special cases to deal with. The sysctls affected are: /proc/sys/net/ipv4/ipfrag_secret_interval /proc/sys/net/ipv4/ipfrag_max_dist /proc/sys/net/ipv6/ip6frag_secret_interval /proc/sys/net/ipv6/mld_max_msf I really don't expect anyone will miss them if they can't read them in a child user namespace. CC: Pavel Emelyanov Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/core/sysctl_net_core.c | 2 +- net/ipv4/ip_fragment.c | 2 +- net/ipv6/reassembly.c | 2 +- net/ipv6/sysctl_net_ipv6.c | 2 +- net/sysctl_net.c | 23 ----------------------- 5 files changed, 4 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 247c69b7cfc2..8f67633b484e 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -258,7 +258,7 @@ static __init int sysctl_core_init(void) static struct ctl_table empty[1]; kmemleak_not_leak(register_sysctl_paths(net_core_path, empty)); - register_net_sysctl_rotable(net_core_path, net_core_table); + register_net_sysctl(&init_net, "net/core", net_core_table); return register_pernet_subsys(&sysctl_core_ops); } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 631f596d5d70..6a2f85cd440e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -807,7 +807,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) static void ip4_frags_ctl_register(void) { - register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table); + register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } #else static inline int ip4_frags_ns_ctl_register(struct net *net) diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 9447bd69873a..42f4f7c0948a 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -674,7 +674,7 @@ static struct ctl_table_header *ip6_ctl_header; static int ip6_frags_sysctl_register(void) { - ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path, + ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6", ip6_frags_ctl_table); return ip6_ctl_header == NULL ? -ENOMEM : 0; } diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 166a57c47d39..06f21e5ad361 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -140,7 +140,7 @@ int ipv6_sysctl_register(void) { int err = -ENOMEM; - ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable); + ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable); if (ip6_header == NULL) goto out; diff --git a/net/sysctl_net.c b/net/sysctl_net.c index ce97237b653f..2b8d1d950987 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -59,19 +59,6 @@ static struct ctl_table_root net_sysctl_root = { .permissions = net_ctl_permissions, }; -static int net_ctl_ro_header_perms(struct ctl_table_root *root, - struct nsproxy *namespaces, struct ctl_table *table) -{ - if (net_eq(namespaces->net_ns, &init_net)) - return table->mode; - else - return table->mode & ~0222; -} - -static struct ctl_table_root net_sysctl_ro_root = { - .permissions = net_ctl_ro_header_perms, -}; - static int __net_init sysctl_net_init(struct net *net) { setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen); @@ -103,8 +90,6 @@ __init int net_sysctl_init(void) ret = register_pernet_subsys(&sysctl_pernet_ops); if (ret) goto out; - setup_sysctl_set(&net_sysctl_ro_root.default_set, &net_sysctl_ro_root, NULL); - register_sysctl_root(&net_sysctl_ro_root); register_sysctl_root(&net_sysctl_root); out: return ret; @@ -117,14 +102,6 @@ struct ctl_table_header *register_net_sysctl_table(struct net *net, } EXPORT_SYMBOL_GPL(register_net_sysctl_table); -struct ctl_table_header *register_net_sysctl_rotable(const - struct ctl_path *path, struct ctl_table *table) -{ - return __register_sysctl_paths(&net_sysctl_ro_root.default_set, - path, table); -} -EXPORT_SYMBOL_GPL(register_net_sysctl_rotable); - struct 
ctl_table_header *register_net_sysctl(struct net *net, const char *path, struct ctl_table *table) { -- cgit v1.2.2 From 5dd3df105b9f6cb7dd2472b59e028d0d1c878ecb Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:24:33 +0000 Subject: net: Move all of the network sysctls without a namespace into init_net. This makes it clearer which sysctls are relative to your current network namespace. This makes it a little less error prone by not exposing sysctls for the initial network namespace in other namespaces. This is the same way we handle all of our other network interfaces to userspace and I can't honestly remember why we didn't do this for sysctls right from the start. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/802/tr.c | 2 +- net/appletalk/sysctl_net_atalk.c | 4 ++-- net/ax25/sysctl_net_ax25.c | 4 ++-- net/bridge/br_netfilter.c | 4 ++-- net/core/neighbour.c | 2 +- net/core/sysctl_net_core.c | 2 +- net/dccp/sysctl.c | 4 ++-- net/decnet/dn_dev.c | 4 ++-- net/decnet/sysctl_net_decnet.c | 4 ++-- net/ipv4/netfilter/ip_queue.c | 6 +++--- net/ipv4/route.c | 2 +- net/ipv4/sysctl_net_ipv4.c | 4 ++-- net/ipv6/netfilter/ip6_queue.c | 6 +++--- net/ipv6/netfilter/nf_conntrack_reasm.c | 4 ++-- net/ipv6/sysctl_net_ipv6.c | 2 +- net/ipx/sysctl_net_ipx.c | 5 +++-- net/irda/irsysctl.c | 4 ++-- net/llc/sysctl_net_llc.c | 5 +++-- net/netfilter/nf_conntrack_proto.c | 4 ++-- net/netfilter/nf_conntrack_standalone.c | 6 +++--- net/netfilter/nf_log.c | 2 +- net/netrom/sysctl_net_netrom.c | 4 ++-- net/phonet/sysctl.c | 4 ++-- net/rds/ib_sysctl.c | 4 ++-- net/rds/iw_sysctl.c | 4 ++-- net/rds/sysctl.c | 4 ++-- net/rose/sysctl_net_rose.c | 4 ++-- net/sctp/sysctl.c | 4 ++-- net/unix/sysctl_net_unix.c | 2 +- net/x25/sysctl_net_x25.c | 4 ++-- 30 files changed, 58 insertions(+), 56 deletions(-) (limited to 'net') diff --git a/net/802/tr.c b/net/802/tr.c index e65f0b8de0c4..103e0201b0ab 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -662,7 +662,7 @@ static int __init rif_init(void) setup_timer(&rif_timer, rif_check_expire, 0); add_timer(&rif_timer); #ifdef CONFIG_SYSCTL - register_sysctl_paths(tr_path, tr_table); + register_net_sysctl_table(&init_net, tr_path, tr_table); #endif proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); return 0; diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c index 04e9c0da7aa9..5edce8f70cb7 100644 --- a/net/appletalk/sysctl_net_atalk.c +++ b/net/appletalk/sysctl_net_atalk.c @@ -52,10 +52,10 @@ static struct ctl_table_header *atalk_table_header; void atalk_register_sysctl(void) { - atalk_table_header = register_sysctl_paths(atalk_path, atalk_table); + atalk_table_header = register_net_sysctl_table(&init_net, atalk_path, atalk_table); } void atalk_unregister_sysctl(void) { - unregister_sysctl_table(atalk_table_header); + unregister_net_sysctl_table(atalk_table_header); } diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index ebe0ef3f1d83..7ba381b6f074 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -196,13 +196,13 @@ void ax25_register_sysctl(void) } spin_unlock_bh(&ax25_dev_lock); - ax25_table_header = register_sysctl_paths(ax25_path, ax25_table); + ax25_table_header = register_net_sysctl_table(&init_net, ax25_path, ax25_table); } void ax25_unregister_sysctl(void) { ctl_table *p; - unregister_sysctl_table(ax25_table_header); + unregister_net_sysctl_table(ax25_table_header); for (p = ax25_table; p->procname; p++) 
kfree(p->child); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index dec4f3817133..4f4c4a619f68 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -1030,7 +1030,7 @@ int __init br_netfilter_init(void) return ret; } #ifdef CONFIG_SYSCTL - brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); + brnf_sysctl_header = register_net_sysctl_table(&init_net, brnf_path, brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); @@ -1047,7 +1047,7 @@ void br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL - unregister_sysctl_table(brnf_sysctl_header); + unregister_net_sysctl_table(brnf_sysctl_header); #endif dst_entries_destroy(&fake_dst_ops); } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 434eed8c6185..0c2df3d3cfbf 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3017,7 +3017,7 @@ void neigh_sysctl_unregister(struct neigh_parms *p) if (p->sysctl_table) { struct neigh_sysctl_table *t = p->sysctl_table; p->sysctl_table = NULL; - unregister_sysctl_table(t->sysctl_header); + unregister_net_sysctl_table(t->sysctl_header); kfree(t->dev_name); kfree(t); } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 8f67633b484e..7d3772e0d150 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -257,7 +257,7 @@ static __init int sysctl_core_init(void) { static struct ctl_table empty[1]; - kmemleak_not_leak(register_sysctl_paths(net_core_path, empty)); + kmemleak_not_leak(register_net_sysctl_table(&init_net, net_core_path, empty)); register_net_sysctl(&init_net, "net/core", net_core_table); return register_pernet_subsys(&sysctl_core_ops); } diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 42348824ee31..329e1390c26d 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c @@ -109,7 +109,7 @@ static struct ctl_table_header *dccp_table_header; int __init dccp_sysctl_init(void) { - dccp_table_header = register_sysctl_paths(dccp_path, + dccp_table_header = register_net_sysctl_table(&init_net, dccp_path, dccp_default_table); return dccp_table_header != NULL ? 
0 : -ENOMEM; @@ -118,7 +118,7 @@ int __init dccp_sysctl_init(void) void dccp_sysctl_exit(void) { if (dccp_table_header != NULL) { - unregister_sysctl_table(dccp_table_header); + unregister_net_sysctl_table(dccp_table_header); dccp_table_header = NULL; } } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index a4aecb09d12a..ce8a18471845 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -236,7 +236,7 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms * t->dn_dev_vars[0].extra1 = (void *)dev; - t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars); + t->sysctl_header = register_net_sysctl_table(&init_net, dn_ctl_path, t->dn_dev_vars); if (t->sysctl_header == NULL) kfree(t); else @@ -248,7 +248,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) if (parms->sysctl) { struct dn_dev_sysctl_table *t = parms->sysctl; parms->sysctl = NULL; - unregister_sysctl_table(t->sysctl_header); + unregister_net_sysctl_table(t->sysctl_header); kfree(t); } } diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index 02e75d11cfbb..4380b8edea72 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -359,12 +359,12 @@ static struct ctl_path dn_path[] = { void dn_register_sysctl(void) { - dn_table_header = register_sysctl_paths(dn_path, dn_table); + dn_table_header = register_net_sysctl_table(&init_net, dn_path, dn_table); } void dn_unregister_sysctl(void) { - unregister_sysctl_table(dn_table_header); + unregister_net_sysctl_table(dn_table_header); } #else /* CONFIG_SYSCTL */ diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 94d45e1f8882..766485d7d099 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -586,7 +586,7 @@ static int __init ip_queue_init(void) #endif register_netdevice_notifier(&ipq_dev_notifier); #ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table); + ipq_sysctl_header = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, ipq_table); #endif status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh); if (status < 0) { @@ -597,7 +597,7 @@ static int __init ip_queue_init(void) cleanup_sysctl: #ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); + unregister_net_sysctl_table(ipq_sysctl_header); #endif unregister_netdevice_notifier(&ipq_dev_notifier); proc_net_remove(&init_net, IPQ_PROC_FS_NAME); @@ -618,7 +618,7 @@ static void __exit ip_queue_fini(void) ipq_flush(NULL, 0); #ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); + unregister_net_sysctl_table(ipq_sysctl_header); #endif unregister_netdevice_notifier(&ipq_dev_notifier); proc_net_remove(&init_net, IPQ_PROC_FS_NAME); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a1c115d750e9..86866a4b537f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3515,6 +3515,6 @@ int __init ip_rt_init(void) */ void __init ip_static_sysctl_init(void) { - kmemleak_not_leak(register_sysctl_paths(ipv4_path, ipv4_skeleton)); + kmemleak_not_leak(register_net_sysctl_table(&init_net, ipv4_path, ipv4_skeleton)); } #endif diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 34a628625d9c..e7a6fa3d70bb 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -857,12 +857,12 @@ static __init int sysctl_ipv4_init(void) if (!i->procname) return -EINVAL; - hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); + hdr = register_net_sysctl_table(&init_net, 
net_ipv4_ctl_path, ipv4_table); if (hdr == NULL) return -ENOMEM; if (register_pernet_subsys(&ipv4_sysctl_ops)) { - unregister_sysctl_table(hdr); + unregister_net_sysctl_table(hdr); return -ENOMEM; } diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index a34c9e4c792c..6785f5044acf 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -588,7 +588,7 @@ static int __init ip6_queue_init(void) #endif register_netdevice_notifier(&ipq_dev_notifier); #ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table); + ipq_sysctl_header = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, ipq_table); #endif status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh); if (status < 0) { @@ -599,7 +599,7 @@ static int __init ip6_queue_init(void) cleanup_sysctl: #ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); + unregister_net_sysctl_table(ipq_sysctl_header); #endif unregister_netdevice_notifier(&ipq_dev_notifier); proc_net_remove(&init_net, IPQ_PROC_FS_NAME); @@ -621,7 +621,7 @@ static void __exit ip6_queue_fini(void) ipq_flush(NULL, 0); #ifdef CONFIG_SYSCTL - unregister_sysctl_table(ipq_sysctl_header); + unregister_net_sysctl_table(ipq_sysctl_header); #endif unregister_netdevice_notifier(&ipq_dev_notifier); proc_net_remove(&init_net, IPQ_PROC_FS_NAME); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 38f00b0298d3..754814462950 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -626,7 +626,7 @@ int nf_ct_frag6_init(void) inet_frags_init(&nf_frags); #ifdef CONFIG_SYSCTL - nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, + nf_ct_frag6_sysctl_header = register_net_sysctl_table(&init_net, nf_net_netfilter_sysctl_path, nf_ct_frag6_sysctl_table); if (!nf_ct_frag6_sysctl_header) { inet_frags_fini(&nf_frags); @@ -640,7 +640,7 @@ int nf_ct_frag6_init(void) void nf_ct_frag6_cleanup(void) { #ifdef CONFIG_SYSCTL - unregister_sysctl_table(nf_ct_frag6_sysctl_header); + unregister_net_sysctl_table(nf_ct_frag6_sysctl_header); nf_ct_frag6_sysctl_header = NULL; #endif inet_frags_fini(&nf_frags); diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 06f21e5ad361..99279c8aaf29 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -165,7 +165,7 @@ static struct ctl_table_header *ip6_base; int ipv6_static_sysctl_register(void) { - ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton); + ip6_base = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, ipv6_static_skeleton); if (ip6_base == NULL) return -ENOMEM; return 0; diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c index bd6dca00fb85..035880709e84 100644 --- a/net/ipx/sysctl_net_ipx.c +++ b/net/ipx/sysctl_net_ipx.c @@ -8,6 +8,7 @@ #include #include +#include #ifndef CONFIG_SYSCTL #error This file should not be compiled without CONFIG_SYSCTL defined @@ -37,10 +38,10 @@ static struct ctl_table_header *ipx_table_header; void ipx_register_sysctl(void) { - ipx_table_header = register_sysctl_paths(ipx_path, ipx_table); + ipx_table_header = register_net_sysctl_table(&init_net, ipx_path, ipx_table); } void ipx_unregister_sysctl(void) { - unregister_sysctl_table(ipx_table_header); + unregister_net_sysctl_table(ipx_table_header); } diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index 2615ffc8e785..20ced38fc371 100644 --- a/net/irda/irsysctl.c +++ 
b/net/irda/irsysctl.c @@ -251,7 +251,7 @@ static struct ctl_table_header *irda_table_header; */ int __init irda_sysctl_register(void) { - irda_table_header = register_sysctl_paths(irda_path, irda_table); + irda_table_header = register_net_sysctl_table(&init_net, irda_path, irda_table); if (!irda_table_header) return -ENOMEM; @@ -266,7 +266,7 @@ int __init irda_sysctl_register(void) */ void irda_sysctl_unregister(void) { - unregister_sysctl_table(irda_table_header); + unregister_net_sysctl_table(irda_table_header); } diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c index e2ebe3586263..9a6a65f2104b 100644 --- a/net/llc/sysctl_net_llc.c +++ b/net/llc/sysctl_net_llc.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #ifndef CONFIG_SYSCTL @@ -89,7 +90,7 @@ static struct ctl_table_header *llc_table_header; int __init llc_sysctl_init(void) { - llc_table_header = register_sysctl_paths(llc_path, llc_table); + llc_table_header = register_net_sysctl_table(&init_net, llc_path, llc_table); return llc_table_header ? 0 : -ENOMEM; } @@ -97,7 +98,7 @@ int __init llc_sysctl_init(void) void llc_sysctl_exit(void) { if (llc_table_header) { - unregister_sysctl_table(llc_table_header); + unregister_net_sysctl_table(llc_table_header); llc_table_header = NULL; } } diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index be3da2c8cdc5..bbc753fd4fe0 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -40,7 +40,7 @@ nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path, struct ctl_table *table, unsigned int *users) { if (*header == NULL) { - *header = register_sysctl_paths(path, table); + *header = register_net_sysctl_table(&init_net, path, table); if (*header == NULL) return -ENOMEM; } @@ -56,7 +56,7 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header, if (users != NULL && --*users > 0) return; - unregister_sysctl_table(*header); + unregister_net_sysctl_table(*header); *header = NULL; } #endif diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 885f5ab9bc28..0c3888de0f55 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -479,7 +479,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) if (net_eq(net, &init_net)) { nf_ct_netfilter_header = - register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); + register_net_sysctl_table(&init_net, nf_ct_path, nf_ct_netfilter_table); if (!nf_ct_netfilter_header) goto out; } @@ -505,7 +505,7 @@ out_unregister_netfilter: kfree(table); out_kmemdup: if (net_eq(net, &init_net)) - unregister_sysctl_table(nf_ct_netfilter_header); + unregister_net_sysctl_table(nf_ct_netfilter_header); out: printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n"); return -ENOMEM; @@ -516,7 +516,7 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net) struct ctl_table *table; if (net_eq(net, &init_net)) - unregister_sysctl_table(nf_ct_netfilter_header); + unregister_net_sysctl_table(nf_ct_netfilter_header); table = net->ct.sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->ct.sysctl_header); kfree(table); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 957374a234d4..04fca48d901a 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -283,7 +283,7 @@ static __init int netfilter_log_sysctl_init(void) nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } - nf_log_dir_header = 
register_sysctl_paths(nf_log_sysctl_path, + nf_log_dir_header = register_net_sysctl_table(&init_net, nf_log_sysctl_path, nf_log_sysctl_table); if (!nf_log_dir_header) return -ENOMEM; diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c index 1e0fa9e57aac..4ed149e265bf 100644 --- a/net/netrom/sysctl_net_netrom.c +++ b/net/netrom/sysctl_net_netrom.c @@ -154,10 +154,10 @@ static struct ctl_path nr_path[] = { void __init nr_register_sysctl(void) { - nr_table_header = register_sysctl_paths(nr_path, nr_table); + nr_table_header = register_net_sysctl_table(&init_net, nr_path, nr_table); } void nr_unregister_sysctl(void) { - unregister_sysctl_table(nr_table_header); + unregister_net_sysctl_table(nr_table_header); } diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c index 8bed7675b3f4..aa55db5f383b 100644 --- a/net/phonet/sysctl.c +++ b/net/phonet/sysctl.c @@ -106,11 +106,11 @@ static struct ctl_path phonet_ctl_path[] = { int __init phonet_sysctl_init(void) { - phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table); + phonet_table_hrd = register_net_sysctl_table(&init_net, phonet_ctl_path, phonet_table); return phonet_table_hrd == NULL ? -ENOMEM : 0; } void phonet_sysctl_exit(void) { - unregister_sysctl_table(phonet_table_hrd); + unregister_net_sysctl_table(phonet_table_hrd); } diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c index 1253b006efdb..0fef3e15777b 100644 --- a/net/rds/ib_sysctl.c +++ b/net/rds/ib_sysctl.c @@ -116,12 +116,12 @@ static struct ctl_path rds_ib_sysctl_path[] = { void rds_ib_sysctl_exit(void) { if (rds_ib_sysctl_hdr) - unregister_sysctl_table(rds_ib_sysctl_hdr); + unregister_net_sysctl_table(rds_ib_sysctl_hdr); } int rds_ib_sysctl_init(void) { - rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); + rds_ib_sysctl_hdr = register_net_sysctl_table(&init_net, rds_ib_sysctl_path, rds_ib_sysctl_table); if (!rds_ib_sysctl_hdr) return -ENOMEM; return 0; diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c index e2e47176e729..bcfe36dc55a7 100644 --- a/net/rds/iw_sysctl.c +++ b/net/rds/iw_sysctl.c @@ -119,12 +119,12 @@ static struct ctl_path rds_iw_sysctl_path[] = { void rds_iw_sysctl_exit(void) { if (rds_iw_sysctl_hdr) - unregister_sysctl_table(rds_iw_sysctl_hdr); + unregister_net_sysctl_table(rds_iw_sysctl_hdr); } int rds_iw_sysctl_init(void) { - rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table); + rds_iw_sysctl_hdr = register_net_sysctl_table(&init_net, rds_iw_sysctl_path, rds_iw_sysctl_table); if (!rds_iw_sysctl_hdr) return -ENOMEM; return 0; diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index 25ad0c77a26c..30354b8cd584 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -102,7 +102,7 @@ static struct ctl_path rds_sysctl_path[] = { void rds_sysctl_exit(void) { if (rds_sysctl_reg_table) - unregister_sysctl_table(rds_sysctl_reg_table); + unregister_net_sysctl_table(rds_sysctl_reg_table); } int rds_sysctl_init(void) @@ -110,7 +110,7 @@ int rds_sysctl_init(void) rds_sysctl_reconnect_min = msecs_to_jiffies(1); rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; - rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); + rds_sysctl_reg_table = register_net_sysctl_table(&init_net, rds_sysctl_path, rds_sysctl_rds_table); if (!rds_sysctl_reg_table) return -ENOMEM; return 0; diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c index df6d9dac2186..02b73979535b 100644 --- a/net/rose/sysctl_net_rose.c +++ 
b/net/rose/sysctl_net_rose.c @@ -126,10 +126,10 @@ static struct ctl_path rose_path[] = { void __init rose_register_sysctl(void) { - rose_table_header = register_sysctl_paths(rose_path, rose_table); + rose_table_header = register_net_sysctl_table(&init_net, rose_path, rose_table); } void rose_unregister_sysctl(void) { - unregister_sysctl_table(rose_table_header); + unregister_net_sysctl_table(rose_table_header); } diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 60ffbd067ff7..1e385b452047 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -286,11 +286,11 @@ static struct ctl_table_header * sctp_sysctl_header; /* Sysctl registration. */ void sctp_sysctl_register(void) { - sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table); + sctp_sysctl_header = register_net_sysctl_table(&init_net, sctp_path, sctp_table); } /* Sysctl deregistration. */ void sctp_sysctl_unregister(void) { - unregister_sysctl_table(sctp_sysctl_header); + unregister_net_sysctl_table(sctp_sysctl_header); } diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index 397cffebb3b6..4f6979c06f84 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -58,6 +58,6 @@ void unix_sysctl_unregister(struct net *net) struct ctl_table *table; table = net->unx.ctl->ctl_table_arg; - unregister_sysctl_table(net->unx.ctl); + unregister_net_sysctl_table(net->unx.ctl); kfree(table); } diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c index d2efd29f434e..08337cb488d4 100644 --- a/net/x25/sysctl_net_x25.c +++ b/net/x25/sysctl_net_x25.c @@ -81,10 +81,10 @@ static struct ctl_path x25_path[] = { void __init x25_register_sysctl(void) { - x25_table_header = register_sysctl_paths(x25_path, x25_table); + x25_table_header = register_net_sysctl_table(&init_net, x25_path, x25_table); } void x25_unregister_sysctl(void) { - unregister_sysctl_table(x25_table_header); + unregister_net_sysctl_table(x25_table_header); } -- cgit v1.2.2 From 45bad914987ed526d468247e0a5f5f7acb2066a1 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:25:13 +0000 Subject: net core: Remove unneded creation of an empty net/core sysctl directory On the next line we register the net_core_table in net/core which creates the directory and ensures it exists. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/core/sysctl_net_core.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 7d3772e0d150..9fc2f9d666a9 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -255,9 +255,6 @@ static __net_initdata struct pernet_operations sysctl_core_ops = { static __init int sysctl_core_init(void) { - static struct ctl_table empty[1]; - - kmemleak_not_leak(register_net_sysctl_table(&init_net, net_core_path, empty)); register_net_sysctl(&init_net, "net/core", net_core_table); return register_pernet_subsys(&sysctl_core_ops); } -- cgit v1.2.2 From a5287acc6ca3c69821ab4c5439be8c0ff30a20cb Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:26:19 +0000 Subject: net ipv6: Remove unneded registration of an empty net/ipv6/neigh sysctl no longer requires explicit creation of directories. The neigh directory is always populated with at least a default entry so this should cause no user visible changes. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
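
Both removals rest on the same property of the new interface: register_net_sysctl() creates every missing component of the ascii path it is handed, so a placeholder table registered only to make a directory exist is dead weight. A minimal sketch of a registration relying on that behaviour, using illustrative names (example_*, "net/example/knobs") rather than anything from these patches:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int example_value;

static struct ctl_table example_table[] = {
        {
                .procname       = "example_knob",
                .data           = &example_value,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static struct ctl_table_header *example_hdr;

static int __init example_sysctl_init(void)
{
        /* "net", "example" and "knobs" are all created as a side effect
         * of this one call; no empty skeleton table is needed first. */
        example_hdr = register_net_sysctl(&init_net, "net/example/knobs",
                                          example_table);
        return example_hdr ? 0 : -ENOMEM;
}
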
Miller --- net/ipv6/af_inet6.c | 15 --------------- net/ipv6/sysctl_net_ipv6.c | 27 --------------------------- 2 files changed, 42 deletions(-) (limited to 'net') diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 8ed1b930e75f..461e7896e5d8 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1111,11 +1111,6 @@ static int __init inet6_init(void) if (err) goto out_sock_register_fail; -#ifdef CONFIG_SYSCTL - err = ipv6_static_sysctl_register(); - if (err) - goto static_sysctl_fail; -#endif tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; /* @@ -1242,10 +1237,6 @@ ipmr_fail: icmp_fail: unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: -#ifdef CONFIG_SYSCTL - ipv6_static_sysctl_unregister(); -static_sysctl_fail: -#endif sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); out_sock_register_fail: @@ -1272,9 +1263,6 @@ static void __exit inet6_exit(void) /* Disallow any further netlink messages */ rtnl_unregister_all(PF_INET6); -#ifdef CONFIG_SYSCTL - ipv6_sysctl_unregister(); -#endif udpv6_exit(); udplitev6_exit(); tcpv6_exit(); @@ -1302,9 +1290,6 @@ static void __exit inet6_exit(void) rawv6_exit(); unregister_pernet_subsys(&inet6_net_ops); -#ifdef CONFIG_SYSCTL - ipv6_static_sysctl_unregister(); -#endif proto_unregister(&rawv6_prot); proto_unregister(&udplitev6_prot); proto_unregister(&udpv6_prot); diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 99279c8aaf29..cf1e96a49607 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -16,18 +16,6 @@ #include #include -static struct ctl_table empty[1]; - -static ctl_table ipv6_static_skeleton[] = { - { - .procname = "neigh", - .maxlen = 0, - .mode = 0555, - .child = empty, - }, - { } -}; - static ctl_table ipv6_table_template[] = { { .procname = "route", @@ -160,18 +148,3 @@ void ipv6_sysctl_unregister(void) unregister_net_sysctl_table(ip6_header); unregister_pernet_subsys(&ipv6_sysctl_net_ops); } - -static struct ctl_table_header *ip6_base; - -int ipv6_static_sysctl_register(void) -{ - ip6_base = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, ipv6_static_skeleton); - if (ip6_base == NULL) - return -ENOMEM; - return 0; -} - -void ipv6_static_sysctl_unregister(void) -{ - unregister_net_sysctl_table(ip6_base); -} -- cgit v1.2.2 From 4e5ca78541c549ec8886ad2fc19306f35ee672e1 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:32:39 +0000 Subject: net ipv4: Remove the unneeded registration of an empty net/ipv4/neigh sysctl no longer requires explicit creation of directories. The neigh directory is always populated with at least a default entry so this won't cause any user visible changes. Delete the ipv4_path and the ipv4_skeleton these are no longer needed. Directly register the ipv4_route_table. And since I am an idiot remove the header definitions that I should have removed in the previous patch. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/ipv4/route.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) (limited to 'net') diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 86866a4b537f..adf2105a6e85 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3344,23 +3344,6 @@ static ctl_table ipv4_route_table[] = { { } }; -static struct ctl_table empty[1]; - -static struct ctl_table ipv4_skeleton[] = -{ - { .procname = "route", - .mode = 0555, .child = ipv4_route_table}, - { .procname = "neigh", - .mode = 0555, .child = empty}, - { } -}; - -static __net_initdata struct ctl_path ipv4_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { }, -}; - static struct ctl_table ipv4_route_flush_table[] = { { .procname = "flush", @@ -3515,6 +3498,6 @@ int __init ip_rt_init(void) */ void __init ip_static_sysctl_init(void) { - kmemleak_not_leak(register_net_sysctl_table(&init_net, ipv4_path, ipv4_skeleton)); + register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table); } #endif -- cgit v1.2.2 From 0ca7a4c87d27dd2fde0783dec94a821d6d035696 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:34:18 +0000 Subject: net ax25: Simplify and cleanup the ax25 sysctl handling. Don't register/unregister every ax25 table in a batch. Instead register and unregister per device ax25 sysctls as ax25 devices come and go. This moves ax25 to be a completely modern sysctl user. Registering the sysctls in just the initial network namespace, removing the use of .child entries that are no longer natively supported by the sysctl core and taking advantage of the fact that there are no longer any ordering constraints between registering and unregistering different sysctl tables. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
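
The ax25 patch below is the first instance of a per-device recipe that recurs through the rest of the series: duplicate a template table so each device gets private .data pointers, build the path from the device name in a small stack buffer, register it, and on teardown recover the duplicated table through ctl_table_arg before freeing it. A condensed sketch of that recipe with hypothetical names (mydev_*, MYDEV_MAX_VALUES), not the actual ax25 code:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#define MYDEV_MAX_VALUES 2

static const struct ctl_table mydev_param_template[MYDEV_MAX_VALUES + 1] = {
        { .procname = "knob_a", .maxlen = sizeof(int), .mode = 0644,
          .proc_handler = proc_dointvec },
        { .procname = "knob_b", .maxlen = sizeof(int), .mode = 0644,
          .proc_handler = proc_dointvec },
        { }
};

static struct ctl_table_header *mydev_sysctl_header;

static int mydev_register_sysctl(struct net_device *dev, int *values)
{
        char path[sizeof("net/mydev/") + IFNAMSIZ];
        struct ctl_table *table;
        int i;

        /* Private copy per device, so .data can point at this device's
         * values. */
        table = kmemdup(mydev_param_template, sizeof(mydev_param_template),
                        GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        for (i = 0; i < MYDEV_MAX_VALUES; i++)
                table[i].data = &values[i];

        snprintf(path, sizeof(path), "net/mydev/%s", dev->name);
        mydev_sysctl_header = register_net_sysctl(&init_net, path, table);
        if (!mydev_sysctl_header) {
                kfree(table);
                return -ENOMEM;
        }
        return 0;
}

static void mydev_unregister_sysctl(void)
{
        struct ctl_table_header *header = mydev_sysctl_header;

        if (header) {
                /* The header remembers the table it was registered with,
                 * so the kmemdup()ed copy can be freed after unregistering. */
                struct ctl_table *table = header->ctl_table_arg;

                mydev_sysctl_header = NULL;
                unregister_net_sysctl_table(header);
                kfree(table);
        }
}
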
Miller --- net/ax25/af_ax25.c | 2 -- net/ax25/ax25_dev.c | 10 ++---- net/ax25/sysctl_net_ax25.c | 82 ++++++++++++++++------------------------------ 3 files changed, 30 insertions(+), 64 deletions(-) (limited to 'net') diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 0906c194a413..282eb76bc7d6 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1990,7 +1990,6 @@ static int __init ax25_init(void) sock_register(&ax25_family_ops); dev_add_pack(&ax25_packet_type); register_netdevice_notifier(&ax25_dev_notifier); - ax25_register_sysctl(); proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops); proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops); @@ -2015,7 +2014,6 @@ static void __exit ax25_exit(void) ax25_uid_free(); ax25_dev_free(); - ax25_unregister_sysctl(); unregister_netdevice_notifier(&ax25_dev_notifier); dev_remove_pack(&ax25_packet_type); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index d0de30e89591..3d106767b272 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -59,8 +59,6 @@ void ax25_dev_device_up(struct net_device *dev) return; } - ax25_unregister_sysctl(); - dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); @@ -90,7 +88,7 @@ void ax25_dev_device_up(struct net_device *dev) ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); - ax25_register_sysctl(); + ax25_register_dev_sysctl(ax25_dev); } void ax25_dev_device_down(struct net_device *dev) @@ -100,7 +98,7 @@ void ax25_dev_device_down(struct net_device *dev) if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; - ax25_unregister_sysctl(); + ax25_unregister_dev_sysctl(ax25_dev); spin_lock_bh(&ax25_dev_lock); @@ -120,7 +118,6 @@ void ax25_dev_device_down(struct net_device *dev) spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); - ax25_register_sysctl(); return; } @@ -130,7 +127,6 @@ void ax25_dev_device_down(struct net_device *dev) spin_unlock_bh(&ax25_dev_lock); dev_put(dev); kfree(ax25_dev); - ax25_register_sysctl(); return; } @@ -138,8 +134,6 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - - ax25_register_sysctl(); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index 7ba381b6f074..d5744b752511 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -29,17 +29,6 @@ static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; #endif -static struct ctl_table_header *ax25_table_header; - -static ctl_table *ax25_table; -static int ax25_table_size; - -static struct ctl_path ax25_path[] = { - { .procname = "net", }, - { .procname = "ax25", }, - { } -}; - static const ctl_table ax25_param_table[] = { { .procname = "ip_default_mode", @@ -159,52 +148,37 @@ static const ctl_table ax25_param_table[] = { { } /* that's all, folks! 
*/ }; -void ax25_register_sysctl(void) +int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { - ax25_dev *ax25_dev; - int n, k; - - spin_lock_bh(&ax25_dev_lock); - for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) - ax25_table_size += sizeof(ctl_table); - - if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { - spin_unlock_bh(&ax25_dev_lock); - return; - } - - for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { - struct ctl_table *child = kmemdup(ax25_param_table, - sizeof(ax25_param_table), - GFP_ATOMIC); - if (!child) { - while (n--) - kfree(ax25_table[n].child); - kfree(ax25_table); - spin_unlock_bh(&ax25_dev_lock); - return; - } - ax25_table[n].child = ax25_dev->systable = child; - ax25_table[n].procname = ax25_dev->dev->name; - ax25_table[n].mode = 0555; - - - for (k = 0; k < AX25_MAX_VALUES; k++) - child[k].data = &ax25_dev->values[k]; - - n++; + char path[sizeof("net/ax25/") + IFNAMSIZ]; + int k; + struct ctl_table *table; + + table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (k = 0; k < AX25_MAX_VALUES; k++) + table[k].data = &ax25_dev->values[k]; + + snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name); + ax25_dev->sysheader = register_net_sysctl(&init_net, path, table); + if (!ax25_dev->sysheader) { + kfree(table); + return -ENOMEM; } - spin_unlock_bh(&ax25_dev_lock); - - ax25_table_header = register_net_sysctl_table(&init_net, ax25_path, ax25_table); + return 0; } -void ax25_unregister_sysctl(void) +void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) { - ctl_table *p; - unregister_net_sysctl_table(ax25_table_header); - - for (p = ax25_table; p->procname; p++) - kfree(p->child); - kfree(ax25_table); + struct ctl_table_header *header = ax25_dev->sysheader; + struct ctl_table *table; + + if (header) { + ax25_dev->sysheader = NULL; + table = header->ctl_table_arg; + unregister_net_sysctl_table(header); + kfree(table); + } } -- cgit v1.2.2 From 64fb3010400f6051261be9c5c74f29de416dad8f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:35:39 +0000 Subject: net llc: Don't use sysctl tables with .child entries. The sysctl core no longer natively understands sysctl tables with .child entries. Kill the intermediate tables and use register_net_sysctl directly to remove the need for compatibility code. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
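
"Kill the intermediate tables" in the llc patch below reduces to a generic shape (foo/bar and the single-entry leaf table are illustrative): the directory-only tables that existed purely to carry a .child pointer disappear, and every leaf table is registered at its full ascii path instead. A sketch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int bar_value;

static struct ctl_table bar_table[] = {
        { .procname = "bar_knob", .data = &bar_value, .maxlen = sizeof(int),
          .mode = 0644, .proc_handler = proc_dointvec },
        { }
};

/* Old shape: an extra table per directory level, e.g.
 *
 *      static struct ctl_table foo_dir[] = {
 *              { .procname = "bar", .mode = 0555, .child = bar_table, },
 *              { }
 *      };
 *
 * registered under a ctl_path prefix for "net/foo".
 */

/* New shape: one registration per leaf table, at its full path. */
static struct ctl_table_header *bar_hdr;

static int __init foo_sysctl_init(void)
{
        bar_hdr = register_net_sysctl(&init_net, "net/foo/bar", bar_table);
        return bar_hdr ? 0 : -ENOMEM;
}
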
Miller --- net/llc/sysctl_net_llc.c | 51 +++++++++++++++--------------------------------- 1 file changed, 16 insertions(+), 35 deletions(-) (limited to 'net') diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c index 9a6a65f2104b..d75306b9c2f3 100644 --- a/net/llc/sysctl_net_llc.c +++ b/net/llc/sysctl_net_llc.c @@ -57,48 +57,29 @@ static struct ctl_table llc_station_table[] = { { }, }; -static struct ctl_table llc2_dir_timeout_table[] = { - { - .procname = "timeout", - .mode = 0555, - .child = llc2_timeout_table, - }, - { }, -}; - -static struct ctl_table llc_table[] = { - { - .procname = "llc2", - .mode = 0555, - .child = llc2_dir_timeout_table, - }, - { - .procname = "station", - .mode = 0555, - .child = llc_station_table, - }, - { }, -}; - -static struct ctl_path llc_path[] = { - { .procname = "net", }, - { .procname = "llc", }, - { } -}; - -static struct ctl_table_header *llc_table_header; +static struct ctl_table_header *llc2_timeout_header; +static struct ctl_table_header *llc_station_header; int __init llc_sysctl_init(void) { - llc_table_header = register_net_sysctl_table(&init_net, llc_path, llc_table); + llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table); + llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table); - return llc_table_header ? 0 : -ENOMEM; + if (!llc2_timeout_header || !llc_station_header) { + llc_sysctl_exit(); + return -ENOMEM; + } + return 0; } void llc_sysctl_exit(void) { - if (llc_table_header) { - unregister_net_sysctl_table(llc_table_header); - llc_table_header = NULL; + if (llc2_timeout_header) { + unregister_net_sysctl_table(llc2_timeout_header); + llc2_timeout_header = NULL; + } + if (llc_station_header) { + unregister_net_sysctl_table(llc_station_header); + llc_station_header = NULL; } } -- cgit v1.2.2 From 6dceb03687124b6ee392eb1d9921bd463eab3190 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:37:09 +0000 Subject: net ipv6: Don't use sysctl tables with .child entries. The sysctl core no longer natively understands sysctl tables with .child entries. Split the ipv6_table to remove the .child entries. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/ipv6/sysctl_net_ipv6.c | 47 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index cf1e96a49607..a52d8203a5de 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -17,18 +17,6 @@ #include static ctl_table ipv6_table_template[] = { - { - .procname = "route", - .maxlen = 0, - .mode = 0555, - .child = ipv6_route_table_template - }, - { - .procname = "icmp", - .maxlen = 0, - .mode = 0555, - .child = ipv6_icmp_table_template - }, { .procname = "bindv6only", .data = &init_net.ipv6.sysctl.bindv6only, @@ -69,28 +57,37 @@ static int __net_init ipv6_sysctl_net_init(struct net *net) GFP_KERNEL); if (!ipv6_table) goto out; + ipv6_table[0].data = &net->ipv6.sysctl.bindv6only; ipv6_route_table = ipv6_route_sysctl_init(net); if (!ipv6_route_table) goto out_ipv6_table; - ipv6_table[0].child = ipv6_route_table; ipv6_icmp_table = ipv6_icmp_sysctl_init(net); if (!ipv6_icmp_table) goto out_ipv6_route_table; - ipv6_table[1].child = ipv6_icmp_table; - ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; - - net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, - ipv6_table); - if (!net->ipv6.sysctl.table) + net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table); + if (!net->ipv6.sysctl.hdr) goto out_ipv6_icmp_table; + net->ipv6.sysctl.route_hdr = + register_net_sysctl(net, "net/ipv6/route", ipv6_route_table); + if (!net->ipv6.sysctl.route_hdr) + goto out_unregister_ipv6_table; + + net->ipv6.sysctl.icmp_hdr = + register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table); + if (!net->ipv6.sysctl.icmp_hdr) + goto out_unregister_route_table; + err = 0; out: return err; - +out_unregister_route_table: + unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr); +out_unregister_ipv6_table: + unregister_net_sysctl_table(net->ipv6.sysctl.hdr); out_ipv6_icmp_table: kfree(ipv6_icmp_table); out_ipv6_route_table: @@ -106,11 +103,13 @@ static void __net_exit ipv6_sysctl_net_exit(struct net *net) struct ctl_table *ipv6_route_table; struct ctl_table *ipv6_icmp_table; - ipv6_table = net->ipv6.sysctl.table->ctl_table_arg; - ipv6_route_table = ipv6_table[0].child; - ipv6_icmp_table = ipv6_table[1].child; + ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg; + ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg; + ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->ipv6.sysctl.table); + unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr); + unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr); + unregister_net_sysctl_table(net->ipv6.sysctl.hdr); kfree(ipv6_table); kfree(ipv6_route_table); -- cgit v1.2.2 From 8f40a1f9821a4ccb2a237f14d4eb6d6e0e665c14 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:38:03 +0000 Subject: net neighbour: Convert to use register_net_sysctl Using an ascii path to register_net_sysctl as opposed to the slightly awkward ctl_path allows for much simpler code. We no longer need to malloc dev_name to keep it alive the length of our sysctl register instead we can use a small temporary buffer on the stack. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
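
The heart of the neighbour conversion below (and of the addrconf and devinet ones that follow) is that the path string is only consumed while register_net_sysctl() runs, so a stack buffer sized for the worst case is enough and no duplicated device name has to outlive the call. A small sketch under that assumption, with illustrative names:

#include <linux/if.h>
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static struct ctl_table_header *
example_neigh_sysctl_register(struct net *net, const char *p_name,
                              const char *dev_name, struct ctl_table *vars)
{
        /* Worst case: both %s expansions are interface names. */
        char path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];

        snprintf(path, sizeof(path), "net/%s/neigh/%s", p_name, dev_name);

        /* The sysctl core parses the path before this returns, so the
         * buffer need not live any longer; the caller stores the
         * returned header (NULL on failure) for later unregistration. */
        return register_net_sysctl(net, path, vars);
}
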
Miller --- net/core/neighbour.c | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 0c2df3d3cfbf..fadaa819b854 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2799,7 +2799,6 @@ enum { static struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; - char *dev_name; } neigh_sysctl_template __read_mostly = { .neigh_vars = { [NEIGH_VAR_MCAST_PROBE] = { @@ -2925,19 +2924,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, { struct neigh_sysctl_table *t; const char *dev_name_source = NULL; - -#define NEIGH_CTL_PATH_ROOT 0 -#define NEIGH_CTL_PATH_PROTO 1 -#define NEIGH_CTL_PATH_NEIGH 2 -#define NEIGH_CTL_PATH_DEV 3 - - struct ctl_path neigh_path[] = { - { .procname = "net", }, - { .procname = "proto", }, - { .procname = "neigh", }, - { .procname = "default", }, - { }, - }; + char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ]; t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL); if (!t) @@ -2965,7 +2952,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); } else { - dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname; + dev_name_source = "default"; t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; @@ -2988,23 +2975,16 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; } - t->dev_name = kstrdup(dev_name_source, GFP_KERNEL); - if (!t->dev_name) - goto free; - - neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name; - neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name; - + snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", + p_name, dev_name_source); t->sysctl_header = - register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars); + register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); if (!t->sysctl_header) - goto free_procname; + goto free; p->sysctl_table = t; return 0; -free_procname: - kfree(t->dev_name); free: kfree(t); err: @@ -3018,7 +2998,6 @@ void neigh_sysctl_unregister(struct neigh_parms *p) struct neigh_sysctl_table *t = p->sysctl_table; p->sysctl_table = NULL; unregister_net_sysctl_table(t->sysctl_header); - kfree(t->dev_name); kfree(t); } } -- cgit v1.2.2 From 9bdcc88fa03a09c1f0478c0d7443d0aba7872210 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:40:37 +0000 Subject: net decnet: Convert to use register_net_sysctl Using an ascii path to register_net_sysctl as opposed to the slightly awkward ctl_path allows for much simpler code. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/decnet/dn_dev.c | 19 ++++--------------- net/decnet/sysctl_net_decnet.c | 8 +------- 2 files changed, 5 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index ce8a18471845..f3924ab1e019 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -209,15 +209,7 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms * struct dn_dev_sysctl_table *t; int i; -#define DN_CTL_PATH_DEV 3 - - struct ctl_path dn_ctl_path[] = { - { .procname = "net", }, - { .procname = "decnet", }, - { .procname = "conf", }, - { /* to be set */ }, - { }, - }; + char path[sizeof("net/decnet/conf/") + IFNAMSIZ]; t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) @@ -228,15 +220,12 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms * t->dn_dev_vars[i].data = ((char *)parms) + offset; } - if (dev) { - dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name; - } else { - dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name; - } + snprintf(path, sizeof(path), "net/decnet/conf/%s", + dev? dev->name : parms->name); t->dn_dev_vars[0].extra1 = (void *)dev; - t->sysctl_header = register_net_sysctl_table(&init_net, dn_ctl_path, t->dn_dev_vars); + t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars); if (t->sysctl_header == NULL) kfree(t); else diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index 4380b8edea72..a55eeccaa72f 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -351,15 +351,9 @@ static ctl_table dn_table[] = { { } }; -static struct ctl_path dn_path[] = { - { .procname = "net", }, - { .procname = "decnet", }, - { } -}; - void dn_register_sysctl(void) { - dn_table_header = register_net_sysctl_table(&init_net, dn_path, dn_table); + dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table); } void dn_unregister_sysctl(void) -- cgit v1.2.2 From 6105e29320f662bf2f2358e2b2ff1bbf599f387f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:41:24 +0000 Subject: net ipv6: Convert addrconf to use register_net_sysctl Using an ascii path to register_net_sysctl as opposed to the slightly awkward ctl_path allows for much simpler code. We no longer need to malloc dev_name to keep it alive the length of our sysctl register instead we can use a small temporary buffer on the stack. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 32 ++++---------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4a839836e59c..e3b3421f8dad 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4365,7 +4365,6 @@ static struct addrconf_sysctl_table { struct ctl_table_header *sysctl_header; ctl_table addrconf_vars[DEVCONF_MAX+1]; - char *dev_name; } addrconf_sysctl __read_mostly = { .sysctl_header = NULL, .addrconf_vars = { @@ -4594,17 +4593,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, { int i; struct addrconf_sysctl_table *t; - -#define ADDRCONF_CTL_PATH_DEV 3 - - struct ctl_path addrconf_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv6", }, - { .procname = "conf", }, - { /* to be set */ }, - { }, - }; - + char path[sizeof("net/ipv6/conf/") + IFNAMSIZ]; t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) @@ -4616,27 +4605,15 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, t->addrconf_vars[i].extra2 = net; } - /* - * Make a copy of dev_name, because '.procname' is regarded as const - * by sysctl and we wouldn't want anyone to change it under our feet - * (see SIOCSIFNAME). - */ - t->dev_name = kstrdup(dev_name, GFP_KERNEL); - if (!t->dev_name) - goto free; + snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name); - addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name; - - t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path, - t->addrconf_vars); + t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars); if (t->sysctl_header == NULL) - goto free_procname; + goto free; p->sysctl = t; return 0; -free_procname: - kfree(t->dev_name); free: kfree(t); out: @@ -4653,7 +4630,6 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p) t = p->sysctl; p->sysctl = NULL; unregister_net_sysctl_table(t->sysctl_header); - kfree(t->dev_name); kfree(t); } -- cgit v1.2.2 From 8607ddb86711df4504a028cc88299d334b786bf3 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:42:09 +0000 Subject: net ipv4: Convert devinet to use register_net_sysctl Using an ascii path to register_net_sysctl as opposed to the slightly awkward ctl_path allows for much simpler code. We no longer need to malloc dev_name to keep it alive the length of our sysctl register instead we can use a small temporary buffer on the stack. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/ipv4/devinet.c | 39 +++++---------------------------------- 1 file changed, 5 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 8a01bfb7c873..88c9e3f68c78 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1585,7 +1585,6 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write, static struct devinet_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX]; - char *dev_name; } devinet_sysctl = { .devinet_vars = { DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding", @@ -1627,16 +1626,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, { int i; struct devinet_sysctl_table *t; - -#define DEVINET_CTL_PATH_DEV 3 - - struct ctl_path devinet_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "conf", }, - { /* to be set */ }, - { }, - }; + char path[sizeof("net/ipv4/conf/") + IFNAMSIZ]; t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL); if (!t) @@ -1648,27 +1638,15 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, t->devinet_vars[i].extra2 = net; } - /* - * Make a copy of dev_name, because '.procname' is regarded as const - * by sysctl and we wouldn't want anyone to change it under our feet - * (see SIOCSIFNAME). - */ - t->dev_name = kstrdup(dev_name, GFP_KERNEL); - if (!t->dev_name) - goto free; - - devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name; + snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name); - t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path, - t->devinet_vars); + t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars); if (!t->sysctl_header) - goto free_procname; + goto free; p->sysctl = t; return 0; -free_procname: - kfree(t->dev_name); free: kfree(t); out: @@ -1684,7 +1662,6 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf) cnf->sysctl = NULL; unregister_net_sysctl_table(t->sysctl_header); - kfree(t->dev_name); kfree(t); } @@ -1714,12 +1691,6 @@ static struct ctl_table ctl_forward_entry[] = { }, { }, }; - -static __net_initdata struct ctl_path net_ipv4_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { }, -}; #endif static __net_init int devinet_init_net(struct net *net) @@ -1765,7 +1736,7 @@ static __net_init int devinet_init_net(struct net *net) goto err_reg_dflt; err = -ENOMEM; - forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl); + forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); if (forw_hdr == NULL) goto err_reg_ctl; net->ipv4.forw_hdr = forw_hdr; -- cgit v1.2.2 From f99e8f715a5c7ebad5410b1e9b4d744ddb284f54 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:43:55 +0000 Subject: net: Convert nf_conntrack_proto to use register_net_sysctl There isn't much advantage here except that strings paths are a bit easier to read, and converting everything to them allows me to kill off ctl_path. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2 +- net/netfilter/nf_conntrack_proto.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 345c7dc08482..91747d4ebc26 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -365,7 +365,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { .nla_policy = ipv4_nla_policy, #endif #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) - .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, + .ctl_table_path = "net/ipv4/netfilter", .ctl_table = ip_ct_sysctl_table, #endif .me = THIS_MODULE, diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index bbc753fd4fe0..8b631b07a645 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -36,11 +36,11 @@ static DEFINE_MUTEX(nf_ct_proto_mutex); #ifdef CONFIG_SYSCTL static int -nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path, +nf_ct_register_sysctl(struct ctl_table_header **header, const char *path, struct ctl_table *table, unsigned int *users) { if (*header == NULL) { - *header = register_net_sysctl_table(&init_net, path, table); + *header = register_net_sysctl(&init_net, path, table); if (*header == NULL) return -ENOMEM; } @@ -250,7 +250,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) #ifdef CONFIG_SYSCTL if (l4proto->ctl_table != NULL) { err = nf_ct_register_sysctl(l4proto->ctl_table_header, - nf_net_netfilter_sysctl_path, + "net/netfilter", l4proto->ctl_table, l4proto->ctl_table_users); if (err < 0) @@ -259,7 +259,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT if (l4proto->ctl_compat_table != NULL) { err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, - nf_net_ipv4_netfilter_sysctl_path, + "net/ipv4/netfilter", l4proto->ctl_compat_table, NULL); if (err == 0) goto out; -- cgit v1.2.2 From ec8f23ce0f4005b74013d4d122e0d540397a93c9 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:44:49 +0000 Subject: net: Convert all sysctl registrations to register_net_sysctl This results in code with less boiler plate that is a bit easier to read. Additionally stops us from using compatibility code in the sysctl core, hastening the day when the compatibility code can be removed. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
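
Every hunk in the final patch below has the same before/after shape; written out once as plain C rather than a diff, with an illustrative "net/foo" path and a one-entry foo_table standing in for whatever table each file registers:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int foo_value;

static struct ctl_table foo_table[] = {
        { .procname = "foo_knob", .data = &foo_value, .maxlen = sizeof(int),
          .mode = 0644, .proc_handler = proc_dointvec },
        { }
};

static struct ctl_table_header *foo_hdr;

/* Before: the path spelled out as an array of ctl_path entries. */
static struct ctl_path foo_path[] = {
        { .procname = "net", },
        { .procname = "foo", },
        { }
};

static int __init foo_sysctl_init_old(void)
{
        foo_hdr = register_net_sysctl_table(&init_net, foo_path, foo_table);
        return foo_hdr ? 0 : -ENOMEM;
}

/* After: the same path as one string; struct ctl_path goes unused. */
static int __init foo_sysctl_init_new(void)
{
        foo_hdr = register_net_sysctl(&init_net, "net/foo", foo_table);
        return foo_hdr ? 0 : -ENOMEM;
}
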
Miller --- net/802/tr.c | 8 +------- net/appletalk/sysctl_net_atalk.c | 8 +------- net/bridge/br_netfilter.c | 8 +------- net/core/sysctl_net_core.c | 3 +-- net/dccp/sysctl.c | 9 +-------- net/ipv4/ip_fragment.c | 2 +- net/ipv4/netfilter/ip_queue.c | 2 +- net/ipv4/route.c | 10 +--------- net/ipv4/sysctl_net_ipv4.c | 5 ++--- net/ipv4/xfrm4_policy.c | 4 ++-- net/ipv6/netfilter/ip6_queue.c | 2 +- net/ipv6/netfilter/nf_conntrack_reasm.c | 4 ++-- net/ipv6/reassembly.c | 2 +- net/ipv6/xfrm6_policy.c | 4 ++-- net/ipx/sysctl_net_ipx.c | 8 +------- net/irda/irsysctl.c | 8 +------- net/netfilter/ipvs/ip_vs_ctl.c | 3 +-- net/netfilter/ipvs/ip_vs_lblc.c | 3 +-- net/netfilter/ipvs/ip_vs_lblcr.c | 3 +-- net/netfilter/nf_conntrack_acct.c | 4 ++-- net/netfilter/nf_conntrack_ecache.c | 3 +-- net/netfilter/nf_conntrack_proto_dccp.c | 4 ++-- net/netfilter/nf_conntrack_standalone.c | 10 ++-------- net/netfilter/nf_conntrack_timestamp.c | 4 ++-- net/netfilter/nf_log.c | 9 +-------- net/netrom/sysctl_net_netrom.c | 8 +------- net/phonet/sysctl.c | 8 +------- net/rds/ib_sysctl.c | 9 +-------- net/rds/iw_sysctl.c | 9 +-------- net/rds/sysctl.c | 9 +-------- net/rose/sysctl_net_rose.c | 8 +------- net/sctp/sysctl.c | 8 +------- net/unix/sysctl_net_unix.c | 8 +------- net/x25/sysctl_net_x25.c | 8 +------- net/xfrm/xfrm_sysctl.c | 2 +- 35 files changed, 43 insertions(+), 164 deletions(-) (limited to 'net') diff --git a/net/802/tr.c b/net/802/tr.c index 103e0201b0ab..30a352ed09b1 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -643,12 +643,6 @@ static struct ctl_table tr_table[] = { }, { }, }; - -static __initdata struct ctl_path tr_path[] = { - { .procname = "net", }, - { .procname = "token-ring", }, - { } -}; #endif /* @@ -662,7 +656,7 @@ static int __init rif_init(void) setup_timer(&rif_timer, rif_check_expire, 0); add_timer(&rif_timer); #ifdef CONFIG_SYSCTL - register_net_sysctl_table(&init_net, tr_path, tr_table); + register_net_sysctl(&init_net, "net/token-ring", tr_table); #endif proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); return 0; diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c index 5edce8f70cb7..ebb864361f7a 100644 --- a/net/appletalk/sysctl_net_atalk.c +++ b/net/appletalk/sysctl_net_atalk.c @@ -42,17 +42,11 @@ static struct ctl_table atalk_table[] = { { }, }; -static struct ctl_path atalk_path[] = { - { .procname = "net", }, - { .procname = "appletalk", }, - { } -}; - static struct ctl_table_header *atalk_table_header; void atalk_register_sysctl(void) { - atalk_table_header = register_net_sysctl_table(&init_net, atalk_path, atalk_table); + atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table); } void atalk_unregister_sysctl(void) diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 4f4c4a619f68..9d4f09c3520f 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -1008,12 +1008,6 @@ static ctl_table brnf_table[] = { }, { } }; - -static struct ctl_path brnf_path[] = { - { .procname = "net", }, - { .procname = "bridge", }, - { } -}; #endif int __init br_netfilter_init(void) @@ -1030,7 +1024,7 @@ int __init br_netfilter_init(void) return ret; } #ifdef CONFIG_SYSCTL - brnf_sysctl_header = register_net_sysctl_table(&init_net, brnf_path, brnf_table); + brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); diff --git a/net/core/sysctl_net_core.c 
b/net/core/sysctl_net_core.c index 9fc2f9d666a9..64924e345a9b 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -224,8 +224,7 @@ static __net_init int sysctl_core_net_init(struct net *net) tbl[0].data = &net->core.sysctl_somaxconn; } - net->core.sysctl_hdr = register_net_sysctl_table(net, - net_core_path, tbl); + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); if (net->core.sysctl_hdr == NULL) goto err_reg; diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 329e1390c26d..607ab71b5a0c 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c @@ -98,18 +98,11 @@ static struct ctl_table dccp_default_table[] = { { } }; -static struct ctl_path dccp_path[] = { - { .procname = "net", }, - { .procname = "dccp", }, - { .procname = "default", }, - { } -}; - static struct ctl_table_header *dccp_table_header; int __init dccp_sysctl_init(void) { - dccp_table_header = register_net_sysctl_table(&init_net, dccp_path, + dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default", dccp_default_table); return dccp_table_header != NULL ? 0 : -ENOMEM; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 6a2f85cd440e..71e5c328176c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -782,7 +782,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) table[2].data = &net->ipv4.frags.timeout; } - hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); + hdr = register_net_sysctl(net, "net/ipv4", table); if (hdr == NULL) goto err_reg; diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 766485d7d099..09775a1e1348 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -586,7 +586,7 @@ static int __init ip_queue_init(void) #endif register_netdevice_notifier(&ipq_dev_notifier); #ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, ipq_table); + ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv4", ipq_table); #endif status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh); if (status < 0) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index adf2105a6e85..5773f5d9e213 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3354,13 +3354,6 @@ static struct ctl_table ipv4_route_flush_table[] = { { }, }; -static __net_initdata struct ctl_path ipv4_route_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "route", }, - { }, -}; - static __net_init int sysctl_route_net_init(struct net *net) { struct ctl_table *tbl; @@ -3373,8 +3366,7 @@ static __net_init int sysctl_route_net_init(struct net *net) } tbl[0].extra1 = net; - net->ipv4.route_hdr = - register_net_sysctl_table(net, ipv4_route_path, tbl); + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); if (net->ipv4.route_hdr == NULL) goto err_reg; return 0; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index e7a6fa3d70bb..56e64f7b75d0 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -815,8 +815,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) tcp_init_mem(net); - net->ipv4.ipv4_hdr = register_net_sysctl_table(net, - net_ipv4_ctl_path, table); + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); if (net->ipv4.ipv4_hdr == NULL) goto err_reg; @@ -857,7 +856,7 @@ static __init int sysctl_ipv4_init(void) if (!i->procname) return -EINVAL; - hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, ipv4_table); + hdr = 
register_net_sysctl(&init_net, "net/ipv4", ipv4_table); if (hdr == NULL) return -ENOMEM; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 8ef24e16afce..0d3426cb5c4f 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -298,8 +298,8 @@ void __init xfrm4_init(int rt_max_size) xfrm4_state_init(); xfrm4_policy_init(); #ifdef CONFIG_SYSCTL - sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, - xfrm4_policy_table); + sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4", + xfrm4_policy_table); #endif } diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 6785f5044acf..3ca9303b3a19 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -588,7 +588,7 @@ static int __init ip6_queue_init(void) #endif register_netdevice_notifier(&ipq_dev_notifier); #ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, ipq_table); + ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv6", ipq_table); #endif status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh); if (status < 0) { diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 754814462950..48a2be1b7c70 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -626,8 +626,8 @@ int nf_ct_frag6_init(void) inet_frags_init(&nf_frags); #ifdef CONFIG_SYSCTL - nf_ct_frag6_sysctl_header = register_net_sysctl_table(&init_net, nf_net_netfilter_sysctl_path, - nf_ct_frag6_sysctl_table); + nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter", + nf_ct_frag6_sysctl_table); if (!nf_ct_frag6_sysctl_header) { inet_frags_fini(&nf_frags); return -ENOMEM; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 42f4f7c0948a..36e04cff1a85 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -646,7 +646,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) table[2].data = &net->ipv6.frags.timeout; } - hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); + hdr = register_net_sysctl(net, "net/ipv6", table); if (hdr == NULL) goto err_reg; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8ea65e032733..8625fba96db9 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -334,8 +334,8 @@ int __init xfrm6_init(void) goto out_policy; #ifdef CONFIG_SYSCTL - sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, - xfrm6_policy_table); + sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6", + xfrm6_policy_table); #endif out: return ret; diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c index 035880709e84..ad7c03dedaab 100644 --- a/net/ipx/sysctl_net_ipx.c +++ b/net/ipx/sysctl_net_ipx.c @@ -28,17 +28,11 @@ static struct ctl_table ipx_table[] = { { }, }; -static struct ctl_path ipx_path[] = { - { .procname = "net", }, - { .procname = "ipx", }, - { } -}; - static struct ctl_table_header *ipx_table_header; void ipx_register_sysctl(void) { - ipx_table_header = register_net_sysctl_table(&init_net, ipx_path, ipx_table); + ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table); } void ipx_unregister_sysctl(void) diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index 20ced38fc371..de73f6496db5 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -235,12 +235,6 @@ static ctl_table irda_table[] = { { } }; -static struct ctl_path irda_path[] = { - { .procname = "net", }, - { .procname = 
"irda", }, - { } -}; - static struct ctl_table_header *irda_table_header; /* @@ -251,7 +245,7 @@ static struct ctl_table_header *irda_table_header; */ int __init irda_sysctl_register(void) { - irda_table_header = register_net_sysctl_table(&init_net, irda_path, irda_table); + irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table); if (!irda_table_header) return -ENOMEM; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b8d0df701227..a606d6b1b0e5 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3672,8 +3672,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; - ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path, - tbl); + ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); if (ipvs->sysctl_hdr == NULL) { if (!net_eq(net, &init_net)) kfree(tbl); diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 27c24f156c28..1024466de124 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -563,8 +563,7 @@ static int __net_init __ip_vs_lblc_init(struct net *net) ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration; ipvs->lblc_ctl_header = - register_net_sysctl_table(net, net_vs_ctl_path, - ipvs->lblc_ctl_table); + register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table); if (!ipvs->lblc_ctl_header) { if (!net_eq(net, &init_net)) kfree(ipvs->lblc_ctl_table); diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 749875611ed6..9261825a6579 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -757,8 +757,7 @@ static int __net_init __ip_vs_lblcr_init(struct net *net) ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration; ipvs->lblcr_ctl_header = - register_net_sysctl_table(net, net_vs_ctl_path, - ipvs->lblcr_ctl_table); + register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table); if (!ipvs->lblcr_ctl_header) { if (!net_eq(net, &init_net)) kfree(ipvs->lblcr_ctl_table); diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index f4f8cda05986..d61e0782a797 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c @@ -69,8 +69,8 @@ static int nf_conntrack_acct_init_sysctl(struct net *net) table[0].data = &net->ct.sysctl_acct; - net->ct.acct_sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", + table); if (!net->ct.acct_sysctl_header) { printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n"); goto out_register; diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 5bd3047ddeec..b924f3a49a8e 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -199,8 +199,7 @@ static int nf_conntrack_event_init_sysctl(struct net *net) table[1].data = &net->ct.sysctl_events_retry_timeout; net->ct.event_sysctl_header = - register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + register_net_sysctl(net, "net/netfilter", table); if (!net->ct.event_sysctl_header) { printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n"); goto out_register; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index a58998d0912f..ef706a485be1 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ 
b/net/netfilter/nf_conntrack_proto_dccp.c @@ -910,8 +910,8 @@ static __net_init int dccp_net_init(struct net *net) dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; dn->sysctl_table[7].data = &dn->dccp_loose; - dn->sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, dn->sysctl_table); + dn->sysctl_header = register_net_sysctl(net, "net/netfilter", + dn->sysctl_table); if (!dn->sysctl_header) { kfree(dn->sysctl_table); return -ENOMEM; diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 0c3888de0f55..9b3943252a5e 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -468,18 +468,13 @@ static ctl_table nf_ct_netfilter_table[] = { { } }; -static struct ctl_path nf_ct_path[] = { - { .procname = "net", }, - { } -}; - static int nf_conntrack_standalone_init_sysctl(struct net *net) { struct ctl_table *table; if (net_eq(net, &init_net)) { nf_ct_netfilter_header = - register_net_sysctl_table(&init_net, nf_ct_path, nf_ct_netfilter_table); + register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); if (!nf_ct_netfilter_header) goto out; } @@ -494,8 +489,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) table[3].data = &net->ct.sysctl_checksum; table[4].data = &net->ct.sysctl_log_invalid; - net->ct.sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); if (!net->ct.sysctl_header) goto out_unregister_netfilter; diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index e8d27afbbdb9..dbb364f62d6f 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c @@ -51,8 +51,8 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net) table[0].data = &net->ct.sysctl_tstamp; - net->ct.tstamp_sysctl_header = register_net_sysctl_table(net, - nf_net_netfilter_sysctl_path, table); + net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter", + table); if (!net->ct.tstamp_sysctl_header) { printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n"); goto out_register; diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 04fca48d901a..703fb26aa48d 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -214,13 +214,6 @@ static const struct file_operations nflog_file_ops = { #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL -static struct ctl_path nf_log_sysctl_path[] = { - { .procname = "net", }, - { .procname = "netfilter", }, - { .procname = "nf_log", }, - { } -}; - static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; static struct ctl_table_header *nf_log_dir_header; @@ -283,7 +276,7 @@ static __init int netfilter_log_sysctl_init(void) nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } - nf_log_dir_header = register_net_sysctl_table(&init_net, nf_log_sysctl_path, + nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log", nf_log_sysctl_table); if (!nf_log_dir_header) return -ENOMEM; diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c index 4ed149e265bf..42f630b9a698 100644 --- a/net/netrom/sysctl_net_netrom.c +++ b/net/netrom/sysctl_net_netrom.c @@ -146,15 +146,9 @@ static ctl_table nr_table[] = { { } }; -static struct ctl_path nr_path[] = { - { .procname = "net", }, - { .procname = "netrom", }, - { } -}; - 
void __init nr_register_sysctl(void) { - nr_table_header = register_net_sysctl_table(&init_net, nr_path, nr_table); + nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); } void nr_unregister_sysctl(void) diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c index aa55db5f383b..696348fd31a1 100644 --- a/net/phonet/sysctl.c +++ b/net/phonet/sysctl.c @@ -98,15 +98,9 @@ static struct ctl_table phonet_table[] = { { } }; -static struct ctl_path phonet_ctl_path[] = { - { .procname = "net", }, - { .procname = "phonet", }, - { }, -}; - int __init phonet_sysctl_init(void) { - phonet_table_hrd = register_net_sysctl_table(&init_net, phonet_ctl_path, phonet_table); + phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table); return phonet_table_hrd == NULL ? -ENOMEM : 0; } diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c index 0fef3e15777b..7e643bafb4af 100644 --- a/net/rds/ib_sysctl.c +++ b/net/rds/ib_sysctl.c @@ -106,13 +106,6 @@ static ctl_table rds_ib_sysctl_table[] = { { } }; -static struct ctl_path rds_ib_sysctl_path[] = { - { .procname = "net", }, - { .procname = "rds", }, - { .procname = "ib", }, - { } -}; - void rds_ib_sysctl_exit(void) { if (rds_ib_sysctl_hdr) @@ -121,7 +114,7 @@ void rds_ib_sysctl_exit(void) int rds_ib_sysctl_init(void) { - rds_ib_sysctl_hdr = register_net_sysctl_table(&init_net, rds_ib_sysctl_path, rds_ib_sysctl_table); + rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table); if (!rds_ib_sysctl_hdr) return -ENOMEM; return 0; diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c index bcfe36dc55a7..5d5ebd576f3f 100644 --- a/net/rds/iw_sysctl.c +++ b/net/rds/iw_sysctl.c @@ -109,13 +109,6 @@ static ctl_table rds_iw_sysctl_table[] = { { } }; -static struct ctl_path rds_iw_sysctl_path[] = { - { .procname = "net", }, - { .procname = "rds", }, - { .procname = "iw", }, - { } -}; - void rds_iw_sysctl_exit(void) { if (rds_iw_sysctl_hdr) @@ -124,7 +117,7 @@ void rds_iw_sysctl_exit(void) int rds_iw_sysctl_init(void) { - rds_iw_sysctl_hdr = register_net_sysctl_table(&init_net, rds_iw_sysctl_path, rds_iw_sysctl_table); + rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table); if (!rds_iw_sysctl_hdr) return -ENOMEM; return 0; diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index 30354b8cd584..907214b4c4d0 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -92,13 +92,6 @@ static ctl_table rds_sysctl_rds_table[] = { { } }; -static struct ctl_path rds_sysctl_path[] = { - { .procname = "net", }, - { .procname = "rds", }, - { } -}; - - void rds_sysctl_exit(void) { if (rds_sysctl_reg_table) @@ -110,7 +103,7 @@ int rds_sysctl_init(void) rds_sysctl_reconnect_min = msecs_to_jiffies(1); rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; - rds_sysctl_reg_table = register_net_sysctl_table(&init_net, rds_sysctl_path, rds_sysctl_rds_table); + rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table); if (!rds_sysctl_reg_table) return -ENOMEM; return 0; diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c index 02b73979535b..94ca9c2ccd69 100644 --- a/net/rose/sysctl_net_rose.c +++ b/net/rose/sysctl_net_rose.c @@ -118,15 +118,9 @@ static ctl_table rose_table[] = { { } }; -static struct ctl_path rose_path[] = { - { .procname = "net", }, - { .procname = "rose", }, - { } -}; - void __init rose_register_sysctl(void) { - rose_table_header = register_net_sysctl_table(&init_net, rose_path, rose_table); + rose_table_header = 
register_net_sysctl(&init_net, "net/rose", rose_table); } void rose_unregister_sysctl(void) diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 1e385b452047..e5fe639c89e7 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -275,18 +275,12 @@ static ctl_table sctp_table[] = { { /* sentinel */ } }; -static struct ctl_path sctp_path[] = { - { .procname = "net", }, - { .procname = "sctp", }, - { } -}; - static struct ctl_table_header * sctp_sysctl_header; /* Sysctl registration. */ void sctp_sysctl_register(void) { - sctp_sysctl_header = register_net_sysctl_table(&init_net, sctp_path, sctp_table); + sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table); } /* Sysctl deregistration. */ diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index 4f6979c06f84..b34b5b9792f0 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -26,12 +26,6 @@ static ctl_table unix_table[] = { { } }; -static struct ctl_path unix_path[] = { - { .procname = "net", }, - { .procname = "unix", }, - { }, -}; - int __net_init unix_sysctl_register(struct net *net) { struct ctl_table *table; @@ -41,7 +35,7 @@ int __net_init unix_sysctl_register(struct net *net) goto err_alloc; table[0].data = &net->unx.sysctl_max_dgram_qlen; - net->unx.ctl = register_net_sysctl_table(net, unix_path, table); + net->unx.ctl = register_net_sysctl(net, "net/unix", table); if (net->unx.ctl == NULL) goto err_reg; diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c index 08337cb488d4..43239527a205 100644 --- a/net/x25/sysctl_net_x25.c +++ b/net/x25/sysctl_net_x25.c @@ -73,15 +73,9 @@ static struct ctl_table x25_table[] = { { 0, }, }; -static struct ctl_path x25_path[] = { - { .procname = "net", }, - { .procname = "x25", }, - { } -}; - void __init x25_register_sysctl(void) { - x25_table_header = register_net_sysctl_table(&init_net, x25_path, x25_table); + x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); } void x25_unregister_sysctl(void) diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c index 05640bc9594b..380976f74c4c 100644 --- a/net/xfrm/xfrm_sysctl.c +++ b/net/xfrm/xfrm_sysctl.c @@ -54,7 +54,7 @@ int __net_init xfrm_sysctl_init(struct net *net) table[2].data = &net->xfrm.sysctl_larval_drop; table[3].data = &net->xfrm.sysctl_acq_expires; - net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table); + net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table); if (!net->xfrm.sysctl_hdr) goto out_register; return 0; -- cgit v1.2.2 From a5347fe36b313c07d59b065d00a8fa56362c5f97 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:45:29 +0000 Subject: net: Delete all remaining instances of ctl_path We don't use struct ctl_path anymore so delete the exported constants. Signed-off-by: Eric W. Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/core/sysctl_net_core.c | 6 ------ net/ipv4/netfilter.c | 10 ---------- net/ipv4/sysctl_net_ipv4.c | 7 ------- net/ipv6/sysctl_net_ipv6.c | 7 ------- net/netfilter/core.c | 9 --------- net/netfilter/ipvs/ip_vs_ctl.c | 7 ------- 6 files changed, 46 deletions(-) (limited to 'net') diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 64924e345a9b..a7c36845b123 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -203,12 +203,6 @@ static struct ctl_table netns_core_table[] = { { } }; -__net_initdata struct ctl_path net_core_path[] = { - { .procname = "net", }, - { .procname = "core", }, - { }, -}; - static __net_init int sysctl_core_net_init(struct net *net) { struct ctl_table *tbl; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 3cd8c586741a..ed1b36783192 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -237,13 +237,3 @@ static void ipv4_netfilter_fini(void) module_init(ipv4_netfilter_init); module_exit(ipv4_netfilter_fini); - -#ifdef CONFIG_SYSCTL -struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "netfilter", }, - { } -}; -EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path); -#endif /* CONFIG_SYSCTL */ diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 56e64f7b75d0..33417f84e07f 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -768,13 +768,6 @@ static struct ctl_table ipv4_net_table[] = { { } }; -struct ctl_path net_ipv4_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { }, -}; -EXPORT_SYMBOL_GPL(net_ipv4_ctl_path); - static __net_init int ipv4_sysctl_init_net(struct net *net) { struct ctl_table *table; diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index a52d8203a5de..e85c48bd404f 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -38,13 +38,6 @@ static ctl_table ipv6_rotable[] = { { } }; -struct ctl_path net_ipv6_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv6", }, - { }, -}; -EXPORT_SYMBOL_GPL(net_ipv6_ctl_path); - static int __net_init ipv6_sysctl_net_init(struct net *net) { struct ctl_table *ipv6_table; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index e1b7e051332e..e19f3653db23 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -290,12 +290,3 @@ void __init netfilter_init(void) if (netfilter_log_init() < 0) panic("cannot initialize nf_log"); } - -#ifdef CONFIG_SYSCTL -struct ctl_path nf_net_netfilter_sysctl_path[] = { - { .procname = "net", }, - { .procname = "netfilter", }, - { } -}; -EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path); -#endif /* CONFIG_SYSCTL */ diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index a606d6b1b0e5..e3707d2bd9c7 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1846,13 +1846,6 @@ static struct ctl_table vs_vars[] = { { } }; -const struct ctl_path net_vs_ctl_path[] = { - { .procname = "net", }, - { .procname = "ipv4", }, - { .procname = "vs", }, - { } -}; -EXPORT_SYMBOL_GPL(net_vs_ctl_path); #endif #ifdef CONFIG_PROC_FS -- cgit v1.2.2 From 5f568e5afe35721f2f692bccab243ba87cd8f87a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Apr 2012 13:46:06 +0000 Subject: net: Remove register_net_sysctl_table All of the users have been converted to use register_net_sysctl so we no longer need register_net_sysctl_table. Signed-off-by: Eric W. 
Biederman Acked-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/sysctl_net.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'net') diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 2b8d1d950987..f3e813a8d107 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -95,13 +95,6 @@ out: return ret; } -struct ctl_table_header *register_net_sysctl_table(struct net *net, - const struct ctl_path *path, struct ctl_table *table) -{ - return __register_sysctl_paths(&net->sysctls, path, table); -} -EXPORT_SYMBOL_GPL(register_net_sysctl_table); - struct ctl_table_header *register_net_sysctl(struct net *net, const char *path, struct ctl_table *table) { -- cgit v1.2.2 From 4a17fd5229c1b6066aa478f6b690f8293ce811a1 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 19 Apr 2012 03:39:36 +0000 Subject: sock: Introduce named constants for sk_reuse Name them in a "backward compatible" manner, i.e. reuse or not are still 1 and 0 respectively. The reuse value of 2 means that the socket with it will forcibly reuse everyone else's port. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/core/sock.c | 2 +- net/econet/af_econet.c | 4 ++-- net/ipv4/af_inet.c | 2 +- net/ipv4/inet_connection_sock.c | 3 +++ net/ipv6/af_inet6.c | 2 +- net/netfilter/ipvs/ip_vs_sync.c | 2 +- net/rds/tcp_listen.c | 2 +- net/sunrpc/svcsock.c | 2 +- 8 files changed, 11 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index c7e60eac639b..679c5bbe2bed 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -561,7 +561,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, sock_valbool_flag(sk, SOCK_DBG, valbool); break; case SO_REUSEADDR: - sk->sk_reuse = valbool; + sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); break; case SO_TYPE: case SO_PROTOCOL: diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 71b5edcee401..fa14ca76b77b 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -617,7 +617,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol, if (sk == NULL) goto out; - sk->sk_reuse = 1; + sk->sk_reuse = SK_CAN_REUSE; sock->ops = &econet_ops; sock_init_data(sock, sk); @@ -1012,7 +1012,7 @@ static int __init aun_udp_initialise(void) return error; } - udpsock->sk->sk_reuse = 1; + udpsock->sk->sk_reuse = SK_CAN_REUSE; udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it from interrupts */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 3744c1c0af5a..c8f7aee587d1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -350,7 +350,7 @@ lookup_protocol: err = 0; sk->sk_no_check = answer_no_check; if (INET_PROTOSW_REUSE & answer_flags) - sk->sk_reuse = 1; + sk->sk_reuse = SK_CAN_REUSE; inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d972f650a61..95e61596e605 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -182,6 +182,9 @@ have_snum: goto tb_not_found; tb_found: if (!hlist_empty(&tb->owners)) { + if (sk->sk_reuse == SK_FORCE_REUSE) + goto success; + if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size == -1) { diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 461e7896e5d8..0ad046c7ae95 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -180,7 +180,7 @@ lookup_protocol: err = 0; sk->sk_no_check = answer_no_check; if (INET_PROTOSW_REUSE & 
answer_flags) - sk->sk_reuse = 1; + sk->sk_reuse = SK_CAN_REUSE; inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index f4e0b6cf8246..bf5e538af67b 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1368,7 +1368,7 @@ static struct socket *make_receive_sock(struct net *net) */ sk_change_net(sock->sk, net); /* it is equivalent to the REUSEADDR option in user-space */ - sock->sk->sk_reuse = 1; + sock->sk->sk_reuse = SK_CAN_REUSE; result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, sizeof(struct sockaddr)); diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 8b5cc4aa8868..72981375f47c 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -145,7 +145,7 @@ int rds_tcp_listen_init(void) if (ret < 0) goto out; - sock->sk->sk_reuse = 1; + sock->sk->sk_reuse = SK_CAN_REUSE; rds_tcp_nonagle(sock); write_lock_bh(&sock->sk->sk_callback_lock); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 824d32fb3121..f0132b2e875e 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1556,7 +1556,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, (char *)&val, sizeof(val)); if (type == SOCK_STREAM) - sock->sk->sk_reuse = 1; /* allow address reuse */ + sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */ error = kernel_bind(sock, sin, len); if (error < 0) goto bummer; -- cgit v1.2.2 From 370816aef0c5436c2adbec3966038f36ca326933 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 19 Apr 2012 03:40:01 +0000 Subject: tcp: Move code around This is just the preparation patch, which makes the needed for TCP repair code ready for use. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_input.c | 81 ++++++++++++++++++++++++++++++--------------------- net/ipv4/tcp_output.c | 4 +-- 3 files changed, 51 insertions(+), 36 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c53e8a827f55..bb4200f56158 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -919,7 +919,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int iovlen, flags, err, copied; - int mss_now, size_goal; + int mss_now = 0, size_goal; bool sg; long timeo; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 99448f06a42a..37e1c5cd2c01 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5325,6 +5325,14 @@ discard: return 0; } +void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen) +{ + __skb_pull(skb, hdrlen); + __skb_queue_tail(&sk->sk_receive_queue, skb); + skb_set_owner_r(skb, sk); + tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; +} + /* * TCP receive function for the ESTABLISHED state. 
* @@ -5490,10 +5498,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ - __skb_pull(skb, tcp_header_len); - __skb_queue_tail(&sk->sk_receive_queue, skb); - skb_set_owner_r(skb, sk); - tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + tcp_queue_rcv(sk, skb, tcp_header_len); } tcp_event_data_recv(sk, skb); @@ -5559,6 +5564,44 @@ discard: } EXPORT_SYMBOL(tcp_rcv_established); +void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct inet_connection_sock *icsk = inet_csk(sk); + + tcp_set_state(sk, TCP_ESTABLISHED); + + if (skb != NULL) + security_inet_conn_established(sk, skb); + + /* Make sure socket is routed, for correct metrics. */ + icsk->icsk_af_ops->rebuild_header(sk); + + tcp_init_metrics(sk); + + tcp_init_congestion_control(sk); + + /* Prevent spurious tcp_cwnd_restart() on first data + * packet. + */ + tp->lsndtime = tcp_time_stamp; + + tcp_init_buffer_space(sk); + + if (sock_flag(sk, SOCK_KEEPOPEN)) + inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); + + if (!tp->rx_opt.snd_wscale) + __tcp_fast_path_on(tp, tp->snd_wnd); + else + tp->pred_flags = 0; + + if (!sock_flag(sk, SOCK_DEAD)) { + sk->sk_state_change(sk); + sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); + } +} + static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { @@ -5691,36 +5734,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, } smp_mb(); - tcp_set_state(sk, TCP_ESTABLISHED); - - security_inet_conn_established(sk, skb); - - /* Make sure socket is routed, for correct metrics. */ - icsk->icsk_af_ops->rebuild_header(sk); - - tcp_init_metrics(sk); - tcp_init_congestion_control(sk); - - /* Prevent spurious tcp_cwnd_restart() on first data - * packet. - */ - tp->lsndtime = tcp_time_stamp; - - tcp_init_buffer_space(sk); - - if (sock_flag(sk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); - - if (!tp->rx_opt.snd_wscale) - __tcp_fast_path_on(tp, tp->snd_wnd); - else - tp->pred_flags = 0; - - if (!sock_flag(sk, SOCK_DEAD)) { - sk->sk_state_change(sk); - sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); - } + tcp_finish_connect(sk, skb); if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index de8790ced946..db126a6954a2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2561,7 +2561,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, EXPORT_SYMBOL(tcp_make_synack); /* Do all connect socket setups that can be done AF independent. */ -static void tcp_connect_init(struct sock *sk) +void tcp_connect_init(struct sock *sk) { const struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2616,6 +2616,7 @@ static void tcp_connect_init(struct sock *sk) tp->snd_una = tp->write_seq; tp->snd_sml = tp->write_seq; tp->snd_up = tp->write_seq; + tp->snd_nxt = tp->write_seq; tp->rcv_nxt = 0; tp->rcv_wup = 0; tp->copied_seq = 0; @@ -2641,7 +2642,6 @@ int tcp_connect(struct sock *sk) /* Reserve space for headers. 
*/ skb_reserve(buff, MAX_TCP_HEADER); - tp->snd_nxt = tp->write_seq; tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); TCP_ECN_send_syn(sk, buff); -- cgit v1.2.2 From ee9952831cfd0bbe834f4a26489d7dce74582e37 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 19 Apr 2012 03:40:39 +0000 Subject: tcp: Initial repair mode This includes (according the the previous description): * TCP_REPAIR sockoption This one just puts the socket in/out of the repair mode. Allowed for CAP_NET_ADMIN and for closed/establised sockets only. When repair mode is turned off and the socket happens to be in the established state the window probe is sent to the peer to 'unlock' the connection. * TCP_REPAIR_QUEUE sockoption This one sets the queue which we're about to repair. The 'no-queue' is set by default. * TCP_QUEUE_SEQ socoption Sets the write_seq/rcv_nxt of a selected repaired queue. Allowed for TCP_CLOSE-d sockets only. When the socket changes its state the other seq-s are changed by the kernel according to the protocol rules (most of the existing code is actually reused). * Ability to forcibly bind a socket to a port The sk->sk_reuse is set to SK_FORCE_REUSE. * Immediate connect modification The connect syscall initializes the connection, then directly jumps to the code which finalizes it. * Silent close modification The close just aborts the connection (similar to SO_LINGER with 0 time) but without sending any FIN/RST-s to peer. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++- net/ipv4/tcp_ipv4.c | 19 +++++++++++--- net/ipv4/tcp_output.c | 16 +++++++++--- 3 files changed, 96 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bb4200f56158..e38d6f240321 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1935,7 +1935,9 @@ void tcp_close(struct sock *sk, long timeout) * advertise a zero window, then kill -9 the FTP client, wheee... * Note: timeout is always zero in such a case. */ - if (data_was_unread) { + if (unlikely(tcp_sk(sk)->repair)) { + sk->sk_prot->disconnect(sk, 0); + } else if (data_was_unread) { /* Unread data was tossed, zap the connection. */ NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); @@ -2074,6 +2076,8 @@ int tcp_disconnect(struct sock *sk, int flags) /* ABORT function of RFC793 */ if (old_state == TCP_LISTEN) { inet_csk_listen_stop(sk); + } else if (unlikely(tp->repair)) { + sk->sk_err = ECONNABORTED; } else if (tcp_need_reset(old_state) || (tp->snd_nxt != tp->write_seq && (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { @@ -2125,6 +2129,12 @@ int tcp_disconnect(struct sock *sk, int flags) } EXPORT_SYMBOL(tcp_disconnect); +static inline int tcp_can_repair_sock(struct sock *sk) +{ + return capable(CAP_NET_ADMIN) && + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); +} + /* * Socket option code for TCP. 
*/ @@ -2297,6 +2307,42 @@ static int do_tcp_setsockopt(struct sock *sk, int level, tp->thin_dupack = val; break; + case TCP_REPAIR: + if (!tcp_can_repair_sock(sk)) + err = -EPERM; + else if (val == 1) { + tp->repair = 1; + sk->sk_reuse = SK_FORCE_REUSE; + tp->repair_queue = TCP_NO_QUEUE; + } else if (val == 0) { + tp->repair = 0; + sk->sk_reuse = SK_NO_REUSE; + tcp_send_window_probe(sk); + } else + err = -EINVAL; + + break; + + case TCP_REPAIR_QUEUE: + if (!tp->repair) + err = -EPERM; + else if (val < TCP_QUEUES_NR) + tp->repair_queue = val; + else + err = -EINVAL; + break; + + case TCP_QUEUE_SEQ: + if (sk->sk_state != TCP_CLOSE) + err = -EPERM; + else if (tp->repair_queue == TCP_SEND_QUEUE) + tp->write_seq = val; + else if (tp->repair_queue == TCP_RECV_QUEUE) + tp->rcv_nxt = val; + else + err = -EINVAL; + break; + case TCP_CORK: /* When set indicates to always queue non-full frames. * Later the user clears this option and we transmit @@ -2632,6 +2678,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = tp->thin_dupack; break; + case TCP_REPAIR: + val = tp->repair; + break; + + case TCP_REPAIR_QUEUE: + if (tp->repair) + val = tp->repair_queue; + else + return -EINVAL; + break; + + case TCP_QUEUE_SEQ: + if (tp->repair_queue == TCP_SEND_QUEUE) + val = tp->write_seq; + else if (tp->repair_queue == TCP_RECV_QUEUE) + val = tp->rcv_nxt; + else + return -EINVAL; + break; + case TCP_USER_TIMEOUT: val = jiffies_to_msecs(icsk->icsk_user_timeout); break; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0cb86ceb652f..ba6dad81908e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -138,6 +138,14 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) } EXPORT_SYMBOL_GPL(tcp_twsk_unique); +static int tcp_repair_connect(struct sock *sk) +{ + tcp_connect_init(sk); + tcp_finish_connect(sk, NULL); + + return 0; +} + /* This will initiate an outgoing connection. 
*/ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { @@ -196,7 +204,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; - tp->write_seq = 0; + if (likely(!tp->repair)) + tp->write_seq = 0; } if (tcp_death_row.sysctl_tw_recycle && @@ -247,7 +256,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); - if (!tp->write_seq) + if (!tp->write_seq && likely(!tp->repair)) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, @@ -255,7 +264,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_id = tp->write_seq ^ jiffies; - err = tcp_connect(sk); + if (likely(!tp->repair)) + err = tcp_connect(sk); + else + err = tcp_repair_connect(sk); + rt = NULL; if (err) goto failure; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index db126a6954a2..fa442a61be6a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2617,9 +2617,11 @@ void tcp_connect_init(struct sock *sk) tp->snd_sml = tp->write_seq; tp->snd_up = tp->write_seq; tp->snd_nxt = tp->write_seq; - tp->rcv_nxt = 0; - tp->rcv_wup = 0; - tp->copied_seq = 0; + + if (likely(!tp->repair)) + tp->rcv_nxt = 0; + tp->rcv_wup = tp->rcv_nxt; + tp->copied_seq = tp->rcv_nxt; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_retransmits = 0; @@ -2790,6 +2792,14 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); } +void tcp_send_window_probe(struct sock *sk) +{ + if (sk->sk_state == TCP_ESTABLISHED) { + tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; + tcp_xmit_probe_skb(sk, 0); + } +} + /* Initiate keepalive or window probe from timer. */ int tcp_write_wakeup(struct sock *sk) { -- cgit v1.2.2 From c0e88ff0f256958401778ff692da4b8891acb5a9 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 19 Apr 2012 03:41:01 +0000 Subject: tcp: Repair socket queues Reading queues under repair mode is done with recvmsg call. The queue-under-repair set by TCP_REPAIR_QUEUE option is used to determine which queue should be read. Thus both send and receive queue can be read with this. Caller must pass the MSG_PEEK flag. Writing to queues is done with sendmsg call and yet again -- the repair-queue option can be used to push data into the receive queue. When putting an skb into receive queue a zero tcp header is appented to its head to address the tcp_hdr(skb)->syn and the ->fin checks by the (after repair) tcp_recvmsg. These flags flags are both set to zero and that's why. The fin cannot be met in the queue while reading the source socket, since the repair only works for closed/established sockets and queueing fin packet always changes its state. The syn in the queue denotes that the respective skb's seq is "off-by-one" as compared to the actual payload lenght. Thus, at the rcv queue refill we can just drop this flag and set the skb's sequences to precice values. When the repair mode is turned off, the write queue seqs are updated so that the whole queue is considered to be 'already sent, waiting for ACKs' (write_seq = snd_nxt <= snd_una). From the protocol POV the send queue looks like it was sent, but the data between the write_seq and snd_nxt is lost in the network. This helps to avoid another sockoption for setting the snd_nxt sequence. 
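(Illustrative sketch, not part of the original patch: a minimal user-space fragment of the dump flow described above, i.e. switch the socket into repair mode, select the queue with TCP_REPAIR_QUEUE, then read it with MSG_PEEK. The numeric option values below are assumptions for systems whose headers predate this series; on a patched tree they come from linux/tcp.h.)

  #include <stddef.h>
  #include <sys/types.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <netinet/tcp.h>

  #ifndef TCP_REPAIR               /* assumed values if not yet exported */
  #define TCP_REPAIR        19
  #define TCP_REPAIR_QUEUE  20
  #define TCP_RECV_QUEUE     1
  #define TCP_SEND_QUEUE     2
  #endif

  /* Peek the not-yet-acked send queue of an established socket;
   * needs CAP_NET_ADMIN, see tcp_can_repair_sock(). */
  static ssize_t peek_send_queue(int sk, void *buf, size_t len)
  {
          int on = 1, q = TCP_SEND_QUEUE;

          if (setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0 ||
              setsockopt(sk, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q)) < 0)
                  return -1;
          /* MSG_PEEK is mandatory here; without it recvmsg() fails with EPERM. */
          return recv(sk, buf, len, MSG_PEEK);
  }

A restore would mirror this with sendmsg() after selecting TCP_RECV_QUEUE, as described above.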
Leaving the whole queue in a 'not yet sent' state (as it will be after sendmsg-s) will not allow to receive any acks from the peer since the ack_seq will be after the snd_nxt. Thus even the ack for the window probe will be dropped and the connection will be 'locked' with the zero peer window. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++-- net/ipv4/tcp_output.c | 1 + 2 files changed, 87 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e38d6f240321..47e2f4972f79 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -912,6 +912,39 @@ static inline int select_size(const struct sock *sk, bool sg) return tmp; } +static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) +{ + struct sk_buff *skb; + struct tcp_skb_cb *cb; + struct tcphdr *th; + + skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); + if (!skb) + goto err; + + th = (struct tcphdr *)skb_put(skb, sizeof(*th)); + skb_reset_transport_header(skb); + memset(th, 0, sizeof(*th)); + + if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) + goto err_free; + + cb = TCP_SKB_CB(skb); + + TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; + TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; + TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; + + tcp_queue_rcv(sk, skb, sizeof(*th)); + + return size; + +err_free: + kfree_skb(skb); +err: + return -ENOMEM; +} + int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { @@ -933,6 +966,19 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; + if (unlikely(tp->repair)) { + if (tp->repair_queue == TCP_RECV_QUEUE) { + copied = tcp_send_rcvq(sk, msg, size); + goto out; + } + + err = -EINVAL; + if (tp->repair_queue == TCP_NO_QUEUE) + goto out_err; + + /* 'common' sending to sendq */ + } + /* This should be in poll */ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); @@ -1089,7 +1135,7 @@ new_segment: if ((seglen -= copy) == 0 && iovlen == 0) goto out; - if (skb->len < max || (flags & MSG_OOB)) + if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair)) continue; if (forced_push(tp)) { @@ -1102,7 +1148,7 @@ new_segment: wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: - if (copied) + if (copied && likely(!tp->repair)) tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) @@ -1113,7 +1159,7 @@ wait_for_memory: } out: - if (copied) + if (copied && likely(!tp->repair)) tcp_push(sk, flags, mss_now, tp->nonagle); release_sock(sk); return copied; @@ -1187,6 +1233,24 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) return -EAGAIN; } +static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) +{ + struct sk_buff *skb; + int copied = 0, err = 0; + + /* XXX -- need to support SO_PEEK_OFF */ + + skb_queue_walk(&sk->sk_write_queue, skb) { + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); + if (err) + break; + + copied += skb->len; + } + + return err ?: copied; +} + /* Clean up the receive buffer for full frames taken by the user, * then send an ACK if necessary. 
COPIED is the number of bytes * tcp_recvmsg has given to the user so far, it speeds up the @@ -1432,6 +1496,21 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto recv_urg; + if (unlikely(tp->repair)) { + err = -EPERM; + if (!(flags & MSG_PEEK)) + goto out; + + if (tp->repair_queue == TCP_SEND_QUEUE) + goto recv_sndq; + + err = -EINVAL; + if (tp->repair_queue == TCP_NO_QUEUE) + goto out; + + /* 'common' recv queue MSG_PEEK-ing */ + } + seq = &tp->copied_seq; if (flags & MSG_PEEK) { peek_seq = tp->copied_seq; @@ -1783,6 +1862,10 @@ out: recv_urg: err = tcp_recv_urg(sk, msg, len, flags); goto out; + +recv_sndq: + err = tcp_peek_sndq(sk, msg, len); + goto out; } EXPORT_SYMBOL(tcp_recvmsg); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fa442a61be6a..57a834cafca1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2796,6 +2796,7 @@ void tcp_send_window_probe(struct sock *sk) { if (sk->sk_state == TCP_ESTABLISHED) { tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; + tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; tcp_xmit_probe_skb(sk, 0); } } -- cgit v1.2.2 From 5e6a3ce6573f0c519d1ff57df60e3877bb2d3151 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 19 Apr 2012 03:41:32 +0000 Subject: tcp: Report mss_clamp with TCP_MAXSEG option in repair mode The mss_clamp is the only connection-time negotiated option which cannot be obtained from the user space. Make the TCP_MAXSEG sockopt report one in the repair mode. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e2f4972f79..b4e690ddb08c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2659,6 +2659,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = tp->mss_cache; if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) val = tp->rx_opt.user_mss; + if (tp->repair) + val = tp->rx_opt.mss_clamp; break; case TCP_NODELAY: val = !!(tp->nonagle&TCP_NAGLE_OFF); -- cgit v1.2.2 From b139ba4e90dccbf4cd4efb112af96a5c9e0b098c Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 19 Apr 2012 03:41:57 +0000 Subject: tcp: Repair connection-time negotiated parameters There are options, which are set up on a socket while performing TCP handshake. Need to resurrect them on a socket while repairing. A new sockoption accepts a buffer and parses it. The buffer should be CODE:VALUE sequence of bytes, where CODE is standard option code and VALUE is the respective value. Only 4 options should be handled on repaired socket. To read 3 out of 4 of these options the TCP_INFO sockoption can be used. An ability to get the last one (the mss_clamp) was added by the previous patch. Now the restore. Three of these options -- timestamp_ok, mss_clamp and snd_wscale -- are just restored on a coket. The sack_ok flags has 2 issues. First, whether or not to do sacks at all. This flag is just read and set back. No other sack info is saved or restored, since according to the standart and the code dropping all sack-ed segments is OK, the sender will resubmit them again, so after the repair we will probably experience a pause in connection. Next, the fack bit. It's just set back on a socket if the respective sysctl is set. No collected stats about packets flow is preserved. 
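(Illustrative sketch, not part of the original patch: how a restore tool might build the CODE:VALUE buffer that the new tcp_repair_options_est() parses. The widths follow that parser, a 16-bit value for MSS and one byte for the window scale, while SACK-perm and timestamps are bare codes; the TCP_REPAIR_OPTIONS number is an assumption for headers that predate this series.)

  #include <stdint.h>
  #include <string.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <netinet/tcp.h>

  #ifndef TCP_REPAIR_OPTIONS
  #define TCP_REPAIR_OPTIONS 22    /* assumed value from this series */
  #endif
  #ifndef TCPOPT_MSS               /* standard TCP option kinds */
  #define TCPOPT_MSS        2
  #define TCPOPT_WINDOW     3
  #define TCPOPT_SACK_PERM  4
  #define TCPOPT_TIMESTAMP  8
  #endif

  /* Socket must already be in repair mode and TCP_ESTABLISHED. */
  static int restore_negotiated_opts(int sk, uint16_t mss, uint8_t wscale)
  {
          unsigned char buf[8], *p = buf;

          *p++ = TCPOPT_MSS;
          memcpy(p, &mss, sizeof(mss));    /* raw value bytes follow the code */
          p += sizeof(mss);
          *p++ = TCPOPT_WINDOW;
          *p++ = wscale;                   /* rejected with EFBIG if > 14 */
          *p++ = TCPOPT_SACK_PERM;         /* value-less codes */
          *p++ = TCPOPT_TIMESTAMP;

          return setsockopt(sk, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
                            buf, (socklen_t)(p - buf));
  }

The mss and wscale arguments would typically be taken from the source socket via TCP_MAXSEG and TCP_INFO, as noted above.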
As far as I see (plz, correct me if I'm wrong) the fack-based congestion algorithm survives dropping all of the stats and repairs itself eventually, probably losing the performance for that period. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b4e690ddb08c..3ce3bd031f33 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2218,6 +2218,68 @@ static inline int tcp_can_repair_sock(struct sock *sk) ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); } +static int tcp_repair_options_est(struct tcp_sock *tp, char __user *optbuf, unsigned int len) +{ + /* + * Options are stored in CODE:VALUE form where CODE is 8bit and VALUE + * fits the respective TCPOLEN_ size + */ + + while (len > 0) { + u8 opcode; + + if (get_user(opcode, optbuf)) + return -EFAULT; + + optbuf++; + len--; + + switch (opcode) { + case TCPOPT_MSS: { + u16 in_mss; + + if (len < sizeof(in_mss)) + return -ENODATA; + if (get_user(in_mss, optbuf)) + return -EFAULT; + + tp->rx_opt.mss_clamp = in_mss; + + optbuf += sizeof(in_mss); + len -= sizeof(in_mss); + break; + } + case TCPOPT_WINDOW: { + u8 wscale; + + if (len < sizeof(wscale)) + return -ENODATA; + if (get_user(wscale, optbuf)) + return -EFAULT; + + if (wscale > 14) + return -EFBIG; + + tp->rx_opt.snd_wscale = wscale; + + optbuf += sizeof(wscale); + len -= sizeof(wscale); + break; + } + case TCPOPT_SACK_PERM: + tp->rx_opt.sack_ok |= TCP_SACK_SEEN; + if (sysctl_tcp_fack) + tcp_enable_fack(tp); + break; + case TCPOPT_TIMESTAMP: + tp->rx_opt.tstamp_ok = 1; + break; + } + } + + return 0; +} + /* * Socket option code for TCP. */ @@ -2426,6 +2488,15 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EINVAL; break; + case TCP_REPAIR_OPTIONS: + if (!tp->repair) + err = -EINVAL; + else if (sk->sk_state == TCP_ESTABLISHED) + err = tcp_repair_options_est(tp, optval, optlen); + else + err = -EPERM; + break; + case TCP_CORK: /* When set indicates to always queue non-full frames. * Later the user clears this option and we transmit -- cgit v1.2.2 From a74e910618efb154936958cfeb2aab7c234478c5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Apr 2012 20:04:01 +0200 Subject: net: change big iov allocations iov of more than 8 entries are allocated in sendmsg()/recvmsg() through sock_kmalloc() As these allocations are temporary only and small enough, it makes sense to use plain kmalloc() and avoid sk_omem_alloc atomic overhead. Slightly changed fast path to be even faster. Signed-off-by: Eric Dumazet Cc: Mike Waychison Signed-off-by: David S. 
Miller --- net/socket.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/socket.c b/net/socket.c index e34510928dcc..d3aaa4f67a3b 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1908,7 +1908,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; - int err, ctl_len, iov_size, total_len; + int err, ctl_len, total_len; err = -EFAULT; if (MSG_CMSG_COMPAT & flags) { @@ -1917,16 +1917,13 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; - /* do not move before msg_sys is valid */ - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - - /* Check whether to allocate the iovec area */ - err = -ENOMEM; - iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { - iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); + err = -EMSGSIZE; + if (msg_sys->msg_iovlen > UIO_MAXIOV) + goto out; + err = -ENOMEM; + iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), + GFP_KERNEL); if (!iov) goto out; } @@ -2005,7 +2002,7 @@ out_freectl: sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: if (iov != iovstack) - sock_kfree_s(sock->sk, iov, iov_size); + kfree(iov); out: return err; } @@ -2103,7 +2100,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; - int err, iov_size, total_len, len; + int err, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; @@ -2118,15 +2115,13 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - - /* Check whether to allocate the iovec area */ - err = -ENOMEM; - iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { - iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); + err = -EMSGSIZE; + if (msg_sys->msg_iovlen > UIO_MAXIOV) + goto out; + err = -ENOMEM; + iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), + GFP_KERNEL); if (!iov) goto out; } @@ -2180,7 +2175,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, out_freeiov: if (iov != iovstack) - sock_kfree_s(sock->sk, iov, iov_size); + kfree(iov); out: return err; } -- cgit v1.2.2 From e66e9a31474dcce5be6f1186dc933d8a991c707b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 09:38:17 +0000 Subject: net: allow better page reuse in splice(sock -> pipe) splice() from socket to pipe needs linear_to_page() helper to transfert skb header to part of page. We can reset the offset in the current sk->sk_sndmsg_page if we are the last user of the page. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 43c490d52df6..bf257de95d26 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1565,6 +1565,9 @@ new_page: } else { unsigned int mlen; + /* If we are the only user of the page, we can reset offset */ + if (page_count(p) == 1) + sk->sk_sndmsg_off = 0; off = sk->sk_sndmsg_off; mlen = PAGE_SIZE - off; if (mlen < 64 && mlen < *len) { -- cgit v1.2.2 From 900f65d361d333c949ef76a828343075f4fdf523 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Thu, 19 Apr 2012 09:55:21 +0000 Subject: tcp: move duplicate code from tcp_v4_init_sock()/tcp_v6_init_sock() This commit moves the (substantial) common code shared between tcp_v4_init_sock() and tcp_v6_init_sock() to a new address-family independent function, tcp_init_sock(). Centralizing this functionality should help avoid drift issues, e.g. where the IPv4 side is updated without a corresponding update to IPv6. There was already some drift: IPv4 initialized snd_cwnd to TCP_INIT_CWND, while the IPv6 side was still initializing snd_cwnd to 2 (in this case it should not matter, since snd_cwnd is also initialized in tcp_init_metrics(), but the general risks and maintenance overhead remain). When diffing the old and new code, note that new tcp_init_sock() function uses the order of steps from the tcp_v4_init_sock() implementation (the order is slightly different in tcp_v6_init_sock()). Signed-off-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/tcp_ipv4.c | 52 ++----------------------------------------- net/ipv6/tcp_ipv6.c | 50 +---------------------------------------- 3 files changed, 67 insertions(+), 99 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3ce3bd031f33..bcc4eab5f251 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -363,6 +363,70 @@ static int retrans_to_secs(u8 retrans, int timeout, int rto_max) return period; } +/* Address-family independent initialization for a tcp_sock. + * + * NOTE: A lot of things set to zero explicitly by call to + * sk_alloc() so need not be done here. + */ +void tcp_init_sock(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + + skb_queue_head_init(&tp->out_of_order_queue); + tcp_init_xmit_timers(sk); + tcp_prequeue_init(tp); + + icsk->icsk_rto = TCP_TIMEOUT_INIT; + tp->mdev = TCP_TIMEOUT_INIT; + + /* So many TCP implementations out there (incorrectly) count the + * initial SYN frame in their delayed-ACK and congestion control + * algorithms that we must have the following bandaid to talk + * efficiently to them. -DaveM + */ + tp->snd_cwnd = TCP_INIT_CWND; + + /* See draft-stevens-tcpca-spec-01 for discussion of the + * initialization of these values. + */ + tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + tp->snd_cwnd_clamp = ~0; + tp->mss_cache = TCP_MSS_DEFAULT; + + tp->reordering = sysctl_tcp_reordering; + icsk->icsk_ca_ops = &tcp_init_congestion_ops; + + sk->sk_state = TCP_CLOSE; + + sk->sk_write_space = sk_stream_write_space; + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + + icsk->icsk_sync_mss = tcp_sync_mss; + + /* TCP Cookie Transactions */ + if (sysctl_tcp_cookie_size > 0) { + /* Default, cookies without s_data_payload. 
*/ + tp->cookie_values = + kzalloc(sizeof(*tp->cookie_values), + sk->sk_allocation); + if (tp->cookie_values != NULL) + kref_init(&tp->cookie_values->kref); + } + /* Presumed zeroed, in order of appearance: + * cookie_in_always, cookie_out_never, + * s_data_constant, s_data_in, s_data_out + */ + sk->sk_sndbuf = sysctl_tcp_wmem[1]; + sk->sk_rcvbuf = sysctl_tcp_rmem[1]; + + local_bh_disable(); + sock_update_memcg(sk); + sk_sockets_allocated_inc(sk); + local_bh_enable(); +} +EXPORT_SYMBOL(tcp_init_sock); + /* * Wait for a TCP event. * diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ba6dad81908e..5b07ea109300 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1890,62 +1890,14 @@ static int tcp_v4_init_sock(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - skb_queue_head_init(&tp->out_of_order_queue); - tcp_init_xmit_timers(sk); - tcp_prequeue_init(tp); - - icsk->icsk_rto = TCP_TIMEOUT_INIT; - tp->mdev = TCP_TIMEOUT_INIT; - - /* So many TCP implementations out there (incorrectly) count the - * initial SYN frame in their delayed-ACK and congestion control - * algorithms that we must have the following bandaid to talk - * efficiently to them. -DaveM - */ - tp->snd_cwnd = TCP_INIT_CWND; - - /* See draft-stevens-tcpca-spec-01 for discussion of the - * initialization of these values. - */ - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - tp->snd_cwnd_clamp = ~0; - tp->mss_cache = TCP_MSS_DEFAULT; - - tp->reordering = sysctl_tcp_reordering; - icsk->icsk_ca_ops = &tcp_init_congestion_ops; - - sk->sk_state = TCP_CLOSE; - - sk->sk_write_space = sk_stream_write_space; - sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + tcp_init_sock(sk); icsk->icsk_af_ops = &ipv4_specific; - icsk->icsk_sync_mss = tcp_sync_mss; + #ifdef CONFIG_TCP_MD5SIG tp->af_specific = &tcp_sock_ipv4_specific; #endif - /* TCP Cookie Transactions */ - if (sysctl_tcp_cookie_size > 0) { - /* Default, cookies without s_data_payload. */ - tp->cookie_values = - kzalloc(sizeof(*tp->cookie_values), - sk->sk_allocation); - if (tp->cookie_values != NULL) - kref_init(&tp->cookie_values->kref); - } - /* Presumed zeroed, in order of appearance: - * cookie_in_always, cookie_out_never, - * s_data_constant, s_data_in, s_data_out - */ - sk->sk_sndbuf = sysctl_tcp_wmem[1]; - sk->sk_rcvbuf = sysctl_tcp_rmem[1]; - - local_bh_disable(); - sock_update_memcg(sk); - sk_sockets_allocated_inc(sk); - local_bh_enable(); - return 0; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 050c55186bc4..24dac6b83fb9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1831,62 +1831,14 @@ static int tcp_v6_init_sock(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - skb_queue_head_init(&tp->out_of_order_queue); - tcp_init_xmit_timers(sk); - tcp_prequeue_init(tp); - - icsk->icsk_rto = TCP_TIMEOUT_INIT; - tp->mdev = TCP_TIMEOUT_INIT; - - /* So many TCP implementations out there (incorrectly) count the - * initial SYN frame in their delayed-ACK and congestion control - * algorithms that we must have the following bandaid to talk - * efficiently to them. -DaveM - */ - tp->snd_cwnd = 2; - - /* See draft-stevens-tcpca-spec-01 for discussion of the - * initialization of these values. 
- */ - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - tp->snd_cwnd_clamp = ~0; - tp->mss_cache = TCP_MSS_DEFAULT; - - tp->reordering = sysctl_tcp_reordering; - - sk->sk_state = TCP_CLOSE; + tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; - icsk->icsk_ca_ops = &tcp_init_congestion_ops; - icsk->icsk_sync_mss = tcp_sync_mss; - sk->sk_write_space = sk_stream_write_space; - sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); #ifdef CONFIG_TCP_MD5SIG tp->af_specific = &tcp_sock_ipv6_specific; #endif - /* TCP Cookie Transactions */ - if (sysctl_tcp_cookie_size > 0) { - /* Default, cookies without s_data_payload. */ - tp->cookie_values = - kzalloc(sizeof(*tp->cookie_values), - sk->sk_allocation); - if (tp->cookie_values != NULL) - kref_init(&tp->cookie_values->kref); - } - /* Presumed zeroed, in order of appearance: - * cookie_in_always, cookie_out_never, - * s_data_constant, s_data_in, s_data_out - */ - sk->sk_sndbuf = sysctl_tcp_wmem[1]; - sk->sk_rcvbuf = sysctl_tcp_rmem[1]; - - local_bh_disable(); - sock_update_memcg(sk); - sk_sockets_allocated_inc(sk); - local_bh_enable(); - return 0; } -- cgit v1.2.2 From c06fff6e175729f0d89201c98d21e55c2bf1a850 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2012 21:56:11 +0000 Subject: af_packet: packet_getsockopt() cleanup Factorize code, since most fetched values are int type. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/packet/af_packet.c | 46 ++++++++-------------------------------------- 1 file changed, 8 insertions(+), 38 deletions(-) (limited to 'net') diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 40053a08f009..0f661745df0f 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3224,10 +3224,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; - int val; + int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); - void *data; + void *data = &val; struct tpacket_stats st; union tpacket_stats_u st_u; @@ -3242,21 +3242,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, switch (optname) { case PACKET_STATISTICS: - if (po->tp_version == TPACKET_V3) { - len = sizeof(struct tpacket_stats_v3); - } else { - if (len > sizeof(struct tpacket_stats)) - len = sizeof(struct tpacket_stats); - } spin_lock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { + lv = sizeof(struct tpacket_stats_v3); memcpy(&st_u.stats3, &po->stats, - sizeof(struct tpacket_stats)); + sizeof(struct tpacket_stats)); st_u.stats3.tp_freeze_q_cnt = - po->stats_u.stats3.tp_freeze_q_cnt; + po->stats_u.stats3.tp_freeze_q_cnt; st_u.stats3.tp_packets += po->stats.tp_drops; data = &st_u.stats3; } else { + lv = sizeof(struct tpacket_stats); st = po->stats; st.tp_packets += st.tp_drops; data = &st; @@ -3265,31 +3261,16 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, spin_unlock_bh(&sk->sk_receive_queue.lock); break; case PACKET_AUXDATA: - if (len > sizeof(int)) - len = sizeof(int); val = po->auxdata; - - data = &val; break; case PACKET_ORIGDEV: - if (len > sizeof(int)) - len = sizeof(int); val = po->origdev; - - data = &val; break; case PACKET_VNET_HDR: - if (len > sizeof(int)) - len = sizeof(int); val = po->has_vnet_hdr; - - data = &val; break; case PACKET_VERSION: - if (len > sizeof(int)) - len = sizeof(int); val = po->tp_version; - data = &val; break; case PACKET_HDRLEN: if (len > sizeof(int)) @@ -3309,39 +3290,28 @@ static int 
packet_getsockopt(struct socket *sock, int level, int optname, default: return -EINVAL; } - data = &val; break; case PACKET_RESERVE: - if (len > sizeof(unsigned int)) - len = sizeof(unsigned int); val = po->tp_reserve; - data = &val; break; case PACKET_LOSS: - if (len > sizeof(unsigned int)) - len = sizeof(unsigned int); val = po->tp_loss; - data = &val; break; case PACKET_TIMESTAMP: - if (len > sizeof(int)) - len = sizeof(int); val = po->tp_tstamp; - data = &val; break; case PACKET_FANOUT: - if (len > sizeof(int)) - len = sizeof(int); val = (po->fanout ? ((u32)po->fanout->id | ((u32)po->fanout->type << 16)) : 0); - data = &val; break; default: return -ENOPROTOOPT; } + if (len > lv) + len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) -- cgit v1.2.2 From ac807fa8e625aff58060876d70298c93a59c4252 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 23 Apr 2012 03:21:58 -0400 Subject: tcp: Fix build warning after tcp_{v4,v6}_init_sock consolidation. net/ipv4/tcp_ipv4.c: In function 'tcp_v4_init_sock': net/ipv4/tcp_ipv4.c:1891:19: warning: unused variable 'tp' [-Wunused-variable] net/ipv6/tcp_ipv6.c: In function 'tcp_v6_init_sock': net/ipv6/tcp_ipv6.c:1836:19: warning: unused variable 'tp' [-Wunused-variable] Reported-by: Stephen Rothwell Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 3 +-- net/ipv6/tcp_ipv6.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5b07ea109300..0883921b20c1 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1888,14 +1888,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { static int tcp_v4_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv4_specific; #ifdef CONFIG_TCP_MD5SIG - tp->af_specific = &tcp_sock_ipv4_specific; + tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; #endif return 0; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 24dac6b83fb9..8044f6ac1301 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1829,14 +1829,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; #ifdef CONFIG_TCP_MD5SIG - tp->af_specific = &tcp_sock_ipv6_specific; + tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; #endif return 0; -- cgit v1.2.2 From 8a690674e0601efbe9a7b16a5826fc522645cca3 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Tue, 17 Apr 2012 10:54:16 -0700 Subject: mac80211: Support on-channel scan option. This based on an idea posted by Stanislaw Gruszka, though I accept full blame for the implementation! This has been tested with ath9k. The idea is to let users scan on the current operating channel without interrupting normal traffic more than absolutely necessary (changing power level might reset some hardware, for instance). Signed-off-by: Ben Greear Signed-off-by: John W. 
Linville --- net/mac80211/ieee80211_i.h | 3 ++ net/mac80211/main.c | 4 +- net/mac80211/rx.c | 2 + net/mac80211/scan.c | 95 +++++++++++++++++++++++++++++++++------------- 4 files changed, 77 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index bd7a451b0849..1d074260acd1 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -803,6 +803,8 @@ struct tpt_led_trigger { * well be on the operating channel * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to * determine if we are on the operating channel or not + * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating + * channel. This should not interrupt normal traffic. * @SCAN_COMPLETED: Set for our scan work function when the driver reported * that the scan completed. * @SCAN_ABORTED: Set for our scan work function when the driver reported @@ -811,6 +813,7 @@ struct tpt_led_trigger { enum { SCAN_SW_SCANNING, SCAN_HW_SCANNING, + SCAN_ONCHANNEL_SCANNING, SCAN_COMPLETED, SCAN_ABORTED, }; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index ac79d5e8e0d0..b70f7f09da61 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -47,7 +47,8 @@ void ieee80211_configure_filter(struct ieee80211_local *local) if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; - if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning)) + if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) @@ -148,6 +149,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) } if (test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || test_bit(SCAN_HW_SCANNING, &local->scanning)) power = chan->max_power; else diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 54a049123a60..dd2fbec23eeb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -425,6 +425,7 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) if (test_bit(SCAN_HW_SCANNING, &local->scanning) || test_bit(SCAN_SW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || local->sched_scanning) return ieee80211_scan_rx(rx->sdata, skb); @@ -2915,6 +2916,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, local->dot11ReceivedFragmentCount++; if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || + test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || test_bit(SCAN_SW_SCANNING, &local->scanning))) status->rx_flags |= IEEE80211_RX_IN_SCAN; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 45f5aa229efd..8282284f835c 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -401,6 +401,30 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local) round_jiffies_relative(0)); } +static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, + unsigned long *next_delay) +{ + int i; + struct ieee80211_sub_if_data *sdata = local->scan_sdata; + enum ieee80211_band band = local->hw.conf.channel->band; + + for (i = 0; i < local->scan_req->n_ssids; i++) + ieee80211_send_probe_req( + sdata, NULL, + local->scan_req->ssids[i].ssid, + local->scan_req->ssids[i].ssid_len, + local->scan_req->ie, local->scan_req->ie_len, + local->scan_req->rates[band], false, + local->scan_req->no_cck); + + /* + * After sending probe requests, wait for probe responses + * 
on the channel. + */ + *next_delay = IEEE80211_CHANNEL_TIME; + local->next_scan_state = SCAN_DECISION; +} + static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) { @@ -451,10 +475,47 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, local->scan_req = req; local->scan_sdata = sdata; - if (local->ops->hw_scan) + if (local->ops->hw_scan) { __set_bit(SCAN_HW_SCANNING, &local->scanning); - else + } else if ((req->n_channels == 1) && + (req->channels[0]->center_freq == + local->hw.conf.channel->center_freq)) { + + /* If we are scanning only on the current channel, then + * we do not need to stop normal activities + */ + unsigned long next_delay; + + __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); + + ieee80211_recalc_idle(local); + + /* Notify driver scan is starting, keep order of operations + * same as normal software scan, in case that matters. */ + drv_sw_scan_start(local); + + ieee80211_configure_filter(local); /* accept probe-responses */ + + /* We need to ensure power level is at max for scanning. */ + ieee80211_hw_config(local, 0); + + if ((req->channels[0]->flags & + IEEE80211_CHAN_PASSIVE_SCAN) || + !local->scan_req->n_ssids) { + next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; + } else { + ieee80211_scan_state_send_probe(local, &next_delay); + next_delay = IEEE80211_CHANNEL_TIME; + } + + /* Now, just wait a bit and we are all done! */ + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, + next_delay); + return 0; + } else { + /* Do normal software scan */ __set_bit(SCAN_SW_SCANNING, &local->scanning); + } ieee80211_recalc_idle(local); @@ -611,30 +672,6 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, local->next_scan_state = SCAN_SEND_PROBE; } -static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, - unsigned long *next_delay) -{ - int i; - struct ieee80211_sub_if_data *sdata = local->scan_sdata; - enum ieee80211_band band = local->hw.conf.channel->band; - - for (i = 0; i < local->scan_req->n_ssids; i++) - ieee80211_send_probe_req( - sdata, NULL, - local->scan_req->ssids[i].ssid, - local->scan_req->ssids[i].ssid_len, - local->scan_req->ie, local->scan_req->ie_len, - local->scan_req->rates[band], false, - local->scan_req->no_cck); - - /* - * After sending probe requests, wait for probe responses - * on the channel. - */ - *next_delay = IEEE80211_CHANNEL_TIME; - local->next_scan_state = SCAN_DECISION; -} - static void ieee80211_scan_state_suspend(struct ieee80211_local *local, unsigned long *next_delay) { @@ -685,6 +722,12 @@ void ieee80211_scan_work(struct work_struct *work) sdata = local->scan_sdata; + /* When scanning on-channel, the first-callback means completed. */ + if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { + aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); + goto out_complete; + } + if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); goto out_complete; -- cgit v1.2.2 From 54ab1ffb6cd94e5c013d61c192e78e30fdf25f8a Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Wed, 18 Apr 2012 19:23:42 -0700 Subject: mac80211: refactor mesh peer initialization This patch unifies the previous two paths toward mesh peer creation a bit. It also fixes a bug where a peer's changing rates or HT mode wouldn't register on leaving and then returning to the mesh with a sta entry still present. Also clean up locking and clear possibly stale ht cap. 
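In outline, the consolidated helper introduced below boils down to roughly the following (a condensed sketch of the hunks in this patch, with RCU handling and error paths trimmed; not the literal code):

	static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
					       u8 *addr, u32 rates,
					       struct ieee802_11_elems *elems)
	{
		struct ieee80211_local *local = sdata->local;
		struct ieee80211_supported_band *sband =
			local->hw.wiphy->bands[local->oper_channel->band];
		struct sta_info *sta;

		sta = sta_info_get(sdata, addr);
		if (!sta) {
			/* mesh_plink_alloc() now allocates and inserts the entry */
			sta = mesh_plink_alloc(sdata, addr);
			if (!sta)
				return NULL;
		}

		spin_lock_bh(&sta->lock);
		sta->last_rx = jiffies;
		/* always refreshed, so a returning peer picks up new rates/HT caps */
		sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
		if (elems->ht_cap_elem)
			ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
							  elems->ht_cap_elem,
							  &sta->sta.ht_cap);
		else
			memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
		rate_control_rate_init(sta);
		spin_unlock_bh(&sta->lock);

		return sta;
	}

Both the beacon/probe-response path (mesh_neighbour_update()) and the accepted peering-frame path in mesh_rx_plink_frame() now funnel through this one helper, which is what makes the rate and HT-capability refresh apply to peers that leave and later return.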
Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/mesh_plink.c | 126 ++++++++++++++++++++++++++-------------------- 1 file changed, 72 insertions(+), 54 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 9c836e774fbd..c3a0b0a4f97f 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -82,20 +82,14 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) } /* - * NOTE: This is just an alias for sta_info_alloc(), see notes - * on it in the lifecycle management section! + * Allocate mesh sta entry and insert into station table */ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, - u8 *hw_addr, u32 rates, - struct ieee802_11_elems *elems) + u8 *hw_addr) { - struct ieee80211_local *local = sdata->local; - struct ieee80211_supported_band *sband; struct sta_info *sta; - sband = local->hw.wiphy->bands[local->oper_channel->band]; - - if (local->num_sta >= MESH_MAX_PLINKS) + if (sdata->local->num_sta >= MESH_MAX_PLINKS) return NULL; sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); @@ -108,12 +102,8 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, set_sta_flag(sta, WLAN_STA_WME); - sta->sta.supp_rates[local->hw.conf.channel->band] = rates; - if (elems->ht_cap_elem) - ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, - elems->ht_cap_elem, - &sta->sta.ht_cap); - rate_control_rate_init(sta); + if (sta_info_insert(sta)) + return NULL; return sta; } @@ -274,43 +264,76 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, return 0; } -void mesh_neighbour_update(u8 *hw_addr, u32 rates, - struct ieee80211_sub_if_data *sdata, - struct ieee802_11_elems *elems) +/* mesh_peer_init - initialize new mesh peer and return resulting sta_info + * + * @sdata: local meshif + * @addr: peer's address + * @rates: station's supported rates + * @elems: IEs from beacon or mesh peering frame + * + * call under RCU + */ +static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, + u8 *addr, u32 rates, + struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; struct sta_info *sta; - rcu_read_lock(); + sband = local->hw.wiphy->bands[local->oper_channel->band]; - sta = sta_info_get(sdata, hw_addr); + sta = sta_info_get(sdata, addr); if (!sta) { - rcu_read_unlock(); - /* Userspace handles peer allocation when security is enabled - * */ - if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) - cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr, - elems->ie_start, elems->total_len, - GFP_KERNEL); - else - sta = mesh_plink_alloc(sdata, hw_addr, rates, elems); + sta = mesh_plink_alloc(sdata, addr); if (!sta) - return; - if (sta_info_insert_rcu(sta)) { - rcu_read_unlock(); - return; - } + return NULL; } + spin_lock_bh(&sta->lock); sta->last_rx = jiffies; sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + if (elems->ht_cap_elem) + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + elems->ht_cap_elem, + &sta->sta.ht_cap); + else + memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); + + rate_control_rate_init(sta); + spin_unlock_bh(&sta->lock); + + return sta; +} + +void mesh_neighbour_update(u8 *hw_addr, u32 rates, + struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems) +{ + struct sta_info *sta; + + /* Userspace handles peer allocation when security is enabled */ + if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) { + 
cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr, + elems->ie_start, + elems->total_len, + GFP_KERNEL); + return; + } + + rcu_read_lock(); + sta = mesh_peer_init(sdata, hw_addr, rates, elems); + if (!sta) + goto out; + if (mesh_peer_accepts_plinks(elems) && - sta->plink_state == NL80211_PLINK_LISTEN && - sdata->u.mesh.accepting_plinks && - sdata->u.mesh.mshcfg.auto_open_plinks && - rssi_threshold_check(sta, sdata)) + sta->plink_state == NL80211_PLINK_LISTEN && + sdata->u.mesh.accepting_plinks && + sdata->u.mesh.mshcfg.auto_open_plinks && + rssi_threshold_check(sta, sdata)) mesh_plink_open(sta); +out: rcu_read_unlock(); } @@ -587,26 +610,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m return; } else if (!sta) { /* ftype == WLAN_SP_MESH_PEERING_OPEN */ - - rcu_read_unlock(); - if (!mesh_plink_free_count(sdata)) { mpl_dbg("Mesh plink error: no more free plinks\n"); - return; - } - sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems); - if (!sta) { - mpl_dbg("Mesh plink error: plink table full\n"); - return; - } - if (sta_info_insert_rcu(sta)) { rcu_read_unlock(); return; } event = OPN_ACPT; - spin_lock_bh(&sta->lock); } else if (matches_local) { - spin_lock_bh(&sta->lock); switch (ftype) { case WLAN_SP_MESH_PEERING_OPEN: if (!mesh_plink_free_count(sdata) || @@ -643,12 +653,19 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m break; default: mpl_dbg("Mesh plink: unknown frame subtype\n"); - spin_unlock_bh(&sta->lock); rcu_read_unlock(); return; } - } else { - spin_lock_bh(&sta->lock); + } + + if (event == OPN_ACPT) { + /* allocate sta entry if necessary and update info */ + sta = mesh_peer_init(sdata, mgmt->sa, rates, &elems); + if (!sta) { + mpl_dbg("Mesh plink: failed to init peer!\n"); + rcu_read_unlock(); + return; + } } mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", @@ -656,6 +673,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), event); reason = 0; + spin_lock_bh(&sta->lock); switch (sta->plink_state) { /* spin_unlock as soon as state is updated at each case */ case NL80211_PLINK_LISTEN: -- cgit v1.2.2 From f743ff4907fa5bc2b460f48ace831a560806a9fb Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Wed, 18 Apr 2012 19:23:43 -0700 Subject: mac80211: refactor mesh peer rate handling To avoid passing supp_rates and basic_rates around all the time, just derive these when needed in mesh_matches_local() and mesh_peer_init(). Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/mesh.c | 19 +++++++++---------- net/mac80211/mesh.h | 10 +++++----- net/mac80211/mesh_plink.c | 25 +++++++++++-------------- 3 files changed, 25 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 133c118526fb..598a96a3a051 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -64,18 +64,18 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) /** * mesh_matches_local - check if the config of a mesh point matches ours * - * @ie: information elements of a management frame from the mesh peer * @sdata: local mesh subif - * @basic_rates: BSSBasicRateSet of the peer candidate + * @ie: information elements of a management frame from the mesh peer * * This function checks if the mesh configuration of a mesh point matches the * local mesh configuration, i.e. if both nodes belong to the same mesh network. 
*/ -bool mesh_matches_local(struct ieee802_11_elems *ie, - struct ieee80211_sub_if_data *sdata, u32 basic_rates) +bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *ie) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; + u32 basic_rates = 0; /* * As support for each feature is added, check for matching @@ -96,6 +96,9 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) goto mismatch; + ieee80211_sta_get_rates(local, ie, local->oper_channel->band, + &basic_rates); + if (sdata->vif.bss_conf.basic_rates != basic_rates) goto mismatch; @@ -630,7 +633,6 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee802_11_elems elems; struct ieee80211_channel *channel; - u32 supp_rates = 0, basic_rates = 0; size_t baselen; int freq; enum ieee80211_band band = rx_status->band; @@ -661,12 +663,9 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) return; - supp_rates = ieee80211_sta_get_rates(local, &elems, - band, &basic_rates); - if (elems.mesh_id && elems.mesh_config && - mesh_matches_local(&elems, sdata, basic_rates)) - mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); + mesh_matches_local(sdata, &elems)) + mesh_neighbour_update(sdata, mgmt->sa, &elems); if (ifmsh->sync_ops) ifmsh->sync_ops->rx_bcn_presp(sdata, diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 4ad738988801..345f0e7d518e 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -222,8 +222,8 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, char *addr6); int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, struct ieee80211_sub_if_data *sdata); -bool mesh_matches_local(struct ieee802_11_elems *ie, - struct ieee80211_sub_if_data *sdata, u32 basic_rates); +bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *ie); void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); @@ -276,9 +276,9 @@ int mesh_path_add_gate(struct mesh_path *mpath); int mesh_path_send_to_gates(struct mesh_path *mpath); int mesh_gate_num(struct ieee80211_sub_if_data *sdata); /* Mesh plinks */ -void mesh_neighbour_update(u8 *hw_addr, u32 rates, - struct ieee80211_sub_if_data *sdata, - struct ieee802_11_elems *ie); +void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, + struct ieee802_11_elems *ie); bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); void mesh_plink_broken(struct sta_info *sta); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index c3a0b0a4f97f..c2af7b3d03cd 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -268,20 +268,22 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, * * @sdata: local meshif * @addr: peer's address - * @rates: station's supported rates * @elems: IEs from beacon or mesh peering frame * * call under RCU */ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, - u8 *addr, u32 rates, + u8 *addr, struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; + enum ieee80211_band band = local->oper_channel->band; struct ieee80211_supported_band *sband; + u32 
rates, basic_rates = 0; struct sta_info *sta; - sband = local->hw.wiphy->bands[local->oper_channel->band]; + sband = local->hw.wiphy->bands[band]; + rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates); sta = sta_info_get(sdata, addr); if (!sta) { @@ -292,7 +294,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, spin_lock_bh(&sta->lock); sta->last_rx = jiffies; - sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + sta->sta.supp_rates[band] = rates; if (elems->ht_cap_elem) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, @@ -306,8 +308,8 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, return sta; } -void mesh_neighbour_update(u8 *hw_addr, u32 rates, - struct ieee80211_sub_if_data *sdata, +void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, struct ieee802_11_elems *elems) { struct sta_info *sta; @@ -322,7 +324,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, } rcu_read_lock(); - sta = mesh_peer_init(sdata, hw_addr, rates, elems); + sta = mesh_peer_init(sdata, hw_addr, elems); if (!sta) goto out; @@ -479,7 +481,6 @@ void mesh_plink_block(struct sta_info *sta) void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { - struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct sta_info *sta; enum plink_event event; @@ -488,7 +489,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m bool deactivated, matches_local = true; u8 ie_len; u8 *baseaddr; - u32 rates, basic_rates = 0; __le16 plid, llid, reason; #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG static const char *mplstates[] = { @@ -583,11 +583,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m /* Now we will figure out the appropriate event... */ event = PLINK_UNDEFINED; - rates = ieee80211_sta_get_rates(local, &elems, - rx_status->band, &basic_rates); - if (ftype != WLAN_SP_MESH_PEERING_CLOSE && - (!mesh_matches_local(&elems, sdata, basic_rates))) { + !mesh_matches_local(sdata, &elems)) { matches_local = false; switch (ftype) { case WLAN_SP_MESH_PEERING_OPEN: @@ -660,7 +657,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m if (event == OPN_ACPT) { /* allocate sta entry if necessary and update info */ - sta = mesh_peer_init(sdata, mgmt->sa, rates, &elems); + sta = mesh_peer_init(sdata, mgmt->sa, &elems); if (!sta) { mpl_dbg("Mesh plink: failed to init peer!\n"); rcu_read_unlock(); -- cgit v1.2.2 From e76781e48f969e044d318485274b9574f1ccc3dd Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Wed, 18 Apr 2012 19:24:13 -0700 Subject: mac80211: don't set mesh peer ht caps if ht disabled Blindly setting ht caps on a mesh peer's station entry would result in MCS rates being used by the rate control algorithm even if no ht had been configured. Fix this by checking the channel type before assigning ht capabilites. Signed-off-by: Thomas Pedersen Signed-off-by: John W. 
Linville --- net/mac80211/mesh_plink.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index c2af7b3d03cd..1ff2a5c63e43 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -295,7 +295,8 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, spin_lock_bh(&sta->lock); sta->last_rx = jiffies; sta->sta.supp_rates[band] = rates; - if (elems->ht_cap_elem) + if (elems->ht_cap_elem && + sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT) ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, &sta->sta.ht_cap); -- cgit v1.2.2 From aee286c2cf94929f90d4d1f64ee9b316007ba284 Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Wed, 18 Apr 2012 19:24:14 -0700 Subject: mac80211: fix STA channel width field According to IEEE 802.11 8.4.2.59, set the "STA channel width" bit to 0 if the transmitting STA is using a 20 MHz channel. Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/util.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 89c1e5b9ba94..577942bfc024 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1684,7 +1684,9 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; break; } - if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && + channel_type != NL80211_CHAN_NO_HT && + channel_type != NL80211_CHAN_HT20) ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; /* -- cgit v1.2.2 From 6ac95b57657d1bf5776f29a1697c123f62d5a58a Mon Sep 17 00:00:00 2001 From: Javier Cardona Date: Fri, 20 Apr 2012 09:52:56 -0700 Subject: mac80211: fixup for mesh TSF adjustment latency in Toffset setpoint The original patch defined the correction margin but did not apply it. Signed-off-by: Shinichi Hotori Signed-off-by: Yu Niiro Signed-off-by: Javier Cardona Signed-off-by: John W. Linville --- net/mac80211/mesh_sync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index ff60d6bcc631..38d30e8ce6dc 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -195,7 +195,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, spin_unlock_bh(&ifmsh->sync_offset_lock); } else { - sta->t_offset_setpoint = sta->t_offset; + sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN; set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); msync_dbg("STA %pM : offset was invalid, " " sta->t_offset=%lld", -- cgit v1.2.2 From 0d8a0a17288e419c2e5e9ce18c8b66b390eb7e23 Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Fri, 20 Apr 2012 11:57:00 -0700 Subject: mac80211: declare ieee80211_ave_rssi as EXPORT ieee80211_ave_rssi needs to be declared as an export so that drivers can use it. Signed-off-by: Wey-Yi Guy Signed-off-by: John W.
Linville --- net/mac80211/util.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 577942bfc024..8ba8b49c9531 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1803,3 +1803,4 @@ int ieee80211_ave_rssi(struct ieee80211_vif *vif) return ifmgd->ave_beacon_signal; } +EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); -- cgit v1.2.2 From 218d2e26dceb241e4b253c5c4702ee1b38b715cc Mon Sep 17 00:00:00 2001 From: Bala Shanmugam Date: Fri, 20 Apr 2012 19:12:58 +0530 Subject: cfg80211: Validate legacy rateset. Legacy rates are not validated while configuring tx rateset using iw. So below cmd is accepted by nl80211. sudo iw wlan2 set bitrates legacy-2.4 1 2 3 Validate legacy rates and return error if any rate in the rateset is not valid. Signed-off-by: Bala Shanmugam Signed-off-by: John W. Linville --- net/wireless/nl80211.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index bcf6f70e518d..140c1d291d4e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5577,6 +5577,9 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, sband, nla_data(tb[NL80211_TXRATE_LEGACY]), nla_len(tb[NL80211_TXRATE_LEGACY])); + if ((mask.control[band].legacy == 0) && + nla_len(tb[NL80211_TXRATE_LEGACY])) + return -EINVAL; } if (tb[NL80211_TXRATE_MCS]) { if (!ht_rateset_to_mask( -- cgit v1.2.2 From f545a38f74584cc7424cb74f792a00c6d2589485 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Apr 2012 23:34:26 +0000 Subject: net: add a limit parameter to sk_add_backlog() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sk_add_backlog() & sk_rcvqueues_full() hard coded sk_rcvbuf as the memory limit. We need to make this limit a parameter for TCP use. No functional change expected in this patch, all callers still using the old sk_rcvbuf limit. Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Tom Herbert Cc: Maciej Żenczykowski Cc: Yuchung Cheng Cc: Ilpo Järvinen Cc: Rick Jones Signed-off-by: David S. 
Miller --- net/core/sock.c | 4 ++-- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/udp.c | 4 ++-- net/ipv6/tcp_ipv6.c | 2 +- net/ipv6/udp.c | 8 ++++---- net/llc/llc_conn.c | 2 +- net/sctp/input.c | 4 ++-- net/tipc/socket.c | 2 +- net/x25/x25_dev.c | 2 +- 9 files changed, 15 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 679c5bbe2bed..0431aaf7473a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -389,7 +389,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) skb->dev = NULL; - if (sk_rcvqueues_full(sk, skb)) { + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { atomic_inc(&sk->sk_drops); goto discard_and_relse; } @@ -406,7 +406,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) rc = sk_backlog_rcv(sk, skb); mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); - } else if (sk_add_backlog(sk, skb)) { + } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); atomic_inc(&sk->sk_drops); goto discard_and_relse; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0883921b20c1..917607e9bd5b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1752,7 +1752,7 @@ process: if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } - } else if (unlikely(sk_add_backlog(sk, skb))) { + } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 3430e8fc18de..279fd0846302 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto drop; - if (sk_rcvqueues_full(sk, skb)) + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) goto drop; rc = 0; @@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb)) { + else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8044f6ac1301..b04e6d8a8371 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1654,7 +1654,7 @@ process: if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); } - } else if (unlikely(sk_add_backlog(sk, skb))) { + } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 37b0699e95e5..d39bbc9e0622 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -611,14 +611,14 @@ static void flush_stack(struct sock **stack, unsigned int count, sk = stack[i]; if (skb1) { - if (sk_rcvqueues_full(sk, skb1)) { + if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) { kfree_skb(skb1); goto drop; } bh_lock_sock(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb1); - else if (sk_add_backlog(sk, skb1)) { + else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) { kfree_skb(skb1); bh_unlock_sock(sk); goto drop; @@ -790,14 +790,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, /* deliver */ - if (sk_rcvqueues_full(sk, skb)) { + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { sock_put(sk); goto discard; } bh_lock_sock(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb)) { + else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { atomic_inc(&sk->sk_drops); bh_unlock_sock(sk); 
sock_put(sk); diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index ba137a6a224d..0d0d416dfab6 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) else { dprintk("%s: adding to backlog...\n", __func__); llc_set_backlog_type(skb, LLC_PACKET); - if (sk_add_backlog(sk, skb)) + if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) goto drop_unlock; } out: diff --git a/net/sctp/input.c b/net/sctp/input.c index 80f71af71384..80564fe03024 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) { - if (sk_add_backlog(sk, skb)) + if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) sctp_chunk_free(chunk); else backloged = 1; @@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) struct sctp_ep_common *rcvr = chunk->rcvr; int ret; - ret = sk_add_backlog(sk, skb); + ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf); if (!ret) { /* Hold the assoc/ep while hanging on the backlog queue. * This way, we know structures we need will not disappear diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c19fc4a228a8..6d4991e8f670 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1330,7 +1330,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) if (!sock_owned_by_user(sk)) { res = filter_rcv(sk, buf); } else { - if (sk_add_backlog(sk, buf)) + if (sk_add_backlog(sk, buf, sk->sk_rcvbuf)) res = TIPC_ERR_OVERLOAD; else res = TIPC_OK; diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index f0ce862d1f46..a8a236338e61 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { - queued = !sk_add_backlog(sk, skb); + queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); } bh_unlock_sock(sk); sock_put(sk); -- cgit v1.2.2 From da882c1f2ecadb0ed582628ec1585e36b137c0f0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Apr 2012 23:38:54 +0000 Subject: tcp: sk_add_backlog() is too aggressive for TCP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While investigating TCP performance problems on 10Gb+ links, we found a TCP sender was dropping a lot of incoming ACKs because of the sk_rcvbuf limit in sk_add_backlog(), especially if the receiver doesn't use GRO/LRO and sends one ACK every two MSS segments. A sender usually tweaks sk_sndbuf, but sk_rcvbuf stays at its default value (87380), allowing too small a backlog. A TCP ACK, even though small, can consume nearly the same truesize space as an outgoing packet. Using sk_rcvbuf + sk_sndbuf as a limit makes sense and is fast to compute. Performance results on netperf, single flow, receiver with GRO/LRO disabled: 7500 Mbits instead of 6050 Mbits, and no more TCPBacklogDrop increments at the sender. Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Tom Herbert Cc: Maciej Żenczykowski Cc: Yuchung Cheng Cc: Ilpo Järvinen Cc: Rick Jones Signed-off-by: David S.
Miller --- net/ipv4/tcp_ipv4.c | 3 ++- net/ipv6/tcp_ipv6.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 917607e9bd5b..cf97e9821d76 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1752,7 +1752,8 @@ process: if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } - } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) { + } else if (unlikely(sk_add_backlog(sk, skb, + sk->sk_rcvbuf + sk->sk_sndbuf))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b04e6d8a8371..5fb19d345cfd 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1654,7 +1654,8 @@ process: if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); } - } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) { + } else if (unlikely(sk_add_backlog(sk, skb, + sk->sk_rcvbuf + sk->sk_sndbuf))) { bh_unlock_sock(sk); NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); goto discard_and_relse; -- cgit v1.2.2 From 1402d366019fedaa2b024f2bac06b7cc9a8782e1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 Apr 2012 07:11:42 +0000 Subject: tcp: introduce tcp_try_coalesce MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c8628155ece3 (tcp: reduce out_of_order memory use) took care of coalescing tcp segments provided by legacy devices (linear skbs) We extend this idea to fragged skbs, as their truesize can be heavy. ixgbe for example uses 256+1024+PAGE_SIZE/2 = 3328 bytes per segment. Use this coalescing strategy for receive queue too. This contributes to reduce number of tcp collapses, at minimal cost, and reduces memory overhead and packets drops. Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Tom Herbert Cc: Maciej Żenczykowski Cc: Ilpo Järvinen Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 79 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 37e1c5cd2c01..bd7aef59c385 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4449,6 +4449,58 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) return 0; } +/** + * tcp_try_coalesce - try to merge skb to prior one + * @sk: socket + * @to: prior buffer + * @from: buffer to add in queue + * + * Before queueing skb @from after @to, try to merge them + * to reduce overall memory use and queue lengths, if cost is small. + * Packets in ofo or receive queues can stay a long time. + * Better try to coalesce them right now to avoid future collapses. 
+ * Returns > 0 value if caller should free @from instead of queueing it + */ +static int tcp_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from) +{ + int len = from->len; + + if (tcp_hdr(from)->fin) + return 0; + if (len <= skb_tailroom(to)) { + BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); +merge: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); + TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; + TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; + return 1; + } + if (skb_headlen(from) == 0 && + !skb_has_frag_list(to) && + !skb_has_frag_list(from) && + (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags <= MAX_SKB_FRAGS)) { + int delta = from->truesize - ksize(from->head) - + SKB_DATA_ALIGN(sizeof(struct sk_buff)); + + WARN_ON_ONCE(delta < len); + memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, + skb_shinfo(from)->frags, + skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); + skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; + skb_shinfo(from)->nr_frags = 0; + to->truesize += delta; + atomic_add(delta, &sk->sk_rmem_alloc); + sk_mem_charge(sk, delta); + to->len += len; + to->data_len += len; + goto merge; + } + return 0; +} + static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); @@ -4487,23 +4539,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { - /* Packets in ofo can stay in queue a long time. - * Better try to coalesce them right now - * to avoid future tcp_collapse_ofo_queue(), - * probably the most expensive function in tcp stack. - */ - if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) { - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPRCVCOALESCE); - BUG_ON(skb_copy_bits(skb, 0, - skb_put(skb1, skb->len), - skb->len)); - TCP_SKB_CB(skb1)->end_seq = end_seq; - TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq; + if (tcp_try_coalesce(sk, skb1, skb) <= 0) { + __skb_queue_after(&tp->out_of_order_queue, skb1, skb); + } else { __kfree_skb(skb); skb = NULL; - } else { - __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } if (!tp->rx_opt.num_sacks || @@ -4624,13 +4664,18 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) } if (eaten <= 0) { + struct sk_buff *tail; queue_and_out: if (eaten < 0 && tcp_try_rmem_schedule(sk, skb->truesize)) goto drop; - skb_set_owner_r(skb, sk); - __skb_queue_tail(&sk->sk_receive_queue, skb); + tail = skb_peek_tail(&sk->sk_receive_queue); + eaten = tail ? tcp_try_coalesce(sk, tail, skb) : -1; + if (eaten <= 0) { + skb_set_owner_r(skb, sk); + __skb_queue_tail(&sk->sk_receive_queue, skb); + } } tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) -- cgit v1.2.2 From 41c73a0d44c902e92397552acce181295eaa448b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Apr 2012 12:26:16 +0000 Subject: net: speedup skb_splice_bits() Commit 35f3d14db (pipe: add support for shrinking and growing pipes) added a slowdown for splice(socket -> pipe), as we might grow the spd used in skb_splice_bits() for each skb we process in splice() syscall. Its not needed since skb lengths are capped. The default on-stack arrays are more than enough. Use MAX_SKB_FRAGS instead of PIPE_DEF_BUFFERS to describe the reasonable limit per skb. Add coalescing support to help splicing of GRO skbs built from linear skbs (linked into frag_list) Signed-off-by: Eric Dumazet Cc: Jens Axboe Cc: Tom Herbert Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index bf257de95d26..dfb304066f22 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1581,11 +1581,20 @@ new_page: memcpy(page_address(p) + off, page_address(page) + *offset, *len); sk->sk_sndmsg_off += *len; *offset = off; - get_page(p); return p; } +static bool spd_can_coalesce(const struct splice_pipe_desc *spd, + struct page *page, + unsigned int offset) +{ + return spd->nr_pages && + spd->pages[spd->nr_pages - 1] == page && + (spd->partial[spd->nr_pages - 1].offset + + spd->partial[spd->nr_pages - 1].len == offset); +} + /* * Fill page/offset/length into spd, if it can hold more pages. */ @@ -1595,16 +1604,19 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct sk_buff *skb, int linear, struct sock *sk) { - if (unlikely(spd->nr_pages == pipe->buffers)) + if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) return 1; if (linear) { page = linear_to_page(page, len, &offset, skb, sk); if (!page) return 1; - } else - get_page(page); - + } + if (spd_can_coalesce(spd, page, offset)) { + spd->partial[spd->nr_pages - 1].len += *len; + return 0; + } + get_page(page); spd->pages[spd->nr_pages] = page; spd->partial[spd->nr_pages].len = *len; spd->partial[spd->nr_pages].offset = offset; @@ -1710,8 +1722,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) { - struct partial_page partial[PIPE_DEF_BUFFERS]; - struct page *pages[PIPE_DEF_BUFFERS]; + struct partial_page partial[MAX_SKB_FRAGS]; + struct page *pages[MAX_SKB_FRAGS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, @@ -1723,9 +1735,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct sock *sk = skb->sk; int ret = 0; - if (splice_grow_spd(pipe, &spd)) - return -ENOMEM; - /* * __skb_splice_bits() only fails if the output has no room left, * so no point in going over the frag_list for the error case. @@ -1761,7 +1770,6 @@ done: lock_sock(sk); } - splice_shrink_spd(pipe, &spd); return ret; } -- cgit v1.2.2 From a108d5f35adc5c5d5cdc882dc0bb920565551bff Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 23 Apr 2012 23:06:11 -0400 Subject: net: Use bool and remove inline in skb_splice_bits() code. Signed-off-by: David S. Miller --- net/core/skbuff.c | 58 +++++++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dfb304066f22..aaf4abc4417d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1547,9 +1547,9 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) put_page(spd->pages[i]); } -static inline struct page *linear_to_page(struct page *page, unsigned int *len, - unsigned int *offset, - struct sk_buff *skb, struct sock *sk) +static struct page *linear_to_page(struct page *page, unsigned int *len, + unsigned int *offset, + struct sk_buff *skb, struct sock *sk) { struct page *p = sk->sk_sndmsg_page; unsigned int off; @@ -1598,23 +1598,23 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd, /* * Fill page/offset/length into spd, if it can hold more pages. 
*/ -static inline int spd_fill_page(struct splice_pipe_desc *spd, - struct pipe_inode_info *pipe, struct page *page, - unsigned int *len, unsigned int offset, - struct sk_buff *skb, int linear, - struct sock *sk) +static bool spd_fill_page(struct splice_pipe_desc *spd, + struct pipe_inode_info *pipe, struct page *page, + unsigned int *len, unsigned int offset, + struct sk_buff *skb, int linear, + struct sock *sk) { if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) - return 1; + return true; if (linear) { page = linear_to_page(page, len, &offset, skb, sk); if (!page) - return 1; + return true; } if (spd_can_coalesce(spd, page, offset)) { spd->partial[spd->nr_pages - 1].len += *len; - return 0; + return false; } get_page(page); spd->pages[spd->nr_pages] = page; @@ -1622,7 +1622,7 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd, spd->partial[spd->nr_pages].offset = offset; spd->nr_pages++; - return 0; + return false; } static inline void __segment_seek(struct page **page, unsigned int *poff, @@ -1639,20 +1639,20 @@ static inline void __segment_seek(struct page **page, unsigned int *poff, *plen -= off; } -static inline int __splice_segment(struct page *page, unsigned int poff, - unsigned int plen, unsigned int *off, - unsigned int *len, struct sk_buff *skb, - struct splice_pipe_desc *spd, int linear, - struct sock *sk, - struct pipe_inode_info *pipe) +static bool __splice_segment(struct page *page, unsigned int poff, + unsigned int plen, unsigned int *off, + unsigned int *len, struct sk_buff *skb, + struct splice_pipe_desc *spd, int linear, + struct sock *sk, + struct pipe_inode_info *pipe) { if (!*len) - return 1; + return true; /* skip this segment if already processed */ if (*off >= plen) { *off -= plen; - return 0; + return false; } /* ignore any bits we already processed */ @@ -1668,23 +1668,23 @@ static inline int __splice_segment(struct page *page, unsigned int poff, flen = min_t(unsigned int, flen, PAGE_SIZE - poff); if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) - return 1; + return true; __segment_seek(&page, &poff, &plen, flen); *len -= flen; } while (*len && plen); - return 0; + return false; } /* - * Map linear and fragment data from the skb to spd. It reports failure if the + * Map linear and fragment data from the skb to spd. It reports true if the * pipe is full or if we already spliced the requested length. */ -static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, - unsigned int *offset, unsigned int *len, - struct splice_pipe_desc *spd, struct sock *sk) +static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, + unsigned int *offset, unsigned int *len, + struct splice_pipe_desc *spd, struct sock *sk) { int seg; @@ -1695,7 +1695,7 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), offset, len, skb, spd, 1, sk, pipe)) - return 1; + return true; /* * then map the fragments @@ -1706,10 +1706,10 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, if (__splice_segment(skb_frag_page(f), f->page_offset, skb_frag_size(f), offset, len, skb, spd, 0, sk, pipe)) - return 1; + return true; } - return 0; + return false; } /* -- cgit v1.2.2 From d7ccf7c0a0585a126109a4b7c2a309184bfa4cba Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 Apr 2012 23:35:04 -0400 Subject: net: make spd_fill_page() linear argument a bool Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index aaf4abc4417d..2342a7250391 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1601,7 +1601,7 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd, static bool spd_fill_page(struct splice_pipe_desc *spd, struct pipe_inode_info *pipe, struct page *page, unsigned int *len, unsigned int offset, - struct sk_buff *skb, int linear, + struct sk_buff *skb, bool linear, struct sock *sk) { if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) @@ -1642,7 +1642,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff, static bool __splice_segment(struct page *page, unsigned int poff, unsigned int plen, unsigned int *off, unsigned int *len, struct sk_buff *skb, - struct splice_pipe_desc *spd, int linear, + struct splice_pipe_desc *spd, bool linear, struct sock *sk, struct pipe_inode_info *pipe) { @@ -1694,7 +1694,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), - offset, len, skb, spd, 1, sk, pipe)) + offset, len, skb, spd, true, sk, pipe)) return true; /* @@ -1705,7 +1705,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, if (__splice_segment(skb_frag_page(f), f->page_offset, skb_frag_size(f), - offset, len, skb, spd, 0, sk, pipe)) + offset, len, skb, spd, false, sk, pipe)) return true; } -- cgit v1.2.2 From 783c175f902b1ae011f12de45770e7912638ea1a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 Apr 2012 17:34:36 +0000 Subject: tcp: tcp_try_coalesce returns a boolean This clarifies code intention, as suggested by David. Suggested-by: David Miller Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c1c611b385a7..c93b0cbb7fc1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4460,23 +4460,23 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) * to reduce overall memory use and queue lengths, if cost is small. * Packets in ofo or receive queues can stay a long time. * Better try to coalesce them right now to avoid future collapses. 
- * Returns > 0 value if caller should free @from instead of queueing it + * Returns true if caller should free @from instead of queueing it */ -static int tcp_try_coalesce(struct sock *sk, - struct sk_buff *to, - struct sk_buff *from) +static bool tcp_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from) { int len = from->len; if (tcp_hdr(from)->fin) - return 0; + return false; if (len <= skb_tailroom(to)) { BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); merge: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; - return 1; + return true; } if (skb_headlen(from) == 0 && !skb_has_frag_list(to) && @@ -4499,7 +4499,7 @@ merge: to->data_len += len; goto merge; } - return 0; + return false; } static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) @@ -4540,7 +4540,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { - if (tcp_try_coalesce(sk, skb1, skb) <= 0) { + if (!tcp_try_coalesce(sk, skb1, skb)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } else { __kfree_skb(skb); @@ -4672,7 +4672,7 @@ queue_and_out: goto drop; tail = skb_peek_tail(&sk->sk_receive_queue); - eaten = tail ? tcp_try_coalesce(sk, tail, skb) : -1; + eaten = (tail && tcp_try_coalesce(sk, tail, skb)) ? 1 : 0; if (eaten <= 0) { skb_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); -- cgit v1.2.2 From 38ba0a65faf451dd46c7860b4fade84c0b8e444f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 Apr 2012 17:48:27 +0000 Subject: net: skb_can_coalesce returns a boolean Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bcc4eab5f251..de6a238f0e1d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -848,9 +848,10 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse while (psize > 0) { struct sk_buff *skb = tcp_write_queue_tail(sk); struct page *page = pages[poffset / PAGE_SIZE]; - int copy, i, can_coalesce; + int copy, i; int offset = poffset % PAGE_SIZE; int size = min_t(size_t, psize, PAGE_SIZE - offset); + bool can_coalesce; if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { new_segment: -- cgit v1.2.2 From 658cb354edf0f16cc5a2a2bda3ec5be1bba25e6d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Apr 2012 21:30:21 +0000 Subject: af_netlink: cleanups netlink_destroy_callback() move to avoid forward reference CodingStyle cleanups Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/netlink/af_netlink.c | 61 ++++++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index da8721443503..85d27f9f0e67 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -104,27 +104,27 @@ static inline int netlink_is_kernel(struct sock *sk) } struct nl_pid_hash { - struct hlist_head *table; - unsigned long rehash_time; + struct hlist_head *table; + unsigned long rehash_time; - unsigned int mask; - unsigned int shift; + unsigned int mask; + unsigned int shift; - unsigned int entries; - unsigned int max_shift; + unsigned int entries; + unsigned int max_shift; - u32 rnd; + u32 rnd; }; struct netlink_table { - struct nl_pid_hash hash; - struct hlist_head mc_list; - struct listeners __rcu *listeners; - unsigned int nl_nonroot; - unsigned int groups; - struct mutex *cb_mutex; - struct module *module; - int registered; + struct nl_pid_hash hash; + struct hlist_head mc_list; + struct listeners __rcu *listeners; + unsigned int nl_nonroot; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int registered; }; static struct netlink_table *nl_table; @@ -132,7 +132,6 @@ static struct netlink_table *nl_table; static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); -static void netlink_destroy_callback(struct netlink_callback *cb); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); @@ -149,6 +148,12 @@ static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; } +static void netlink_destroy_callback(struct netlink_callback *cb) +{ + kfree_skb(cb->skb); + kfree(cb); +} + static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); @@ -414,9 +419,9 @@ static int __netlink_create(struct net *net, struct socket *sock, sock_init_data(sock, sk); nlk = nlk_sk(sk); - if (cb_mutex) + if (cb_mutex) { nlk->cb_mutex = cb_mutex; - else { + } else { nlk->cb_mutex = &nlk->cb_def_mutex; mutex_init(nlk->cb_mutex); } @@ -522,8 +527,9 @@ static int netlink_release(struct socket *sock) nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; } - } else if (nlk->subscriptions) + } else if (nlk->subscriptions) { netlink_update_listeners(sk); + } netlink_table_ungrab(); kfree(nlk->groups); @@ -1086,8 +1092,8 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, if (info.delivery_failure) { kfree_skb(info.skb2); return -ENOBUFS; - } else - consume_skb(info.skb2); + } + consume_skb(info.skb2); if (info.delivered) { if (info.congested && (allocation & __GFP_WAIT)) @@ -1240,8 +1246,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, nlk->flags |= NETLINK_RECV_NO_ENOBUFS; clear_bit(0, &nlk->state); wake_up_interruptible(&nlk->wait); - } else + } else { nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS; + } err = 0; break; default: @@ -1645,12 +1652,6 @@ void netlink_set_nonroot(int protocol, unsigned int flags) } EXPORT_SYMBOL(netlink_set_nonroot); -static void netlink_destroy_callback(struct netlink_callback *cb) -{ - kfree_skb(cb->skb); - kfree(cb); -} - struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) { @@ -1996,11 +1997,11 @@ static void netlink_seq_stop(struct seq_file *seq, void *v) static int netlink_seq_show(struct seq_file *seq, void *v) { - 
if (v == SEQ_START_TOKEN) + if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk Eth Pid Groups " "Rmem Wmem Dump Locks Drops Inode\n"); - else { + } else { struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); -- cgit v1.2.2 From bfb253c9b277acd9f85b1886ff82b1dd5fbff2ae Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Apr 2012 21:30:29 +0000 Subject: af_netlink: drop_monitor/dropwatch friendly Need to consume_skb() instead of kfree_skb() in netlink_dump() and netlink_unicast_kernel() to avoid false dropwatch positives. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/netlink/af_netlink.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 85d27f9f0e67..b3025a603d56 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -154,6 +154,12 @@ static void netlink_destroy_callback(struct netlink_callback *cb) kfree(cb); } +static void netlink_consume_callback(struct netlink_callback *cb) +{ + consume_skb(cb->skb); + kfree(cb); +} + static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); @@ -902,8 +908,10 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) ret = skb->len; skb_set_owner_r(skb, sk); nlk->netlink_rcv(skb); + consume_skb(skb); + } else { + kfree_skb(skb); } - kfree_skb(skb); sock_put(sk); return ret; } @@ -1728,7 +1736,7 @@ static int netlink_dump(struct sock *sk) nlk->cb = NULL; mutex_unlock(nlk->cb_mutex); - netlink_destroy_callback(cb); + netlink_consume_callback(cb); return 0; errout_skb: -- cgit v1.2.2 From 872f24dbc604ef585ea7eec73020dcdfaffd1956 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Mon, 23 Apr 2012 04:49:13 +0000 Subject: tipc: remove inline instances from C source files. Untie gcc's hands and let it do what it wants within the individual source files. There are two files, node.c and port.c -- only the latter effectively changes (gcc-4.5.2). Objdump shows gcc deciding to not inline port_peernode(). Suggested-by: David S. Miller Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller --- net/tipc/node.c | 2 +- net/tipc/port.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/tipc/node.c b/net/tipc/node.c index 6a71bea91db0..76565c9916ab 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -58,7 +58,7 @@ static atomic_t tipc_num_links = ATOMIC_INIT(0); * entries has been chosen so that no hash chain exceeds 8 nodes and will * usually be much smaller (typically only a single node). 
*/ -static inline unsigned int tipc_hashfn(u32 addr) +static unsigned int tipc_hashfn(u32 addr) { return addr & (NODE_HTABLE_SIZE - 1); } diff --git a/net/tipc/port.c b/net/tipc/port.c index 0f40b1055306..4aede40e592f 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -59,12 +59,12 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err); static void port_timeout(unsigned long ref); -static inline u32 port_peernode(struct tipc_port *p_ptr) +static u32 port_peernode(struct tipc_port *p_ptr) { return msg_destnode(&p_ptr->phdr); } -static inline u32 port_peerport(struct tipc_port *p_ptr) +static u32 port_peerport(struct tipc_port *p_ptr) { return msg_destport(&p_ptr->phdr); } -- cgit v1.2.2 From be6bcabc7919522f28c99642b8e04ef7b8e19283 Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Mon, 23 Apr 2012 09:30:32 -0700 Subject: mac80211: check for non-managed interface Average beacon signal only keep tracked by managed interface, give warning and return 0 for the others. Signed-off-by: Wey-Yi Guy Signed-off-by: John W. Linville --- net/mac80211/util.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8ba8b49c9531..d9a747d387f0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1801,6 +1801,10 @@ int ieee80211_ave_rssi(struct ieee80211_vif *vif) struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) { + /* non-managed type inferfaces */ + return 0; + } return ifmgd->ave_beacon_signal; } EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); -- cgit v1.2.2 From 030ef8f8a59c77d44cadeded6d3a5a12557774f4 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 23 Apr 2012 19:49:02 +0200 Subject: mac80211: rename AP variable num_sta_authorized to num_mcast_sta It is only used to test for BSS multicast receivers. Signed-off-by: Felix Fietkau Reviewed-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/mac80211/debugfs_netdev.c | 4 ++-- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/sta_info.c | 4 ++-- net/mac80211/tx.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index e7af5227e322..ea0122dbd2b3 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -394,7 +394,7 @@ static ssize_t ieee80211_if_parse_uapsd_max_sp_len( __IEEE80211_IF_FILE_W(uapsd_max_sp_len); /* AP attributes */ -IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC); +IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); @@ -540,7 +540,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) static void add_ap_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(num_sta_authorized); + DEBUGFS_ADD(num_mcast_sta); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(num_buffered_multicast); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1d074260acd1..851fb7dc893c 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -282,7 +282,7 @@ struct ieee80211_if_ap { u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; struct sk_buff_head ps_bc_buf; atomic_t num_sta_ps; /* number of stations in PS mode */ - atomic_t num_sta_authorized; /* number of authorized stations */ + atomic_t num_mcast_sta; /* number of stations receiving multicast */ int dtim_count; bool dtim_bc_mc; }; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 7fd7ac48f893..4c04eb5e4cae 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1418,14 +1418,14 @@ int sta_info_move_state(struct sta_info *sta, set_bit(WLAN_STA_ASSOC, &sta->_flags); } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_dec(&sta->sdata->u.ap.num_sta_authorized); + atomic_dec(&sta->sdata->u.ap.num_mcast_sta); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); } break; case IEEE80211_STA_AUTHORIZED: if (sta->sta_state == IEEE80211_STA_ASSOC) { if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_inc(&sta->sdata->u.ap.num_sta_authorized); + atomic_inc(&sta->sdata->u.ap.num_mcast_sta); set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); } break; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0abbef952c14..44001c7e0e58 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -306,7 +306,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) } } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && ieee80211_is_data(hdr->frame_control) && - !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) { + !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) { /* * No associated STAs - no need to send multicast * frames. -- cgit v1.2.2 From 7e3ed02c6e65a0cb4c9259c0d34740305d9aa5e7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 23 Apr 2012 19:49:03 +0200 Subject: mac80211: fix num_mcast_sta counting issues Moving a STA to an AP VLAN prevents num_mcast_sta from being decremented once the STA leaves, because sta->sdata changes. Fix this by checking for AP VLANs as well. Also exclude 4-addr VLAN stations from num_mcast_sta - remote 4-addr stations ignore 3-address multicast frames anyway. In a typical bridge configuration they receive the same packets as 4-address unicast. 
This patch also fixes clearing the sdata->u.vlan.sta pointer when the STA is removed from a 4-addr VLAN. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville --- net/mac80211/cfg.c | 19 +++++++++++++++++++ net/mac80211/sta_info.c | 12 ++++++++---- 2 files changed, 27 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 510a745c3108..70b2af2315a6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1005,6 +1005,9 @@ static int ieee80211_change_station(struct wiphy *wiphy, } if (params->vlan && params->vlan != sta->sdata->dev) { + bool prev_4addr = false; + bool new_4addr = false; + vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && @@ -1020,9 +1023,25 @@ static int ieee80211_change_station(struct wiphy *wiphy, } rcu_assign_pointer(vlansdata->u.vlan.sta, sta); + new_4addr = true; + } + + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + sta->sdata->u.vlan.sta) { + rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL); + prev_4addr = true; } sta->sdata = vlansdata; + + if (sta->sta_state == IEEE80211_STA_AUTHORIZED && + prev_4addr != new_4addr) { + if (new_4addr) + atomic_dec(&sta->sdata->bss->num_mcast_sta); + else + atomic_inc(&sta->sdata->bss->num_mcast_sta); + } + ieee80211_send_layer2_update(sta); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4c04eb5e4cae..97a9d6639fb9 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1417,15 +1417,19 @@ int sta_info_move_state(struct sta_info *sta, if (sta->sta_state == IEEE80211_STA_AUTH) { set_bit(WLAN_STA_ASSOC, &sta->_flags); } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_dec(&sta->sdata->u.ap.num_mcast_sta); + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !sta->sdata->u.vlan.sta)) + atomic_dec(&sta->sdata->bss->num_mcast_sta); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); } break; case IEEE80211_STA_AUTHORIZED: if (sta->sta_state == IEEE80211_STA_ASSOC) { - if (sta->sdata->vif.type == NL80211_IFTYPE_AP) - atomic_inc(&sta->sdata->u.ap.num_mcast_sta); + if (sta->sdata->vif.type == NL80211_IFTYPE_AP || + (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !sta->sdata->u.vlan.sta)) + atomic_inc(&sta->sdata->bss->num_mcast_sta); set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); } break; -- cgit v1.2.2 From 94c514fe240fc0dd02187b78facefde8b6744634 Mon Sep 17 00:00:00 2001 From: Andrei Emeltchenko Date: Tue, 24 Apr 2012 14:18:28 +0300 Subject: mac80211: Adds clean sdata helper Adds hepler to clean sdata ieee80211_clean_sdata similar way as ieee80211_setup_sdata is implemented. The function will be used by other interfaces later. Signed-off-by: Andrei Emeltchenko Signed-off-by: John W. 
Linville --- net/mac80211/iface.c | 19 +++++++++++++++---- net/mac80211/mesh.h | 4 +++- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 23d1da376eb3..ba86978dd561 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1031,6 +1031,18 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, ieee80211_debugfs_add_netdev(sdata); } +static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata) +{ + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: + mesh_path_flush_by_iface(sdata); + break; + + default: + break; + } +} + static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { @@ -1364,8 +1376,8 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); - if (ieee80211_vif_is_mesh(&sdata->vif)) - mesh_path_flush_by_iface(sdata); + /* clean up type-dependent data */ + ieee80211_clean_sdata(sdata); synchronize_rcu(); unregister_netdevice(sdata->dev); @@ -1386,8 +1398,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); - if (ieee80211_vif_is_mesh(&sdata->vif)) - mesh_path_flush_by_iface(sdata); + ieee80211_clean_sdata(sdata); unregister_netdevice_queue(sdata->dev, &unreg_list); } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 345f0e7d518e..e3642756f8f4 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -304,7 +304,6 @@ void mesh_pathtbl_unregister(void); int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); void mesh_path_timer(unsigned long data); void mesh_path_flush_by_nexthop(struct sta_info *sta); -void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); void mesh_path_discard_frame(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); @@ -345,6 +344,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); void mesh_plink_quiesce(struct sta_info *sta); void mesh_plink_restart(struct sta_info *sta); +void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); #else #define mesh_allocated 0 @@ -358,6 +358,8 @@ static inline void mesh_plink_quiesce(struct sta_info *sta) {} static inline void mesh_plink_restart(struct sta_info *sta) {} static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) { return false; } +static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) +{} #endif #endif /* IEEE80211S_H */ -- cgit v1.2.2 From 081579840b4b2421e37bc67e3b089b7ca64ef040 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 20 Apr 2012 09:49:23 +0000 Subject: net: dcb: add CEE notify calls This adds code to trigger CEE events when an APP change or setall command is made from user space. This simplifies user space code significantly by creating a single interface to listen on that works with both firmware and userland agents. And if we end up with multiple agents this keeps every thing in sync userland agents, firmware agents, and kernel notifier consumers. 
For an example agent that listens for these events see: https://github.com/jrfastab/cgdcbxd cgdcbxd is a daemon used to monitor DCB netlink events and manage the net_prio control group sub-system. Signed-off-by: John Fastabend Acked-by: Shmulik Ravid Signed-off-by: David S. Miller --- net/dcb/dcbnl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 8dfa1da7c40d..656c7c75b192 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -704,6 +704,7 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, pid, seq, flags); + dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); out: return ret; } @@ -936,6 +937,7 @@ static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb, ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); + dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); return ret; } -- cgit v1.2.2 From 808db80a7eeff8314ac51654bfefc864582eea13 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 Apr 2012 10:17:59 +0000 Subject: ipv6: call consume_skb() in frag/reassembly Some kfree_skb() calls should be replaced by consume_skb() to avoid drop_monitor/dropwatch false positives. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv6/ip6_output.c | 4 ++-- net/ipv6/reassembly.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index b7ca46161cb9..b347062aa809 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -210,7 +210,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -ENOBUFS; } - kfree_skb(skb); + consume_skb(skb); skb = skb2; skb_set_owner_w(skb, sk); } @@ -889,7 +889,7 @@ slow_path: } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); - kfree_skb(skb); + consume_skb(skb); return err; fail: diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 36e04cff1a85..54c5d2b704df 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -433,7 +433,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; - kfree_skb(fq->q.fragments); + consume_skb(fq->q.fragments); fq->q.fragments = head; } -- cgit v1.2.2 From 8dcf01fc009d12d01fd195ed95eaaee61178f21a Mon Sep 17 00:00:00 2001 From: Shan Wei Date: Tue, 24 Apr 2012 18:21:07 +0000 Subject: net: sock_diag_handler structs can be const read only, so change it to const. Signed-off-by: Shan Wei Acked-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/core/sock_diag.c | 12 ++++++------ net/ipv4/inet_diag.c | 4 ++-- net/unix/diag.c | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index b9868e1fd62c..5fd146720f39 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -10,7 +10,7 @@ #include #include -static struct sock_diag_handler *sock_diag_handlers[AF_MAX]; +static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); @@ -70,7 +70,7 @@ void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlms } EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); -int sock_diag_register(struct sock_diag_handler *hndl) +int sock_diag_register(const struct sock_diag_handler *hndl) { int err = 0; @@ -88,7 +88,7 @@ int sock_diag_register(struct sock_diag_handler *hndl) } EXPORT_SYMBOL_GPL(sock_diag_register); -void sock_diag_unregister(struct sock_diag_handler *hnld) +void sock_diag_unregister(const struct sock_diag_handler *hnld) { int family = hnld->family; @@ -102,7 +102,7 @@ void sock_diag_unregister(struct sock_diag_handler *hnld) } EXPORT_SYMBOL_GPL(sock_diag_unregister); -static inline struct sock_diag_handler *sock_diag_lock_handler(int family) +static const inline struct sock_diag_handler *sock_diag_lock_handler(int family) { if (sock_diag_handlers[family] == NULL) request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, @@ -112,7 +112,7 @@ static inline struct sock_diag_handler *sock_diag_lock_handler(int family) return sock_diag_handlers[family]; } -static inline void sock_diag_unlock_handler(struct sock_diag_handler *h) +static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h) { mutex_unlock(&sock_diag_table_mutex); } @@ -121,7 +121,7 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { int err; struct sock_diag_req *req = NLMSG_DATA(nlh); - struct sock_diag_handler *hndl; + const struct sock_diag_handler *hndl; if (nlmsg_len(nlh) < sizeof(*req)) return -EINVAL; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8d25a1c557eb..9f24028a3ba7 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -999,12 +999,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); } -static struct sock_diag_handler inet_diag_handler = { +static const struct sock_diag_handler inet_diag_handler = { .family = AF_INET, .dump = inet_diag_handler_dump, }; -static struct sock_diag_handler inet6_diag_handler = { +static const struct sock_diag_handler inet6_diag_handler = { .family = AF_INET6, .dump = inet_diag_handler_dump, }; diff --git a/net/unix/diag.c b/net/unix/diag.c index f0486ae9ebe6..47d3002737f5 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -310,7 +310,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); } -static struct sock_diag_handler unix_diag_handler = { +static const struct sock_diag_handler unix_diag_handler = { .family = AF_UNIX, .dump = unix_diag_handler_dump, }; -- cgit v1.2.2 From c2e94d73ea24df499f9183b7401e8d6b94efa417 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Wed, 25 Apr 2012 23:35:50 +0000 Subject: 6lowpan: move frame allocation code to a separate function Separate frame allocation routine 
from data processing function. This makes code more human readable and easier for understanding. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/ieee802154/6lowpan.c | 81 ++++++++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 58c8895716ff..ec03626a3114 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -650,6 +650,53 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr) kfree(entry); } +static struct lowpan_fragment * +lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag) +{ + struct lowpan_fragment *frame; + + frame = kzalloc(sizeof(struct lowpan_fragment), + GFP_ATOMIC); + if (!frame) + goto frame_err; + + INIT_LIST_HEAD(&frame->list); + + frame->length = (iphc0 & 7) | (len << 3); + frame->tag = tag; + + /* allocate buffer for frame assembling */ + frame->skb = alloc_skb(frame->length + + sizeof(struct ipv6hdr), GFP_ATOMIC); + + if (!frame->skb) + goto skb_err; + + frame->skb->priority = skb->priority; + frame->skb->dev = skb->dev; + + /* reserve headroom for uncompressed ipv6 header */ + skb_reserve(frame->skb, sizeof(struct ipv6hdr)); + skb_put(frame->skb, frame->length); + + init_timer(&frame->timer); + /* time out is the same as for ipv6 - 60 sec */ + frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; + frame->timer.data = (unsigned long)frame; + frame->timer.function = lowpan_fragment_timer_expired; + + add_timer(&frame->timer); + + list_add_tail(&frame->list, &lowpan_fragments); + + return frame; + +skb_err: + kfree(frame); +frame_err: + return NULL; +} + static int lowpan_process_data(struct sk_buff *skb) { @@ -692,41 +739,9 @@ lowpan_process_data(struct sk_buff *skb) /* alloc new frame structure */ if (!found) { - frame = kzalloc(sizeof(struct lowpan_fragment), - GFP_ATOMIC); + frame = lowpan_alloc_new_frame(skb, iphc0, len, tag); if (!frame) goto unlock_and_drop; - - INIT_LIST_HEAD(&frame->list); - - frame->length = (iphc0 & 7) | (len << 3); - frame->tag = tag; - - /* allocate buffer for frame assembling */ - frame->skb = alloc_skb(frame->length + - sizeof(struct ipv6hdr), GFP_ATOMIC); - - if (!frame->skb) { - kfree(frame); - goto unlock_and_drop; - } - - frame->skb->priority = skb->priority; - frame->skb->dev = skb->dev; - - /* reserve headroom for uncompressed ipv6 header */ - skb_reserve(frame->skb, sizeof(struct ipv6hdr)); - skb_put(frame->skb, frame->length); - - init_timer(&frame->timer); - /* time out is the same as for ipv6 - 60 sec */ - frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; - frame->timer.data = (unsigned long)frame; - frame->timer.function = lowpan_fragment_timer_expired; - - add_timer(&frame->timer); - - list_add_tail(&frame->list, &lowpan_fragments); } if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) -- cgit v1.2.2 From 2d319508a3551d2995e5cd12d649821b3be00e5b Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Wed, 25 Apr 2012 23:35:51 +0000 Subject: 6lowpan: duplicate definition of IEEE802154_ALEN The same macros is defined in 'include/net/af_ieee802154.h' and is called IEEE802154_ADDR_LEN. No need another one, so remove it. Signed-off-by: Alexander Smirnov Signed-off-by: David S. 
Miller --- net/ieee802154/6lowpan.c | 4 ++-- net/ieee802154/6lowpan.h | 3 --- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index ec03626a3114..4b5701c8bf97 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -196,7 +196,7 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr, static void lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) { - memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN); + memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN); /* second bit-flip (Universe/Local) is done according RFC2464 */ ipaddr->s6_addr[8] ^= 0x02; } @@ -221,7 +221,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, if (lladdr) lowpan_raw_dump_inline(__func__, "linklocal address", - lladdr, IEEE802154_ALEN); + lladdr, IEEE802154_ADDR_LEN); if (prefcount > 0) memcpy(ipaddr, prefix, prefcount); diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index aeff3f310482..8c2251fb0a3f 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -53,9 +53,6 @@ #ifndef __6LOWPAN_H__ #define __6LOWPAN_H__ -/* need to know address length to manipulate with it */ -#define IEEE802154_ALEN 8 - #define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ #define UIP_IPH_LEN 40 /* ipv6 fixed header size */ #define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ -- cgit v1.2.2 From de248a75c35e0208294cf304b112916254b69184 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 25 Apr 2012 23:43:04 +0000 Subject: tcp repair: Fix unaligned access when repairing options (v2) Don't pick __u8/__u16 values directly from raw pointers, but instead use an array of structures of code:value pairs. This is OK, since the buffer we take options from is not an skb memory, but a user-to-kernel one. For those options which don't require any value now, require this to be zero (for potential future extension of this API). v2: Changed tcp_repair_opt to use two __u32-s as spotted by David Laight. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 60 ++++++++++++++++++++-------------------------------------- 1 file changed, 21 insertions(+), 39 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index de6a238f0e1d..9670af341931 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2283,60 +2283,40 @@ static inline int tcp_can_repair_sock(struct sock *sk) ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); } -static int tcp_repair_options_est(struct tcp_sock *tp, char __user *optbuf, unsigned int len) +static int tcp_repair_options_est(struct tcp_sock *tp, + struct tcp_repair_opt __user *optbuf, unsigned int len) { - /* - * Options are stored in CODE:VALUE form where CODE is 8bit and VALUE - * fits the respective TCPOLEN_ size - */ + struct tcp_repair_opt opt; - while (len > 0) { - u8 opcode; - - if (get_user(opcode, optbuf)) + while (len >= sizeof(opt)) { + if (copy_from_user(&opt, optbuf, sizeof(opt))) return -EFAULT; optbuf++; - len--; - - switch (opcode) { - case TCPOPT_MSS: { - u16 in_mss; + len -= sizeof(opt); - if (len < sizeof(in_mss)) - return -ENODATA; - if (get_user(in_mss, optbuf)) - return -EFAULT; - - tp->rx_opt.mss_clamp = in_mss; - - optbuf += sizeof(in_mss); - len -= sizeof(in_mss); + switch (opt.opt_code) { + case TCPOPT_MSS: + tp->rx_opt.mss_clamp = opt.opt_val; break; - } - case TCPOPT_WINDOW: { - u8 wscale; - - if (len < sizeof(wscale)) - return -ENODATA; - if (get_user(wscale, optbuf)) - return -EFAULT; - - if (wscale > 14) + case TCPOPT_WINDOW: + if (opt.opt_val > 14) return -EFBIG; - tp->rx_opt.snd_wscale = wscale; - - optbuf += sizeof(wscale); - len -= sizeof(wscale); + tp->rx_opt.snd_wscale = opt.opt_val; break; - } case TCPOPT_SACK_PERM: + if (opt.opt_val != 0) + return -EINVAL; + tp->rx_opt.sack_ok |= TCP_SACK_SEEN; if (sysctl_tcp_fack) tcp_enable_fack(tp); break; case TCPOPT_TIMESTAMP: + if (opt.opt_val != 0) + return -EINVAL; + tp->rx_opt.tstamp_ok = 1; break; } @@ -2557,7 +2537,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (!tp->repair) err = -EINVAL; else if (sk->sk_state == TCP_ESTABLISHED) - err = tcp_repair_options_est(tp, optval, optlen); + err = tcp_repair_options_est(tp, + (struct tcp_repair_opt __user *)optval, + optlen); else err = -EPERM; break; -- cgit v1.2.2 From a2cfd45b52006893fc0d0e850d187d30f86a39dc Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 16:46:29 -0400 Subject: tipc: Optimize re-initialization of configuration service Streamlines the job of re-initializing TIPC's configuration service when a node's network address is first assigned. Rather than destroying the configuration server port and then recreating it, TIPC now simply withdraws the existing {0,<0.0.0>} name publication and creates a new {0,} name publication that identifies the node's network address to interested subscribers. 
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/config.c | 15 +++++++++++++++ net/tipc/config.h | 1 + net/tipc/net.c | 3 +-- 3 files changed, 17 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/tipc/config.c b/net/tipc/config.c index f5458eddd7bc..a4988cdbde6d 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -489,6 +489,21 @@ failed: return res; } +void tipc_cfg_reinit(void) +{ + struct tipc_name_seq seq; + int res; + + seq.type = TIPC_CFG_SRV; + seq.lower = seq.upper = 0; + tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq); + + seq.lower = seq.upper = tipc_own_addr; + res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq); + if (res) + err("Unable to reinitialize configuration service\n"); +} + void tipc_cfg_stop(void) { if (config_port_ref) { diff --git a/net/tipc/config.h b/net/tipc/config.h index 80da6ebc2785..1f252f3fa058 100644 --- a/net/tipc/config.h +++ b/net/tipc/config.h @@ -66,6 +66,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, int headroom); int tipc_cfg_init(void); +void tipc_cfg_reinit(void); void tipc_cfg_stop(void); #endif diff --git a/net/tipc/net.c b/net/tipc/net.c index 5fab4ff24a94..f4a490b0cf4c 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -176,7 +176,6 @@ int tipc_net_start(u32 addr) char addr_string[16]; tipc_subscr_stop(); - tipc_cfg_stop(); write_lock_bh(&tipc_net_lock); tipc_own_addr = addr; @@ -186,7 +185,7 @@ int tipc_net_start(u32 addr) write_unlock_bh(&tipc_net_lock); tipc_k_signal((Handler)tipc_subscr_start, 0); - tipc_k_signal((Handler)tipc_cfg_init, 0); + tipc_cfg_reinit(); info("Started in network mode\n"); info("Own node address %s, network identity %u\n", -- cgit v1.2.2 From 861d3a0e5bbc93b79b5739cfb4ea0fb553fe9407 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 16:50:22 -0400 Subject: tipc: Optimize initialization of configuration service Initialization now occurs in the calling thread of control, rather than being deferred to the TIPC tasklet. With the current codebase, the deferral is no longer necessary. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/tipc/core.c b/net/tipc/core.c index 68eba03e7955..c8a4f00468a5 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -152,7 +152,7 @@ static int tipc_core_start(void) if (!res) res = tipc_k_signal((Handler)tipc_subscr_start, 0); if (!res) - res = tipc_k_signal((Handler)tipc_cfg_init, 0); + res = tipc_cfg_init(); if (!res) res = tipc_netlink_start(); if (!res) -- cgit v1.2.2 From eb323b075a360d59fabbbd58c0d7aeb951bfc647 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 17:17:39 -0400 Subject: tipc: Optimize termination of configuration service Termination no longer tests to see if the configuration service port was successfully created or not. In the unlikely event that the port was not created, attempting to delete the non-existent port is detected gracefully and causes no harm. 
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/config.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/tipc/config.c b/net/tipc/config.c index a4988cdbde6d..843d7ae04661 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -506,8 +506,6 @@ void tipc_cfg_reinit(void) void tipc_cfg_stop(void) { - if (config_port_ref) { - tipc_deleteport(config_port_ref); - config_port_ref = 0; - } + tipc_deleteport(config_port_ref); + config_port_ref = 0; } -- cgit v1.2.2 From eb3865a99dc38396a403ef82f99f4c51dd34f0bf Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 17:21:49 -0400 Subject: tipc: Enhance re-initialization of network topology service Streamlines the job of re-initializing TIPC's network topology service when a node's network address is first assigned. Rather than destroying the topology server port and breaking its connections to existing subscribers, TIPC now simply lets the service continue running (since the change to the port identifier of each port used by the topology service no longer impacts the flow of messages between the service and its subscribers). This enhancement means that applications that utilize the topology service prior to the assignment of TIPC's network address no longer need to re-establish their subscriptions when the address is finally assigned. However, it is worth noting that any subsequent events for existing subscriptions report the new port identifier of the publishing port, rather than the original port identifier. (For example, a name that was previously reported as being published by <0.0.0:ref> may be subsequently withdrawn by .) This doesn't impact any of the existing known userspace in tipc-utils, since (a) TIPC continues to treat references to the original port ID correctly and (b) normal use cases assign an address before active use. However if there does happen to be some rare/custom application out there that was relying on this, they can simply bypass the enhancement by issuing a subscription to {0,0} and break its connection to the topology service, if an associated withdrawal event occurs. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/net.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/tipc/net.c b/net/tipc/net.c index f4a490b0cf4c..7c236c89cf5e 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -175,8 +175,6 @@ int tipc_net_start(u32 addr) { char addr_string[16]; - tipc_subscr_stop(); - write_lock_bh(&tipc_net_lock); tipc_own_addr = addr; tipc_named_reinit(); @@ -184,7 +182,6 @@ int tipc_net_start(u32 addr) tipc_bclink_init(); write_unlock_bh(&tipc_net_lock); - tipc_k_signal((Handler)tipc_subscr_start, 0); tipc_cfg_reinit(); info("Started in network mode\n"); -- cgit v1.2.2 From 2d98abb9fe132898d17b56fb4765687aff82c093 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 17:41:35 -0400 Subject: tipc: Optimize initialization of network topology service Initialization now occurs in the calling thread of control, rather than being deferred to the TIPC tasklet. With the current codebase, the deferral is no longer necessary. 
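A minimal user-space sketch of such a topology subscription (illustration only, not part of these patches; it uses the AF_TIPC socket API from <linux/tipc.h> and watches name type 0, which the configuration service publishes with the node's network address as described above):

/* Sketch: subscribe to TIPC topology events for name type 0. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

int main(void)
{
	struct sockaddr_tipc topsrv = {
		.family			 = AF_TIPC,
		.addrtype		 = TIPC_ADDR_NAME,
		.addr.name.name.type	 = TIPC_TOP_SRV,
		.addr.name.name.instance = TIPC_TOP_SRV,
	};
	struct tipc_subscr sub = {
		.seq.type  = 0,		/* {0,<addr>} from the config service */
		.seq.lower = 0,
		.seq.upper = ~0U,
		.timeout   = TIPC_WAIT_FOREVER,
		.filter	   = TIPC_SUB_PORTS,
	};
	struct tipc_event evt;
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0 ||
	    connect(sd, (struct sockaddr *)&topsrv, sizeof(topsrv)) < 0 ||
	    send(sd, &sub, sizeof(sub), 0) != sizeof(sub)) {
		perror("tipc topology subscription");
		return 1;
	}
	/* Publish/withdraw events report the publishing port identifier,
	 * which after these changes reflects the newly assigned address. */
	while (recv(sd, &evt, sizeof(evt), 0) == sizeof(evt))
		printf("event %u: {%u,%u} from port <%x:%u>\n", evt.event,
		       evt.found_lower, evt.found_upper,
		       evt.port.node, evt.port.ref);
	close(sd);
	return 0;
}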
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/tipc/core.c b/net/tipc/core.c index c8a4f00468a5..ba089407b2f7 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -150,7 +150,7 @@ static int tipc_core_start(void) if (!res) res = tipc_nametbl_init(); if (!res) - res = tipc_k_signal((Handler)tipc_subscr_start, 0); + res = tipc_subscr_start(); if (!res) res = tipc_cfg_init(); if (!res) -- cgit v1.2.2 From bbe6a295d0a987068c89ca7e7b7291b754995754 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 17:44:52 -0400 Subject: tipc: remove redundant memset and stale comment from subscr.c Eliminate code to zero-out the main topology service structure, which is already zeroed-out. Get rid of a comment documenting a field of the main topology service structure that no longer exists. Both are cosmetic changes with no impact on runtime behaviour. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/subscr.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'net') diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index b2964e9895d3..af93ea924f60 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -56,7 +56,6 @@ struct tipc_subscriber { /** * struct top_srv - TIPC network topology subscription service - * @user_ref: TIPC userid of subscription service * @setup_port: reference to TIPC port that handles subscription requests * @subscription_count: number of active subscriptions (not subscribers!) * @subscriber_list: list of ports subscribing to service @@ -535,7 +534,6 @@ int tipc_subscr_start(void) struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; int res; - memset(&topsrv, 0, sizeof(topsrv)); spin_lock_init(&topsrv.lock); INIT_LIST_HEAD(&topsrv.subscriber_list); -- cgit v1.2.2 From f7fb9d20ade55e538efe91477014b6b367ecd802 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 17:53:03 -0400 Subject: tipc: Create helper routine to delete unused name sequence structure Replaces two identical chunks of code that delete an unused name sequence structure from TIPC's name table with calls to a new routine that performs this operation. This change is cosmetic and doesn't impact the operation of TIPC. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_table.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 25c2975d86ee..42221219275c 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -186,7 +186,19 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea return nseq; } -/** +/* + * nameseq_delete_empty - deletes a name sequence structure if now unused + */ +static void nameseq_delete_empty(struct name_seq *seq) +{ + if (!seq->first_free && list_empty(&seq->subscriptions)) { + hlist_del_init(&seq->ns_list); + kfree(seq->sseqs); + kfree(seq); + } +} + +/* * nameseq_find_subseq - find sub-sequence (if any) matching a name instance * * Very time-critical, so binary searches through sub-sequence array. 
@@ -529,12 +541,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, return NULL; publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); - - if (!seq->first_free && list_empty(&seq->subscriptions)) { - hlist_del_init(&seq->ns_list); - kfree(seq->sseqs); - kfree(seq); - } + nameseq_delete_empty(seq); return publ; } @@ -768,11 +775,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) spin_lock_bh(&seq->lock); list_del_init(&s->nameseq_list); spin_unlock_bh(&seq->lock); - if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { - hlist_del_init(&seq->ns_list); - kfree(seq->sseqs); - kfree(seq); - } + nameseq_delete_empty(seq); } write_unlock_bh(&tipc_nametbl_lock); } -- cgit v1.2.2 From 8f1778969359a71f398c9ac6d3a9a3e61439b466 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 17:57:17 -0400 Subject: tipc: Enhance error checking of published names Consolidates validation of scope and name sequence range values into a single routine where it applies both to local name publications and to name publications issued by other nodes in the network. This change means that the scope value for non-local publications is now validated and the name sequence range for local publications is now validated only once. Additionally, a publication attempt that fails validation now creates an entry in the system log file only if debugging capabilities have been enabled; this prevents the system log from being cluttered up with messages caused by a defective application or network node. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/name_table.c | 7 ++++--- net/tipc/port.c | 4 ---- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 42221219275c..4de58dece9b2 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -516,9 +516,10 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, { struct name_seq *seq = nametbl_find_seq(type); - if (lower > upper) { - warn("Failed to publish illegal {%u,%u,%u}\n", - type, lower, upper); + if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || + (lower > upper)) { + dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n", + type, lower, upper, scope); return NULL; } diff --git a/net/tipc/port.c b/net/tipc/port.c index 4aede40e592f..e6841706967c 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -992,10 +992,6 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) if (p_ptr->connected) goto exit; - if (seq->lower > seq->upper) - goto exit; - if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE)) - goto exit; key = ref + p_ptr->pub_count + 1; if (key == ref) { res = -EADDRINUSE; -- cgit v1.2.2 From 67469601406c12ced3db9956aeb0ef0854e2952f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 Apr 2012 07:37:38 +0000 Subject: ipv6: RTAX_FEATURE_ALLFRAG causes inefficient TCP segment sizing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Quoting Tore Anderson from : https://bugzilla.kernel.org/show_bug.cgi?id=42572 When RTAX_FEATURE_ALLFRAG is set on a route, the effective TCP segment size does not take into account the size of the IPv6 Fragmentation header that needs to be included in outbound packets, causing every transmitted TCP segment to be fragmented across two IPv6 packets, the latter of which will only contain 8 bytes of actual payload. 
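A rough sketch of that split (illustration only, not from the patch; the 1280-byte minimum MTU and the 1240-byte transport payload are the figures from the report quoted here):

/* Sketch: why an unadjusted segment becomes a full fragment plus 8 bytes. */
#include <stdio.h>

int main(void)
{
	int pmtu     = 1280;	/* clamped IPv6 minimum MTU */
	int ip6_hdr  = 40;	/* fixed IPv6 header */
	int frag_hdr = 8;	/* Fragmentation extension header */
	int l4_bytes = 1240;	/* TCP header + payload sized without frag_hdr */
	int per_frag = pmtu - ip6_hdr - frag_hdr;	/* 1232 usable bytes */

	printf("fragment 1 carries %d bytes, fragment 2 carries %d bytes\n",
	       per_frag, l4_bytes - per_frag);		/* 1232 and 8 */
	return 0;
}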
RTAX_FEATURE_ALLFRAG is typically set on a route in response to receving a ICMPv6 Packet Too Big message indicating a Path MTU of less than 1280 bytes. 1280 bytes is the minimum IPv6 MTU, however ICMPv6 PTBs with MTU < 1280 are still valid, in particular when an IPv6 packet is sent to an IPv4 destination through a stateless translator. Any ICMPv4 Need To Fragment packets originated from the IPv4 part of the path will be translated to ICMPv6 PTB which may then indicate an MTU of less than 1280. The Linux kernel refuses to reduce the effective MTU to anything below 1280 bytes, instead it sets it to exactly 1280 bytes, and RTAX_FEATURE_ALLFRAG is also set. However, the TCP segment size appears to be set to 1240 bytes (1280 Path MTU - 40 bytes of IPv6 header), instead of 1232 (additionally taking into account the 8 bytes required by the IPv6 Fragmentation extension header). This in turn results in rather inefficient transmission, as every transmitted TCP segment now is split in two fragments containing 1232+8 bytes of payload. After this patch, all the outgoing packets that includes a Fragmentation header all are "atomic" or "non-fragmented" fragments, i.e., they both have Offset=0 and More Fragments=0. With help from David S. Miller Reported-by: Tore Anderson Signed-off-by: Eric Dumazet Cc: Maciej Żenczykowski Cc: Tom Herbert Tested-by: Tore Anderson Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 19 +++++++++++++++++-- net/ipv6/tcp_ipv6.c | 1 + 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7b7cf3811348..834e89fc541b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1150,7 +1150,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) } /* Calculate MSS. Not accounting for SACKs here. 
*/ -int tcp_mtu_to_mss(const struct sock *sk, int pmtu) +int tcp_mtu_to_mss(struct sock *sk, int pmtu) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1161,6 +1161,14 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu) */ mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); + /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ + if (icsk->icsk_af_ops->net_frag_header_len) { + const struct dst_entry *dst = __sk_dst_get(sk); + + if (dst && dst_allfrag(dst)) + mss_now -= icsk->icsk_af_ops->net_frag_header_len; + } + /* Clamp it (mss_clamp does not include tcp options) */ if (mss_now > tp->rx_opt.mss_clamp) mss_now = tp->rx_opt.mss_clamp; @@ -1179,7 +1187,7 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu) } /* Inverse of above */ -int tcp_mss_to_mtu(const struct sock *sk, int mss) +int tcp_mss_to_mtu(struct sock *sk, int mss) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1190,6 +1198,13 @@ int tcp_mss_to_mtu(const struct sock *sk, int mss) icsk->icsk_ext_hdr_len + icsk->icsk_af_ops->net_header_len; + /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ + if (icsk->icsk_af_ops->net_frag_header_len) { + const struct dst_entry *dst = __sk_dst_get(sk); + + if (dst && dst_allfrag(dst)) + mtu += icsk->icsk_af_ops->net_frag_header_len; + } return mtu; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index cdbf292ad208..57b210969834 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1778,6 +1778,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { .syn_recv_sock = tcp_v6_syn_recv_sock, .get_peer = tcp_v6_get_peer, .net_header_len = sizeof(struct ipv6hdr), + .net_frag_header_len = sizeof(struct frag_hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, -- cgit v1.2.2 From feb50ac19e3527c3c68391718f07272349639a84 Mon Sep 17 00:00:00 2001 From: hartleys Date: Tue, 24 Apr 2012 14:38:37 +0000 Subject: crush: include header for global symbols Include the header to pickup the definitions of the global symbols. Quiets the following sparse warnings: warning: symbol 'crush_find_rule' was not declared. Should it be static? warning: symbol 'crush_do_rule' was not declared. Should it be static? Signed-off-by: H Hartley Sweeten Cc: Sage Weil Cc: "David S. Miller" Signed-off-by: David S. Miller --- net/ceph/crush/mapper.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 854ac53f56cd..363f8f7e6c3c 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -20,6 +20,7 @@ #include #include +#include /* * Implement the core CRUSH mapping algorithm. -- cgit v1.2.2 From 82981930125abfd39d7c8378a9cfdf5e1be2002b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 26 Apr 2012 20:07:59 +0000 Subject: net: cleanups in sock_setsockopt() Use min_t()/max_t() macros, reformat two comments, use !!test_bit() to match !!sock_flag() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/sock.c | 42 +++++++++++++++--------------------------- 1 file changed, 15 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 0431aaf7473a..10605d2ec860 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -577,23 +577,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname, break; case SO_SNDBUF: /* Don't error on this BSD doesn't and if you think - about it this is right. Otherwise apps have to - play 'guess the biggest size' games. RCVBUF/SNDBUF - are treated in BSD as hints */ - - if (val > sysctl_wmem_max) - val = sysctl_wmem_max; + * about it this is right. Otherwise apps have to + * play 'guess the biggest size' games. RCVBUF/SNDBUF + * are treated in BSD as hints + */ + val = min_t(u32, val, sysctl_wmem_max); set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - if ((val * 2) < SOCK_MIN_SNDBUF) - sk->sk_sndbuf = SOCK_MIN_SNDBUF; - else - sk->sk_sndbuf = val * 2; - - /* - * Wake up sending tasks if we - * upped the value. - */ + sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); + /* Wake up sending tasks if we upped the value. */ sk->sk_write_space(sk); break; @@ -606,12 +598,11 @@ set_sndbuf: case SO_RCVBUF: /* Don't error on this BSD doesn't and if you think - about it this is right. Otherwise apps have to - play 'guess the biggest size' games. RCVBUF/SNDBUF - are treated in BSD as hints */ - - if (val > sysctl_rmem_max) - val = sysctl_rmem_max; + * about it this is right. Otherwise apps have to + * play 'guess the biggest size' games. RCVBUF/SNDBUF + * are treated in BSD as hints + */ + val = min_t(u32, val, sysctl_rmem_max); set_rcvbuf: sk->sk_userlocks |= SOCK_RCVBUF_LOCK; /* @@ -629,10 +620,7 @@ set_rcvbuf: * returning the value we actually used in getsockopt * is the most desirable behavior. */ - if ((val * 2) < SOCK_MIN_RCVBUF) - sk->sk_rcvbuf = SOCK_MIN_RCVBUF; - else - sk->sk_rcvbuf = val * 2; + sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); break; case SO_RCVBUFFORCE: @@ -975,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_PASSCRED: - v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; + v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); break; case SO_PEERCRED: @@ -1010,7 +998,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_PASSSEC: - v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; + v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); break; case SO_PEERSEC: -- cgit v1.2.2 From aad585473fe1e4b07f2ec1e2432475937f90c385 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Thu, 26 Apr 2012 18:13:08 -0400 Subject: tipc: Reject payload messages with invalid message type Adds check to ensure TIPC sockets reject incoming payload messages that have an unrecognized message type. Remove the old open question about whether TIPC_ERR_NO_PORT is the proper return value. It is appropriate here since there are valid instances where another node can make use of the reply, and at this point in time the host is already broadcasting TIPC data, so there are no real security concerns. 
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/socket.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 6d4991e8f670..3c00b40f815f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1223,11 +1223,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) /* Reject message if it is wrong sort of message for socket */ - /* - * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD? - * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY - * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC - */ + if (msg_type(msg) > TIPC_DIRECT_MSG) + return TIPC_ERR_NO_PORT; if (sock->state == SS_READY) { if (msg_connected(msg)) -- cgit v1.2.2 From cb75a36c8a1ab68e2dbfbe172f12c792b0c6dba8 Mon Sep 17 00:00:00 2001 From: Jeffrin Jose Date: Wed, 25 Apr 2012 19:17:29 +0530 Subject: net: Fixed a coding style issue related to spaces. Fixed a coding style issue relating to spaces in net/core/sock.c Signed-off-by: Jeffrin Jose Signed-off-by: David S. Miller --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 10605d2ec860..836bca6485f5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2564,7 +2564,7 @@ static char proto_method_implemented(const void *method) } static long sock_prot_memory_allocated(struct proto *proto) { - return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L; + return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; } static char *sock_prot_memory_pressure(struct proto *proto) -- cgit v1.2.2 From f7ad74fef3af6c6e2ef7f01c5589d77fe7db3d7c Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Fri, 27 Apr 2012 08:23:21 +0000 Subject: net/ipv6/udp: UDP encapsulation: break backlog_rcv into __udpv6_queue_rcv_skb This is the first step in reworking the IPv6 UDP code to be structured more like the IPv4 UDP code. This patch creates __udpv6_queue_rcv_skb() with the equivalent sematics to __udp_queue_rcv_skb(), and wires it up to the backlog_rcv method. Signed-off-by: Benjamin LaHaise Signed-off-by: David S. 
Miller --- net/ipv6/udp.c | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index d39bbc9e0622..6c0367ff7be7 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -496,6 +496,28 @@ out: sock_put(sk); } +static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +{ + int rc; + + if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) + sock_rps_save_rxhash(sk, skb); + + rc = sock_queue_rcv_skb(sk, skb); + if (rc < 0) { + int is_udplite = IS_UDPLITE(sk); + + /* Note that an ENOMEM error is charged twice */ + if (rc == -ENOMEM) + UDP6_INC_STATS_BH(sock_net(sk), + UDP_MIB_RCVBUFERRORS, is_udplite); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + kfree_skb(skb); + return -1; + } + return 0; +} + static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info ) @@ -503,15 +525,12 @@ static __inline__ void udpv6_err(struct sk_buff *skb, __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } -int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) +int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); - if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) - sock_rps_save_rxhash(sk, skb); - if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; @@ -540,19 +559,12 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) } skb_dst_drop(skb); - rc = sock_queue_rcv_skb(sk, skb); - if (rc < 0) { - /* Note that an ENOMEM error is charged twice */ - if (rc == -ENOMEM) - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); - goto drop_no_sk_drops_inc; - } - return 0; + rc = __udpv6_queue_rcv_skb(sk, skb); + + return rc; drop: atomic_inc(&sk->sk_drops); -drop_no_sk_drops_inc: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; @@ -1471,7 +1483,7 @@ struct proto udpv6_prot = { .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, - .backlog_rcv = udpv6_queue_rcv_skb, + .backlog_rcv = __udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v6_rehash, -- cgit v1.2.2 From cb80ef463d1881757ade3197cdf875a2ff856651 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Fri, 27 Apr 2012 08:23:59 +0000 Subject: net/ipv6/udp: UDP encapsulation: move socket locking into udpv6_queue_rcv_skb() In order to make sure that when the encap_rcv() hook is introduced it is not called with the socket lock held, move socket locking from callers into udpv6_queue_rcv_skb(), matching what happens in IPv4. Signed-off-by: Benjamin LaHaise Signed-off-by: David S. 
Miller --- net/ipv6/udp.c | 97 ++++++++++++++++++++++++++-------------------------------- 1 file changed, 44 insertions(+), 53 deletions(-) (limited to 'net') diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 6c0367ff7be7..bc533ea8fc68 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -558,14 +558,25 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto drop; } + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) + goto drop; + skb_dst_drop(skb); - rc = __udpv6_queue_rcv_skb(sk, skb); + bh_lock_sock(sk); + rc = 0; + if (!sock_owned_by_user(sk)) + rc = __udpv6_queue_rcv_skb(sk, skb); + else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { + bh_unlock_sock(sk); + goto drop; + } + bh_unlock_sock(sk); return rc; drop: - atomic_inc(&sk->sk_drops); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } @@ -614,37 +625,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { - unsigned int i; + struct sk_buff *skb1 = NULL; struct sock *sk; - struct sk_buff *skb1; + unsigned int i; for (i = 0; i < count; i++) { - skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); - sk = stack[i]; - if (skb1) { - if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) { - kfree_skb(skb1); - goto drop; - } - bh_lock_sock(sk); - if (!sock_owned_by_user(sk)) - udpv6_queue_rcv_skb(sk, skb1); - else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) { - kfree_skb(skb1); - bh_unlock_sock(sk); - goto drop; - } - bh_unlock_sock(sk); - continue; + if (likely(skb1 == NULL)) + skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); + if (!skb1) { + atomic_inc(&sk->sk_drops); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, + IS_UDPLITE(sk)); } -drop: - atomic_inc(&sk->sk_drops); - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_INERRORS, IS_UDPLITE(sk)); + + if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0) + skb1 = NULL; } + if (unlikely(skb1)) + kfree_skb(skb1); } /* * Note: called only from the BH handler context, @@ -784,39 +785,29 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, * for sock caches... i'll skip this for now. 
*/ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); + if (sk != NULL) { + int ret = udpv6_queue_rcv_skb(sk, skb); + sock_put(sk); - if (sk == NULL) { - if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) - goto discard; - - if (udp_lib_checksum_complete(skb)) - goto discard; - UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, - proto == IPPROTO_UDPLITE); - - icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; - kfree_skb(skb); return 0; } - /* deliver */ - - if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { - sock_put(sk); + if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; - } - bh_lock_sock(sk); - if (!sock_owned_by_user(sk)) - udpv6_queue_rcv_skb(sk, skb); - else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { - atomic_inc(&sk->sk_drops); - bh_unlock_sock(sk); - sock_put(sk); + + if (udp_lib_checksum_complete(skb)) goto discard; - } - bh_unlock_sock(sk); - sock_put(sk); + + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + + kfree_skb(skb); return 0; short_packet: -- cgit v1.2.2 From d7f3f62167bc2299d9669888b493b6e6ba561c35 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Fri, 27 Apr 2012 08:24:08 +0000 Subject: net/ipv6/udp: UDP encapsulation: introduce encap_rcv hook into IPv6 Now that the sematics of udpv6_queue_rcv_skb() match IPv4's udp_queue_rcv_skb(), introduce the UDP encap_rcv() hook for IPv6. Signed-off-by: Benjamin LaHaise Signed-off-by: David S. Miller --- net/ipv6/udp.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'net') diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index bc533ea8fc68..c1d91a713e8e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -525,6 +525,14 @@ static __inline__ void udpv6_err(struct sk_buff *skb, __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } +static struct static_key udpv6_encap_needed __read_mostly; +void udpv6_encap_enable(void) +{ + if (!static_key_enabled(&udpv6_encap_needed)) + static_key_slow_inc(&udpv6_encap_needed); +} +EXPORT_SYMBOL(udpv6_encap_enable); + int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); @@ -534,6 +542,37 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; + if (static_key_false(&udpv6_encap_needed) && up->encap_type) { + int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); + + /* + * This is an encapsulation socket so pass the skb to + * the socket's udp_encap_rcv() hook. Otherwise, just + * fall through and pass this up the UDP socket. + * up->encap_rcv() returns the following value: + * =0 if skb was successfully passed to the encap + * handler or was discarded by it. + * >0 if skb should be passed on to UDP. + * <0 if skb should be resubmitted as proto -N + */ + + /* if we're overly short, let UDP handle it */ + encap_rcv = ACCESS_ONCE(up->encap_rcv); + if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { + int ret; + + ret = encap_rcv(sk, skb); + if (ret <= 0) { + UDP_INC_STATS_BH(sock_net(sk), + UDP_MIB_INDATAGRAMS, + is_udplite); + return -ret; + } + } + + /* FALLTHROUGH -- it's a UDP Packet */ + } + /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 
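The return-value contract spelled out in the encap_rcv() comment above (=0 consumed, >0 fall through to UDP, <0 resubmit as protocol -N) is easy to get backwards, so here is a self-contained sketch of a caller honouring it. toy_encap_rcv(), handle_as_udp() and resubmit() are invented for illustration; only the sign convention mirrors the kernel code.

#include <stdio.h>

/* pretend encapsulation handler following the documented convention */
static int toy_encap_rcv(int pkt_len)
{
	if (pkt_len <= 8)
		return 1;     /* too short for a tunnel header: let UDP have it */
	if (pkt_len % 2)
		return -41;   /* ask the caller to re-run the packet as protocol 41 */
	return 0;             /* consumed (or dropped) by the encap handler */
}

static void handle_as_udp(int pkt_len) { printf("UDP receive, %d bytes\n", pkt_len); }
static void resubmit(int proto)        { printf("resubmit as protocol %d\n", proto); }

static void dispatch(int pkt_len)
{
	int ret = toy_encap_rcv(pkt_len);

	if (ret == 0)
		return;                    /* nothing left to do */
	if (ret > 0)
		handle_as_udp(pkt_len);    /* >0: ordinary UDP processing */
	else
		resubmit(-ret);            /* <0: resubmit as proto -N */
}

int main(void)
{
	dispatch(8);    /* short packet  -> UDP */
	dispatch(20);   /* even length   -> consumed by the handler */
	dispatch(21);   /* odd length    -> resubmitted as protocol 41 */
	return 0;
}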
*/ -- cgit v1.2.2 From d2cf3361677e5bb5d01d45052212b7050a9aa8c4 Mon Sep 17 00:00:00 2001 From: Benjamin LaHaise Date: Fri, 27 Apr 2012 08:24:18 +0000 Subject: net/l2tp: add support for L2TP over IPv6 UDP Now that encap_rcv() works on IPv6 UDP sockets, wire L2TP up to IPv6. Support has been tested with and without hardware offloading. This version fixes the L2TP over localhost issue with incorrect checksums being reported. Signed-off-by: Benjamin LaHaise Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 89 +++++++++++++++++++++++++++++++++++++++++++++------- net/l2tp/l2tp_ppp.c | 42 ++++++++++++++++++++++++- 2 files changed, 118 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6732b6c758b..e91d55924d94 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -53,6 +53,9 @@ #include #include #include +#include +#include +#include #include #include @@ -446,21 +449,43 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk, { struct udphdr *uh = udp_hdr(skb); u16 ulen = ntohs(uh->len); - struct inet_sock *inet; __wsum psum; - if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) - return 0; - - inet = inet_sk(sk); - psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, - IPPROTO_UDP, 0); - - if ((skb->ip_summed == CHECKSUM_COMPLETE) && - !csum_fold(csum_add(psum, skb->csum))) + if (sk->sk_no_check || skb_csum_unnecessary(skb)) return 0; - skb->csum = psum; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) { + if (!uh->check) { + LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); + return 1; + } + if ((skb->ip_summed == CHECKSUM_COMPLETE) && + !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, ulen, + IPPROTO_UDP, skb->csum)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return 0; + } + skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len, IPPROTO_UDP, + 0)); + } else +#endif + { + struct inet_sock *inet; + if (!uh->check) + return 0; + inet = inet_sk(sk); + psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, + ulen, IPPROTO_UDP, 0); + + if ((skb->ip_summed == CHECKSUM_COMPLETE) && + !csum_fold(csum_add(psum, skb->csum))) + return 0; + skb->csum = psum; + } return __skb_checksum_complete(skb); } @@ -988,7 +1013,12 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Queue the packet to IP for output */ skb->local_df = 1; - error = ip_queue_xmit(skb, fl); +#if IS_ENABLED(CONFIG_IPV6) + if (skb->sk->sk_family == PF_INET6) + error = inet6_csk_xmit(skb, NULL); + else +#endif + error = ip_queue_xmit(skb, fl); /* Update stats */ if (error >= 0) { @@ -1021,6 +1051,31 @@ static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) skb->destructor = l2tp_sock_wfree; } +#if IS_ENABLED(CONFIG_IPV6) +static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb, + int udp_len) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + struct udphdr *uh = udp_hdr(skb); + + if (!skb_dst(skb) || !skb_dst(skb)->dev || + !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) { + __wsum csum = skb_checksum(skb, 0, udp_len, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len, + IPPROTO_UDP, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + } else { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + uh->check = 
~csum_ipv6_magic(&np->saddr, &np->daddr, + udp_len, IPPROTO_UDP, 0); + } +} +#endif + /* If caller requires the skb to have a ppp header, the header must be * inserted in the skb data before calling this function. */ @@ -1089,6 +1144,11 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len uh->check = 0; /* Calculate UDP checksum if configured to do so */ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) + l2tp_xmit_ipv6_csum(sk, skb, udp_len); + else +#endif if (sk->sk_no_check == UDP_CSUM_NOXMIT) skb->ip_summed = CHECKSUM_NONE; else if ((skb_dst(skb) && skb_dst(skb)->dev) && @@ -1424,6 +1484,11 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == PF_INET6) + udpv6_encap_enable(); + else +#endif udp_encap_enable(); } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 1addd9f3f40a..27b9dec9d254 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -916,7 +916,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, } inet = inet_sk(tunnel->sock); - if (tunnel->version == 2) { + if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) { struct sockaddr_pppol2tp sp; len = sizeof(sp); memset(&sp, 0, len); @@ -932,6 +932,46 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, sp.pppol2tp.addr.sin_port = inet->inet_dport; sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); +#if IS_ENABLED(CONFIG_IPV6) + } else if ((tunnel->version == 2) && + (tunnel->sock->sk_family == AF_INET6)) { + struct ipv6_pinfo *np = inet6_sk(tunnel->sock); + struct sockaddr_pppol2tpin6 sp; + len = sizeof(sp); + memset(&sp, 0, len); + sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_OL2TP; + sp.pppol2tp.fd = tunnel->fd; + sp.pppol2tp.pid = pls->owner; + sp.pppol2tp.s_tunnel = tunnel->tunnel_id; + sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; + sp.pppol2tp.s_session = session->session_id; + sp.pppol2tp.d_session = session->peer_session_id; + sp.pppol2tp.addr.sin6_family = AF_INET6; + sp.pppol2tp.addr.sin6_port = inet->inet_dport; + memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr, + sizeof(np->daddr)); + memcpy(uaddr, &sp, len); + } else if ((tunnel->version == 3) && + (tunnel->sock->sk_family == AF_INET6)) { + struct ipv6_pinfo *np = inet6_sk(tunnel->sock); + struct sockaddr_pppol2tpv3in6 sp; + len = sizeof(sp); + memset(&sp, 0, len); + sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_OL2TP; + sp.pppol2tp.fd = tunnel->fd; + sp.pppol2tp.pid = pls->owner; + sp.pppol2tp.s_tunnel = tunnel->tunnel_id; + sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; + sp.pppol2tp.s_session = session->session_id; + sp.pppol2tp.d_session = session->peer_session_id; + sp.pppol2tp.addr.sin6_family = AF_INET6; + sp.pppol2tp.addr.sin6_port = inet->inet_dport; + memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr, + sizeof(np->daddr)); + memcpy(uaddr, &sp, len); +#endif } else if (tunnel->version == 3) { struct sockaddr_pppol2tpv3 sp; len = sizeof(sp); -- cgit v1.2.2 From d499bd2ee979cd0e1b5e3f6379d753582c67ec8c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 30 Apr 2012 13:21:28 -0400 Subject: l2tp: Add missing net/net/ip6_checksum.h include. Reported-by: Stephen Rothwell Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net')
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index e91d55924d94..0ca9bc39150f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -56,6 +56,7 @@
 #include
 #include
 #include
+#include <net/ip6_checksum.h>
 #include
 #include
-- cgit v1.2.2 From bb63f1f8a08cf8028564ad04831ebd7a8ffb9cba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 30 Apr 2012 00:22:56 +0000 Subject: bridge: Fix fatal typo in setup of multicast_querier_expired Unfortunately it seems that I didn't properly test the case of an expired external querier in the recent multicast bridge series. The setup of the timer in that case is completely broken and leads to a NULL-pointer dereference. This patch fixes it. Signed-off-by: Herbert Xu Acked-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/bridge/br_multicast.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net')
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 708e84f31888..5ca4c50ea233 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -744,8 +744,7 @@ static void br_multicast_local_router_expired(unsigned long data)
 static void br_multicast_querier_expired(unsigned long data)
 {
- struct net_bridge_port *port = (void *)data;
- struct net_bridge *br = port->br;
+ struct net_bridge *br = (void *)data;
 spin_lock(&br->multicast_lock);
 if (!netif_running(br->dev) || br->multicast_disabled)
@@ -1581,7 +1580,7 @@ void br_multicast_init(struct net_bridge *br)
 setup_timer(&br->multicast_router_timer,
 br_multicast_local_router_expired, 0);
 setup_timer(&br->multicast_querier_timer,
- br_multicast_querier_expired, 0);
+ br_multicast_querier_expired, (unsigned long)br);
 setup_timer(&br->multicast_query_timer,
 br_multicast_query_expired, (unsigned long)br);
 }
-- cgit v1.2.2 From 617d3c7a50b3dc15f558d60013047aede79dc055 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Mon, 30 Apr 2012 15:29:02 -0400 Subject: tipc: compress out gratuitous extra carriage returns Some of the comment blocks are floating in limbo between two functions, or between blocks of code. Delete the extra line feeds between any comment and its associated following block of code, to be consistent with the majority of the rest of the kernel. Also delete trailing newlines at EOF and fix a couple trivial typos in existing comments. This is a 100% cosmetic change with no runtime impact. We get rid of over 500 lines of non-code, and being blank line deletes, they won't even show up as noise in git blame.
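The bridge querier-timer fix above is a reminder that a timer callback only ever sees the opaque value registered with it: register 0 and then cast it to an object pointer and the callback dereferences NULL. A minimal user-space model of that pattern follows; toy_timer, toy_setup_timer, bridge and querier_expired are all invented names, standing in for the kernel's setup_timer()/timer-data convention.

#include <stdio.h>

struct bridge { const char *name; };

struct toy_timer {
	void (*fn)(unsigned long);
	unsigned long data;          /* opaque value handed back to fn() */
};

static void toy_setup_timer(struct toy_timer *t,
			    void (*fn)(unsigned long), unsigned long data)
{
	t->fn = fn;
	t->data = data;
}

static void querier_expired(unsigned long data)
{
	struct bridge *br = (struct bridge *)data;

	if (!br) {                   /* what the broken registration hands us */
		puts("callback got NULL - this is the oops");
		return;
	}
	printf("querier expired on %s\n", br->name);
}

int main(void)
{
	struct bridge br = { .name = "br0" };
	struct toy_timer broken, fixed;

	toy_setup_timer(&broken, querier_expired, 0);                  /* the bug */
	toy_setup_timer(&fixed,  querier_expired, (unsigned long)&br); /* the fix */

	broken.fn(broken.data);   /* callback fires with NULL data */
	fixed.fn(fixed.data);     /* callback fires with the bridge pointer */
	return 0;
}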
Signed-off-by: Paul Gortmaker --- net/tipc/Makefile | 2 - net/tipc/addr.c | 3 -- net/tipc/addr.h | 3 -- net/tipc/bcast.c | 22 --------- net/tipc/bcast.h | 3 -- net/tipc/bearer.c | 22 +-------- net/tipc/bearer.h | 4 -- net/tipc/config.c | 7 --- net/tipc/core.c | 7 --- net/tipc/core.h | 14 ------ net/tipc/discover.c | 14 ------ net/tipc/eth_media.c | 19 -------- net/tipc/handler.c | 1 - net/tipc/link.c | 120 +++---------------------------------------------- net/tipc/link.h | 6 --- net/tipc/log.c | 14 ------ net/tipc/log.h | 1 - net/tipc/msg.c | 3 -- net/tipc/msg.h | 21 --------- net/tipc/name_distr.c | 11 ----- net/tipc/name_table.c | 50 +++------------------ net/tipc/name_table.h | 3 -- net/tipc/node.c | 13 ------ net/tipc/node.h | 2 - net/tipc/node_subscr.c | 3 -- net/tipc/node_subscr.h | 1 - net/tipc/port.c | 33 +------------- net/tipc/port.h | 3 -- net/tipc/ref.c | 13 ------ net/tipc/socket.c | 83 +--------------------------------- net/tipc/subscr.c | 43 ------------------ net/tipc/subscr.h | 2 - 32 files changed, 16 insertions(+), 530 deletions(-) (limited to 'net') diff --git a/net/tipc/Makefile b/net/tipc/Makefile index 521d24d04ab2..6cd55d671d3a 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -9,5 +9,3 @@ tipc-y += addr.o bcast.o bearer.o config.o \ name_distr.o subscr.o name_table.o net.o \ netlink.o node.o node_subscr.o port.o ref.o \ socket.o log.o eth_media.o - -# End of file diff --git a/net/tipc/addr.c b/net/tipc/addr.c index a6fdab33877e..357b74b26f9e 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c @@ -45,7 +45,6 @@ * * Returns 1 if domain address is valid, otherwise 0 */ - int tipc_addr_domain_valid(u32 addr) { u32 n = tipc_node(addr); @@ -66,7 +65,6 @@ int tipc_addr_domain_valid(u32 addr) * * Returns 1 if address can be used, otherwise 0 */ - int tipc_addr_node_valid(u32 addr) { return tipc_addr_domain_valid(addr) && tipc_node(addr); @@ -86,7 +84,6 @@ int tipc_in_scope(u32 domain, u32 addr) /** * tipc_addr_scope - convert message lookup domain to a 2-bit scope value */ - int tipc_addr_scope(u32 domain) { if (likely(!domain)) diff --git a/net/tipc/addr.h b/net/tipc/addr.h index d706a1d92be8..60b00ab93d74 100644 --- a/net/tipc/addr.h +++ b/net/tipc/addr.h @@ -58,7 +58,6 @@ static inline int in_own_cluster_exact(u32 addr) /** * in_own_node - test for node inclusion; <0.0.0> always matches */ - static inline int in_own_node(u32 addr) { return (addr == tipc_own_addr) || !addr; @@ -67,7 +66,6 @@ static inline int in_own_node(u32 addr) /** * in_own_cluster - test for cluster inclusion; <0.0.0> always matches */ - static inline int in_own_cluster(u32 addr) { return in_own_cluster_exact(addr) || !addr; @@ -79,7 +77,6 @@ static inline int in_own_cluster(u32 addr) * Needed when address of a named message must be looked up a second time * after a network hop. */ - static inline u32 addr_domain(u32 sc) { if (likely(sc == TIPC_NODE_SCOPE)) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index e00441a2092f..2625f5ebe3e8 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -73,7 +73,6 @@ struct tipc_bcbearer_pair { * large local variables within multicast routines. Concurrent access is * prevented through use of the spinlock "bc_lock". */ - struct tipc_bcbearer { struct tipc_bearer bearer; struct tipc_media media; @@ -92,7 +91,6 @@ struct tipc_bcbearer { * * Handles sequence numbering, fragmentation, bundling, etc. 
*/ - struct tipc_bclink { struct tipc_link link; struct tipc_node node; @@ -169,7 +167,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) * * Called with bc_lock locked */ - struct tipc_node *tipc_bclink_retransmit_to(void) { return bclink->retransmit_to; @@ -182,7 +179,6 @@ struct tipc_node *tipc_bclink_retransmit_to(void) * * Called with bc_lock locked */ - static void bclink_retransmit_pkt(u32 after, u32 to) { struct sk_buff *buf; @@ -200,7 +196,6 @@ static void bclink_retransmit_pkt(u32 after, u32 to) * * Node is locked, bc_lock unlocked. */ - void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) { struct sk_buff *crs; @@ -280,7 +275,6 @@ exit: * * tipc_net_lock and node lock set */ - void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) { struct sk_buff *buf; @@ -344,7 +338,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) * * Only tipc_net_lock set. */ - static void bclink_peek_nack(struct tipc_msg *msg) { struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); @@ -365,7 +358,6 @@ static void bclink_peek_nack(struct tipc_msg *msg) /* * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster */ - int tipc_bclink_send_msg(struct sk_buff *buf) { int res; @@ -394,7 +386,6 @@ exit: * * Called with both sending node's lock and bc_lock taken. */ - static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) { bclink_update_last_sent(node, seqno); @@ -420,7 +411,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) * * tipc_net_lock is read_locked, no other locks set */ - void tipc_bclink_recv_pkt(struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); @@ -588,7 +578,6 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) * Returns 0 (packet sent successfully) under all circumstances, * since the broadcast link's pseudo-bearer never blocks */ - static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, struct tipc_media_addr *unused2) @@ -601,7 +590,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, * preparation is skipped for broadcast link protocol messages * since they are sent in an unreliable manner and don't need it */ - if (likely(!msg_non_seq(buf_msg(buf)))) { struct tipc_msg *msg; @@ -618,7 +606,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, } /* Send buffer over bearers until all targets reached */ - bcbearer->remains = bclink->bcast_nodes; for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { @@ -660,7 +647,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, /** * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer */ - void tipc_bcbearer_sort(void) { struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; @@ -671,7 +657,6 @@ void tipc_bcbearer_sort(void) spin_lock_bh(&bc_lock); /* Group bearers by priority (can assume max of two per priority) */ - memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); for (b_index = 0; b_index < MAX_BEARERS; b_index++) { @@ -687,7 +672,6 @@ void tipc_bcbearer_sort(void) } /* Create array of bearer pairs for broadcasting */ - bp_curr = bcbearer->bpairs; memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs)); @@ -817,7 +801,6 @@ void tipc_bclink_stop(void) /** * tipc_nmap_add - add a node to a node map */ - void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) { int n = tipc_node(node); @@ -833,7 +816,6 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) /** * tipc_nmap_remove - remove a node from a node map */ - void tipc_nmap_remove(struct 
tipc_node_map *nm_ptr, u32 node) { int n = tipc_node(node); @@ -852,7 +834,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) * @nm_b: input node map B * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) */ - static void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b, struct tipc_node_map *nm_diff) @@ -878,7 +859,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a, /** * tipc_port_list_add - add a port to a port list, ensuring no duplicates */ - void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) { struct tipc_port_list *item = pl_ptr; @@ -912,7 +892,6 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) * tipc_port_list_free - free dynamically created entries in port_list chain * */ - void tipc_port_list_free(struct tipc_port_list *pl_ptr) { struct tipc_port_list *item; @@ -923,4 +902,3 @@ void tipc_port_list_free(struct tipc_port_list *pl_ptr) kfree(item); } } - diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 5571394098f9..a93306557e00 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -45,7 +45,6 @@ * @count: # of nodes in set * @map: bitmap of node identifiers that are in the set */ - struct tipc_node_map { u32 count; u32 map[MAX_NODES / WSIZE]; @@ -59,7 +58,6 @@ struct tipc_node_map { * @next: pointer to next entry in list * @ports: array of port references */ - struct tipc_port_list { int count; struct tipc_port_list *next; @@ -77,7 +75,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); /** * tipc_nmap_equal - test for equality of node maps */ - static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) { return !memcmp(nm_a, nm_b, sizeof(*nm_a)); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 0bfdeba91d51..a297e3a2e3e7 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -53,7 +53,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr); * * Returns 1 if media name is valid, otherwise 0. */ - static int media_name_valid(const char *name) { u32 len; @@ -67,7 +66,6 @@ static int media_name_valid(const char *name) /** * tipc_media_find - locates specified media object by name */ - struct tipc_media *tipc_media_find(const char *name) { u32 i; @@ -82,7 +80,6 @@ struct tipc_media *tipc_media_find(const char *name) /** * media_find_id - locates specified media object by type identifier */ - static struct tipc_media *media_find_id(u8 type) { u32 i; @@ -99,7 +96,6 @@ static struct tipc_media *media_find_id(u8 type) * * Bearers for this media type must be activated separately at a later stage. */ - int tipc_register_media(struct tipc_media *m_ptr) { int res = -EINVAL; @@ -134,7 +130,6 @@ exit: /** * tipc_media_addr_printf - record media address in print buffer */ - void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) { char addr_str[MAX_ADDR_STR]; @@ -156,7 +151,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) /** * tipc_media_get_names - record names of registered media in buffer */ - struct sk_buff *tipc_media_get_names(void) { struct sk_buff *buf; @@ -183,7 +177,6 @@ struct sk_buff *tipc_media_get_names(void) * * Returns 1 if bearer name is valid, otherwise 0. 
*/ - static int bearer_name_validate(const char *name, struct tipc_bearer_names *name_parts) { @@ -194,7 +187,6 @@ static int bearer_name_validate(const char *name, u32 if_len; /* copy bearer name & ensure length is OK */ - name_copy[TIPC_MAX_BEARER_NAME - 1] = 0; /* need above in case non-Posix strncpy() doesn't pad with nulls */ strncpy(name_copy, name, TIPC_MAX_BEARER_NAME); @@ -202,7 +194,6 @@ static int bearer_name_validate(const char *name, return 0; /* ensure all component parts of bearer name are present */ - media_name = name_copy; if_name = strchr(media_name, ':'); if (if_name == NULL) @@ -212,7 +203,6 @@ static int bearer_name_validate(const char *name, if_len = strlen(if_name) + 1; /* validate component parts of bearer name */ - if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || (strspn(media_name, tipc_alphabet) != (media_len - 1)) || @@ -220,7 +210,6 @@ static int bearer_name_validate(const char *name, return 0; /* return bearer name components, if necessary */ - if (name_parts) { strcpy(name_parts->media_name, media_name); strcpy(name_parts->if_name, if_name); @@ -231,7 +220,6 @@ static int bearer_name_validate(const char *name, /** * tipc_bearer_find - locates bearer object with matching bearer name */ - struct tipc_bearer *tipc_bearer_find(const char *name) { struct tipc_bearer *b_ptr; @@ -247,7 +235,6 @@ struct tipc_bearer *tipc_bearer_find(const char *name) /** * tipc_bearer_find_interface - locates bearer object with matching interface name */ - struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) { struct tipc_bearer *b_ptr; @@ -267,7 +254,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) /** * tipc_bearer_get_names - record names of bearers in buffer */ - struct sk_buff *tipc_bearer_get_names(void) { struct sk_buff *buf; @@ -363,7 +349,6 @@ void tipc_continue(struct tipc_bearer *b_ptr) * the bearer is congested. 'tipc_net_lock' is in read_lock here * bearer.lock is busy */ - static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) { @@ -377,7 +362,6 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, * the bearer is congested. 'tipc_net_lock' is in read_lock here, * bearer.lock is free */ - void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) { spin_lock_bh(&b_ptr->lock); @@ -410,7 +394,6 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, /** * tipc_bearer_congested - determines if bearer is currently congested */ - int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) { if (unlikely(b_ptr->blocked)) @@ -423,7 +406,6 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) /** * tipc_enable_bearer - enable bearer with the given name */ - int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) { struct tipc_bearer *b_ptr; @@ -541,7 +523,6 @@ exit: * tipc_block_bearer(): Block the bearer with the given name, * and reset all its links */ - int tipc_block_bearer(const char *name) { struct tipc_bearer *b_ptr = NULL; @@ -573,11 +554,10 @@ int tipc_block_bearer(const char *name) } /** - * bearer_disable - + * bearer_disable * * Note: This routine assumes caller holds tipc_net_lock. 
*/ - static void bearer_disable(struct tipc_bearer *b_ptr) { struct tipc_link *l_ptr; diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index d3eac56b8c21..e3b2be37fb31 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -49,7 +49,6 @@ * - media type identifier located at offset 3 * - remaining bytes vary according to media type */ - #define TIPC_MEDIA_ADDR_SIZE 20 #define TIPC_MEDIA_TYPE_OFFSET 3 @@ -64,7 +63,6 @@ * @media_id: TIPC media type identifier * @broadcast: non-zero if address is a broadcast address */ - struct tipc_media_addr { u8 value[TIPC_MEDIA_ADDR_SIZE]; u8 media_id; @@ -89,7 +87,6 @@ struct tipc_bearer; * @type_id: TIPC media identifier * @name: media name */ - struct tipc_media { int (*send_msg)(struct sk_buff *buf, struct tipc_bearer *b_ptr, @@ -216,7 +213,6 @@ void tipc_bearer_lock_push(struct tipc_bearer *b_ptr); * send routine always returns success -- even if the buffer was not sent -- * and let TIPC's link code deal with the undelivered message. */ - static inline int tipc_bearer_send(struct tipc_bearer *b_ptr, struct sk_buff *buf, struct tipc_media_addr *dest) diff --git a/net/tipc/config.c b/net/tipc/config.c index 843d7ae04661..c5712a343810 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -131,7 +131,6 @@ static struct sk_buff *tipc_show_stats(void) tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n"); /* Use additional tipc_printf()'s to return more info ... */ - str_len = tipc_printbuf_validate(&pb); skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); @@ -191,7 +190,6 @@ static struct sk_buff *cfg_set_own_addr(void) * configuration commands can't be received until a local configuration * command to enable the first bearer is received and processed. */ - spin_unlock_bh(&config_lock); tipc_core_start_net(addr); spin_lock_bh(&config_lock); @@ -283,13 +281,11 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area spin_lock_bh(&config_lock); /* Save request and reply details in a well-known location */ - req_tlv_area = request_area; req_tlv_space = request_space; rep_headroom = reply_headroom; /* Check command authorization */ - if (likely(in_own_node(orig_node))) { /* command is permitted */ } else if (cmd >= 0x8000) { @@ -310,7 +306,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area } /* Call appropriate processing routine */ - switch (cmd) { case TIPC_CMD_NOOP: rep_tlv_buf = tipc_cfg_reply_none(); @@ -433,7 +428,6 @@ static void cfg_named_msg_event(void *userdata, struct sk_buff *rep_buf; /* Validate configuration message header (ignore invalid message) */ - req_hdr = (struct tipc_cfg_msg_hdr *)msg; if ((size < sizeof(*req_hdr)) || (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) || @@ -443,7 +437,6 @@ static void cfg_named_msg_event(void *userdata, } /* Generate reply for request (if can't, return request) */ - rep_buf = tipc_cfg_do_cmd(orig->node, ntohs(req_hdr->tcm_type), msg + sizeof(*req_hdr), diff --git a/net/tipc/core.c b/net/tipc/core.c index ba089407b2f7..f7b95239ebda 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -52,14 +52,12 @@ #endif /* global variables used by multiple sub-systems within TIPC */ - int tipc_random; const char tipc_alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_."; /* configurable TIPC parameters */ - u32 tipc_own_addr; int tipc_max_ports; int tipc_max_subscriptions; @@ -77,7 +75,6 @@ int tipc_remote_management; * NOTE: Headroom is reserved to allow prepending of a data link header. 
* There may also be unrequested tailroom present at the buffer's end. */ - struct sk_buff *tipc_buf_acquire(u32 size) { struct sk_buff *skb; @@ -95,7 +92,6 @@ struct sk_buff *tipc_buf_acquire(u32 size) /** * tipc_core_stop_net - shut down TIPC networking sub-systems */ - static void tipc_core_stop_net(void) { tipc_net_stop(); @@ -105,7 +101,6 @@ static void tipc_core_stop_net(void) /** * start_net - start TIPC networking sub-systems */ - int tipc_core_start_net(unsigned long addr) { int res; @@ -121,7 +116,6 @@ int tipc_core_start_net(unsigned long addr) /** * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode */ - static void tipc_core_stop(void) { tipc_netlink_stop(); @@ -137,7 +131,6 @@ static void tipc_core_stop(void) /** * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode */ - static int tipc_core_start(void) { int res; diff --git a/net/tipc/core.h b/net/tipc/core.h index 13837e0e56b1..2a9bb99537b3 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -85,7 +85,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...); /* * TIPC_OUTPUT is the destination print buffer for system messages. */ - #ifndef TIPC_OUTPUT #define TIPC_OUTPUT TIPC_LOG #endif @@ -102,7 +101,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...); /* * DBG_OUTPUT is the destination print buffer for debug messages. */ - #ifndef DBG_OUTPUT #define DBG_OUTPUT TIPC_LOG #endif @@ -126,13 +124,11 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *); /* * TIPC-specific error codes */ - #define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ /* * Global configuration variables */ - extern u32 tipc_own_addr; extern int tipc_max_ports; extern int tipc_max_subscriptions; @@ -143,7 +139,6 @@ extern int tipc_remote_management; /* * Other global variables */ - extern int tipc_random; extern const char tipc_alphabet[]; @@ -151,7 +146,6 @@ extern const char tipc_alphabet[]; /* * Routines available to privileged subsystems */ - extern int tipc_core_start_net(unsigned long); extern int tipc_handler_start(void); extern void tipc_handler_stop(void); @@ -163,7 +157,6 @@ extern void tipc_socket_stop(void); /* * TIPC timer and signal code */ - typedef void (*Handler) (unsigned long); u32 tipc_k_signal(Handler routine, unsigned long argument); @@ -176,7 +169,6 @@ u32 tipc_k_signal(Handler routine, unsigned long argument); * * Timer must be initialized before use (and terminated when no longer needed). */ - static inline void k_init_timer(struct timer_list *timer, Handler routine, unsigned long argument) { @@ -196,7 +188,6 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine, * then an additional jiffy is added to account for the fact that * the starting time may be in the middle of the current jiffy. */ - static inline void k_start_timer(struct timer_list *timer, unsigned long msec) { mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1); @@ -212,7 +203,6 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec) * WARNING: Must not be called when holding locks required by the timer's * timeout routine, otherwise deadlock can occur on SMP systems! */ - static inline void k_cancel_timer(struct timer_list *timer) { del_timer_sync(timer); @@ -229,12 +219,10 @@ static inline void k_cancel_timer(struct timer_list *timer) * (Do not "enhance" this routine to automatically cancel an active timer, * otherwise deadlock can arise when a timeout routine calls k_term_timer.) 
*/ - static inline void k_term_timer(struct timer_list *timer) { } - /* * TIPC message buffer code * @@ -244,7 +232,6 @@ static inline void k_term_timer(struct timer_list *timer) * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields * are word aligned for quicker access */ - #define BUF_HEADROOM LL_MAX_HEADER struct tipc_skb_cb { @@ -253,7 +240,6 @@ struct tipc_skb_cb { #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) - static inline struct tipc_msg *buf_msg(struct sk_buff *skb) { return (struct tipc_msg *)skb->data; diff --git a/net/tipc/discover.c b/net/tipc/discover.c index c630a21b2bed..ae054cfe179f 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -70,7 +70,6 @@ struct tipc_link_req { * @dest_domain: network domain of node(s) which should respond to message * @b_ptr: ptr to bearer issuing message */ - static struct sk_buff *tipc_disc_init_msg(u32 type, u32 dest_domain, struct tipc_bearer *b_ptr) @@ -96,7 +95,6 @@ static struct sk_buff *tipc_disc_init_msg(u32 type, * @node_addr: duplicated node address * @media_addr: media address advertised by duplicated node */ - static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, struct tipc_media_addr *media_addr) { @@ -117,7 +115,6 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, * @buf: buffer containing message * @b_ptr: bearer that message arrived on */ - void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) { struct tipc_node *n_ptr; @@ -221,7 +218,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) * the new media address and reset the link to ensure it starts up * cleanly. */ - if (addr_mismatch) { if (tipc_link_is_up(link)) { disc_dupl_alert(b_ptr, orig, &media_addr); @@ -264,7 +260,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) * Reinitiates discovery process if discovery object has no associated nodes * and is either not currently searching or is searching at a slow rate */ - static void disc_update(struct tipc_link_req *req) { if (!req->num_nodes) { @@ -280,7 +275,6 @@ static void disc_update(struct tipc_link_req *req) * tipc_disc_add_dest - increment set of discovered nodes * @req: ptr to link request structure */ - void tipc_disc_add_dest(struct tipc_link_req *req) { req->num_nodes++; @@ -290,7 +284,6 @@ void tipc_disc_add_dest(struct tipc_link_req *req) * tipc_disc_remove_dest - decrement set of discovered nodes * @req: ptr to link request structure */ - void tipc_disc_remove_dest(struct tipc_link_req *req) { req->num_nodes--; @@ -301,7 +294,6 @@ void tipc_disc_remove_dest(struct tipc_link_req *req) * disc_send_msg - send link setup request message * @req: ptr to link request structure */ - static void disc_send_msg(struct tipc_link_req *req) { if (!req->bearer->blocked) @@ -314,7 +306,6 @@ static void disc_send_msg(struct tipc_link_req *req) * * Called whenever a link setup request timer associated with a bearer expires. 
*/ - static void disc_timeout(struct tipc_link_req *req) { int max_delay; @@ -322,7 +313,6 @@ static void disc_timeout(struct tipc_link_req *req) spin_lock_bh(&req->bearer->lock); /* Stop searching if only desired node has been found */ - if (tipc_node(req->domain) && req->num_nodes) { req->timer_intv = TIPC_LINK_REQ_INACTIVE; goto exit; @@ -335,7 +325,6 @@ static void disc_timeout(struct tipc_link_req *req) * hold at fast polling rate if don't have any associated nodes, * otherwise hold at slow polling rate */ - disc_send_msg(req); req->timer_intv *= 2; @@ -359,7 +348,6 @@ exit: * * Returns 0 if successful, otherwise -errno. */ - int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest, u32 dest_domain) { @@ -391,7 +379,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, * tipc_disc_delete - destroy object sending periodic link setup requests * @req: ptr to link request structure */ - void tipc_disc_delete(struct tipc_link_req *req) { k_cancel_timer(&req->timer); @@ -399,4 +386,3 @@ void tipc_disc_delete(struct tipc_link_req *req) kfree_skb(req->buf); kfree(req); } - diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 527e3f0e165d..90ac9bfa7abb 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -48,7 +48,6 @@ * @tipc_packet_type: used in binding TIPC to Ethernet driver * @cleanup: work item used when disabling bearer */ - struct eth_bearer { struct tipc_bearer *bearer; struct net_device *dev; @@ -67,7 +66,6 @@ static struct notifier_block notifier; * Media-dependent "value" field stores MAC address in first 6 bytes * and zeroes out the remaining bytes. */ - static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) { memcpy(a->value, mac, ETH_ALEN); @@ -79,7 +77,6 @@ static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) /** * send_msg - send a TIPC message out over an Ethernet interface */ - static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, struct tipc_media_addr *dest) { @@ -115,7 +112,6 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, * ignores packets sent using Ethernet multicast, and traffic sent to other * nodes (which can happen if interface is running in promiscuous mode). 
*/ - static int recv_msg(struct sk_buff *buf, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { @@ -140,7 +136,6 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, /** * enable_bearer - attach TIPC bearer to an Ethernet interface */ - static int enable_bearer(struct tipc_bearer *tb_ptr) { struct net_device *dev = NULL; @@ -151,7 +146,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) int pending_dev = 0; /* Find unused Ethernet bearer structure */ - while (eb_ptr->dev) { if (!eb_ptr->bearer) pending_dev++; @@ -160,7 +154,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) } /* Find device with specified name */ - read_lock(&dev_base_lock); for_each_netdev(&init_net, pdev) { if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { @@ -174,7 +167,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) return -ENODEV; /* Create Ethernet bearer for device */ - eb_ptr->dev = dev; eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC); eb_ptr->tipc_packet_type.dev = dev; @@ -184,7 +176,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) dev_add_pack(&eb_ptr->tipc_packet_type); /* Associate TIPC bearer with Ethernet bearer */ - eb_ptr->bearer = tb_ptr; tb_ptr->usr_handle = (void *)eb_ptr; tb_ptr->mtu = dev->mtu; @@ -198,7 +189,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) * * This routine must be invoked from a work queue because it can sleep. */ - static void cleanup_bearer(struct work_struct *work) { struct eth_bearer *eb_ptr = @@ -216,7 +206,6 @@ static void cleanup_bearer(struct work_struct *work) * then get worker thread to complete bearer cleanup. (Can't do cleanup * here because cleanup code needs to sleep and caller holds spinlocks.) */ - static void disable_bearer(struct tipc_bearer *tb_ptr) { struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle; @@ -232,7 +221,6 @@ static void disable_bearer(struct tipc_bearer *tb_ptr) * Change the state of the Ethernet bearer (if any) associated with the * specified device. 
*/ - static int recv_notification(struct notifier_block *nb, unsigned long evt, void *dv) { @@ -281,7 +269,6 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt, /** * eth_addr2str - convert Ethernet address to string */ - static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) { if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ @@ -294,7 +281,6 @@ static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) /** * eth_str2addr - convert string to Ethernet address */ - static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) { char mac[ETH_ALEN]; @@ -314,7 +300,6 @@ static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) /** * eth_str2addr - convert Ethernet address format to message header format */ - static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) { memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); @@ -326,7 +311,6 @@ static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) /** * eth_str2addr - convert message header address format to Ethernet format */ - static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) { if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) @@ -339,7 +323,6 @@ static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) /* * Ethernet media registration info */ - static struct tipc_media eth_media_info = { .send_msg = send_msg, .enable_bearer = enable_bearer, @@ -363,7 +346,6 @@ static struct tipc_media eth_media_info = { * Register Ethernet media type with TIPC bearer code. Also register * with OS for notifications about device state changes. */ - int tipc_eth_media_start(void) { int res; @@ -386,7 +368,6 @@ int tipc_eth_media_start(void) /** * tipc_eth_media_stop - deactivate Ethernet bearer support */ - void tipc_eth_media_stop(void) { if (!eth_started) diff --git a/net/tipc/handler.c b/net/tipc/handler.c index 274c98e164b7..9c6f22ff1c6d 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c @@ -129,4 +129,3 @@ void tipc_handler_stop(void) kmem_cache_destroy(tipc_queue_item_cache); } - diff --git a/net/tipc/link.c b/net/tipc/link.c index 33cecd6fc37d..7a614f43549d 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -45,13 +45,11 @@ /* * Out-of-range value for link session numbers */ - #define INVALID_SESSION 0x10000 /* * Link state events: */ - #define STARTING_EVT 856384768 /* link processing trigger */ #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ #define TIMEOUT_EVT 560817u /* link timer expired */ @@ -67,7 +65,6 @@ /* * State value stored in 'exp_msg_count' */ - #define START_CHANGEOVER 100000u /** @@ -77,7 +74,6 @@ * @addr_peer: network address of node at far end * @if_peer: name of interface at far end */ - struct tipc_link_name { u32 addr_local; char if_local[TIPC_MAX_IF_NAME]; @@ -105,7 +101,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); /* * Simple link routines */ - static unsigned int align(unsigned int i) { return (i + 3) & ~3u; @@ -143,7 +138,6 @@ static u32 link_last_sent(struct tipc_link *l_ptr) /* * Simple non-static link routines (i.e. referenced outside this file) */ - int tipc_link_is_up(struct tipc_link *l_ptr) { if (!l_ptr) @@ -164,7 +158,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr) * * Returns 1 if link name is valid, otherwise 0. 
*/ - static int link_name_validate(const char *name, struct tipc_link_name *name_parts) { @@ -180,7 +173,6 @@ static int link_name_validate(const char *name, u32 if_peer_len; /* copy link name & ensure length is OK */ - name_copy[TIPC_MAX_LINK_NAME - 1] = 0; /* need above in case non-Posix strncpy() doesn't pad with nulls */ strncpy(name_copy, name, TIPC_MAX_LINK_NAME); @@ -188,7 +180,6 @@ static int link_name_validate(const char *name, return 0; /* ensure all component parts of link name are present */ - addr_local = name_copy; if_local = strchr(addr_local, ':'); if (if_local == NULL) @@ -206,7 +197,6 @@ static int link_name_validate(const char *name, if_peer_len = strlen(if_peer) + 1; /* validate component parts of link name */ - if ((sscanf(addr_local, "%u.%u.%u%c", &z_local, &c_local, &n_local, &dummy) != 3) || (sscanf(addr_peer, "%u.%u.%u%c", @@ -220,7 +210,6 @@ static int link_name_validate(const char *name, return 0; /* return link name components, if necessary */ - if (name_parts) { name_parts->addr_local = tipc_addr(z_local, c_local, n_local); strcpy(name_parts->if_local, if_local); @@ -239,13 +228,11 @@ static int link_name_validate(const char *name, * another thread because tipc_link_delete() always cancels the link timer before * tipc_node_delete() is called.) */ - static void link_timeout(struct tipc_link *l_ptr) { tipc_node_lock(l_ptr->owner); /* update counters used in statistical profiling of send traffic */ - l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; l_ptr->stats.queue_sz_counts++; @@ -278,7 +265,6 @@ static void link_timeout(struct tipc_link *l_ptr) } /* do all other link processing performed on a periodic basis */ - link_check_defragm_bufs(l_ptr); link_state_event(l_ptr, TIMEOUT_EVT); @@ -302,7 +288,6 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time) * * Returns pointer to link. */ - struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, struct tipc_bearer *b_ptr, const struct tipc_media_addr *media_addr) @@ -383,7 +368,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, * This routine must not grab the node lock until after link timer cancellation * to avoid a potential deadlock situation. */ - void tipc_link_delete(struct tipc_link *l_ptr) { if (!l_ptr) { @@ -419,7 +403,6 @@ static void link_start(struct tipc_link *l_ptr) * Schedules port for renewed sending of messages after link congestion * has abated. 
*/ - static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) { struct tipc_port *p_ptr; @@ -476,7 +459,6 @@ exit: * link_release_outqueue - purge link's outbound message queue * @l_ptr: pointer to link */ - static void link_release_outqueue(struct tipc_link *l_ptr) { struct sk_buff *buf = l_ptr->first_out; @@ -495,7 +477,6 @@ static void link_release_outqueue(struct tipc_link *l_ptr) * tipc_link_reset_fragments - purge link's inbound message fragments queue * @l_ptr: pointer to link */ - void tipc_link_reset_fragments(struct tipc_link *l_ptr) { struct sk_buff *buf = l_ptr->defragm_buf; @@ -513,7 +494,6 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) * tipc_link_stop - purge all inbound and outbound messages associated with link * @l_ptr: pointer to link */ - void tipc_link_stop(struct tipc_link *l_ptr) { struct sk_buff *buf; @@ -569,7 +549,6 @@ void tipc_link_reset(struct tipc_link *l_ptr) } /* Clean up all queues: */ - link_release_outqueue(l_ptr); kfree_skb(l_ptr->proto_msg_queue); l_ptr->proto_msg_queue = NULL; @@ -611,7 +590,6 @@ static void link_activate(struct tipc_link *l_ptr) * @l_ptr: pointer to link * @event: state machine event to process */ - static void link_state_event(struct tipc_link *l_ptr, unsigned int event) { struct tipc_link *other; @@ -785,7 +763,6 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) * link_bundle_buf(): Append contents of a buffer to * the tail of an existing one. */ - static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler, struct sk_buff *buf) @@ -860,7 +837,6 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, * inside TIPC when the 'fast path' in tipc_send_buf * has failed, and from link_send() */ - int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); @@ -872,7 +848,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) u32 max_packet = l_ptr->max_pkt; /* Match msg importance against queue limits: */ - if (unlikely(queue_size >= queue_limit)) { if (imp <= TIPC_CRITICAL_IMPORTANCE) { link_schedule_port(l_ptr, msg_origport(msg), size); @@ -888,12 +863,10 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) } /* Fragmentation needed ? */ - if (size > max_packet) return link_send_long_buf(l_ptr, buf); - /* Packet can be queued or sent: */ - + /* Packet can be queued or sent. */ if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && !link_congested(l_ptr))) { link_add_to_outqueue(l_ptr, buf, msg); @@ -907,13 +880,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) } return dsz; } - /* Congestion: can message be bundled ?: */ - + /* Congestion: can message be bundled ? */ if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && (msg_user(msg) != MSG_FRAGMENTER)) { /* Try adding message to an existing bundle */ - if (l_ptr->next_out && link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); @@ -921,7 +892,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) } /* Try creating a new bundle */ - if (size <= max_packet * 2 / 3) { struct sk_buff *bundler = tipc_buf_acquire(max_packet); struct tipc_msg bundler_hdr; @@ -951,7 +921,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) * not been selected yet, and the the owner node is not locked * Called by TIPC internal users, e.g. 
the name distributor */ - int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) { struct tipc_link *l_ptr; @@ -984,7 +953,6 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) * small enough not to require fragmentation. * Called without any locks held. */ - void tipc_link_send_names(struct list_head *message_list, u32 dest) { struct tipc_node *n_ptr; @@ -1013,7 +981,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest) read_unlock_bh(&tipc_net_lock); /* discard the messages if they couldn't be sent */ - list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { list_del((struct list_head *)buf); kfree_skb(buf); @@ -1026,7 +993,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest) * inclusive total message length. Very time critical. * Link is locked. Returns user data length. */ - static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, u32 *used_max_pkt) { @@ -1111,7 +1077,6 @@ again: * Try building message using port's max_pkt hint. * (Must not hold any locks while building message.) */ - res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, sender->max_pkt, !sender->user_port, &buf); @@ -1131,12 +1096,10 @@ exit: } /* Exit if build request was invalid */ - if (unlikely(res < 0)) goto exit; /* Exit if link (or bearer) is congested */ - if (link_congested(l_ptr) || !list_empty(&l_ptr->b_ptr->cong_links)) { res = link_schedule_port(l_ptr, @@ -1148,7 +1111,6 @@ exit: * Message size exceeds max_pkt hint; update hint, * then re-try fast path or fragment the message */ - sender->max_pkt = l_ptr->max_pkt; tipc_node_unlock(node); read_unlock_bh(&tipc_net_lock); @@ -1166,7 +1128,6 @@ exit: read_unlock_bh(&tipc_net_lock); /* Couldn't find a link to the destination node */ - if (buf) return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); if (res >= 0) @@ -1220,15 +1181,13 @@ again: sect_crs = NULL; curr_sect = -1; - /* Prepare reusable fragment header: */ - + /* Prepare reusable fragment header */ tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(hdr)); msg_set_size(&fragm_hdr, max_pkt); msg_set_fragm_no(&fragm_hdr, 1); - /* Prepare header of first fragment: */ - + /* Prepare header of first fragment */ buf_chain = buf = tipc_buf_acquire(max_pkt); if (!buf) return -ENOMEM; @@ -1237,8 +1196,7 @@ again: hsz = msg_hdr_sz(hdr); skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); - /* Chop up message: */ - + /* Chop up message */ fragm_crs = INT_H_SIZE + hsz; fragm_rest = fragm_sz - hsz; @@ -1329,7 +1287,6 @@ reject: } /* Append chain of fragments to send queue & send them */ - l_ptr->long_msg_seq_no++; link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); l_ptr->stats.sent_fragments += fragm_no; @@ -1350,7 +1307,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) /* Step to position where retransmission failed, if any, */ /* consider that buffers may have been released in meantime */ - if (r_q_size && buf) { u32 last = lesser(mod(r_q_head + r_q_size), link_last_sent(l_ptr)); @@ -1365,7 +1321,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) } /* Continue retransmission now, if there is anything: */ - if (r_q_size && buf) { msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); @@ -1381,7 +1336,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) } /* Send deferred protocol message, if any: */ - buf = l_ptr->proto_msg_queue; if (buf) { msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 
1)); @@ -1398,7 +1352,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) } /* Send one deferred data message, if send window not full: */ - buf = l_ptr->next_out; if (buf) { struct tipc_msg *msg = buf_msg(buf); @@ -1478,16 +1431,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, warn("Retransmission failure on link <%s>\n", l_ptr->name); if (l_ptr->addr) { - /* Handle failure on standard link */ - link_print(l_ptr, "Resetting link\n"); tipc_link_reset(l_ptr); } else { - /* Handle failure on broadcast link */ - struct tipc_node *n_ptr; char addr_string[16]; @@ -1536,7 +1485,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, return; } else { /* Detect repeated retransmit failures on uncongested bearer */ - if (l_ptr->last_retransmitted == msg_seqno(msg)) { if (++l_ptr->stale_count > 100) { link_retransmit_failure(l_ptr, buf); @@ -1571,7 +1519,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, /** * link_insert_deferred_queue - insert deferred messages back into receive chain */ - static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, struct sk_buff *buf) { @@ -1602,7 +1549,6 @@ static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, * TIPC will ignore the excess, under the assumption that it is optional info * introduced by a later release of the protocol. */ - static int link_recv_buf_validate(struct sk_buff *buf) { static u32 min_data_hdr_size[8] = { @@ -1648,7 +1594,6 @@ static int link_recv_buf_validate(struct sk_buff *buf) * Invoked with no locks held. Bearer pointer must point to a valid bearer * structure (i.e. cannot be NULL), but bearer can be inactive. */ - void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) { read_lock_bh(&tipc_net_lock); @@ -1666,22 +1611,18 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) head = head->next; /* Ensure bearer is still enabled */ - if (unlikely(!b_ptr->active)) goto cont; /* Ensure message is well-formed */ - if (unlikely(!link_recv_buf_validate(buf))) goto cont; /* Ensure message data is a single contiguous unit */ - if (unlikely(skb_linearize(buf))) goto cont; /* Handle arrival of a non-unicast link message */ - msg = buf_msg(buf); if (unlikely(msg_non_seq(msg))) { @@ -1693,20 +1634,17 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Discard unicast link messages destined for another node */ - if (unlikely(!msg_short(msg) && (msg_destnode(msg) != tipc_own_addr))) goto cont; /* Locate neighboring node that sent message */ - n_ptr = tipc_node_find(msg_prevnode(msg)); if (unlikely(!n_ptr)) goto cont; tipc_node_lock(n_ptr); /* Locate unicast link endpoint that should handle message */ - l_ptr = n_ptr->links[b_ptr->identity]; if (unlikely(!l_ptr)) { tipc_node_unlock(n_ptr); @@ -1714,7 +1652,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Verify that communication with node is currently allowed */ - if ((n_ptr->block_setup & WAIT_PEER_DOWN) && msg_user(msg) == LINK_PROTOCOL && (msg_type(msg) == RESET_MSG || @@ -1728,12 +1665,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Validate message sequence number info */ - seq_no = msg_seqno(msg); ackd = msg_ack(msg); /* Release acked messages */ - if (n_ptr->bclink.supported) tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); @@ -1752,7 +1687,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Try sending any messages link endpoint has pending */ - 
if (unlikely(l_ptr->next_out)) tipc_link_push_queue(l_ptr); if (unlikely(!list_empty(&l_ptr->waiting_ports))) @@ -1763,7 +1697,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) } /* Now (finally!) process the incoming message */ - protocol_check: if (likely(link_working_working(l_ptr))) { if (likely(seq_no == mod(l_ptr->next_in_no))) { @@ -1859,7 +1792,6 @@ cont: * * Returns increase in queue length (i.e. 0 or 1) */ - u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, struct sk_buff *buf) { @@ -1908,7 +1840,6 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, /* * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet */ - static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, struct sk_buff *buf) { @@ -1920,14 +1851,12 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, } /* Record OOS packet arrival (force mismatch on next timeout) */ - l_ptr->checkpoint--; /* * Discard packet if a duplicate; otherwise add it to deferred queue * and notify peer of gap as per protocol specification */ - if (less(seq_no, mod(l_ptr->next_in_no))) { l_ptr->stats.duplicates++; kfree_skb(buf); @@ -1957,7 +1886,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int r_flag; /* Discard any previous message that was deferred due to congestion */ - if (l_ptr->proto_msg_queue) { kfree_skb(l_ptr->proto_msg_queue); l_ptr->proto_msg_queue = NULL; @@ -1967,12 +1895,10 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, return; /* Abort non-RESET send if communication with node is prohibited */ - if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) return; /* Create protocol message with "out-of-sequence" sequence number */ - msg_set_type(msg, msg_typ); msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); @@ -2040,14 +1966,12 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); /* Defer message if bearer is already congested */ - if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { l_ptr->proto_msg_queue = buf; return; } /* Defer message if attempting to send results in bearer congestion */ - if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); l_ptr->proto_msg_queue = buf; @@ -2056,7 +1980,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, } /* Discard message if it was sent successfully */ - l_ptr->unacked_window = 0; kfree_skb(buf); } @@ -2066,7 +1989,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, * Note that network plane id propagates through the network, and may * change at any time. 
The node with lowest address rules */ - static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) { u32 rec_gap = 0; @@ -2079,7 +2001,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) goto exit; /* record unnumbered packet arrival (force mismatch on next timeout) */ - l_ptr->checkpoint--; if (l_ptr->b_ptr->net_plane != msg_net_plane(msg)) @@ -2111,7 +2032,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) /* fall thru' */ case ACTIVATE_MSG: /* Update link settings according other endpoint's values */ - strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); msg_tol = msg_link_tolerance(msg); @@ -2133,7 +2053,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) l_ptr->owner->bclink.supportable = (max_pkt_info != 0); /* Synchronize broadcast link info, if not done previously */ - if (!tipc_node_is_up(l_ptr->owner)) { l_ptr->owner->bclink.last_sent = l_ptr->owner->bclink.last_in = @@ -2185,7 +2104,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) } /* Protocol message before retransmits, reduce loss risk */ - if (l_ptr->owner->bclink.supported) tipc_bclink_update_link_state(l_ptr->owner, msg_last_bcast(msg)); @@ -2243,7 +2161,6 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr, * changeover(): Send whole message queue via the remaining link * Owner node is locked. */ - void tipc_link_changeover(struct tipc_link *l_ptr) { u32 msgcount = l_ptr->out_queue_size; @@ -2343,8 +2260,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) } } - - /** * buf_extract - extracts embedded TIPC message from another message * @skb: encapsulating message buffer @@ -2353,7 +2268,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel) * Returns a new message buffer containing an embedded message. The * encapsulating message itself is left unchanged. */ - static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) { struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); @@ -2370,7 +2284,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) * link_recv_changeover_msg(): Receive tunneled packet sent * via other link. Node is locked. Return extracted buffer. */ - static int link_recv_changeover_msg(struct tipc_link **l_ptr, struct sk_buff **buf) { @@ -2405,7 +2318,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr, } /* First original message ?: */ - if (tipc_link_is_up(dest_link)) { info("Resetting link <%s>, changeover initiated by peer\n", dest_link->name); @@ -2420,7 +2332,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr, } /* Receive original message */ - if (dest_link->exp_msg_count == 0) { warn("Link switchover error, " "got too many tunnelled messages\n"); @@ -2469,7 +2380,6 @@ void tipc_link_recv_bundle(struct sk_buff *buf) * Fragmentation/defragmentation: */ - /* * link_send_long_buf: Entry for buffers needing fragmentation. * The buffer is complete, inclusive total message length. 
@@ -2496,12 +2406,10 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) destaddr = msg_destnode(inmsg); /* Prepare reusable fragment header: */ - tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE, destaddr); /* Chop up message: */ - while (rest > 0) { struct sk_buff *fragm; @@ -2535,7 +2443,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) kfree_skb(buf); /* Append chain of fragments to send queue & send them */ - l_ptr->long_msg_seq_no++; link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); l_ptr->stats.sent_fragments += fragm_no; @@ -2551,7 +2458,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) * help storing these values in unused, available fields in the * pending message. This makes dynamic memory allocation unnecessary. */ - static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) { msg_set_seqno(buf_msg(buf), seqno); @@ -2603,7 +2509,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, *fb = NULL; /* Is there an incomplete message waiting for this fragment? */ - while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { prev = pbuf; @@ -2629,7 +2534,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, skb_copy_to_linear_data(pbuf, imsg, msg_data_sz(fragm)); /* Prepare buffer for subsequent fragments. */ - set_long_msg_seqno(pbuf, long_msg_seq_no); set_fragm_size(pbuf, fragm_sz); set_expected_frags(pbuf, exp_fragm_cnt - 1); @@ -2650,7 +2554,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, kfree_skb(fbuf); /* Is message complete? */ - if (exp_frags == 0) { if (prev) prev->next = pbuf->next; @@ -2672,7 +2575,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, * link_check_defragm_bufs - flush stale incoming message fragments * @l_ptr: pointer to link */ - static void link_check_defragm_bufs(struct tipc_link *l_ptr) { struct sk_buff *prev = NULL; @@ -2701,8 +2603,6 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr) } } - - static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) { if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) @@ -2714,7 +2614,6 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); } - void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) { /* Data messages from this node, inclusive FIRST_FRAGM */ @@ -2744,7 +2643,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) * * Returns pointer to link (or 0 if invalid link name). */ - static struct tipc_link *link_find_link(const char *name, struct tipc_node **node) { @@ -2778,7 +2676,6 @@ static struct tipc_link *link_find_link(const char *name, * * Returns 1 if value is within range, 0 if not. */ - static int link_value_is_valid(u16 cmd, u32 new_value) { switch (cmd) { @@ -2794,7 +2691,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value) return 0; } - /** * link_cmd_set_value - change priority/tolerance/window for link/bearer/media * @name - ptr to link, bearer, or media name @@ -2805,7 +2701,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value) * * Returns 0 if value updated and negative value on error. 
*/ - static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) { struct tipc_node *node; @@ -2910,7 +2805,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space * link_reset_statistics - reset link statistics * @l_ptr: pointer to link */ - static void link_reset_statistics(struct tipc_link *l_ptr) { memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); @@ -2951,7 +2845,6 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_ /** * percent - convert count to a percentage of total (rounding up or down) */ - static u32 percent(u32 count, u32 total) { return (count * 100 + (total / 2)) / total; @@ -2965,7 +2858,6 @@ static u32 percent(u32 count, u32 total) * * Returns length of print buffer data string (or 0 if error) */ - static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) { struct print_buf pb; @@ -3087,7 +2979,6 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s * * If no active link can be found, uses default maximum packet size. */ - u32 tipc_link_get_max_pkt(u32 dest, u32 selector) { struct tipc_node *n_ptr; @@ -3171,4 +3062,3 @@ print_state: tipc_printbuf_validate(buf); info("%s", print_area); } - diff --git a/net/tipc/link.h b/net/tipc/link.h index 73c18c140e1d..d6a60a963ce6 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -47,13 +47,11 @@ /* * Out-of-range value for link sequence numbers */ - #define INVALID_LINK_SEQ 0x10000 /* * Link states */ - #define WORKING_WORKING 560810u #define WORKING_UNKNOWN 560811u #define RESET_UNKNOWN 560812u @@ -63,7 +61,6 @@ * Starting value for maximum packet size negotiation on unicast links * (unless bearer MTU is less) */ - #define MAX_PKT_DEFAULT 1500 /** @@ -114,7 +111,6 @@ * @defragm_buf: list of partially reassembled inbound message fragments * @stats: collects statistics regarding link activity */ - struct tipc_link { u32 addr; char name[TIPC_MAX_LINK_NAME]; @@ -255,7 +251,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, /* * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) */ - static inline u32 buf_seqno(struct sk_buff *buf) { return msg_seqno(buf_msg(buf)); @@ -294,7 +289,6 @@ static inline u32 lesser(u32 left, u32 right) /* * Link status checking routines */ - static inline int link_working_working(struct tipc_link *l_ptr) { return l_ptr->state == WORKING_WORKING; diff --git a/net/tipc/log.c b/net/tipc/log.c index 895c6e530b0b..026733f24919 100644 --- a/net/tipc/log.c +++ b/net/tipc/log.c @@ -47,7 +47,6 @@ * * Additional user-defined print buffers are also permitted. */ - static struct print_buf null_buf = { NULL, 0, NULL, 0 }; struct print_buf *const TIPC_NULL = &null_buf; @@ -72,7 +71,6 @@ struct print_buf *const TIPC_LOG = &log_buf; * on the caller to prevent simultaneous use of the print buffer(s) being * manipulated. */ - static char print_string[TIPC_PB_MAX_STR]; static DEFINE_SPINLOCK(print_lock); @@ -97,7 +95,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, * Note: If the character array is too small (or absent), the print buffer * becomes a null device that discards anything written to it. 
*/ - void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) { pb->buf = raw; @@ -117,7 +114,6 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) * tipc_printbuf_reset - reinitialize print buffer to empty state * @pb: pointer to print buffer structure */ - static void tipc_printbuf_reset(struct print_buf *pb) { if (pb->buf) { @@ -133,7 +129,6 @@ static void tipc_printbuf_reset(struct print_buf *pb) * * Returns non-zero if print buffer is empty. */ - static int tipc_printbuf_empty(struct print_buf *pb) { return !pb->buf || (pb->crs == pb->buf); @@ -148,7 +143,6 @@ static int tipc_printbuf_empty(struct print_buf *pb) * * Returns length of print buffer data string (including trailing NUL) */ - int tipc_printbuf_validate(struct print_buf *pb) { char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n"; @@ -182,14 +176,12 @@ int tipc_printbuf_validate(struct print_buf *pb) * Current contents of destination print buffer (if any) are discarded. * Source print buffer becomes empty if a successful move occurs. */ - static void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from) { int len; /* Handle the cases where contents can't be moved */ - if (!pb_to->buf) return; @@ -206,7 +198,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, } /* Copy data from char after cursor to end (if used) */ - len = pb_from->buf + pb_from->size - pb_from->crs - 2; if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) { strcpy(pb_to->buf, pb_from->crs + 1); @@ -215,7 +206,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, pb_to->crs = pb_to->buf; /* Copy data from start to cursor (always) */ - len = pb_from->crs - pb_from->buf; strcpy(pb_to->crs, pb_from->buf); pb_to->crs += len; @@ -228,7 +218,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to, * @pb: pointer to print buffer * @fmt: formatted info to be printed */ - void tipc_printf(struct print_buf *pb, const char *fmt, ...) { int chars_to_add; @@ -270,7 +259,6 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...) * tipc_log_resize - change the size of the TIPC log buffer * @log_size: print buffer size to use */ - int tipc_log_resize(int log_size) { int res = 0; @@ -295,7 +283,6 @@ int tipc_log_resize(int log_size) /** * tipc_log_resize_cmd - reconfigure size of TIPC log buffer */ - struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) { u32 value; @@ -316,7 +303,6 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) /** * tipc_log_dump - capture TIPC log buffer contents in configuration message */ - struct sk_buff *tipc_log_dump(void) { struct sk_buff *reply; diff --git a/net/tipc/log.h b/net/tipc/log.h index 2248d96238e6..d1f5eb967fd8 100644 --- a/net/tipc/log.h +++ b/net/tipc/log.h @@ -44,7 +44,6 @@ * @crs: pointer to first unused space in character array (i.e. 
final NUL) * @echo: echo output to system console if non-zero */ - struct print_buf { char *buf; u32 size; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index e3afe162c0ac..deea0d232dca 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -72,7 +72,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, * * Returns message data size or errno */ - int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int max_size, int usrmem, struct sk_buff **buf) @@ -112,7 +111,6 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, } #ifdef CONFIG_TIPC_DEBUG - void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) { u32 usr = msg_user(msg); @@ -352,5 +350,4 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); } - #endif diff --git a/net/tipc/msg.h b/net/tipc/msg.h index eba524e34a6b..ba2a72beea68 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -44,7 +44,6 @@ * * Note: Some items are also used with TIPC internal message headers */ - #define TIPC_VERSION 2 /* @@ -58,7 +57,6 @@ /* * Payload message types */ - #define TIPC_CONN_MSG 0 #define TIPC_MCAST_MSG 1 #define TIPC_NAMED_MSG 2 @@ -67,7 +65,6 @@ /* * Message header sizes */ - #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ #define BASIC_H_SIZE 32 /* Basic payload message */ #define NAMED_H_SIZE 40 /* Named payload message */ @@ -121,7 +118,6 @@ static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b) /* * Word 0 */ - static inline u32 msg_version(struct tipc_msg *m) { return msg_bits(m, 0, 29, 7); @@ -216,7 +212,6 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz) /* * Word 1 */ - static inline u32 msg_type(struct tipc_msg *m) { return msg_bits(m, 1, 29, 0x7); @@ -291,7 +286,6 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n) /* * Word 2 */ - static inline u32 msg_ack(struct tipc_msg *m) { return msg_bits(m, 2, 16, 0xffff); @@ -315,8 +309,6 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n) /* * Words 3-10 */ - - static inline u32 msg_prevnode(struct tipc_msg *m) { return msg_word(m, 3); @@ -434,7 +426,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) return (struct tipc_msg *)msg_data(m); } - /* * Constants and routines used to read and write TIPC internal message headers */ @@ -442,7 +433,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Internal message users */ - #define BCAST_PROTOCOL 5 #define MSG_BUNDLER 6 #define LINK_PROTOCOL 7 @@ -456,7 +446,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Connection management protocol message types */ - #define CONN_PROBE 0 #define CONN_PROBE_REPLY 1 #define CONN_ACK 2 @@ -464,14 +453,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Name distributor message types */ - #define PUBLICATION 0 #define WITHDRAWAL 1 /* * Segmentation message types */ - #define FIRST_FRAGMENT 0 #define FRAGMENT 1 #define LAST_FRAGMENT 2 @@ -479,7 +466,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Link management protocol message types */ - #define STATE_MSG 0 #define RESET_MSG 1 #define ACTIVATE_MSG 2 @@ -493,7 +479,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Config protocol message types */ - #define DSC_REQ_MSG 0 #define DSC_RESP_MSG 1 @@ -501,7 
+486,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) /* * Word 1 */ - static inline u32 msg_seq_gap(struct tipc_msg *m) { return msg_bits(m, 1, 16, 0x1fff); @@ -526,7 +510,6 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n) /* * Word 2 */ - static inline u32 msg_dest_domain(struct tipc_msg *m) { return msg_word(m, 2); @@ -561,7 +544,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n) /* * Word 4 */ - static inline u32 msg_last_bcast(struct tipc_msg *m) { return msg_bits(m, 4, 16, 0xffff); @@ -628,7 +610,6 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n) /* * Word 5 */ - static inline u32 msg_session(struct tipc_msg *m) { return msg_bits(m, 5, 16, 0xffff); @@ -697,7 +678,6 @@ static inline char *msg_media_addr(struct tipc_msg *m) /* * Word 9 */ - static inline u32 msg_msgcnt(struct tipc_msg *m) { return msg_bits(m, 9, 16, 0xffff); @@ -744,5 +724,4 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int max_size, int usrmem, struct sk_buff **buf); - #endif diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 211c1723a190..158318e67b08 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -58,7 +58,6 @@ * Note: There is no field that identifies the publishing node because it is * the same for all items contained within a publication message. */ - struct distr_item { __be32 type; __be32 lower; @@ -103,7 +102,6 @@ static struct publ_list *publ_lists[] = { /** * publ_to_item - add publication info to a publication message */ - static void publ_to_item(struct distr_item *i, struct publication *p) { i->type = htonl(p->type); @@ -116,7 +114,6 @@ static void publ_to_item(struct distr_item *i, struct publication *p) /** * named_prepare_buf - allocate & initialize a publication message */ - static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) { struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); @@ -151,7 +148,6 @@ static void named_cluster_distribute(struct sk_buff *buf) /** * tipc_named_publish - tell other nodes about a new publication by this node */ - void tipc_named_publish(struct publication *publ) { struct sk_buff *buf; @@ -177,7 +173,6 @@ void tipc_named_publish(struct publication *publ) /** * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node */ - void tipc_named_withdraw(struct publication *publ) { struct sk_buff *buf; @@ -236,7 +231,6 @@ static void named_distribute(struct list_head *message_list, u32 node, /** * tipc_named_node_up - tell specified node about all publications by this node */ - void tipc_named_node_up(unsigned long nodearg) { struct tipc_node *n_ptr; @@ -246,7 +240,6 @@ void tipc_named_node_up(unsigned long nodearg) u32 max_item_buf = 0; /* compute maximum amount of publication data to send per message */ - read_lock_bh(&tipc_net_lock); n_ptr = tipc_node_find(node); if (n_ptr) { @@ -262,7 +255,6 @@ void tipc_named_node_up(unsigned long nodearg) return; /* create list of publication messages, then send them as a unit */ - INIT_LIST_HEAD(&message_list); read_lock_bh(&tipc_nametbl_lock); @@ -279,7 +271,6 @@ void tipc_named_node_up(unsigned long nodearg) * Invoked for each publication issued by a newly failed node. * Removes publication structure from name table & deletes it. 
*/ - static void named_purge_publ(struct publication *publ) { struct publication *p; @@ -303,7 +294,6 @@ static void named_purge_publ(struct publication *publ) /** * tipc_named_recv - process name table update message sent by another node */ - void tipc_named_recv(struct sk_buff *buf) { struct publication *publ; @@ -361,7 +351,6 @@ void tipc_named_recv(struct sk_buff *buf) * All name table entries published by this node are updated to reflect * the node's new network address. */ - void tipc_named_reinit(void) { struct publication *publ; diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 4de58dece9b2..010f24a59da2 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -56,7 +56,6 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */ * publications of the associated name sequence belong to it. * (The cluster and node lists may be empty.) */ - struct name_info { struct list_head node_list; struct list_head cluster_list; @@ -72,7 +71,6 @@ struct name_info { * @upper: name sequence upper bound * @info: pointer to name sequence publication info */ - struct sub_seq { u32 lower; u32 upper; @@ -90,7 +88,6 @@ struct sub_seq { * @subscriptions: list of subscriptions for this 'type' * @lock: spinlock controlling access to publication lists of all sub-sequences */ - struct name_seq { u32 type; struct sub_seq *sseqs; @@ -107,7 +104,6 @@ struct name_seq { * accessed via hashing on 'type'; name sequence lists are *not* sorted * @local_publ_count: number of publications issued by this node */ - struct name_table { struct hlist_head *types; u32 local_publ_count; @@ -124,7 +120,6 @@ static int hash(int x) /** * publ_create - create a publication structure */ - static struct publication *publ_create(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port_ref, u32 key) @@ -151,7 +146,6 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper, /** * tipc_subseq_alloc - allocate a specified number of sub-sequence structures */ - static struct sub_seq *tipc_subseq_alloc(u32 cnt) { struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); @@ -163,7 +157,6 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt) * * Allocates a single sub-sequence structure and sets it to all 0's. */ - static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) { struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); @@ -203,7 +196,6 @@ static void nameseq_delete_empty(struct name_seq *seq) * * Very time-critical, so binary searches through sub-sequence array. */ - static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, u32 instance) { @@ -233,7 +225,6 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, * * Note: Similar to binary search code for locating a sub-sequence. 
*/ - static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) { struct sub_seq *sseqs = nseq->sseqs; @@ -254,9 +245,8 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) } /** - * tipc_nameseq_insert_publ - + * tipc_nameseq_insert_publ */ - static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) @@ -272,7 +262,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, if (sseq) { /* Lower end overlaps existing entry => need an exact match */ - if ((sseq->lower != lower) || (sseq->upper != upper)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", type, lower, upper); @@ -292,11 +281,9 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, struct sub_seq *freesseq; /* Find where lower end should be inserted */ - inspos = nameseq_locate_subseq(nseq, lower); /* Fail if upper end overlaps into an existing entry */ - if ((inspos < nseq->first_free) && (upper >= nseq->sseqs[inspos].lower)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", @@ -305,7 +292,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, } /* Ensure there is space for new sub-sequence */ - if (nseq->first_free == nseq->alloc) { struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); @@ -333,7 +319,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, INIT_LIST_HEAD(&info->zone_list); /* Insert new sub-sequence */ - sseq = &nseq->sseqs[inspos]; freesseq = &nseq->sseqs[nseq->first_free]; memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq)); @@ -345,8 +330,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, created_subseq = 1; } - /* Insert a publication: */ - + /* Insert a publication */ publ = publ_create(type, lower, upper, scope, node, port, key); if (!publ) return NULL; @@ -364,9 +348,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, info->node_list_size++; } - /* - * Any subscriptions waiting for notification? - */ + /* Any subscriptions waiting for notification? */ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, @@ -380,7 +362,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, } /** - * tipc_nameseq_remove_publ - + * tipc_nameseq_remove_publ * * NOTE: There may be cases where TIPC is asked to remove a publication * that is not in the name table. For example, if another node issues a @@ -390,7 +372,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, * A failed withdraw request simply returns a failure indication and lets the * caller issue any error or warning messages associated with such a problem. 
*/ - static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, u32 node, u32 ref, u32 key) { @@ -407,7 +388,6 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i info = sseq->info; /* Locate publication, if it exists */ - list_for_each_entry(publ, &info->zone_list, zone_list) { if ((publ->key == key) && (publ->ref == ref) && (!publ->node || (publ->node == node))) @@ -417,26 +397,22 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i found: /* Remove publication from zone scope list */ - list_del(&publ->zone_list); info->zone_list_size--; /* Remove publication from cluster scope list, if present */ - if (in_own_cluster(node)) { list_del(&publ->cluster_list); info->cluster_list_size--; } /* Remove publication from node scope list, if present */ - if (in_own_node(node)) { list_del(&publ->node_list); info->node_list_size--; } /* Contract subseq list if no more publications for that subseq */ - if (list_empty(&info->zone_list)) { kfree(info); free = &nseq->sseqs[nseq->first_free--]; @@ -445,7 +421,6 @@ found: } /* Notify any waiting subscriptions */ - list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, @@ -464,7 +439,6 @@ found: * the prescribed number of events if there is any sub- * sequence overlapping with the requested sequence */ - static void tipc_nameseq_subscribe(struct name_seq *nseq, struct tipc_subscription *s) { @@ -559,7 +533,6 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, * - if name translation is attempted and fails, sets 'destnode' to 0 * and returns 0 */ - u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) { struct sub_seq *sseq; @@ -582,7 +555,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) spin_lock_bh(&seq->lock); info = sseq->info; - /* Closest-First Algorithm: */ + /* Closest-First Algorithm */ if (likely(!*destnode)) { if (!list_empty(&info->node_list)) { publ = list_first_entry(&info->node_list, @@ -605,7 +578,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) } } - /* Round-Robin Algorithm: */ + /* Round-Robin Algorithm */ else if (*destnode == tipc_own_addr) { if (list_empty(&info->node_list)) goto no_match; @@ -646,7 +619,6 @@ not_found: * * Returns non-zero if any off-node ports overlap */ - int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, struct tipc_port_list *dports) { @@ -690,7 +662,6 @@ exit: /* * tipc_nametbl_publish - add name publication to network name tables */ - struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, u32 scope, u32 port_ref, u32 key) { @@ -716,7 +687,6 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, /** * tipc_nametbl_withdraw - withdraw name publication from network name tables */ - int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) { struct publication *publ; @@ -741,7 +711,6 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) /** * tipc_nametbl_subscribe - add a subscription object to the name table */ - void tipc_nametbl_subscribe(struct tipc_subscription *s) { u32 type = s->seq.type; @@ -765,7 +734,6 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) /** * tipc_nametbl_unsubscribe - remove a subscription object from name table */ - void tipc_nametbl_unsubscribe(struct tipc_subscription *s) { struct name_seq *seq; @@ -785,7 +753,6 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) /** * 
subseq_list: print specified sub-sequence contents into the given buffer */ - static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, u32 index) { @@ -822,7 +789,6 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, /** * nameseq_list: print specified name sequence contents into the given buffer */ - static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, u32 type, u32 lowbound, u32 upbound, u32 index) { @@ -853,7 +819,6 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, /** * nametbl_header - print name table header into the given buffer */ - static void nametbl_header(struct print_buf *buf, u32 depth) { const char *header[] = { @@ -875,7 +840,6 @@ static void nametbl_header(struct print_buf *buf, u32 depth) /** * nametbl_list - print specified name table contents into the given buffer */ - static void nametbl_list(struct print_buf *buf, u32 depth_info, u32 type, u32 lowbound, u32 upbound) { @@ -974,7 +938,6 @@ void tipc_nametbl_stop(void) return; /* Verify name table is empty, then release it */ - write_lock_bh(&tipc_nametbl_lock); for (i = 0; i < tipc_nametbl_size; i++) { if (!hlist_empty(&table.types[i])) @@ -984,4 +947,3 @@ void tipc_nametbl_stop(void) table.types = NULL; write_unlock_bh(&tipc_nametbl_lock); } - diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 207d59ebf849..71cb4dc712df 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -45,10 +45,8 @@ struct tipc_port_list; /* * TIPC name types reserved for internal TIPC use (both current and planned) */ - #define TIPC_ZM_SRV 3 /* zone master service name type */ - /** * struct publication - info about a published (name or) name sequence * @type: name sequence type @@ -67,7 +65,6 @@ struct tipc_port_list; * * Note that the node list, cluster list, and zone list are circular lists. */ - struct publication { u32 type; u32 lower; diff --git a/net/tipc/node.c b/net/tipc/node.c index 76565c9916ab..d4fd341e6e0d 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -66,7 +66,6 @@ static unsigned int tipc_hashfn(u32 addr) /* * tipc_node_find - locate specified node object, if it exists */ - struct tipc_node *tipc_node_find(u32 addr) { struct tipc_node *node; @@ -91,7 +90,6 @@ struct tipc_node *tipc_node_find(u32 addr) * time. (It would be preferable to switch to holding net_lock in write mode, * but this is a non-trivial change.) */ - struct tipc_node *tipc_node_create(u32 addr) { struct tipc_node *n_ptr, *temp_node; @@ -142,13 +140,11 @@ void tipc_node_delete(struct tipc_node *n_ptr) tipc_num_nodes--; } - /** * tipc_node_link_up - handle addition of link * * Link becomes active (alone or shared) or standby, depending on its priority. 
*/ - void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active = &n_ptr->active_links[0]; @@ -181,7 +177,6 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) /** * node_select_active_links - select active link */ - static void node_select_active_links(struct tipc_node *n_ptr) { struct tipc_link **active = &n_ptr->active_links[0]; @@ -209,7 +204,6 @@ static void node_select_active_links(struct tipc_node *n_ptr) /** * tipc_node_link_down - handle loss of link */ - void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active; @@ -300,7 +294,6 @@ static void node_lost_contact(struct tipc_node *n_ptr) tipc_addr_string_fill(addr_string, n_ptr->addr)); /* Flush broadcast link info associated with lost node */ - if (n_ptr->bclink.supported) { while (n_ptr->bclink.deferred_head) { struct sk_buff *buf = n_ptr->bclink.deferred_head; @@ -334,7 +327,6 @@ static void node_lost_contact(struct tipc_node *n_ptr) tipc_nodesub_notify(n_ptr); /* Prevent re-contact with node until cleanup is done */ - n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); } @@ -362,7 +354,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) } /* For now, get space for all other nodes */ - payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; if (payload_size > 32768u) { read_unlock_bh(&tipc_net_lock); @@ -376,7 +367,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) } /* Add TLVs for all nodes in scope */ - list_for_each_entry(n_ptr, &tipc_node_list, list) { if (!tipc_in_scope(domain, n_ptr->addr)) continue; @@ -412,7 +402,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) read_lock_bh(&tipc_net_lock); /* Get space for all unicast links + broadcast link */ - payload_size = TLV_SPACE(sizeof(link_info)) * (atomic_read(&tipc_num_links) + 1); if (payload_size > 32768u) { @@ -427,14 +416,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) } /* Add TLV for broadcast link */ - link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); link_info.up = htonl(1); strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); /* Add TLVs for any other links in scope */ - list_for_each_entry(n_ptr, &tipc_node_list, list) { u32 i; diff --git a/net/tipc/node.h b/net/tipc/node.h index 72561c971d67..cfcaf4d6e480 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -48,7 +48,6 @@ #define INVALID_NODE_SIG 0x10000 /* Flags used to block (re)establishment of contact with a neighboring node */ - #define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ #define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ #define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ @@ -79,7 +78,6 @@ * @deferred_tail: newest OOS b'cast message received from node * @defragm: list of partially reassembled b'cast message fragments from node */ - struct tipc_node { u32 addr; spinlock_t lock; diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c index 327ffbbfee00..7a27344108fe 100644 --- a/net/tipc/node_subscr.c +++ b/net/tipc/node_subscr.c @@ -41,7 +41,6 @@ /** * tipc_nodesub_subscribe - create "node down" subscription for specified node */ - void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, void 
*usr_handle, net_ev_handler handle_down) { @@ -66,7 +65,6 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, /** * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) */ - void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) { if (!node_sub->node) @@ -82,7 +80,6 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) * * Note: node is locked by caller */ - void tipc_nodesub_notify(struct tipc_node *node) { struct tipc_node_subscr *ns; diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h index 4bc2ca0867a1..c95d20727ded 100644 --- a/net/tipc/node_subscr.h +++ b/net/tipc/node_subscr.h @@ -48,7 +48,6 @@ typedef void (*net_ev_handler) (void *usr_handle); * @usr_handle: argument to pass to routine when node fails * @nodesub_list: adjacent entries in list of subscriptions for the node */ - struct tipc_node_subscr { struct tipc_node *node; net_ev_handler handle_node_down; diff --git a/net/tipc/port.c b/net/tipc/port.c index e6841706967c..2ad37a4db376 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -75,7 +75,6 @@ static u32 port_peerport(struct tipc_port *p_ptr) * Handles cases where the node's network address has changed from * the default of <0.0.0> to its configured setting. */ - int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg) { u32 peernode; @@ -94,7 +93,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg) /** * tipc_multicast - send a multicast message to local and remote destinations */ - int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -111,7 +109,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, return -EINVAL; /* Create multicast message */ - hdr = &oport->phdr; msg_set_type(hdr, TIPC_MCAST_MSG); msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); @@ -127,12 +124,10 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, return res; /* Figure out where to send multicast message */ - ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper, TIPC_NODE_SCOPE, &dports); /* Send message to destinations (duplicate it only if necessary) */ - if (ext_targets) { if (dports.count != 0) { ibuf = skb_copy(buf, GFP_ATOMIC); @@ -163,7 +158,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, * * If there is no port list, perform a lookup to create one */ - void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) { struct tipc_msg *msg; @@ -174,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) msg = buf_msg(buf); /* Create destination port list, if one wasn't supplied */ - if (dp == NULL) { tipc_nametbl_mc_translate(msg_nametype(msg), msg_namelower(msg), @@ -185,7 +178,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) } /* Deliver a copy of message to each destination port */ - if (dp->count != 0) { msg_set_destnode(msg, tipc_own_addr); if (dp->count == 1) { @@ -218,7 +210,6 @@ exit: * * Returns pointer to (locked) TIPC port, or NULL if unable to create it */ - struct tipc_port *tipc_createport_raw(void *usr_handle, u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), void (*wakeup)(struct tipc_port *), @@ -257,7 +248,6 @@ struct tipc_port *tipc_createport_raw(void *usr_handle, * to ensure a change to node's own network address doesn't result * in template containing out-dated network address information */ - spin_lock_bh(&tipc_port_list_lock); msg = &p_ptr->phdr; 
tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); @@ -390,7 +380,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) u32 rmsg_sz; /* discard rejected message if it shouldn't be returned to sender */ - if (WARN(!msg_isdata(msg), "attempt to reject message with user=%u", msg_user(msg))) { dump_stack(); @@ -403,7 +392,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) * construct returned message by copying rejected message header and * data (or subset), then updating header fields that need adjusting */ - hdr_sz = msg_hdr_sz(msg); rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE); @@ -442,7 +430,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) } /* send returned message & dispose of rejected message */ - src_node = msg_prevnode(msg); if (in_own_node(src_node)) tipc_port_recv_msg(rbuf); @@ -552,7 +539,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf) int wakeable; /* Validate connection */ - p_ptr = tipc_port_lock(destport); if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) { r_buf = tipc_buf_acquire(BASIC_H_SIZE); @@ -570,7 +556,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf) } /* Process protocol message sent by peer */ - switch (msg_type(msg)) { case CONN_ACK: wakeable = tipc_port_congested(p_ptr) && p_ptr->congested && @@ -682,7 +667,6 @@ void tipc_port_reinit(void) * port_dispatcher_sigh(): Signal handler for messages destinated * to the tipc_port interface. */ - static void port_dispatcher_sigh(void *dummy) { struct sk_buff *buf; @@ -843,7 +827,6 @@ reject: * port_dispatcher(): Dispatcher for messages destinated * to the tipc_port interface. Called with port locked. */ - static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) { buf->next = NULL; @@ -860,10 +843,8 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) } /* - * Wake up port after congestion: Called with port locked, - * + * Wake up port after congestion: Called with port locked */ - static void port_wakeup_sh(unsigned long ref) { struct tipc_port *p_ptr; @@ -909,7 +890,6 @@ void tipc_acknowledge(u32 ref, u32 ack) /* * tipc_createport(): user level call. */ - int tipc_createport(void *usr_handle, unsigned int importance, tipc_msg_err_event error_cb, @@ -918,7 +898,7 @@ int tipc_createport(void *usr_handle, tipc_msg_event msg_cb, tipc_named_msg_event named_msg_cb, tipc_conn_msg_event conn_msg_cb, - tipc_continue_event continue_event_cb,/* May be zero */ + tipc_continue_event continue_event_cb, /* May be zero */ u32 *portref) { struct user_port *up_ptr; @@ -1091,7 +1071,6 @@ exit: * * Port must be locked. */ - int tipc_disconnect_port(struct tipc_port *tp_ptr) { int res; @@ -1112,7 +1091,6 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr) * tipc_disconnect(): Disconnect port form peer. * This is a node local operation. */ - int tipc_disconnect(u32 ref) { struct tipc_port *p_ptr; @@ -1147,7 +1125,6 @@ int tipc_shutdown(u32 ref) /** * tipc_port_recv_msg - receive message from lower layer and deliver to port user */ - int tipc_port_recv_msg(struct sk_buff *buf) { struct tipc_port *p_ptr; @@ -1180,7 +1157,6 @@ int tipc_port_recv_msg(struct sk_buff *buf) * tipc_port_recv_sections(): Concatenate and deliver sectioned * message for this node. 
*/ - static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -1198,7 +1174,6 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se /** * tipc_send - send message sections on connection */ - int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) { @@ -1237,7 +1212,6 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, /** * tipc_send2name - send message sections to port name */ - int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -1291,7 +1265,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, /** * tipc_send2port - send message sections to port identity */ - int tipc_send2port(u32 ref, struct tipc_portid const *dest, unsigned int num_sect, struct iovec const *msg_sect, unsigned int total_len) @@ -1334,7 +1307,6 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest, /** * tipc_send_buf2port - send message buffer to port identity */ - int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, struct sk_buff *buf, unsigned int dsz) { @@ -1371,4 +1343,3 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, return dsz; return -ELINKCONG; } - diff --git a/net/tipc/port.h b/net/tipc/port.h index 301e1bd840d1..98cbec9c4532 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h @@ -81,7 +81,6 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref); * @ref: object reference to associated TIPC port * */ - struct user_port { void *usr_handle; u32 ref; @@ -236,7 +235,6 @@ void tipc_port_reinit(void); /** * tipc_port_lock - lock port instance referred to and return its pointer */ - static inline struct tipc_port *tipc_port_lock(u32 ref) { return (struct tipc_port *)tipc_ref_lock(ref); @@ -247,7 +245,6 @@ static inline struct tipc_port *tipc_port_lock(u32 ref) * * Can use pointer instead of tipc_ref_unlock() since port is already locked. 
*/ - static inline void tipc_port_unlock(struct tipc_port *p_ptr) { spin_unlock_bh(p_ptr->lock); diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 9e37b7812c3c..5cada0e38e03 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c @@ -43,7 +43,6 @@ * @lock: spinlock controlling access to object * @ref: reference value for object (combines instance & array index info) */ - struct reference { void *object; spinlock_t lock; @@ -60,7 +59,6 @@ struct reference { * @index_mask: bitmask for array index portion of reference values * @start_mask: initial value for instance value portion of reference values */ - struct ref_table { struct reference *entries; u32 capacity; @@ -96,7 +94,6 @@ static DEFINE_RWLOCK(ref_table_lock); /** * tipc_ref_table_init - create reference table for objects */ - int tipc_ref_table_init(u32 requested_size, u32 start) { struct reference *table; @@ -109,7 +106,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start) /* do nothing */ ; /* allocate table & mark all entries as uninitialized */ - table = vzalloc(actual_size * sizeof(struct reference)); if (table == NULL) return -ENOMEM; @@ -128,7 +124,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start) /** * tipc_ref_table_stop - destroy reference table for objects */ - void tipc_ref_table_stop(void) { if (!tipc_ref_table.entries) @@ -149,7 +144,6 @@ void tipc_ref_table_stop(void) * register a partially initialized object, without running the risk that * the object will be accessed before initialization is complete. */ - u32 tipc_ref_acquire(void *object, spinlock_t **lock) { u32 index; @@ -168,7 +162,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) } /* take a free entry, if available; otherwise initialize a new entry */ - write_lock_bh(&ref_table_lock); if (tipc_ref_table.first_free) { index = tipc_ref_table.first_free; @@ -211,7 +204,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) * Disallow future references to an object and free up the entry for re-use. 
* Note: The entry's spin_lock may still be busy after discard */ - void tipc_ref_discard(u32 ref) { struct reference *entry; @@ -242,12 +234,10 @@ void tipc_ref_discard(u32 ref) * mark entry as unused; increment instance part of entry's reference * to invalidate any subsequent references */ - entry->object = NULL; entry->ref = (ref & ~index_mask) + (index_mask + 1); /* append entry to free entry list */ - if (tipc_ref_table.first_free == 0) tipc_ref_table.first_free = index; else @@ -261,7 +251,6 @@ exit: /** * tipc_ref_lock - lock referenced object and return pointer to it */ - void *tipc_ref_lock(u32 ref) { if (likely(tipc_ref_table.entries)) { @@ -283,7 +272,6 @@ void *tipc_ref_lock(u32 ref) /** * tipc_ref_deref - return pointer referenced object (without locking it) */ - void *tipc_ref_deref(u32 ref) { if (likely(tipc_ref_table.entries)) { @@ -296,4 +284,3 @@ void *tipc_ref_deref(u32 ref) } return NULL; } - diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3c00b40f815f..5577a447f531 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -123,7 +123,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0); * * Caller must hold socket lock */ - static void advance_rx_queue(struct sock *sk) { kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); @@ -135,7 +134,6 @@ static void advance_rx_queue(struct sock *sk) * * Caller must hold socket lock */ - static void discard_rx_queue(struct sock *sk) { struct sk_buff *buf; @@ -151,7 +149,6 @@ static void discard_rx_queue(struct sock *sk) * * Caller must hold socket lock */ - static void reject_rx_queue(struct sock *sk) { struct sk_buff *buf; @@ -174,7 +171,6 @@ static void reject_rx_queue(struct sock *sk) * * Returns 0 on success, errno otherwise */ - static int tipc_create(struct net *net, struct socket *sock, int protocol, int kern) { @@ -184,7 +180,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, struct tipc_port *tp_ptr; /* Validate arguments */ - if (unlikely(protocol != 0)) return -EPROTONOSUPPORT; @@ -207,13 +202,11 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, } /* Allocate socket's protocol area */ - sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); if (sk == NULL) return -ENOMEM; /* Allocate TIPC port for socket to use */ - tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE); if (unlikely(!tp_ptr)) { @@ -222,7 +215,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, } /* Finish initializing socket data structures */ - sock->ops = ops; sock->state = state; @@ -258,7 +250,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, * * Returns 0 on success, errno otherwise */ - static int release(struct socket *sock) { struct sock *sk = sock->sk; @@ -270,7 +261,6 @@ static int release(struct socket *sock) * Exit if socket isn't fully initialized (occurs when a failed accept() * releases a pre-allocated child socket that was never used) */ - if (sk == NULL) return 0; @@ -281,7 +271,6 @@ static int release(struct socket *sock) * Reject all unreceived messages, except on an active connection * (which disconnects locally & sends a 'FIN+' to peer) */ - while (sock->state != SS_DISCONNECTING) { buf = __skb_dequeue(&sk->sk_receive_queue); if (buf == NULL) @@ -303,15 +292,12 @@ static int release(struct socket *sock) * Delete TIPC port; this ensures no more messages are queued * (also disconnects an active connection & sends a 'FIN-' to peer) */ - res = tipc_deleteport(tport->ref); /* Discard any 
remaining (connection-based) messages in receive queue */ - discard_rx_queue(sk); /* Reject any messages that accumulated in backlog queue */ - sock->state = SS_DISCONNECTING; release_sock(sk); @@ -336,7 +322,6 @@ static int release(struct socket *sock) * NOTE: This routine doesn't need to take the socket lock since it doesn't * access any non-constant socket information. */ - static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; @@ -376,7 +361,6 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) * accesses socket information that is unchanging (or which changes in * a completely predictable manner). */ - static int get_name(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { @@ -444,7 +428,6 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, * imply that the operation will succeed, merely that it should be performed * and will not block. */ - static unsigned int poll(struct file *file, struct socket *sock, poll_table *wait) { @@ -482,7 +465,6 @@ static unsigned int poll(struct file *file, struct socket *sock, * * Returns 0 if permission is granted, otherwise errno */ - static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) { struct tipc_cfg_msg_hdr hdr; @@ -518,7 +500,6 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) * * Returns the number of bytes sent on success, or errno otherwise */ - static int send_msg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -562,7 +543,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, } /* Abort any pending connection attempts (very unlikely) */ - reject_rx_queue(sk); } @@ -631,7 +611,6 @@ exit: * * Returns the number of bytes sent on success, or errno otherwise */ - static int send_packet(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -642,7 +621,6 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, int res; /* Handle implied connection establishment */ - if (unlikely(dest)) return send_msg(iocb, sock, m, total_len); @@ -695,7 +673,6 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, * Returns the number of bytes sent on success (or partial success), * or errno if no data sent */ - static int send_stream(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { @@ -715,7 +692,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock, lock_sock(sk); /* Handle special cases where there is no connection */ - if (unlikely(sock->state != SS_CONNECTED)) { if (sock->state == SS_UNCONNECTED) { res = send_packet(NULL, sock, m, total_len); @@ -747,7 +723,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock, * (i.e. one large iovec entry), but could be improved to pass sets * of small iovec entries into send_packet(). */ - curr_iov = m->msg_iov; curr_iovlen = m->msg_iovlen; my_msg.msg_iov = &my_iov; @@ -796,7 +771,6 @@ exit: * * Returns 0 on success, errno otherwise */ - static int auto_connect(struct socket *sock, struct tipc_msg *msg) { struct tipc_sock *tsock = tipc_sk(sock->sk); @@ -821,7 +795,6 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg) * * Note: Address is not captured if not requested by receiver. 
*/ - static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; @@ -847,7 +820,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) * * Returns 0 if successful, otherwise errno */ - static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, struct tipc_port *tport) { @@ -861,7 +833,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, return 0; /* Optionally capture errored message object(s) */ - err = msg ? msg_errcode(msg) : 0; if (unlikely(err)) { anc_data[0] = err; @@ -878,7 +849,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, } /* Optionally capture message destination object */ - dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; switch (dest_type) { case TIPC_NAMED_MSG: @@ -923,7 +893,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, * * Returns size of returned message data, errno otherwise */ - static int recv_msg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t buf_len, int flags) { @@ -937,7 +906,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, int res; /* Catch invalid receive requests */ - if (unlikely(!buf_len)) return -EINVAL; @@ -952,7 +920,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, restart: /* Look for a message in receive queue; wait if necessary */ - while (skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { res = -ENOTCONN; @@ -970,14 +937,12 @@ restart: } /* Look at first message in receive queue */ - buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); err = msg_errcode(msg); /* Complete connection setup for an implied connect */ - if (unlikely(sock->state == SS_CONNECTING)) { res = auto_connect(sock, msg); if (res) @@ -985,24 +950,20 @@ restart: } /* Discard an empty non-errored message & try again */ - if ((!sz) && (!err)) { advance_rx_queue(sk); goto restart; } /* Capture sender's address (optional) */ - set_orig_addr(m, msg); /* Capture ancillary data (optional) */ - res = anc_data_recv(m, msg, tport); if (res) goto exit; /* Capture message data (if valid) & compute return value (always) */ - if (!err) { if (unlikely(buf_len < sz)) { sz = buf_len; @@ -1022,7 +983,6 @@ restart: } /* Consume received message (optional) */ - if (likely(!(flags & MSG_PEEK))) { if ((sock->state != SS_READY) && (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) @@ -1046,7 +1006,6 @@ exit: * * Returns size of returned message data, errno otherwise */ - static int recv_stream(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t buf_len, int flags) { @@ -1062,7 +1021,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, int res = 0; /* Catch invalid receive attempts */ - if (unlikely(!buf_len)) return -EINVAL; @@ -1076,10 +1034,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); -restart: +restart: /* Look for a message in receive queue; wait if necessary */ - while (skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { res = -ENOTCONN; @@ -1097,21 +1054,18 @@ restart: } /* Look at first message in receive queue */ - buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); err = msg_errcode(msg); /* Discard an empty non-errored message & try again */ - if ((!sz) && (!err)) { advance_rx_queue(sk); goto restart; } /* 
Optionally capture sender's address & ancillary data of first msg */ - if (sz_copied == 0) { set_orig_addr(m, msg); res = anc_data_recv(m, msg, tport); @@ -1120,7 +1074,6 @@ restart: } /* Capture message data (if valid) & compute return value (always) */ - if (!err) { u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); @@ -1152,7 +1105,6 @@ restart: } /* Consume received message (optional) */ - if (likely(!(flags & MSG_PEEK))) { if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) tipc_acknowledge(tport->ref, tport->conn_unacked); @@ -1160,7 +1112,6 @@ restart: } /* Loop around if more data is required */ - if ((sz_copied < buf_len) && /* didn't get all requested data */ (!skb_queue_empty(&sk->sk_receive_queue) || (sz_copied < target)) && /* and more is ready or required */ @@ -1181,7 +1132,6 @@ exit: * * Returns 1 if queue is unable to accept message, 0 otherwise */ - static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) { u32 threshold; @@ -1214,7 +1164,6 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) * * Returns TIPC error status code (TIPC_OK if message is not to be rejected) */ - static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) { struct socket *sock = sk->sk_socket; @@ -1222,7 +1171,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) u32 recv_q_len; /* Reject message if it is wrong sort of message for socket */ - if (msg_type(msg) > TIPC_DIRECT_MSG) return TIPC_ERR_NO_PORT; @@ -1251,7 +1199,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) } /* Reject message if there isn't room to queue it */ - recv_q_len = (u32)atomic_read(&tipc_queue_size); if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) { if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE)) @@ -1264,13 +1211,11 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) } /* Enqueue message (finally!) */ - TIPC_SKB_CB(buf)->handle = 0; atomic_inc(&tipc_queue_size); __skb_queue_tail(&sk->sk_receive_queue, buf); /* Initiate connection termination for an incoming 'FIN' */ - if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { sock->state = SS_DISCONNECTING; tipc_disconnect_port(tipc_sk_port(sk)); @@ -1290,7 +1235,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) * * Returns 0 */ - static int backlog_rcv(struct sock *sk, struct sk_buff *buf) { u32 res; @@ -1310,7 +1254,6 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf) * * Returns TIPC error status code (TIPC_OK if message is not to be rejected) */ - static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) { struct sock *sk = (struct sock *)tport->usr_handle; @@ -1322,7 +1265,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) * This code is based on sk_receive_skb(), but must be distinct from it * since a TIPC-specific filter/reject mechanism is utilized */ - bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { res = filter_rcv(sk, buf); @@ -1343,7 +1285,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) * * Called with port lock already taken. 
*/ - static void wakeupdispatch(struct tipc_port *tport) { struct sock *sk = (struct sock *)tport->usr_handle; @@ -1361,7 +1302,6 @@ static void wakeupdispatch(struct tipc_port *tport) * * Returns 0 on success, errno otherwise */ - static int connect(struct socket *sock, struct sockaddr *dest, int destlen, int flags) { @@ -1376,21 +1316,18 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, lock_sock(sk); /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ - if (sock->state == SS_READY) { res = -EOPNOTSUPP; goto exit; } /* For now, TIPC does not support the non-blocking form of connect() */ - if (flags & O_NONBLOCK) { res = -EOPNOTSUPP; goto exit; } /* Issue Posix-compliant error code if socket is in the wrong state */ - if (sock->state == SS_LISTENING) { res = -EOPNOTSUPP; goto exit; @@ -1410,18 +1347,15 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, * Note: send_msg() validates the rest of the address fields, * so there's no need to do it here */ - if (dst->addrtype == TIPC_ADDR_MCAST) { res = -EINVAL; goto exit; } /* Reject any messages already in receive queue (very unlikely) */ - reject_rx_queue(sk); /* Send a 'SYN-' to destination */ - m.msg_name = dest; m.msg_namelen = destlen; res = send_msg(NULL, sock, &m, 0); @@ -1429,7 +1363,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, goto exit; /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ - timeout = tipc_sk(sk)->conn_timeout; release_sock(sk); res = wait_event_interruptible_timeout(*sk_sleep(sk), @@ -1474,7 +1407,6 @@ exit: * * Returns 0 on success, errno otherwise */ - static int listen(struct socket *sock, int len) { struct sock *sk = sock->sk; @@ -1501,7 +1433,6 @@ static int listen(struct socket *sock, int len) * * Returns 0 on success, errno otherwise */ - static int accept(struct socket *sock, struct socket *new_sock, int flags) { struct sock *sk = sock->sk; @@ -1544,11 +1475,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - reject_rx_queue(new_sk); /* Connect new socket to it's peer */ - new_tsock->peer_name.ref = msg_origport(msg); new_tsock->peer_name.node = msg_orignode(msg); tipc_connect2port(new_ref, &new_tsock->peer_name); @@ -1564,7 +1493,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) * Respond to 'SYN-' by discarding it & returning 'ACK'-. * Respond to 'SYN+' by queuing it on new socket. 
*/ - if (!msg_data_sz(msg)) { struct msghdr m = {NULL,}; @@ -1590,7 +1518,6 @@ exit: * * Returns 0 on success, errno otherwise */ - static int shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; @@ -1607,8 +1534,8 @@ static int shutdown(struct socket *sock, int how) case SS_CONNECTING: case SS_CONNECTED: - /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ restart: + /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ buf = __skb_dequeue(&sk->sk_receive_queue); if (buf) { atomic_dec(&tipc_queue_size); @@ -1629,7 +1556,6 @@ restart: case SS_DISCONNECTING: /* Discard any unreceived messages; wake up sleeping tasks */ - discard_rx_queue(sk); if (waitqueue_active(sk_sleep(sk))) wake_up_interruptible(sk_sleep(sk)); @@ -1657,7 +1583,6 @@ restart: * * Returns 0 on success, errno otherwise */ - static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov, unsigned int ol) { @@ -1717,7 +1642,6 @@ static int setsockopt(struct socket *sock, * * Returns 0 on success, errno otherwise */ - static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov, int __user *ol) { @@ -1778,7 +1702,6 @@ static int getsockopt(struct socket *sock, /** * Protocol switches for the various types of TIPC sockets */ - static const struct proto_ops msg_ops = { .owner = THIS_MODULE, .family = AF_TIPC, @@ -1884,7 +1807,6 @@ int tipc_socket_init(void) /** * tipc_socket_stop - stop TIPC socket interface */ - void tipc_socket_stop(void) { if (!sockets_enabled) @@ -1894,4 +1816,3 @@ void tipc_socket_stop(void) sock_unregister(tipc_family_ops.family); proto_unregister(&tipc_proto); } - diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index af93ea924f60..f976e9cd6a72 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -46,7 +46,6 @@ * @subscriber_list: adjacent subscribers in top. server's list of subscribers * @subscription_list: list of subscription objects for this subscriber */ - struct tipc_subscriber { u32 port_ref; spinlock_t *lock; @@ -61,7 +60,6 @@ struct tipc_subscriber { * @subscriber_list: list of ports subscribing to service * @lock: spinlock govering access to subscriber list */ - struct top_srv { u32 setup_port; atomic_t subscription_count; @@ -78,7 +76,6 @@ static struct top_srv topsrv; * * Returns converted value */ - static u32 htohl(u32 in, int swap) { return swap ? swab32(in) : in; @@ -90,7 +87,6 @@ static u32 htohl(u32 in, int swap) * Note: Must not hold subscriber's server port lock, since tipc_send() will * try to take the lock if the message is rejected and returned! */ - static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, @@ -116,7 +112,6 @@ static void subscr_send_event(struct tipc_subscription *sub, * * Returns 1 if there is overlap, otherwise 0. 
*/ - int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper) @@ -136,7 +131,6 @@ int tipc_subscr_overlap(struct tipc_subscription *sub, * * Protected by nameseq.lock in name_table.c */ - void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, @@ -156,43 +150,35 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, /** * subscr_timeout - subscription timeout has occurred */ - static void subscr_timeout(struct tipc_subscription *sub) { struct tipc_port *server_port; /* Validate server port reference (in case subscriber is terminating) */ - server_port = tipc_port_lock(sub->server_ref); if (server_port == NULL) return; /* Validate timeout (in case subscription is being cancelled) */ - if (sub->timeout == TIPC_WAIT_FOREVER) { tipc_port_unlock(server_port); return; } /* Unlink subscription from name table */ - tipc_nametbl_unsubscribe(sub); /* Unlink subscription from subscriber */ - list_del(&sub->subscription_list); /* Release subscriber's server port */ - tipc_port_unlock(server_port); /* Notify subscriber of timeout */ - subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, TIPC_SUBSCR_TIMEOUT, 0, 0); /* Now destroy subscription */ - k_term_timer(&sub->timer); kfree(sub); atomic_dec(&topsrv.subscription_count); @@ -203,7 +189,6 @@ static void subscr_timeout(struct tipc_subscription *sub) * * Called with subscriber port locked. */ - static void subscr_del(struct tipc_subscription *sub) { tipc_nametbl_unsubscribe(sub); @@ -222,7 +207,6 @@ static void subscr_del(struct tipc_subscription *sub) * a new object reference in the interim that uses this lock; this routine will * simply wait for it to be released, then claim it.) */ - static void subscr_terminate(struct tipc_subscriber *subscriber) { u32 port_ref; @@ -230,18 +214,15 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) struct tipc_subscription *sub_temp; /* Invalidate subscriber reference */ - port_ref = subscriber->port_ref; subscriber->port_ref = 0; spin_unlock_bh(subscriber->lock); /* Sever connection to subscriber */ - tipc_shutdown(port_ref); tipc_deleteport(port_ref); /* Destroy any existing subscriptions for subscriber */ - list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { if (sub->timeout != TIPC_WAIT_FOREVER) { @@ -252,17 +233,14 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) } /* Remove subscriber from topology server's subscriber list */ - spin_lock_bh(&topsrv.lock); list_del(&subscriber->subscriber_list); spin_unlock_bh(&topsrv.lock); /* Reclaim subscriber lock */ - spin_lock_bh(subscriber->lock); /* Now destroy subscriber */ - kfree(subscriber); } @@ -275,7 +253,6 @@ static void subscr_terminate(struct tipc_subscriber *subscriber) * * Note that fields of 's' use subscriber's endianness! 
*/ - static void subscr_cancel(struct tipc_subscr *s, struct tipc_subscriber *subscriber) { @@ -284,7 +261,6 @@ static void subscr_cancel(struct tipc_subscr *s, int found = 0; /* Find first matching subscription, exit if not found */ - list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { @@ -296,7 +272,6 @@ static void subscr_cancel(struct tipc_subscr *s, return; /* Cancel subscription timer (if used), then delete subscription */ - if (sub->timeout != TIPC_WAIT_FOREVER) { sub->timeout = TIPC_WAIT_FOREVER; spin_unlock_bh(subscriber->lock); @@ -312,7 +287,6 @@ static void subscr_cancel(struct tipc_subscr *s, * * Called with subscriber port locked. */ - static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, struct tipc_subscriber *subscriber) { @@ -320,11 +294,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, int swap; /* Determine subscriber's endianness */ - swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); /* Detect & process a subscription cancellation request */ - if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); subscr_cancel(s, subscriber); @@ -332,7 +304,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, } /* Refuse subscription if global limit exceeded */ - if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { warn("Subscription rejected, subscription limit reached (%u)\n", tipc_max_subscriptions); @@ -341,7 +312,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, } /* Allocate subscription object */ - sub = kmalloc(sizeof(*sub), GFP_ATOMIC); if (!sub) { warn("Subscription rejected, no memory\n"); @@ -350,7 +320,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, } /* Initialize subscription object */ - sub->seq.type = htohl(s->seq.type, swap); sub->seq.lower = htohl(s->seq.lower, swap); sub->seq.upper = htohl(s->seq.upper, swap); @@ -384,7 +353,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, * * Called with subscriber's server port unlocked. */ - static void subscr_conn_shutdown_event(void *usr_handle, u32 port_ref, struct sk_buff **buf, @@ -408,7 +376,6 @@ static void subscr_conn_shutdown_event(void *usr_handle, * * Called with subscriber's server port unlocked. */ - static void subscr_conn_msg_event(void *usr_handle, u32 port_ref, struct sk_buff **buf, @@ -423,7 +390,6 @@ static void subscr_conn_msg_event(void *usr_handle, * Lock subscriber's server port (& make a local copy of lock pointer, * in case subscriber is deleted while processing subscription request) */ - if (tipc_port_lock(port_ref) == NULL) return; @@ -451,7 +417,6 @@ static void subscr_conn_msg_event(void *usr_handle, * timeout code cannot delete the subscription, * so the subscription object is still protected. 
*/ - tipc_nametbl_subscribe(sub); } } @@ -460,7 +425,6 @@ static void subscr_conn_msg_event(void *usr_handle, /** * subscr_named_msg_event - handle request to establish a new subscriber */ - static void subscr_named_msg_event(void *usr_handle, u32 port_ref, struct sk_buff **buf, @@ -474,7 +438,6 @@ static void subscr_named_msg_event(void *usr_handle, u32 server_port_ref; /* Create subscriber object */ - subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC); if (subscriber == NULL) { warn("Subscriber rejected, no memory\n"); @@ -484,7 +447,6 @@ static void subscr_named_msg_event(void *usr_handle, INIT_LIST_HEAD(&subscriber->subscriber_list); /* Create server port & establish connection to subscriber */ - tipc_createport(subscriber, importance, NULL, @@ -503,26 +465,21 @@ static void subscr_named_msg_event(void *usr_handle, tipc_connect2port(subscriber->port_ref, orig); /* Lock server port (& save lock address for future use) */ - subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock; /* Add subscriber to topology server's subscriber list */ - spin_lock_bh(&topsrv.lock); list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); spin_unlock_bh(&topsrv.lock); /* Unlock server port */ - server_port_ref = subscriber->port_ref; spin_unlock_bh(subscriber->lock); /* Send an ACK- to complete connection handshaking */ - tipc_send(server_port_ref, 0, NULL, 0); /* Handle optional subscription request */ - if (size != 0) { subscr_conn_msg_event(subscriber, server_port_ref, buf, data, size); diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index ef6529c8456f..218d2e07f0cc 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -51,7 +51,6 @@ struct tipc_subscription; * @swap: indicates if subscriber uses opposite endianness in its messages * @evt: template for events generated by subscription */ - struct tipc_subscription { struct tipc_name_seq seq; u32 timeout; @@ -80,5 +79,4 @@ int tipc_subscr_start(void); void tipc_subscr_stop(void); - #endif -- cgit v1.2.2 From d3836f21b0af5513ef55701dd3f50b8c42e44c7a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 27 Apr 2012 00:33:38 +0000 Subject: net: allow skb->head to be a page fragment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skb->head is currently allocated from kmalloc(). This is convenient but has the drawback the data cannot be converted to a page fragment if needed. We have three spots were it hurts : 1) GRO aggregation When a linear skb must be appended to another skb, GRO uses the frag_list fallback, very inefficient since we keep all struct sk_buff around. So drivers enabling GRO but delivering linear skbs to network stack aren't enabling full GRO power. 2) splice(socket -> pipe). We must copy the linear part to a page fragment. This kind of defeats splice() purpose (zero copy claim) 3) TCP coalescing. Recently introduced, this permits to group several contiguous segments into a single skb. This shortens queue lengths and save kernel memory, and greatly reduce probabilities of TCP collapses. This coalescing doesnt work on linear skbs (or we would need to copy data, this would be too slow) Given all these issues, the following patch introduces the possibility of having skb->head be a fragment in itself. We use a new skb flag, skb->head_frag to carry this information. build_skb() is changed to accept a frag_size argument. Drivers willing to provide a page fragment instead of kmalloc() data will set a non zero value, set to the fragment size. 
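As a concrete illustration (a sketch under assumptions, not code from this patch), a driver receive path that carves its buffers out of pages could call the new build_skb() roughly as follows; the helper name and parameters below are hypothetical:

#include <linux/skbuff.h>
#include <linux/mm.h>

/*
 * Hypothetical driver helper: the RX buffer is a page fragment, so a
 * non-zero frag_size is passed and build_skb() sets skb->head_frag.
 * frag_size must leave room for SKB_DATA_ALIGN(sizeof(struct
 * skb_shared_info)) at the end of the fragment, and on success one page
 * reference is handed over to the skb, since skb_free_head() will
 * put_page() the head page when head_frag is set.
 */
static struct sk_buff *rx_frag_to_skb(struct page *page, unsigned int offset,
				      unsigned int frag_size)
{
	void *data = page_address(page) + offset;
	struct sk_buff *skb;

	skb = build_skb(data, frag_size);
	if (unlikely(!skb))
		return NULL;		/* caller keeps its page reference */

	/* skb->head_frag is now set; GRO, splice() and TCP coalescing
	 * can treat the head as a fragment without copying.
	 */
	return skb;
}

Passing frag_size == 0 keeps the old behaviour for kmalloc()ed heads (the size is then taken from ksize(data) and head_frag stays clear).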
Then, on situations we need to convert the skb head to a frag in itself, we can check if skb->head_frag is set and avoid the copies or various fallbacks we have. This means drivers currently using frags could be updated to avoid the current skb->head allocation and reduce their memory footprint (aka skb truesize). (thats 512 or 1024 bytes saved per skb). This also makes bpf/netfilter faster since the 'first frag' will be part of skb linear part, no need to copy data. Signed-off-by: Eric Dumazet Cc: Ilpo Järvinen Cc: Herbert Xu Cc: Maciej Żenczykowski Cc: Neal Cardwell Cc: Tom Herbert Cc: Jeff Kirsher Cc: Ben Hutchings Cc: Matt Carlson Cc: Michael Chan Signed-off-by: David S. Miller --- net/core/skbuff.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2342a7250391..effa75d0e318 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -245,6 +245,7 @@ EXPORT_SYMBOL(__alloc_skb); /** * build_skb - build a network buffer * @data: data buffer provided by caller + * @frag_size: size of fragment, or 0 if head was kmalloced * * Allocate a new &sk_buff. Caller provides space holding head and * skb_shared_info. @data must have been allocated by kmalloc() @@ -258,20 +259,21 @@ EXPORT_SYMBOL(__alloc_skb); * before giving packet to stack. * RX rings only contains data buffers, not full skbs. */ -struct sk_buff *build_skb(void *data) +struct sk_buff *build_skb(void *data, unsigned int frag_size) { struct skb_shared_info *shinfo; struct sk_buff *skb; - unsigned int size; + unsigned int size = frag_size ? : ksize(data); skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); if (!skb) return NULL; - size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); memset(skb, 0, offsetof(struct sk_buff, tail)); skb->truesize = SKB_TRUESIZE(size); + skb->head_frag = frag_size != 0; atomic_set(&skb->users, 1); skb->head = data; skb->data = data; @@ -376,6 +378,14 @@ static void skb_clone_fraglist(struct sk_buff *skb) skb_get(list); } +static void skb_free_head(struct sk_buff *skb) +{ + if (skb->head_frag) + put_page(virt_to_head_page(skb->head)); + else + kfree(skb->head); +} + static void skb_release_data(struct sk_buff *skb) { if (!skb->cloned || @@ -402,7 +412,7 @@ static void skb_release_data(struct sk_buff *skb) if (skb_has_frag_list(skb)) skb_drop_fraglist(skb); - kfree(skb->head); + skb_free_head(skb); } } @@ -644,6 +654,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) C(tail); C(end); C(head); + C(head_frag); C(data); C(truesize); atomic_set(&n->users, 1); @@ -940,7 +951,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta; } - if (fastpath && + if (fastpath && !skb->head_frag && size + sizeof(struct skb_shared_info) <= ksize(skb->head)) { memmove(skb->head + size, skb_shinfo(skb), offsetof(struct skb_shared_info, @@ -967,7 +978,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); if (fastpath) { - kfree(skb->head); + skb_free_head(skb); } else { /* copy this zero copy skb frags */ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { @@ -985,6 +996,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, off = (data + nhead) - skb->head; skb->head = data; + skb->head_frag = 0; adjust_others: skb->data += off; #ifdef 
NET_SKBUFF_DATA_USES_OFFSET -- cgit v1.2.2 From d7e8883cfcf4851afe74fb380cc62b7fa9cf66ba Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 30 Apr 2012 08:10:34 +0000 Subject: net: make GRO aware of skb->head_frag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GRO can check if skb to be merged has its skb->head mapped to a page fragment, instead of a kmalloc() area. We 'upgrade' skb->head as a fragment in itself This avoids the frag_list fallback, and permits to build true GRO skb (one sk_buff and up to 16 fragments), using less memory. This reduces number of cache misses when user makes its copy, since a single sk_buff is fetched. This is a followup of patch "net: allow skb->head to be a page fragment" Signed-off-by: Eric Dumazet Cc: Ilpo Järvinen Cc: Herbert Xu Cc: Maciej Żenczykowski Cc: Neal Cardwell Cc: Tom Herbert Cc: Jeff Kirsher Cc: Ben Hutchings Cc: Matt Carlson Cc: Michael Chan Signed-off-by: David S. Miller --- net/core/dev.c | 5 ++++- net/core/skbuff.c | 27 ++++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index 501f3cc703dd..a2be59fe6ab8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3546,7 +3546,10 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) break; case GRO_MERGED_FREE: - consume_skb(skb); + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); break; case GRO_HELD: diff --git a/net/core/skbuff.c b/net/core/skbuff.c index effa75d0e318..09cc38651b2f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -69,7 +69,7 @@ #include #include -static struct kmem_cache *skbuff_head_cache __read_mostly; +struct kmem_cache *skbuff_head_cache __read_mostly; static struct kmem_cache *skbuff_fclone_cache __read_mostly; static void sock_pipe_buf_release(struct pipe_inode_info *pipe, @@ -2901,6 +2901,31 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) NAPI_GRO_CB(skb)->free = 1; goto done; + } else if (skb->head_frag) { + int nr_frags = pinfo->nr_frags; + skb_frag_t *frag = pinfo->frags + nr_frags; + struct page *page = virt_to_head_page(skb->head); + unsigned int first_size = headlen - offset; + unsigned int first_offset; + + if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) + return -E2BIG; + + first_offset = skb->data - + (unsigned char *)page_address(page) + + offset; + + pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; + + frag->page.p = page; + frag->page_offset = first_offset; + skb_frag_size_set(frag, first_size); + + memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); + /* We dont need to clear skbinfo->nr_frags here */ + + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; + goto done; } else if (skb_gro_len(p) != pinfo->gso_size) return -E2BIG; -- cgit v1.2.2 From 329033f645d93b5f9160b9b972dbc5431ad22a33 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 27 Apr 2012 00:38:33 +0000 Subject: tcp: makes tcp_try_coalesce aware of skb->head_frag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TCP coalesce can check if skb to be merged has its skb->head mapped to a page fragment, instead of a kmalloc() area. We had to disable coalescing in this case, for performance reasons. We 'upgrade' skb->head as a fragment in itself. This reduces number of cache misses when user makes its copies, since a less sk_buff are fetched. 
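A rough sketch of the "steal the head as a fragment" step follows (illustrative only; the helper below is not from the patch, and the to->len/data_len/truesize accounting is omitted for brevity):

#include <linux/skbuff.h>
#include <linux/mm.h>

/*
 * Assumes from->head_frag is set. The page reference backing from->head
 * is transferred to 'to', so the caller must release 'from' with
 * kmem_cache_free(skbuff_head_cache, from) instead of __kfree_skb(),
 * as the fragstolen handling in tcp_data_queue() does.
 */
static bool steal_linear_as_frag(struct sk_buff *to, struct sk_buff *from)
{
	struct page *page = virt_to_head_page(from->head);
	unsigned int offset = from->data - (unsigned char *)page_address(page);

	if (skb_shinfo(to)->nr_frags >= MAX_SKB_FRAGS)
		return false;

	/* Append from's linear bytes to 'to' as one more page frag;
	 * to->len, to->data_len and to->truesize still need updating.
	 */
	skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
			   page, offset, skb_headlen(from));
	return true;
}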
This makes receive and ofo queues shorter and thus reduce cache line misses in TCP stack. This is a followup of patch "net: allow skb->head to be a page fragment" Tested with tg3 nic, with GRO on or off. We can see "TCPRcvCoalesce" counter being incremented. Signed-off-by: Eric Dumazet Cc: Ilpo Järvinen Cc: Herbert Xu Cc: Maciej Żenczykowski Cc: Neal Cardwell Cc: Tom Herbert Cc: Jeff Kirsher Cc: Ben Hutchings Cc: Matt Carlson Cc: Michael Chan Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 55 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c93b0cbb7fc1..96a631deb4e6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4464,10 +4464,12 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) */ static bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to, - struct sk_buff *from) + struct sk_buff *from, + bool *fragstolen) { - int len = from->len; + int delta, len = from->len; + *fragstolen = false; if (tcp_hdr(from)->fin) return false; if (len <= skb_tailroom(to)) { @@ -4478,15 +4480,19 @@ merge: TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; return true; } + + if (skb_has_frag_list(to) || skb_has_frag_list(from)) + return false; + if (skb_headlen(from) == 0 && - !skb_has_frag_list(to) && - !skb_has_frag_list(from) && (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags <= MAX_SKB_FRAGS)) { - int delta = from->truesize - ksize(from->head) - - SKB_DATA_ALIGN(sizeof(struct sk_buff)); + WARN_ON_ONCE(from->head_frag); + delta = from->truesize - ksize(from->head) - + SKB_DATA_ALIGN(sizeof(struct sk_buff)); WARN_ON_ONCE(delta < len); +copyfrags: memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); @@ -4499,6 +4505,20 @@ merge: to->data_len += len; goto merge; } + if (from->head_frag) { + struct page *page; + unsigned int offset; + + if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) + return false; + page = virt_to_head_page(from->head); + offset = from->data - (unsigned char *)page_address(page); + skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, + page, offset, skb_headlen(from)); + *fragstolen = true; + delta = len; /* we dont know real truesize... */ + goto copyfrags; + } return false; } @@ -4540,10 +4560,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { - if (!tcp_try_coalesce(sk, skb1, skb)) { + bool fragstolen; + + if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } else { - __kfree_skb(skb); + if (fragstolen) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); skb = NULL; } @@ -4626,6 +4651,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) const struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; + bool fragstolen = false; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) goto drop; @@ -4672,7 +4698,9 @@ queue_and_out: goto drop; tail = skb_peek_tail(&sk->sk_receive_queue); - eaten = (tail && tcp_try_coalesce(sk, tail, skb)) ? 1 : 0; + eaten = (tail && + tcp_try_coalesce(sk, tail, skb, + &fragstolen)) ? 
1 : 0; if (eaten <= 0) { skb_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); @@ -4699,9 +4727,12 @@ queue_and_out: tcp_fast_path_check(sk); - if (eaten > 0) - __kfree_skb(skb); - else if (!sock_flag(sk, SOCK_DEAD)) + if (eaten > 0) { + if (fragstolen) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); + } else if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } -- cgit v1.2.2 From 1d0c0b328a63826b7c80c27d1c4f2b04e8225273 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 27 Apr 2012 02:10:03 +0000 Subject: net: makes skb_splice_bits() aware of skb->head_frag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __skb_splice_bits() can check if skb to be spliced has its skb->head mapped to a page fragment, instead of a kmalloc() area. If so we can avoid a copy of the skb head and get a reference on underlying page. Signed-off-by: Eric Dumazet Cc: Ilpo Järvinen Cc: Herbert Xu Cc: Maciej Żenczykowski Cc: Neal Cardwell Cc: Tom Herbert Cc: Jeff Kirsher Cc: Ben Hutchings Cc: Matt Carlson Cc: Michael Chan Signed-off-by: David S. Miller --- net/core/skbuff.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 09cc38651b2f..52ba2b5e803d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1699,14 +1699,18 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, struct splice_pipe_desc *spd, struct sock *sk) { int seg; + bool head_is_linear = !skb->head_frag; - /* - * map the linear part + /* map the linear part : + * If skb->head_frag is set, this 'linear' part is backed + * by a fragment, and we can avoid a copy. */ if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), - offset, len, skb, spd, true, sk, pipe)) + offset, len, skb, spd, + head_is_linear, + sk, pipe)) return true; /* -- cgit v1.2.2 From 5de7aee5413cdfe6f96289a84a5ad22b1314e873 Mon Sep 17 00:00:00 2001 From: James Chapman Date: Sun, 29 Apr 2012 21:48:46 +0000 Subject: l2tp: fix locking of 64-bit counters for smp L2TP uses 64-bit counters but since these are not updated atomically, we need to make them safe for smp. This patch addresses that. Signed-off-by: James Chapman Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 75 ++++++++++++++++++++++++++++++++++++------------- net/l2tp/l2tp_core.h | 1 + net/l2tp/l2tp_netlink.c | 62 +++++++++++++++++++++++++++++----------- 3 files changed, 103 insertions(+), 35 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0ca9bc39150f..f1bfae3e1ba6 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -330,8 +330,10 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk struct sk_buff *skbp; struct sk_buff *tmp; u32 ns = L2TP_SKB_CB(skb)->ns; + struct l2tp_stats *sstats; spin_lock_bh(&session->reorder_q.lock); + sstats = &session->stats; skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { if (L2TP_SKB_CB(skbp)->ns > ns) { __skb_queue_before(&session->reorder_q, skbp, skb); @@ -339,7 +341,9 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", session->name, ns, L2TP_SKB_CB(skbp)->ns, skb_queue_len(&session->reorder_q)); - session->stats.rx_oos_packets++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_oos_packets++; + u64_stats_update_end(&sstats->syncp); goto out; } } @@ -356,16 +360,23 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * { struct l2tp_tunnel *tunnel = session->tunnel; int length = L2TP_SKB_CB(skb)->length; + struct l2tp_stats *tstats, *sstats; /* We're about to requeue the skb, so return resources * to its current owner (a socket receive buffer). */ skb_orphan(skb); - tunnel->stats.rx_packets++; - tunnel->stats.rx_bytes += length; - session->stats.rx_packets++; - session->stats.rx_bytes += length; + tstats = &tunnel->stats; + u64_stats_update_begin(&tstats->syncp); + sstats = &session->stats; + u64_stats_update_begin(&sstats->syncp); + tstats->rx_packets++; + tstats->rx_bytes += length; + sstats->rx_packets++; + sstats->rx_bytes += length; + u64_stats_update_end(&tstats->syncp); + u64_stats_update_end(&sstats->syncp); if (L2TP_SKB_CB(skb)->has_seq) { /* Bump our Nr */ @@ -396,6 +407,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) { struct sk_buff *skb; struct sk_buff *tmp; + struct l2tp_stats *sstats; /* If the pkt at the head of the queue has the nr that we * expect to send up next, dequeue it and any other @@ -403,10 +415,13 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) */ start: spin_lock_bh(&session->reorder_q.lock); + sstats = &session->stats; skb_queue_walk_safe(&session->reorder_q, skb, tmp) { if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { - session->stats.rx_seq_discards++; - session->stats.rx_errors++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + sstats->rx_errors++; + u64_stats_update_end(&sstats->syncp); PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, "%s: oos pkt %u len %d discarded (too old), " "waiting for %u, reorder_q_len=%d\n", @@ -558,6 +573,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, struct l2tp_tunnel *tunnel = session->tunnel; int offset; u32 ns, nr; + struct l2tp_stats *sstats = &session->stats; /* The ref count is increased since we now hold a pointer to * the session. Take care to decrement the refcnt when exiting @@ -573,7 +589,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, "%s: cookie mismatch (%u/%u). 
Discarding.\n", tunnel->name, tunnel->tunnel_id, session->session_id); - session->stats.rx_cookie_discards++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_cookie_discards++; + u64_stats_update_end(&sstats->syncp); goto discard; } ptr += session->peer_cookie_len; @@ -642,7 +660,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, "%s: recv data has no seq numbers when required. " "Discarding\n", session->name); - session->stats.rx_seq_discards++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + u64_stats_update_end(&sstats->syncp); goto discard; } @@ -661,7 +681,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, "%s: recv data has no seq numbers when required. " "Discarding\n", session->name); - session->stats.rx_seq_discards++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + u64_stats_update_end(&sstats->syncp); goto discard; } } @@ -715,7 +737,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * packets */ if (L2TP_SKB_CB(skb)->ns != session->nr) { - session->stats.rx_seq_discards++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_seq_discards++; + u64_stats_update_end(&sstats->syncp); PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, "%s: oos pkt %u len %d discarded, " "waiting for %u, reorder_q_len=%d\n", @@ -742,7 +766,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, return; discard: - session->stats.rx_errors++; + u64_stats_update_begin(&sstats->syncp); + sstats->rx_errors++; + u64_stats_update_end(&sstats->syncp); kfree_skb(skb); if (session->deref) @@ -768,6 +794,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int offset; u16 version; int length; + struct l2tp_stats *tstats; if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) goto discard_bad_csum; @@ -860,7 +887,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, discard_bad_csum: LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); - tunnel->stats.rx_errors++; + tstats = &tunnel->stats; + u64_stats_update_begin(&tstats->syncp); + tstats->rx_errors++; + u64_stats_update_end(&tstats->syncp); kfree_skb(skb); return 0; @@ -986,6 +1016,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, struct l2tp_tunnel *tunnel = session->tunnel; unsigned int len = skb->len; int error; + struct l2tp_stats *tstats, *sstats; /* Debug */ if (session->send_seq) @@ -1022,15 +1053,21 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, error = ip_queue_xmit(skb, fl); /* Update stats */ + tstats = &tunnel->stats; + u64_stats_update_begin(&tstats->syncp); + sstats = &session->stats; + u64_stats_update_begin(&sstats->syncp); if (error >= 0) { - tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += len; - session->stats.tx_packets++; - session->stats.tx_bytes += len; + tstats->tx_packets++; + tstats->tx_bytes += len; + sstats->tx_packets++; + sstats->tx_bytes += len; } else { - tunnel->stats.tx_errors++; - session->stats.tx_errors++; + tstats->tx_errors++; + sstats->tx_errors++; } + u64_stats_update_end(&tstats->syncp); + u64_stats_update_end(&sstats->syncp); return 0; } diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 09e4a38b4f43..a8c943bf4140 100644 --- 
a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -45,6 +45,7 @@ struct l2tp_stats { u64 rx_oos_packets; u64 rx_errors; u64 rx_cookie_discards; + struct u64_stats_sync syncp; }; struct l2tp_tunnel; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index bc8c3348f835..1dbb9772fc45 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -225,6 +225,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, struct nlattr *nest; struct sock *sk = NULL; struct inet_sock *inet; + struct l2tp_stats stats; + unsigned int start; hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); @@ -242,16 +244,28 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, if (nest == NULL) goto nla_put_failure; - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets) || - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes) || - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors) || - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes) || + do { + start = u64_stats_fetch_begin(&tunnel->stats.syncp); + stats.tx_packets = tunnel->stats.tx_packets; + stats.tx_bytes = tunnel->stats.tx_bytes; + stats.tx_errors = tunnel->stats.tx_errors; + stats.rx_packets = tunnel->stats.rx_packets; + stats.rx_bytes = tunnel->stats.rx_bytes; + stats.rx_errors = tunnel->stats.rx_errors; + stats.rx_seq_discards = tunnel->stats.rx_seq_discards; + stats.rx_oos_packets = tunnel->stats.rx_oos_packets; + } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); + + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, - tunnel->stats.rx_seq_discards) || + stats.rx_seq_discards) || nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, - tunnel->stats.rx_oos_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors)) + stats.rx_oos_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) goto nla_put_failure; nla_nest_end(skb, nest); @@ -563,6 +577,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags struct nlattr *nest; struct l2tp_tunnel *tunnel = session->tunnel; struct sock *sk = NULL; + struct l2tp_stats stats; + unsigned int start; sk = tunnel->sock; @@ -600,19 +616,33 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags (session->reorder_timeout && nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout))) goto nla_put_failure; + nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets) || - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes) || - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors) || - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes) || + + do { + start = u64_stats_fetch_begin(&session->stats.syncp); + stats.tx_packets = session->stats.tx_packets; + stats.tx_bytes = session->stats.tx_bytes; + stats.tx_errors = session->stats.tx_errors; + stats.rx_packets = session->stats.rx_packets; + stats.rx_bytes = 
session->stats.rx_bytes; + stats.rx_errors = session->stats.rx_errors; + stats.rx_seq_discards = session->stats.rx_seq_discards; + stats.rx_oos_packets = session->stats.rx_oos_packets; + } while (u64_stats_fetch_retry(&session->stats.syncp, start)); + + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, - session->stats.rx_seq_discards) || + stats.rx_seq_discards) || nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, - session->stats.rx_oos_packets) || - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors)) + stats.rx_oos_packets) || + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) goto nla_put_failure; nla_nest_end(skb, nest); -- cgit v1.2.2 From de3c7a1827fa144917270eb66956930012ddae52 Mon Sep 17 00:00:00 2001 From: James Chapman Date: Sun, 29 Apr 2012 21:48:47 +0000 Subject: l2tp: Use ip4_datagram_connect() in l2tp_ip_connect() Cleanup the l2tp_ip code to make use of an existing ipv4 support function. Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/l2tp/l2tp_ip.c | 54 +++++++----------------------------------------------- 1 file changed, 7 insertions(+), 47 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 585d93ecee2d..516259284846 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -299,67 +299,27 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len { struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; struct inet_sock *inet = inet_sk(sk); - struct flowi4 *fl4; - struct rtable *rt; - __be32 saddr; - int oif, rc; + int rc; - rc = -EINVAL; if (addr_len < sizeof(*lsa)) - goto out; + return -EINVAL; - rc = -EAFNOSUPPORT; - if (lsa->l2tp_family != AF_INET) - goto out; - - lock_sock(sk); - - sk_dst_reset(sk); - - oif = sk->sk_bound_dev_if; - saddr = inet->inet_saddr; - - rc = -EINVAL; if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) - goto out; + return -EINVAL; - fl4 = &inet->cork.fl.u.ip4; - rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr, - RT_CONN_FLAGS(sk), oif, - IPPROTO_L2TP, - 0, 0, sk, true); - if (IS_ERR(rt)) { - rc = PTR_ERR(rt); - if (rc == -ENETUNREACH) - IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); - goto out; - } + rc = ip4_datagram_connect(sk, uaddr, addr_len); + if (rc < 0) + return rc; - rc = -ENETUNREACH; - if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { - ip_rt_put(rt); - goto out; - } + lock_sock(sk); l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; - if (!inet->inet_saddr) - inet->inet_saddr = fl4->saddr; - if (!inet->inet_rcv_saddr) - inet->inet_rcv_saddr = fl4->saddr; - inet->inet_daddr = fl4->daddr; - sk->sk_state = TCP_ESTABLISHED; - inet->inet_id = jiffies; - - sk_dst_set(sk, &rt->dst); - write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); sk_add_bind_node(sk, &l2tp_ip_bind_table); write_unlock_bh(&l2tp_ip_lock); - rc = 0; -out: release_sock(sk); return rc; } -- cgit v1.2.2 From c8657fd50a7553a4a9cb4b04cdc11419c9d93d9b Mon Sep 17 00:00:00 2001 From: James Chapman Date: Sun, 29 Apr 2012 21:48:48 +0000 Subject: l2tp: remove unused stats from l2tp_ip socket The l2tp_ip socket currently maintains packet/byte stats in its private socket structure. But these counters aren't exposed to userspace and so serve no purpose. 
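For reference, the counters kept elsewhere in l2tp are made smp-safe by the earlier change in this series using u64_stats_sync; a minimal sketch of that writer/reader pattern (hypothetical names, not code from the patches):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical stats block protected by a u64_stats_sync */
struct foo_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* Writer side: wrap every update of the 64-bit fields */
static void foo_stats_rx(struct foo_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: retry until a consistent snapshot is seen, as the
 * l2tp_nl_tunnel_send()/l2tp_nl_session_send() changes above do.
 */
static void foo_stats_snapshot(const struct foo_stats *stats,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->rx_packets;
		*bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}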
The counters were also smp-unsafe. So this patch just gets rid of the stats. While here, change a couple of internal __u32 variables to u32. Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/l2tp/l2tp_ip.c | 32 ++++---------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 516259284846..adefe5225dca 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -32,15 +32,8 @@ struct l2tp_ip_sock { /* inet_sock has to be the first member of l2tp_ip_sock */ struct inet_sock inet; - __u32 conn_id; - __u32 peer_conn_id; - - __u64 tx_packets; - __u64 tx_bytes; - __u64 tx_errors; - __u64 rx_packets; - __u64 rx_bytes; - __u64 rx_errors; + u32 conn_id; + u32 peer_conn_id; }; static DEFINE_RWLOCK(l2tp_ip_lock); @@ -298,7 +291,6 @@ out_in_use: static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; - struct inet_sock *inet = inet_sk(sk); int rc; if (addr_len < sizeof(*lsa)) @@ -374,7 +366,6 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m { struct sk_buff *skb; int rc; - struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); struct inet_sock *inet = inet_sk(sk); struct rtable *rt = NULL; struct flowi4 *fl4; @@ -473,14 +464,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m rcu_read_unlock(); error: - /* Update stats */ - if (rc >= 0) { - lsa->tx_packets++; - lsa->tx_bytes += len; + if (rc >= 0) rc = len; - } else { - lsa->tx_errors++; - } out: release_sock(sk); @@ -498,7 +483,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); - struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; @@ -540,15 +524,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m done: skb_free_datagram(sk, skb); out: - if (err) { - lsk->rx_errors++; - return err; - } - - lsk->rx_packets++; - lsk->rx_bytes += copied; - - return copied; + return err ? err : copied; } static struct proto l2tp_ip_prot = { -- cgit v1.2.2 From b79585f537ba9631c9b6aafb54a8040c8d515fee Mon Sep 17 00:00:00 2001 From: James Chapman Date: Sun, 29 Apr 2012 21:48:50 +0000 Subject: l2tp: pppol2tp_connect() handles ipv6 sockaddr variants Userspace uses connect() to associate a pppol2tp socket with a tunnel socket. This needs to allow the caller to supply the new IPv6 sockaddr_pppol2tp structures if IPv6 is used. Signed-off-by: James Chapman Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 27b9dec9d254..9f2c421aa307 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -628,7 +628,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, { struct sock *sk = sock->sk; struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; - struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct l2tp_session *session = NULL; struct l2tp_tunnel *tunnel; @@ -657,7 +656,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (sk->sk_user_data) goto end; /* socket is already attached */ - /* Get params from socket address. Handle L2TPv2 and L2TPv3 */ + /* Get params from socket address. Handle L2TPv2 and L2TPv3. + * This is nasty because there are different sockaddr_pppol2tp + * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use + * the sockaddr size to determine which structure the caller + * is using. + */ + peer_tunnel_id = 0; if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { fd = sp->pppol2tp.fd; tunnel_id = sp->pppol2tp.s_tunnel; @@ -665,12 +670,31 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, session_id = sp->pppol2tp.s_session; peer_session_id = sp->pppol2tp.d_session; } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { + struct sockaddr_pppol2tpv3 *sp3 = + (struct sockaddr_pppol2tpv3 *) sp; ver = 3; fd = sp3->pppol2tp.fd; tunnel_id = sp3->pppol2tp.s_tunnel; peer_tunnel_id = sp3->pppol2tp.d_tunnel; session_id = sp3->pppol2tp.s_session; peer_session_id = sp3->pppol2tp.d_session; + } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) { + struct sockaddr_pppol2tpin6 *sp6 = + (struct sockaddr_pppol2tpin6 *) sp; + fd = sp6->pppol2tp.fd; + tunnel_id = sp6->pppol2tp.s_tunnel; + peer_tunnel_id = sp6->pppol2tp.d_tunnel; + session_id = sp6->pppol2tp.s_session; + peer_session_id = sp6->pppol2tp.d_session; + } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) { + struct sockaddr_pppol2tpv3in6 *sp6 = + (struct sockaddr_pppol2tpv3in6 *) sp; + ver = 3; + fd = sp6->pppol2tp.fd; + tunnel_id = sp6->pppol2tp.s_tunnel; + peer_tunnel_id = sp6->pppol2tp.d_tunnel; + session_id = sp6->pppol2tp.s_session; + peer_session_id = sp6->pppol2tp.d_session; } else { error = -EINVAL; goto end; /* bad socket address */ @@ -711,12 +735,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->recv_payload_hook == NULL) tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; - if (tunnel->peer_tunnel_id == 0) { - if (ver == 2) - tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel; - else - tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel; - } + if (tunnel->peer_tunnel_id == 0) + tunnel->peer_tunnel_id = peer_tunnel_id; /* Create session if it doesn't already exist. We handle the * case where a session was previously created by the netlink -- cgit v1.2.2 From 2121c3f571f08bd723f449f2477f1582709f5a2a Mon Sep 17 00:00:00 2001 From: Chris Elston Date: Sun, 29 Apr 2012 21:48:51 +0000 Subject: l2tp: show IPv6 addresses in l2tp debugfs file If an L2TP tunnel uses IPv6, make sure the l2tp debugfs file shows the IPv6 address correctly. Signed-off-by: Chris Elston Signed-off-by: James Chapman Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_debugfs.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'net') diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 76130134bfa6..c0d57bad8b79 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -122,6 +122,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); if (tunnel->sock) { struct inet_sock *inet = inet_sk(tunnel->sock); + +#if IS_ENABLED(CONFIG_IPV6) + if (tunnel->sock->sk_family == AF_INET6) { + struct ipv6_pinfo *np = inet6_sk(tunnel->sock); + seq_printf(m, " from %pI6c to %pI6c\n", + &np->saddr, &np->daddr); + } else +#endif seq_printf(m, " from %pI4 to %pI4\n", &inet->inet_saddr, &inet->inet_daddr); if (tunnel->encap == L2TP_ENCAPTYPE_UDP) -- cgit v1.2.2 From f9bac8df908d7c0a36960265c92f3445623b19d1 Mon Sep 17 00:00:00 2001 From: Chris Elston Date: Sun, 29 Apr 2012 21:48:52 +0000 Subject: l2tp: netlink api for l2tpv3 ipv6 unmanaged tunnels This patch adds support for unmanaged L2TPv3 tunnels over IPv6 using the netlink API. We already support unmanaged L2TPv3 tunnels over IPv4. A patch to iproute2 to make use of this feature will be submitted separately. Signed-off-by: Chris Elston Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 78 ++++++++++++++++++++++++++++++++++++++----------- net/l2tp/l2tp_core.h | 4 +++ net/l2tp/l2tp_netlink.c | 48 +++++++++++++++++++++++++++--- 3 files changed, 109 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f1bfae3e1ba6..55fc569c8170 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1366,31 +1366,68 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t { int err = -EINVAL; struct sockaddr_in udp_addr; +#if IS_ENABLED(CONFIG_IPV6) + struct sockaddr_in6 udp6_addr; +#endif struct sockaddr_l2tpip ip_addr; struct socket *sock = NULL; switch (cfg->encap) { case L2TP_ENCAPTYPE_UDP: - err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); - if (err < 0) - goto out; +#if IS_ENABLED(CONFIG_IPV6) + if (cfg->local_ip6 && cfg->peer_ip6) { + err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp); + if (err < 0) + goto out; - sock = *sockp; + sock = *sockp; - memset(&udp_addr, 0, sizeof(udp_addr)); - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->local_ip; - udp_addr.sin_port = htons(cfg->local_udp_port); - err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr)); - if (err < 0) - goto out; + memset(&udp6_addr, 0, sizeof(udp6_addr)); + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, cfg->local_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = htons(cfg->local_udp_port); + err = kernel_bind(sock, (struct sockaddr *) &udp6_addr, + sizeof(udp6_addr)); + if (err < 0) + goto out; - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->peer_ip; - udp_addr.sin_port = htons(cfg->peer_udp_port); - err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0); - if (err < 0) - goto out; + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = htons(cfg->peer_udp_port); + err = kernel_connect(sock, + (struct sockaddr *) &udp6_addr, + sizeof(udp6_addr), 0); + if (err < 0) + goto out; + } else +#endif + { + err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); + if (err < 0) + goto out; + + sock = *sockp; + + 
memset(&udp_addr, 0, sizeof(udp_addr)); + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = htons(cfg->local_udp_port); + err = kernel_bind(sock, (struct sockaddr *) &udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto out; + + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = htons(cfg->peer_udp_port); + err = kernel_connect(sock, + (struct sockaddr *) &udp_addr, + sizeof(udp_addr), 0); + if (err < 0) + goto out; + } if (!cfg->use_udp_checksums) sock->sk->sk_no_check = UDP_CSUM_NOXMIT; @@ -1398,6 +1435,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t break; case L2TP_ENCAPTYPE_IP: +#if IS_ENABLED(CONFIG_IPV6) + if (cfg->local_ip6 && cfg->peer_ip6) { + /* IP encap over IPv6 not yet supported */ + err = -EPROTONOSUPPORT; + goto out; + } +#endif err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); if (err < 0) goto out; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a8c943bf4140..0bf60fc88bb7 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -151,6 +151,10 @@ struct l2tp_tunnel_cfg { /* Used only for kernel-created sockets */ struct in_addr local_ip; struct in_addr peer_ip; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr *local_ip6; + struct in6_addr *peer_ip6; +#endif u16 local_udp_port; u16 peer_udp_port; unsigned int use_udp_checksums:1; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 1dbb9772fc45..24edad0fd9ba 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -133,10 +133,25 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info if (info->attrs[L2TP_ATTR_FD]) { fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); } else { - if (info->attrs[L2TP_ATTR_IP_SADDR]) - cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); - if (info->attrs[L2TP_ATTR_IP_DADDR]) - cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); +#if IS_ENABLED(CONFIG_IPV6) + if (info->attrs[L2TP_ATTR_IP6_SADDR] && + info->attrs[L2TP_ATTR_IP6_DADDR]) { + cfg.local_ip6 = nla_data( + info->attrs[L2TP_ATTR_IP6_SADDR]); + cfg.peer_ip6 = nla_data( + info->attrs[L2TP_ATTR_IP6_DADDR]); + } else +#endif + if (info->attrs[L2TP_ATTR_IP_SADDR] && + info->attrs[L2TP_ATTR_IP_DADDR]) { + cfg.local_ip.s_addr = nla_get_be32( + info->attrs[L2TP_ATTR_IP_SADDR]); + cfg.peer_ip.s_addr = nla_get_be32( + info->attrs[L2TP_ATTR_IP_DADDR]); + } else { + ret = -EINVAL; + goto out; + } if (info->attrs[L2TP_ATTR_UDP_SPORT]) cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); if (info->attrs[L2TP_ATTR_UDP_DPORT]) @@ -225,6 +240,9 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, struct nlattr *nest; struct sock *sk = NULL; struct inet_sock *inet; +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6_pinfo *np = NULL; +#endif struct l2tp_stats stats; unsigned int start; @@ -273,6 +291,11 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, if (!sk) goto out; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + np = inet6_sk(sk); +#endif + inet = inet_sk(sk); switch (tunnel->encap) { @@ -284,6 +307,15 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, goto nla_put_failure; /* NOBREAK */ case L2TP_ENCAPTYPE_IP: +#if IS_ENABLED(CONFIG_IPV6) + if (np) { + if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr), + &np->saddr) || + nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr), + &np->daddr)) + goto 
nla_put_failure; + } else +#endif if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) || nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr)) goto nla_put_failure; @@ -752,6 +784,14 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { [L2TP_ATTR_MTU] = { .type = NLA_U16, }, [L2TP_ATTR_MRU] = { .type = NLA_U16, }, [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, + [L2TP_ATTR_IP6_SADDR] = { + .type = NLA_BINARY, + .len = sizeof(struct in6_addr), + }, + [L2TP_ATTR_IP6_DADDR] = { + .type = NLA_BINARY, + .len = sizeof(struct in6_addr), + }, [L2TP_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1, -- cgit v1.2.2 From a495f8364efe11f4813914b09cf0d026364d6969 Mon Sep 17 00:00:00 2001 From: Chris Elston Date: Sun, 29 Apr 2012 21:48:53 +0000 Subject: ipv6: Export ipv6 functions for use by other protocols For implementing other protocols on top of IPv6, such as L2TPv3's IP encapsulation over ipv6, we'd like to call some IPv6 functions which are not currently exported. This patch exports them. Signed-off-by: Chris Elston Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/ipv6/datagram.c | 4 ++++ net/ipv6/exthdrs.c | 1 + net/ipv6/ip6_flowlabel.c | 1 + net/ipv6/ip6_output.c | 3 +++ 4 files changed, 9 insertions(+) (limited to 'net') diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7fba35aea06c..b8b61ac88bc2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -202,6 +203,7 @@ out: fl6_sock_release(flowlabel); return err; } +EXPORT_SYMBOL_GPL(ip6_datagram_connect); void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) @@ -414,6 +416,7 @@ out_free_skb: out: return err; } +EXPORT_SYMBOL_GPL(ipv6_recv_error); /* * Handle IPV6_RECVPATHMTU @@ -868,3 +871,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk, exit_f: return err; } +EXPORT_SYMBOL_GPL(datagram_send_ctl); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index aa0a51e64682..a93bd231eca1 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -883,6 +883,7 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, return opt; } +EXPORT_SYMBOL_GPL(ipv6_fixup_options); /** * fl6_update_dst - update flowi destination address with info given diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 1dd632971bce..cb43df690210 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -294,6 +294,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space, opt_space->opt_flen = fopt->opt_flen; return opt_space; } +EXPORT_SYMBOL_GPL(fl6_merge_options); static unsigned long check_linger(unsigned long ttl) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index b347062aa809..d8e05af2c4bb 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1535,6 +1535,7 @@ error: IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); return err; } +EXPORT_SYMBOL_GPL(ip6_append_data); static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) { @@ -1638,6 +1639,7 @@ error: IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); goto out; } +EXPORT_SYMBOL_GPL(ip6_push_pending_frames); void ip6_flush_pending_frames(struct sock *sk) { @@ -1652,3 +1654,4 @@ void ip6_flush_pending_frames(struct sock *sk) ip6_cork_release(inet_sk(sk), inet6_sk(sk)); } +EXPORT_SYMBOL_GPL(ip6_flush_pending_frames); -- cgit v1.2.2 From 
a32e0eec7042b21ccb52896cf715e3e2641fed93 Mon Sep 17 00:00:00 2001 From: Chris Elston Date: Sun, 29 Apr 2012 21:48:54 +0000 Subject: l2tp: introduce L2TPv3 IP encapsulation support for IPv6 L2TPv3 defines an IP encapsulation packet format where data is carried directly over IP (no UDP). The kernel already has support for L2TP IP encapsulation over IPv4 (l2tp_ip). This patch introduces support for L2TP IP encapsulation over IPv6. The implementation is derived from ipv6/raw and ipv4/l2tp_ip. Signed-off-by: Chris Elston Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/l2tp/Makefile | 3 + net/l2tp/l2tp_ip6.c | 792 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 795 insertions(+) create mode 100644 net/l2tp/l2tp_ip6.c (limited to 'net') diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile index 110e7bc2de5e..2870f41ea44d 100644 --- a/net/l2tp/Makefile +++ b/net/l2tp/Makefile @@ -10,3 +10,6 @@ obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o +ifneq ($(CONFIG_IPV6),) +obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o +endif diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c new file mode 100644 index 000000000000..88f0abe35443 --- /dev/null +++ b/net/l2tp/l2tp_ip6.c @@ -0,0 +1,792 @@ +/* + * L2TPv3 IP encapsulation support for IPv6 + * + * Copyright (c) 2012 Katalix Systems Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "l2tp_core.h" + +struct l2tp_ip6_sock { + /* inet_sock has to be the first member of l2tp_ip6_sock */ + struct inet_sock inet; + + u32 conn_id; + u32 peer_conn_id; + + /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see + inet6_sk_generic */ + struct ipv6_pinfo inet6; +}; + +static DEFINE_RWLOCK(l2tp_ip6_lock); +static struct hlist_head l2tp_ip6_table; +static struct hlist_head l2tp_ip6_bind_table; + +static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) +{ + return (struct l2tp_ip6_sock *)sk; +} + +static struct sock *__l2tp_ip6_bind_lookup(struct net *net, + struct in6_addr *laddr, + int dif, u32 tunnel_id) +{ + struct hlist_node *node; + struct sock *sk; + + sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) { + struct in6_addr *addr = inet6_rcv_saddr(sk); + struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); + + if (l2tp == NULL) + continue; + + if ((l2tp->conn_id == tunnel_id) && + net_eq(sock_net(sk), net) && + !(addr && ipv6_addr_equal(addr, laddr)) && + !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) + goto found; + } + + sk = NULL; +found: + return sk; +} + +static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, + struct in6_addr *laddr, + int dif, u32 tunnel_id) +{ + struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id); + if (sk) + sock_hold(sk); + + return sk; +} + +/* When processing receive frames, there are two cases to + * consider. Data frames consist of a non-zero session-id and an + * optional cookie. 
Control frames consist of a regular L2TP header + * preceded by 32-bits of zeros. + * + * L2TPv3 Session Header Over IP + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Session ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Cookie (optional, maximum 64 bits)... + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * L2TPv3 Control Message Header Over IP + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | (32 bits of zeros) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Control Connection ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Ns | Nr | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * All control frames are passed to userspace. + */ +static int l2tp_ip6_recv(struct sk_buff *skb) +{ + struct sock *sk; + u32 session_id; + u32 tunnel_id; + unsigned char *ptr, *optr; + struct l2tp_session *session; + struct l2tp_tunnel *tunnel = NULL; + int length; + int offset; + + /* Point to L2TP header */ + optr = ptr = skb->data; + + if (!pskb_may_pull(skb, 4)) + goto discard; + + session_id = ntohl(*((__be32 *) ptr)); + ptr += 4; + + /* RFC3931: L2TP/IP packets have the first 4 bytes containing + * the session_id. If it is 0, the packet is a L2TP control + * frame and the session_id value can be discarded. + */ + if (session_id == 0) { + __skb_pull(skb, 4); + goto pass_up; + } + + /* Ok, this is a data packet. Lookup the session. */ + session = l2tp_session_find(&init_net, NULL, session_id); + if (session == NULL) + goto discard; + + tunnel = session->tunnel; + if (tunnel == NULL) + goto discard; + + /* Trace packet contents, if enabled */ + if (tunnel->debug & L2TP_MSG_DATA) { + length = min(32u, skb->len); + if (!pskb_may_pull(skb, length)) + goto discard; + + printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); + + offset = 0; + do { + printk(" %02X", ptr[offset]); + } while (++offset < length); + + printk("\n"); + } + + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, + tunnel->recv_payload_hook); + return 0; + +pass_up: + /* Get the tunnel_id from the L2TP header */ + if (!pskb_may_pull(skb, 12)) + goto discard; + + if ((skb->data[0] & 0xc0) != 0xc0) + goto discard; + + tunnel_id = ntohl(*(__be32 *) &skb->data[4]); + tunnel = l2tp_tunnel_find(&init_net, tunnel_id); + if (tunnel != NULL) + sk = tunnel->sock; + else { + struct ipv6hdr *iph = ipv6_hdr(skb); + + read_lock_bh(&l2tp_ip6_lock); + sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, + 0, tunnel_id); + read_unlock_bh(&l2tp_ip6_lock); + } + + if (sk == NULL) + goto discard; + + sock_hold(sk); + + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) + goto discard_put; + + nf_reset(skb); + + return sk_receive_skb(sk, skb, 1); + +discard_put: + sock_put(sk); + +discard: + kfree_skb(skb); + return 0; +} + +static int l2tp_ip6_open(struct sock *sk) +{ + /* Prevent autobind. We don't have ports. 
*/ + inet_sk(sk)->inet_num = IPPROTO_L2TP; + + write_lock_bh(&l2tp_ip6_lock); + sk_add_node(sk, &l2tp_ip6_table); + write_unlock_bh(&l2tp_ip6_lock); + + return 0; +} + +static void l2tp_ip6_close(struct sock *sk, long timeout) +{ + write_lock_bh(&l2tp_ip6_lock); + hlist_del_init(&sk->sk_bind_node); + sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); + + sk_common_release(sk); +} + +static void l2tp_ip6_destroy_sock(struct sock *sk) +{ + lock_sock(sk); + ip6_flush_pending_frames(sk); + release_sock(sk); + + inet6_destroy_sock(sk); +} + +static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); + struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; + __be32 v4addr = 0; + int addr_type; + int err; + + if (addr_len < sizeof(*addr)) + return -EINVAL; + + addr_type = ipv6_addr_type(&addr->l2tp_addr); + + /* l2tp_ip6 sockets are IPv6 only */ + if (addr_type == IPV6_ADDR_MAPPED) + return -EADDRNOTAVAIL; + + /* L2TP is point-point, not multicast */ + if (addr_type & IPV6_ADDR_MULTICAST) + return -EADDRNOTAVAIL; + + err = -EADDRINUSE; + read_lock_bh(&l2tp_ip6_lock); + if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, + sk->sk_bound_dev_if, addr->l2tp_conn_id)) + goto out_in_use; + read_unlock_bh(&l2tp_ip6_lock); + + lock_sock(sk); + + err = -EINVAL; + if (sk->sk_state != TCP_CLOSE) + goto out_unlock; + + /* Check if the address belongs to the host. */ + rcu_read_lock(); + if (addr_type != IPV6_ADDR_ANY) { + struct net_device *dev = NULL; + + if (addr_type & IPV6_ADDR_LINKLOCAL) { + if (addr_len >= sizeof(struct sockaddr_in6) && + addr->l2tp_scope_id) { + /* Override any existing binding, if another + * one is supplied by user. + */ + sk->sk_bound_dev_if = addr->l2tp_scope_id; + } + + /* Binding to link-local address requires an + interface */ + if (!sk->sk_bound_dev_if) + goto out_unlock_rcu; + + err = -ENODEV; + dev = dev_get_by_index_rcu(sock_net(sk), + sk->sk_bound_dev_if); + if (!dev) + goto out_unlock_rcu; + } + + /* ipv4 addr of the socket is invalid. Only the + * unspecified and mapped address have a v4 equivalent. 
+ */ + v4addr = LOOPBACK4_IPV6; + err = -EADDRNOTAVAIL; + if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0)) + goto out_unlock_rcu; + } + rcu_read_unlock(); + + inet->inet_rcv_saddr = inet->inet_saddr = v4addr; + np->rcv_saddr = addr->l2tp_addr; + np->saddr = addr->l2tp_addr; + + l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; + + write_lock_bh(&l2tp_ip6_lock); + sk_add_bind_node(sk, &l2tp_ip6_bind_table); + sk_del_node_init(sk); + write_unlock_bh(&l2tp_ip6_lock); + + release_sock(sk); + return 0; + +out_unlock_rcu: + rcu_read_unlock(); +out_unlock: + release_sock(sk); + return err; + +out_in_use: + read_unlock_bh(&l2tp_ip6_lock); + return err; +} + +static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, + int addr_len) +{ + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr; + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; + struct in6_addr *daddr; + int addr_type; + int rc; + + if (addr_len < sizeof(*lsa)) + return -EINVAL; + + addr_type = ipv6_addr_type(&usin->sin6_addr); + if (addr_type & IPV6_ADDR_MULTICAST) + return -EINVAL; + + if (addr_type & IPV6_ADDR_MAPPED) { + daddr = &usin->sin6_addr; + if (ipv4_is_multicast(daddr->s6_addr32[3])) + return -EINVAL; + } + + rc = ip6_datagram_connect(sk, uaddr, addr_len); + + lock_sock(sk); + + l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; + + write_lock_bh(&l2tp_ip6_lock); + hlist_del_init(&sk->sk_bind_node); + sk_add_bind_node(sk, &l2tp_ip6_bind_table); + write_unlock_bh(&l2tp_ip6_lock); + + release_sock(sk); + + return rc; +} + +static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, + int *uaddr_len, int peer) +{ + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; + struct sock *sk = sock->sk; + struct ipv6_pinfo *np = inet6_sk(sk); + struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk); + + lsa->l2tp_family = AF_INET6; + lsa->l2tp_flowinfo = 0; + lsa->l2tp_scope_id = 0; + if (peer) { + if (!lsk->peer_conn_id) + return -ENOTCONN; + lsa->l2tp_conn_id = lsk->peer_conn_id; + lsa->l2tp_addr = np->daddr; + if (np->sndflow) + lsa->l2tp_flowinfo = np->flow_label; + } else { + if (ipv6_addr_any(&np->rcv_saddr)) + lsa->l2tp_addr = np->saddr; + else + lsa->l2tp_addr = np->rcv_saddr; + + lsa->l2tp_conn_id = lsk->conn_id; + } + if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) + lsa->l2tp_scope_id = sk->sk_bound_dev_if; + *uaddr_len = sizeof(*lsa); + return 0; +} + +static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) +{ + int rc; + + /* Charge it to the socket, dropping if the queue is full. */ + rc = sock_queue_rcv_skb(sk, skb); + if (rc < 0) + goto drop; + + return 0; + +drop: + IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); + kfree_skb(skb); + return -1; +} + +static int l2tp_ip6_push_pending_frames(struct sock *sk) +{ + struct sk_buff *skb; + __be32 *transhdr = NULL; + int err = 0; + + skb = skb_peek(&sk->sk_write_queue); + if (skb == NULL) + goto out; + + transhdr = (__be32 *)skb_transport_header(skb); + *transhdr = 0; + + err = ip6_push_pending_frames(sk); + +out: + return err; +} + +/* Userspace will call sendmsg() on the tunnel socket to send L2TP + * control frames. 
+ */ +static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t len) +{ + struct ipv6_txoptions opt_space; + struct sockaddr_l2tpip6 *lsa = + (struct sockaddr_l2tpip6 *) msg->msg_name; + struct in6_addr *daddr, *final_p, final; + struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_txoptions *opt = NULL; + struct ip6_flowlabel *flowlabel = NULL; + struct dst_entry *dst = NULL; + struct flowi6 fl6; + int addr_len = msg->msg_namelen; + int hlimit = -1; + int tclass = -1; + int dontfrag = -1; + int transhdrlen = 4; /* zero session-id */ + int ulen = len + transhdrlen; + int err; + + /* Rough check on arithmetic overflow, + better check is made in ip6_append_data(). + */ + if (len > INT_MAX) + return -EMSGSIZE; + + /* Mirror BSD error message compatibility */ + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + /* + * Get and verify the address. + */ + memset(&fl6, 0, sizeof(fl6)); + + fl6.flowi6_mark = sk->sk_mark; + + if (lsa) { + if (addr_len < SIN6_LEN_RFC2133) + return -EINVAL; + + if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6) + return -EAFNOSUPPORT; + + daddr = &lsa->l2tp_addr; + if (np->sndflow) { + fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK; + if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { + flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); + if (flowlabel == NULL) + return -EINVAL; + daddr = &flowlabel->dst; + } + } + + /* + * Otherwise it will be difficult to maintain + * sk->sk_dst_cache. + */ + if (sk->sk_state == TCP_ESTABLISHED && + ipv6_addr_equal(daddr, &np->daddr)) + daddr = &np->daddr; + + if (addr_len >= sizeof(struct sockaddr_in6) && + lsa->l2tp_scope_id && + ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = lsa->l2tp_scope_id; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; + + daddr = &np->daddr; + fl6.flowlabel = np->flow_label; + } + + if (fl6.flowi6_oif == 0) + fl6.flowi6_oif = sk->sk_bound_dev_if; + + if (msg->msg_controllen) { + opt = &opt_space; + memset(opt, 0, sizeof(struct ipv6_txoptions)); + opt->tot_len = sizeof(struct ipv6_txoptions); + + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); + if (err < 0) { + fl6_sock_release(flowlabel); + return err; + } + if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) { + flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); + if (flowlabel == NULL) + return -EINVAL; + } + if (!(opt->opt_nflen|opt->opt_flen)) + opt = NULL; + } + + if (opt == NULL) + opt = np->opt; + if (flowlabel) + opt = fl6_merge_options(&opt_space, flowlabel, opt); + opt = ipv6_fixup_options(&opt_space, opt); + + fl6.flowi6_proto = sk->sk_protocol; + if (!ipv6_addr_any(daddr)) + fl6.daddr = *daddr; + else + fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ + if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) + fl6.saddr = np->saddr; + + final_p = fl6_update_dst(&fl6, opt, &final); + + if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) + fl6.flowi6_oif = np->mcast_oif; + else if (!fl6.flowi6_oif) + fl6.flowi6_oif = np->ucast_oif; + + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + + dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; + } + + if (hlimit < 0) { + if (ipv6_addr_is_multicast(&fl6.daddr)) + hlimit = np->mcast_hops; + else + hlimit = np->hop_limit; + if (hlimit < 0) + hlimit = ip6_dst_hoplimit(dst); + } + + if (tclass < 0) + tclass = np->tclass; + + if (dontfrag < 0) + dontfrag = np->dontfrag; + + if (msg->msg_flags & 
MSG_CONFIRM) + goto do_confirm; + +back_from_confirm: + lock_sock(sk); + err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, + ulen, transhdrlen, hlimit, tclass, opt, + &fl6, (struct rt6_info *)dst, + msg->msg_flags, dontfrag); + if (err) + ip6_flush_pending_frames(sk); + else if (!(msg->msg_flags & MSG_MORE)) + err = l2tp_ip6_push_pending_frames(sk); + release_sock(sk); +done: + dst_release(dst); +out: + fl6_sock_release(flowlabel); + + return err < 0 ? err : len; + +do_confirm: + dst_confirm(dst); + if (!(msg->msg_flags & MSG_PROBE) || len) + goto back_from_confirm; + err = 0; + goto done; +} + +static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len) +{ + struct inet_sock *inet = inet_sk(sk); + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; + size_t copied = 0; + int err = -EOPNOTSUPP; + struct sk_buff *skb; + + if (flags & MSG_OOB) + goto out; + + if (addr_len) + *addr_len = sizeof(*lsa); + + if (flags & MSG_ERRQUEUE) + return ipv6_recv_error(sk, msg, len); + + skb = skb_recv_datagram(sk, flags, noblock, &err); + if (!skb) + goto out; + + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (err) + goto done; + + sock_recv_timestamp(msg, sk, skb); + + /* Copy the address. */ + if (lsa) { + lsa->l2tp_family = AF_INET6; + lsa->l2tp_unused = 0; + lsa->l2tp_addr = ipv6_hdr(skb)->saddr; + lsa->l2tp_flowinfo = 0; + lsa->l2tp_scope_id = 0; + if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) + lsa->l2tp_scope_id = IP6CB(skb)->iif; + } + + if (inet->cmsg_flags) + ip_cmsg_recv(msg, skb); + + if (flags & MSG_TRUNC) + copied = skb->len; +done: + skb_free_datagram(sk, skb); +out: + return err ? 
err : copied; +} + +static struct proto l2tp_ip6_prot = { + .name = "L2TP/IPv6", + .owner = THIS_MODULE, + .init = l2tp_ip6_open, + .close = l2tp_ip6_close, + .bind = l2tp_ip6_bind, + .connect = l2tp_ip6_connect, + .disconnect = udp_disconnect, + .ioctl = udp_ioctl, + .destroy = l2tp_ip6_destroy_sock, + .setsockopt = ipv6_setsockopt, + .getsockopt = ipv6_getsockopt, + .sendmsg = l2tp_ip6_sendmsg, + .recvmsg = l2tp_ip6_recvmsg, + .backlog_rcv = l2tp_ip6_backlog_recv, + .hash = inet_hash, + .unhash = inet_unhash, + .obj_size = sizeof(struct l2tp_ip6_sock), +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, +#endif +}; + +static const struct proto_ops l2tp_ip6_ops = { + .family = PF_INET6, + .owner = THIS_MODULE, + .release = inet6_release, + .bind = inet6_bind, + .connect = inet_dgram_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = l2tp_ip6_getname, + .poll = datagram_poll, + .ioctl = inet6_ioctl, + .listen = sock_no_listen, + .shutdown = inet_shutdown, + .setsockopt = sock_common_setsockopt, + .getsockopt = sock_common_getsockopt, + .sendmsg = inet_sendmsg, + .recvmsg = sock_common_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_sock_common_setsockopt, + .compat_getsockopt = compat_sock_common_getsockopt, +#endif +}; + +static struct inet_protosw l2tp_ip6_protosw = { + .type = SOCK_DGRAM, + .protocol = IPPROTO_L2TP, + .prot = &l2tp_ip6_prot, + .ops = &l2tp_ip6_ops, + .no_check = 0, +}; + +static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { + .handler = l2tp_ip6_recv, +}; + +static int __init l2tp_ip6_init(void) +{ + int err; + + printk(KERN_INFO "L2TP IP encapsulation support for IPv6 (L2TPv3)\n"); + + err = proto_register(&l2tp_ip6_prot, 1); + if (err != 0) + goto out; + + err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP); + if (err) + goto out1; + + inet6_register_protosw(&l2tp_ip6_protosw); + return 0; + +out1: + proto_unregister(&l2tp_ip6_prot); +out: + return err; +} + +static void __exit l2tp_ip6_exit(void) +{ + inet6_unregister_protosw(&l2tp_ip6_protosw); + inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP); + proto_unregister(&l2tp_ip6_prot); +} + +module_init(l2tp_ip6_init); +module_exit(l2tp_ip6_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chris Elston "); +MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6"); +MODULE_VERSION("1.0"); + +/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like + * enums + */ +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP); -- cgit v1.2.2 From 5dac94e109263e75ab7fe4e66ef88e9b49f500bf Mon Sep 17 00:00:00 2001 From: James Chapman Date: Sun, 29 Apr 2012 21:48:55 +0000 Subject: l2tp: let iproute2 create L2TPv3 IP tunnels using IPv6 The netlink API lets users create unmanaged L2TPv3 tunnels using iproute2. Until now, a request to create an unmanaged L2TPv3 IP encapsulation tunnel over IPv6 would be rejected with EPROTONOSUPPORT. Now that l2tp_ip6 implements sockets for L2TP IP encapsulation over IPv6, we can add support for that tunnel type. Signed-off-by: James Chapman Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 72 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 55fc569c8170..456b52d8f6d8 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1368,6 +1368,7 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t struct sockaddr_in udp_addr; #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 udp6_addr; + struct sockaddr_l2tpip6 ip6_addr; #endif struct sockaddr_l2tpip ip_addr; struct socket *sock = NULL; @@ -1437,32 +1438,59 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t case L2TP_ENCAPTYPE_IP: #if IS_ENABLED(CONFIG_IPV6) if (cfg->local_ip6 && cfg->peer_ip6) { - /* IP encap over IPv6 not yet supported */ - err = -EPROTONOSUPPORT; - goto out; - } -#endif - err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); - if (err < 0) - goto out; + err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP, + sockp); + if (err < 0) + goto out; - sock = *sockp; + sock = *sockp; - memset(&ip_addr, 0, sizeof(ip_addr)); - ip_addr.l2tp_family = AF_INET; - ip_addr.l2tp_addr = cfg->local_ip; - ip_addr.l2tp_conn_id = tunnel_id; - err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr)); - if (err < 0) - goto out; + memset(&ip6_addr, 0, sizeof(ip6_addr)); + ip6_addr.l2tp_family = AF_INET6; + memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, + sizeof(ip6_addr.l2tp_addr)); + ip6_addr.l2tp_conn_id = tunnel_id; + err = kernel_bind(sock, (struct sockaddr *) &ip6_addr, + sizeof(ip6_addr)); + if (err < 0) + goto out; - ip_addr.l2tp_family = AF_INET; - ip_addr.l2tp_addr = cfg->peer_ip; - ip_addr.l2tp_conn_id = peer_tunnel_id; - err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0); - if (err < 0) - goto out; + ip6_addr.l2tp_family = AF_INET6; + memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6, + sizeof(ip6_addr.l2tp_addr)); + ip6_addr.l2tp_conn_id = peer_tunnel_id; + err = kernel_connect(sock, + (struct sockaddr *) &ip6_addr, + sizeof(ip6_addr), 0); + if (err < 0) + goto out; + } else +#endif + { + err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, + sockp); + if (err < 0) + goto out; + sock = *sockp; + + memset(&ip_addr, 0, sizeof(ip_addr)); + ip_addr.l2tp_family = AF_INET; + ip_addr.l2tp_addr = cfg->local_ip; + ip_addr.l2tp_conn_id = tunnel_id; + err = kernel_bind(sock, (struct sockaddr *) &ip_addr, + sizeof(ip_addr)); + if (err < 0) + goto out; + + ip_addr.l2tp_family = AF_INET; + ip_addr.l2tp_addr = cfg->peer_ip; + ip_addr.l2tp_conn_id = peer_tunnel_id; + err = kernel_connect(sock, (struct sockaddr *) &ip_addr, + sizeof(ip_addr), 0); + if (err < 0) + goto out; + } break; default: -- cgit v1.2.2 From e4cbb02a1070ebf0185f67a8887cc05f8a183d71 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 30 Apr 2012 16:07:09 +0000 Subject: net: add a prefetch in socket backlog processing TCP or UDP stacks have big enough latencies that prefetching next pointer is worth it. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/sock.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 836bca6485f5..1a8835117fd6 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1700,6 +1700,7 @@ static void __release_sock(struct sock *sk) do { struct sk_buff *next = skb->next; + prefetch(next); WARN_ON_ONCE(skb_dst_is_noref(skb)); skb->next = NULL; sk_backlog_rcv(sk, skb); -- cgit v1.2.2 From e4ae004b84b315dd4b762e474f97403eac70f76a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 30 Apr 2012 23:11:05 +0000 Subject: netem: add ECN capability Add ECN (Explicit Congestion Notification) marking capability to netem tc qdisc add dev eth0 root netem drop 0.5 ecn Instead of dropping packets, try to ECN mark them. Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Tom Herbert Cc: Hagen Paul Pfeifer Cc: Stephen Hemminger Acked-by: Hagen Paul Pfeifer Signed-off-by: David S. Miller --- net/sched/sch_netem.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 110973145a4b..231cd11aa6e2 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -26,6 +26,7 @@ #include #include +#include #define VERSION "1.3" @@ -78,6 +79,7 @@ struct netem_sched_data { psched_tdiff_t jitter; u32 loss; + u32 ecn; u32 limit; u32 counter; u32 gap; @@ -374,9 +376,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ++count; /* Drop packet? */ - if (loss_event(q)) - --count; - + if (loss_event(q)) { + if (q->ecn && INET_ECN_set_ce(skb)) + sch->qstats.drops++; /* mark packet */ + else + --count; + } if (count == 0) { sch->qstats.drops++; kfree_skb(skb); @@ -706,6 +711,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, + [TCA_NETEM_ECN] = { .type = NLA_U32 }, }; static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, @@ -776,6 +782,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_NETEM_RATE]) get_rate(sch, tb[TCA_NETEM_RATE]); + if (tb[TCA_NETEM_ECN]) + q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); + q->loss_model = CLG_RANDOM; if (tb[TCA_NETEM_LOSS]) ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); @@ -902,6 +911,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) goto nla_put_failure; + if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) + goto nla_put_failure; + if (dump_loss_model(q, skb) != 0) goto nla_put_failure; -- cgit v1.2.2 From 1fbc340514fc3003514bd681b372e1f47ae6183f Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 2 May 2012 13:30:02 +0000 Subject: tcp: early retransmit: tcp_enter_recovery() This a prepartion patch that refactors the code to enter recovery into a new function tcp_enter_recovery(). It's needed to implement the delayed fast retransmit in ER. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 61 +++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 96a631deb4e6..be8e09d2c6b1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3022,6 +3022,38 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; } +static void tcp_enter_recovery(struct sock *sk, bool ece_ack) +{ + struct tcp_sock *tp = tcp_sk(sk); + int mib_idx; + + if (tcp_is_reno(tp)) + mib_idx = LINUX_MIB_TCPRENORECOVERY; + else + mib_idx = LINUX_MIB_TCPSACKRECOVERY; + + NET_INC_STATS_BH(sock_net(sk), mib_idx); + + tp->high_seq = tp->snd_nxt; + tp->prior_ssthresh = 0; + tp->undo_marker = tp->snd_una; + tp->undo_retrans = tp->retrans_out; + + if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { + if (!ece_ack) + tp->prior_ssthresh = tcp_current_ssthresh(sk); + tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); + TCP_ECN_queue_cwr(tp); + } + + tp->bytes_acked = 0; + tp->snd_cwnd_cnt = 0; + tp->prior_cwnd = tp->snd_cwnd; + tp->prr_delivered = 0; + tp->prr_out = 0; + tcp_set_ca_state(sk, TCP_CA_Recovery); +} + /* Process an event, which can update packets-in-flight not trivially. * Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and @@ -3041,7 +3073,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, struct tcp_sock *tp = tcp_sk(sk); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); - int fast_rexmit = 0, mib_idx; + int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; @@ -3142,32 +3174,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, } /* Otherwise enter Recovery state */ - - if (tcp_is_reno(tp)) - mib_idx = LINUX_MIB_TCPRENORECOVERY; - else - mib_idx = LINUX_MIB_TCPSACKRECOVERY; - - NET_INC_STATS_BH(sock_net(sk), mib_idx); - - tp->high_seq = tp->snd_nxt; - tp->prior_ssthresh = 0; - tp->undo_marker = tp->snd_una; - tp->undo_retrans = tp->retrans_out; - - if (icsk->icsk_ca_state < TCP_CA_CWR) { - if (!(flag & FLAG_ECE)) - tp->prior_ssthresh = tcp_current_ssthresh(sk); - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - TCP_ECN_queue_cwr(tp); - } - - tp->bytes_acked = 0; - tp->snd_cwnd_cnt = 0; - tp->prior_cwnd = tp->snd_cwnd; - tp->prr_delivered = 0; - tp->prr_out = 0; - tcp_set_ca_state(sk, TCP_CA_Recovery); + tcp_enter_recovery(sk, (flag & FLAG_ECE)); fast_rexmit = 1; } -- cgit v1.2.2 From eed530b6c67624db3f2cf477bac7c4d005d8f7ba Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 2 May 2012 13:30:03 +0000 Subject: tcp: early retransmit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch implements RFC 5827 early retransmit (ER) for TCP. It reduces DUPACK threshold (dupthresh) if outstanding packets are less than 4 to recover losses by fast recovery instead of timeout. While the algorithm is simple, small but frequent network reordering makes this feature dangerous: the connection repeatedly enter false recovery and degrade performance. Therefore we implement a mitigation suggested in the appendix of the RFC that delays entering fast recovery by a small interval, i.e., RTT/4. Currently ER is conservative and is disabled for the rest of the connection after the first reordering event. 
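As a rough illustration of the mode-1 behaviour described above (this sketch is not part of the patch; the helper name er_dupack_threshold and its guard conditions are assumptions, and the real kernel code additionally consults SACK state and whether new data could still be sent):

/* Sketch only: RFC 5827 threshold reduction under the assumptions above. */
static unsigned int er_dupack_threshold(unsigned int packets_out,
					unsigned int dupthresh)
{
	/* With two or three segments outstanding, allow fast retransmit
	 * after packets_out - 1 duplicate ACKs instead of the usual three.
	 */
	if (packets_out > 1 && packets_out < 4)
		return packets_out - 1;
	return dupthresh;	/* classic threshold, normally 3 */
}
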
A large scale web server experiment on the performance impact of ER is summarized in section 6 of the paper "Proportional Rate Reduction for TCP”, IMC 2011. http://conferences.sigcomm.org/imc/2011/docs/p155.pdf Note that Linux has a similar feature called THIN_DUPACK. The differences are THIN_DUPACK do not mitigate reorderings and is only used after slow start. Currently ER is disabled if THIN_DUPACK is enabled. I would be happy to merge THIN_DUPACK feature with ER if people think it's a good idea. ER is enabled by sysctl_tcp_early_retrans: 0: Disables ER 1: Reduce dupthresh to packets_out - 1 when outstanding packets < 4. 2: (Default) reduce dupthresh like mode 1. In addition, delay entering fast recovery by RTT/4. Note: mode 2 is implemented in the third part of this patch series. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/sysctl_net_ipv4.c | 10 ++++++++++ net/ipv4/tcp.c | 3 +++ net/ipv4/tcp_input.c | 15 +++++++++++++++ net/ipv4/tcp_minisocks.c | 1 + 4 files changed, 29 insertions(+) (limited to 'net') diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 33417f84e07f..ef32956ed655 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -27,6 +27,7 @@ #include static int zero; +static int two = 2; static int tcp_retr1_max = 255; static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; @@ -676,6 +677,15 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_early_retrans", + .data = &sysctl_tcp_early_retrans, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &two, + }, { .procname = "udp_mem", .data = &sysctl_udp_mem, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9670af341931..6802c89bc44d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -395,6 +395,7 @@ void tcp_init_sock(struct sock *sk) tp->mss_cache = TCP_MSS_DEFAULT; tp->reordering = sysctl_tcp_reordering; + tcp_enable_early_retrans(tp); icsk->icsk_ca_ops = &tcp_init_congestion_ops; sk->sk_state = TCP_CLOSE; @@ -2495,6 +2496,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, err = -EINVAL; else tp->thin_dupack = val; + if (tp->thin_dupack) + tcp_disable_early_retrans(tp); break; case TCP_REPAIR: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index be8e09d2c6b1..e042cabb695e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -99,6 +99,7 @@ int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_abc __read_mostly; +int sysctl_tcp_early_retrans __read_mostly = 2; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ @@ -906,6 +907,7 @@ static void tcp_init_metrics(struct sock *sk) if (dst_metric(dst, RTAX_REORDERING) && tp->reordering != dst_metric(dst, RTAX_REORDERING)) { tcp_disable_fack(tp); + tcp_disable_early_retrans(tp); tp->reordering = dst_metric(dst, RTAX_REORDERING); } @@ -988,6 +990,9 @@ static void tcp_update_reordering(struct sock *sk, const int metric, #endif tcp_disable_fack(tp); } + + if (metric > 0) + tcp_disable_early_retrans(tp); } /* This must be called before lost_out is incremented */ @@ -2492,6 +2497,16 @@ static int tcp_time_to_recover(struct sock *sk) tcp_is_sack(tp) && !tcp_send_head(sk)) return 1; + /* Trick#6: TCP early retransmit, per RFC5827. 
To avoid spurious + * retransmissions due to small network reorderings, we implement + * Mitigation A.3 in the RFC and delay the retransmission for a short + * interval if appropriate. + */ + if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && + (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && + !tcp_may_send_now(sk)) + return 1; + return 0; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 3cabafb5cdd1..6f6a91832826 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -482,6 +482,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->sacked_out = 0; newtp->fackets_out = 0; newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + tcp_enable_early_retrans(newtp); /* So many TCP implementations out there (incorrectly) count the * initial SYN frame in their delayed-ACK and congestion control -- cgit v1.2.2 From 750ea2bafa55aaed208b2583470ecd7122225634 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 2 May 2012 13:30:04 +0000 Subject: tcp: early retransmit: delayed fast retransmit Implementing the advanced early retransmit (sysctl_tcp_early_retrans==2). Delays the fast retransmit by an interval of RTT/4. We borrow the RTO timer to implement the delay. If we receive another ACK or send a new packet, the timer is cancelled and restored to original RTO value offset by time elapsed. When the delayed-ER timer fires, we enter fast recovery and perform fast retransmit. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 69 +++++++++++++++++++++++++++++++++++++++++++++------ net/ipv4/tcp_output.c | 5 ++-- net/ipv4/tcp_timer.c | 5 ++++ 3 files changed, 69 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index e042cabb695e..7096790e06bf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2344,6 +2344,27 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; } +static bool tcp_pause_early_retransmit(struct sock *sk, int flag) +{ + struct tcp_sock *tp = tcp_sk(sk); + unsigned long delay; + + /* Delay early retransmit and entering fast recovery for + * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples + * available, or RTO is scheduled to fire first. + */ + if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt) + return false; + + delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); + if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) + return false; + + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); + tp->early_retrans_delayed = 1; + return true; +} + static inline int tcp_skb_timedout(const struct sock *sk, const struct sk_buff *skb) { @@ -2451,7 +2472,7 @@ static inline int tcp_head_timedout(const struct sock *sk) * Main question: may we further continue forward transmission * with the same cwnd? 
*/ -static int tcp_time_to_recover(struct sock *sk) +static int tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; @@ -2505,7 +2526,7 @@ static int tcp_time_to_recover(struct sock *sk) if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && !tcp_may_send_now(sk)) - return 1; + return !tcp_pause_early_retransmit(sk, flag); return 0; } @@ -3172,7 +3193,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); - if (!tcp_time_to_recover(sk)) { + if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag); return; } @@ -3271,16 +3292,47 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ -static void tcp_rearm_rto(struct sock *sk) +void tcp_rearm_rto(struct sock *sk) { - const struct tcp_sock *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, TCP_RTO_MAX); + u32 rto = inet_csk(sk)->icsk_rto; + /* Offset the time elapsed after installing regular RTO */ + if (tp->early_retrans_delayed) { + struct sk_buff *skb = tcp_write_queue_head(sk); + const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; + s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); + /* delta may not be positive if the socket is locked + * when the delayed ER timer fires and is rescheduled. + */ + if (delta > 0) + rto = delta; + } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, + TCP_RTO_MAX); } + tp->early_retrans_delayed = 0; +} + +/* This function is called when the delayed ER timer fires. TCP enters + * fast recovery and performs fast-retransmit. + */ +void tcp_resume_early_retransmit(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + tcp_rearm_rto(sk); + + /* Stop if ER is disabled after the delayed ER timer is scheduled */ + if (!tp->do_early_retrans) + return; + + tcp_enter_recovery(sk, false); + tcp_update_scoreboard(sk, 1); + tcp_xmit_retransmit_queue(sk); } /* If we get here, the whole TSO packet has not been acked. */ @@ -3729,6 +3781,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (after(ack, tp->snd_nxt)) goto invalid_ack; + if (tp->early_retrans_delayed) + tcp_rearm_rto(sk); + if (after(ack, prior_snd_una)) flag |= FLAG_SND_UNA_ADVANCED; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 834e89fc541b..d94733009923 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -78,9 +78,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tp->frto_counter = 3; tp->packets_out += tcp_skb_pcount(skb); - if (!prior_packets) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, TCP_RTO_MAX); + if (!prior_packets || tp->early_retrans_delayed) + tcp_rearm_rto(sk); } /* SND.NXT, if window was not shrunk. 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 34d4a02c2f16..e911e6c523ec 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -319,6 +319,11 @@ void tcp_retransmit_timer(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); + if (tp->early_retrans_delayed) { + tcp_resume_early_retransmit(sk); + return; + } + if (!tp->packets_out) goto out; -- cgit v1.2.2 From 923dd347b8904c24bcac89bf038ed4da87f8aa90 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 May 2012 07:55:58 +0000 Subject: net: take care of cloned skbs in tcp_try_coalesce() Before stealing fragments or skb head, we must make sure skbs are not cloned. Alexander was worried about destination skb being cloned : In bridge setups, a driver could be fooled if skb->data_len would not match skb nr_frags. If source skb is cloned, we must take references on pages instead. Bug happened using tcpdump (if not using mmap()) Introduce kfree_skb_partial() helper to cleanup code. Reported-by: Alexander Duyck Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 7096790e06bf..a8829370f712 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4532,6 +4532,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) * @sk: socket * @to: prior buffer * @from: buffer to add in queue + * @fragstolen: pointer to boolean * * Before queueing skb @from after @to, try to merge them * to reduce overall memory use and queue lengths, if cost is small. @@ -4544,10 +4545,10 @@ static bool tcp_try_coalesce(struct sock *sk, struct sk_buff *from, bool *fragstolen) { - int delta, len = from->len; + int i, delta, len = from->len; *fragstolen = false; - if (tcp_hdr(from)->fin) + if (tcp_hdr(from)->fin || skb_cloned(to)) return false; if (len <= skb_tailroom(to)) { BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); @@ -4574,7 +4575,13 @@ copyfrags: skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; - skb_shinfo(from)->nr_frags = 0; + + if (skb_cloned(from)) + for (i = 0; i < skb_shinfo(from)->nr_frags; i++) + skb_frag_ref(from, i); + else + skb_shinfo(from)->nr_frags = 0; + to->truesize += delta; atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); @@ -4592,13 +4599,26 @@ copyfrags: offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); - *fragstolen = true; + + if (skb_cloned(from)) + get_page(page); + else + *fragstolen = true; + delta = len; /* we dont know real truesize... 
*/ goto copyfrags; } return false; } +static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) +{ + if (head_stolen) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); +} + static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); @@ -4642,10 +4662,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); } else { - if (fragstolen) - kmem_cache_free(skbuff_head_cache, skb); - else - __kfree_skb(skb); + kfree_skb_partial(skb, fragstolen); skb = NULL; } @@ -4804,12 +4821,9 @@ queue_and_out: tcp_fast_path_check(sk); - if (eaten > 0) { - if (fragstolen) - kmem_cache_free(skbuff_head_cache, skb); - else - __kfree_skb(skb); - } else if (!sock_flag(sk, SOCK_DEAD)) + if (eaten > 0) + kfree_skb_partial(skb, fragstolen); + else if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } -- cgit v1.2.2 From b081f85c2977b1cbb6e635d53d9512f1ef985972 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 May 2012 09:58:29 +0000 Subject: net: implement tcp coalescing in tcp_queue_rcv() Extend tcp coalescing implementing it from tcp_queue_rcv(), the main receiver function when application is not blocked in recvmsg(). Function tcp_queue_rcv() is moved a bit to allow its call from tcp_data_queue() This gives good results especially if GRO could not kick, and if skb head is a fragment. Signed-off-by: Eric Dumazet Cc: Alexander Duyck Cc: Neal Cardwell Cc: Tom Herbert Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 10 +++++----- net/ipv4/tcp_input.c | 40 +++++++++++++++++++++------------------- 2 files changed, 26 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6802c89bc44d..c2cff8b62772 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -981,8 +981,8 @@ static inline int select_size(const struct sock *sk, bool sg) static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_buff *skb; - struct tcp_skb_cb *cb; struct tcphdr *th; + bool fragstolen; skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); if (!skb) @@ -995,14 +995,14 @@ static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) goto err_free; - cb = TCP_SKB_CB(skb); - TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; - tcp_queue_rcv(sk, skb, sizeof(*th)); - + if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { + WARN_ON_ONCE(fragstolen); /* should not happen */ + __kfree_skb(skb); + } return size; err_free: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a8829370f712..2f696ef13dcd 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4739,6 +4739,22 @@ end: skb_set_owner_r(skb, sk); } +int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, + bool *fragstolen) +{ + int eaten; + struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); + + __skb_pull(skb, hdrlen); + eaten = (tail && + tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 
1 : 0; + tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + if (!eaten) { + __skb_queue_tail(&sk->sk_receive_queue, skb); + skb_set_owner_r(skb, sk); + } + return eaten; +} static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { @@ -4785,20 +4801,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) } if (eaten <= 0) { - struct sk_buff *tail; queue_and_out: if (eaten < 0 && tcp_try_rmem_schedule(sk, skb->truesize)) goto drop; - tail = skb_peek_tail(&sk->sk_receive_queue); - eaten = (tail && - tcp_try_coalesce(sk, tail, skb, - &fragstolen)) ? 1 : 0; - if (eaten <= 0) { - skb_set_owner_r(skb, sk); - __skb_queue_tail(&sk->sk_receive_queue, skb); - } + eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); } tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) @@ -5493,14 +5501,6 @@ discard: return 0; } -void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen) -{ - __skb_pull(skb, hdrlen); - __skb_queue_tail(&sk->sk_receive_queue, skb); - skb_set_owner_r(skb, sk); - tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; -} - /* * TCP receive function for the ESTABLISHED state. * @@ -5609,6 +5609,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, } else { int eaten = 0; int copied_early = 0; + bool fragstolen = false; if (tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len) { @@ -5666,7 +5667,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ - tcp_queue_rcv(sk, skb, tcp_header_len); + eaten = tcp_queue_rcv(sk, skb, tcp_header_len, + &fragstolen); } tcp_event_data_recv(sk, skb); @@ -5688,7 +5690,7 @@ no_ack: else #endif if (eaten) - __kfree_skb(skb); + kfree_skb_partial(skb, fragstolen); else sk->sk_data_ready(sk, 0); return 0; -- cgit v1.2.2 From 2996d31f9f292cce67cd9105dc0a4a5ee43d2f14 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 2 May 2012 18:18:42 +0000 Subject: net: Stop decapitating clones that have a head_frag This change is meant ot prevent stealing the skb->head to use as a page in the event that the skb->head was cloned. This allows the other clones to track each other via shinfo->dataref. Without this we break down to two methods for tracking the reference count, one being dataref, the other being the page count. As a result it becomes difficult to track how many references there are to skb->head. Signed-off-by: Alexander Duyck Cc: Eric Dumazet Cc: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 9 +++++---- net/ipv4/tcp_input.c | 9 ++------- 2 files changed, 7 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 52ba2b5e803d..9e8caa0c4f72 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1699,17 +1699,18 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, struct splice_pipe_desc *spd, struct sock *sk) { int seg; - bool head_is_linear = !skb->head_frag; + bool head_is_locked = !skb->head_frag || skb_cloned(skb); /* map the linear part : - * If skb->head_frag is set, this 'linear' part is backed - * by a fragment, and we can avoid a copy. + * If skb->head_frag is set, this 'linear' part is backed by a + * fragment, and if the head is not shared with any clones then + * we can avoid a copy since we own the head portion of this page. 
*/ if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), offset, len, skb, spd, - head_is_linear, + head_is_locked, sk, pipe)) return true; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2f696ef13dcd..c6f78e2b590f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4589,7 +4589,7 @@ copyfrags: to->data_len += len; goto merge; } - if (from->head_frag) { + if (from->head_frag && !skb_cloned(from)) { struct page *page; unsigned int offset; @@ -4599,12 +4599,7 @@ copyfrags: offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); - - if (skb_cloned(from)) - get_page(page); - else - *fragstolen = true; - + *fragstolen = true; delta = len; /* we dont know real truesize... */ goto copyfrags; } -- cgit v1.2.2 From 8c1ae10d792155a7221be12b37dcebc3bcc1b49f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 3 May 2012 02:25:55 -0400 Subject: net: Add missing linux/prefetch.h include to net/core/sock.c Reported-by: Stephen Rothwell Signed-off-by: David S. Miller --- net/core/sock.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 1a8835117fd6..b8c818e69c23 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -113,6 +113,7 @@ #include #include #include +#include #include -- cgit v1.2.2 From c73c3d9c49daae8acbac4cd14bcd81626887c0e6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 2 May 2012 21:18:59 +0000 Subject: tcp: Fix truesize accounting in tcp_try_coalesce This patch addresses several issues in the way we were tracking the truesize in tcp_try_coalesce. First it was using ksize which prevents us from having a 0 sized head frag and getting a usable result. To resolve that this patch uses the end pointer which is set based off either ksize, or the frag_size supplied in build_skb. This allows us to compute the original truesize of the entire buffer and remove that value leaving us with just what was added as pages. The second issue was the use of skb->len if there is a mergeable head frag. We should only need to remove the size of an data aligned sk_buff from our current skb->truesize to compute the delta for a buffer with a reused head. By using skb->len the value of truesize was being artificially reduced which means that head frags could use more memory than buffers using standard allocations. Signed-off-by: Alexander Duyck Cc: Eric Dumazet Cc: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c6f78e2b590f..3cb273ac8821 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4565,12 +4565,10 @@ merge: if (skb_headlen(from) == 0 && (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags <= MAX_SKB_FRAGS)) { - WARN_ON_ONCE(from->head_frag); - delta = from->truesize - ksize(from->head) - - SKB_DATA_ALIGN(sizeof(struct sk_buff)); - - WARN_ON_ONCE(delta < len); + delta = from->truesize - + SKB_TRUESIZE(skb_end_pointer(from) - from->head); copyfrags: + WARN_ON_ONCE(delta < len); memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); @@ -4600,7 +4598,7 @@ copyfrags: skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; - delta = len; /* we dont know real truesize... */ + delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); goto copyfrags; } return false; -- cgit v1.2.2 From 57b55a7ec684d8b846d6d5e67f4982363a83db7e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 2 May 2012 21:19:04 +0000 Subject: tcp: Move code related to head frag in tcp_try_coalesce This change reorders the code related to the use of an skb->head_frag so it is placed before we check the rest of the frags. This allows the code to read more linearly instead of like some sort of loop. Signed-off-by: Alexander Duyck Cc: Eric Dumazet Cc: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 42 +++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3cb273ac8821..41fa5df9abbc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4562,9 +4562,31 @@ merge: if (skb_has_frag_list(to) || skb_has_frag_list(from)) return false; - if (skb_headlen(from) == 0 && - (skb_shinfo(to)->nr_frags + - skb_shinfo(from)->nr_frags <= MAX_SKB_FRAGS)) { + if (skb_headlen(from) != 0) { + struct page *page; + unsigned int offset; + + if (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) + return false; + + if (!from->head_frag || skb_cloned(from)) + return false; + + delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); + + page = virt_to_head_page(from->head); + offset = from->data - (unsigned char *)page_address(page); + + skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, + page, offset, skb_headlen(from)); + *fragstolen = true; + goto copyfrags; + } else { + if (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) + return false; + delta = from->truesize - SKB_TRUESIZE(skb_end_pointer(from) - from->head); copyfrags: @@ -4587,20 +4609,6 @@ copyfrags: to->data_len += len; goto merge; } - if (from->head_frag && !skb_cloned(from)) { - struct page *page; - unsigned int offset; - - if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) - return false; - page = virt_to_head_page(from->head); - offset = from->data - (unsigned char *)page_address(page); - skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, - page, offset, skb_headlen(from)); - *fragstolen = true; - delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); - goto copyfrags; - } return false; } -- cgit v1.2.2 From 34a802a5b9c4c4efb61828781230805be6dd0f8e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 2 May 
2012 21:19:09 +0000 Subject: tcp: move stats merge to the end of tcp_try_coalesce This change cleans up the last bits of tcp_try_coalesce so that we only need one goto which jumps to the end of the function. The idea is to make the code more readable by putting things in a linear order so that we start execution at the top of the function, and end it at the bottom. I also made a slight tweak to the code for handling frags when we are a clone. Instead of making it an if (clone) loop else nr_frags = 0 I changed the logic so that if (!clone) we just set the number of frags to 0 which disables the for loop anyway. Signed-off-by: Alexander Duyck Cc: Eric Dumazet Cc: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 55 +++++++++++++++++++++++++++------------------------- 1 file changed, 29 insertions(+), 26 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 41fa5df9abbc..84e69e02fe20 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4548,15 +4548,13 @@ static bool tcp_try_coalesce(struct sock *sk, int i, delta, len = from->len; *fragstolen = false; + if (tcp_hdr(from)->fin || skb_cloned(to)) return false; + if (len <= skb_tailroom(to)) { BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); -merge: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); - TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; - TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; - return true; + goto merge; } if (skb_has_frag_list(to) || skb_has_frag_list(from)) @@ -4581,7 +4579,6 @@ merge: skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; - goto copyfrags; } else { if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) @@ -4589,27 +4586,33 @@ merge: delta = from->truesize - SKB_TRUESIZE(skb_end_pointer(from) - from->head); -copyfrags: - WARN_ON_ONCE(delta < len); - memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, - skb_shinfo(from)->frags, - skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); - skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; - - if (skb_cloned(from)) - for (i = 0; i < skb_shinfo(from)->nr_frags; i++) - skb_frag_ref(from, i); - else - skb_shinfo(from)->nr_frags = 0; - - to->truesize += delta; - atomic_add(delta, &sk->sk_rmem_alloc); - sk_mem_charge(sk, delta); - to->len += len; - to->data_len += len; - goto merge; } - return false; + + WARN_ON_ONCE(delta < len); + + memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, + skb_shinfo(from)->frags, + skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); + skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; + + if (!skb_cloned(from)) + skb_shinfo(from)->nr_frags = 0; + + /* if the skb is cloned this does nothing since we set nr_frags to 0 */ + for (i = 0; i < skb_shinfo(from)->nr_frags; i++) + skb_frag_ref(from, i); + + to->truesize += delta; + atomic_add(delta, &sk->sk_rmem_alloc); + sk_mem_charge(sk, delta); + to->len += len; + to->data_len += len; + +merge: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); + TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; + TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; + return true; } static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) -- cgit v1.2.2 From 715dc1f342713816d1be1c37643a2c9e6ee181a7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 May 2012 23:33:21 +0000 Subject: net: Fix truesize accounting in skb_gro_receive() GRO is very optimistic in skb truesize 
estimates, only taking into account the used part of fragments. Be conservative, and use more precise computation, so that bloated GRO skbs can be collapsed eventually. Signed-off-by: Eric Dumazet Cc: Alexander Duyck Cc: Jeff Kirsher Acked-by: Alexander Duyck Signed-off-by: David S. Miller --- net/core/skbuff.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9e8caa0c4f72..e1f8bbaadf52 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2871,6 +2871,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) unsigned int len = skb_gro_len(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); + unsigned int delta_truesize; if (p->len + len >= 65536) return -E2BIG; @@ -2900,11 +2901,14 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) frag->page_offset += offset; skb_frag_size_sub(frag, offset); + /* all fragments truesize : remove (head size + sk_buff) */ + delta_truesize = skb->truesize - SKB_TRUESIZE(skb_end_pointer(skb) - skb->head); + skb->truesize -= skb->data_len; skb->len -= skb->data_len; skb->data_len = 0; - NAPI_GRO_CB(skb)->free = 1; + NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; goto done; } else if (skb->head_frag) { int nr_frags = pinfo->nr_frags; @@ -2929,6 +2933,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); /* We dont need to clear skbinfo->nr_frags here */ + delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; goto done; } else if (skb_gro_len(p) != pinfo->gso_size) @@ -2971,7 +2976,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) p = nskb; merge: - p->truesize += skb->truesize - len; + delta_truesize = skb->truesize; if (offset > headlen) { unsigned int eat = offset - headlen; @@ -2991,7 +2996,7 @@ merge: done: NAPI_GRO_CB(p)->count++; p->data_len += len; - p->truesize += len; + p->truesize += delta_truesize; p->len += len; NAPI_GRO_CB(skb)->same_flow = 1; -- cgit v1.2.2 From 3a7c1ee4ab89f9250b8f82656a7be0ae14aa3691 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 3 May 2012 01:09:42 +0000 Subject: skb: Add skb_head_is_locked helper function This patch adds support for a skb_head_is_locked helper function. It is meant to be used any time we are considering transferring the head from skb->head to a paged frag. If the head is locked it means we cannot remove the head from the skb so it must be copied or we must take the skb as a whole. Signed-off-by: Alexander Duyck Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 3 +-- net/ipv4/tcp_input.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index e1f8bbaadf52..c199aa428c6d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1699,7 +1699,6 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, struct splice_pipe_desc *spd, struct sock *sk) { int seg; - bool head_is_locked = !skb->head_frag || skb_cloned(skb); /* map the linear part : * If skb->head_frag is set, this 'linear' part is backed by a @@ -1710,7 +1709,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), offset, len, skb, spd, - head_is_locked, + skb_head_is_locked(skb), sk, pipe)) return true; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 84e69e02fe20..7b2d351f24db 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4568,7 +4568,7 @@ static bool tcp_try_coalesce(struct sock *sk, skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) return false; - if (!from->head_frag || skb_cloned(from)) + if (skb_head_is_locked(from)) return false; delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); -- cgit v1.2.2 From 17045755193a93fbd2d97db9aad5e726fc8c2a6d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 May 2012 04:37:21 +0000 Subject: net: sched: factorize code (qdisc_drop()) Use qdisc_drop() helper where possible. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/sch_choke.c | 8 +++----- net/sched/sch_dsmark.c | 3 +-- net/sched/sch_htb.c | 4 +--- net/sched/sch_teql.c | 4 +--- 4 files changed, 6 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 81445cc8196f..cc37dd52ecf9 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -332,15 +332,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) } q->stats.pdrop++; - sch->qstats.drops++; - kfree_skb(skb); - return NET_XMIT_DROP; + return qdisc_drop(skb, sch); - congestion_drop: +congestion_drop: qdisc_drop(skb, sch); return NET_XMIT_CN; - other_drop: +other_drop: if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 389b856c6653..3886365cc207 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -265,8 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; drop: - kfree_skb(skb); - sch->qstats.drops++; + qdisc_drop(skb, sch); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 2ea6f196e3c8..acae5b0e3849 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -558,9 +558,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; } else { - kfree_skb(skb); - sch->qstats.drops++; - return NET_XMIT_DROP; + return qdisc_drop(skb, sch); } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 45326599fda3..ca0c29695d51 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -88,9 +88,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; } - kfree_skb(skb); - sch->qstats.drops++; - return NET_XMIT_DROP; + return qdisc_drop(skb, sch); } static struct sk_buff * -- cgit v1.2.2 From bd14b1b2e29bd6812597f896dde06eaf7c6d2f24 Mon 
Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 May 2012 05:14:02 +0000 Subject: tcp: be more strict before accepting ECN negotiation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It appears some networks play bad games with the two bits reserved for ECN. This can trigger false congestion notifications and very slow transfers. Since RFC 3168 (6.1.1) forbids SYN packets to carry ECT bits, we can disable TCP ECN negotiation if we happen to receive mangled ECN bits in the SYN packet. Signed-off-by: Eric Dumazet Cc: Perry Lorier Cc: Matt Mathis Cc: Yuchung Cheng Cc: Neal Cardwell Cc: Wilmer van der Gaast Cc: Ankur Jain Cc: Tom Herbert Cc: Dave Täht Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cf97e9821d76..4ff5e1f70d16 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1368,7 +1368,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; if (!want_cookie || tmp_opt.tstamp_ok) - TCP_ECN_create_request(req, tcp_hdr(skb)); + TCP_ECN_create_request(req, skb); if (want_cookie) { isn = cookie_v4_init_sequence(sk, skb, &req->mss); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 57b210969834..078d039e8fd2 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1140,7 +1140,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) treq->rmt_addr = ipv6_hdr(skb)->saddr; treq->loc_addr = ipv6_hdr(skb)->daddr; if (!want_cookie || tmp_opt.tstamp_ok) - TCP_ECN_create_request(req, tcp_hdr(skb)); + TCP_ECN_create_request(req, skb); treq->iif = sk->sk_bound_dev_if; -- cgit v1.2.2 From 9202e31d4632d82bd713fbd7d3fd229c0cd5b9cf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 4 May 2012 14:26:46 +0000 Subject: skb: Drop bad code from pskb_expand_head The fast-path for pskb_expand_head contains a check where the size plus the unaligned size of skb_shared_info is compared against the size of the data buffer. This code path has two issues. First is the fact that after the recent changes by Eric Dumazet to __alloc_skb and build_skb the shared info is always placed in the optimal spot for a buffer size, making this check unnecessary. The second issue is the fact that the check doesn't take into account the aligned size of shared info. As a result the code burns cycles doing a memcpy with nothing actually being shifted. Signed-off-by: Alexander Duyck Acked-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/core/skbuff.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c199aa428c6d..4d085d454285 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -951,17 +951,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta; } - if (fastpath && !skb->head_frag && - size + sizeof(struct skb_shared_info) <= ksize(skb->head)) { - memmove(skb->head + size, skb_shinfo(skb), - offsetof(struct skb_shared_info, - frags[skb_shinfo(skb)->nr_frags])); - memmove(skb->head + nhead, skb->head, - skb_tail_pointer(skb) - skb->head); - off = nhead; - goto adjust_others; - } - data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), gfp_mask); if (!data) @@ -997,7 +986,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->head = data; skb->head_frag = 0; -adjust_others: skb->data += off; #ifdef NET_SKBUFF_DATA_USES_OFFSET skb->end = size; -- cgit v1.2.2 From 3e24591a19bbda6fcb2cbe383b41b4ba794501bf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 4 May 2012 14:26:51 +0000 Subject: skb: Drop "fastpath" variable for skb_cloned check in pskb_expand_head Since there is now only one spot that actually uses "fastpath" there isn't much point in carrying it. Instead we can just use a check for skb_cloned to verify if we can perform the fast-path free for the head or not. Signed-off-by: Alexander Duyck Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4d085d454285..17e4b1e1bf2c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -932,7 +932,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, u8 *data; int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail; long off; - bool fastpath; BUG_ON(nhead < 0); @@ -941,16 +940,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, size = SKB_DATA_ALIGN(size); - /* Check if we can avoid taking references on fragments if we own - * the last reference on skb->head. (see skb_release_data()) - */ - if (!skb->cloned) - fastpath = true; - else { - int delta = skb->nohdr ? 
(1 << SKB_DATAREF_SHIFT) + 1 : 1; - fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta; - } - data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), gfp_mask); if (!data) @@ -966,9 +955,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb_shinfo(skb), offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); - if (fastpath) { - skb_free_head(skb); - } else { + /* + * if shinfo is shared we must drop the old head gracefully, but if it + * is not we can just drop the old head and let the existing refcount + * be since all we did is relocate the values + */ + if (skb_cloned(skb)) { /* copy this zero copy skb frags */ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) @@ -981,6 +973,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb_clone_fraglist(skb); skb_release_data(skb); + } else { + skb_free_head(skb); } off = (data + nhead) - skb->head; -- cgit v1.2.2 From ec47ea82477404631d49b8e568c71826c9b663ac Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 4 May 2012 14:26:56 +0000 Subject: skb: Add inline helper for getting the skb end offset from head With the recent changes for how we compute the skb truesize it occurs to me we are probably going to have a lot of calls to skb_end_pointer - skb->head. Instead of running all over the place doing that it would make more sense to just make it a separate inline skb_end_offset(skb) that way we can return the correct value without having gcc having to do all the optimization to cancel out skb->head - skb->head. Signed-off-by: Alexander Duyck Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 17e4b1e1bf2c..2c35da818ef9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -829,7 +829,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { int headerlen = skb_headroom(skb); - unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; + unsigned int size = skb_end_offset(skb) + skb->data_len; struct sk_buff *n = alloc_skb(size, gfp_mask); if (!n) @@ -930,7 +930,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, { int i; u8 *data; - int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail; + int size = nhead + skb_end_offset(skb) + ntail; long off; BUG_ON(nhead < 0); @@ -2727,14 +2727,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) if (unlikely(!nskb)) goto err; - hsize = skb_end_pointer(nskb) - nskb->head; + hsize = skb_end_offset(nskb); if (skb_cow_head(nskb, doffset + headroom)) { kfree_skb(nskb); goto err; } - nskb->truesize += skb_end_pointer(nskb) - nskb->head - - hsize; + nskb->truesize += skb_end_offset(nskb) - hsize; skb_release_head_state(nskb); __skb_push(nskb, doffset); } else { @@ -2883,7 +2882,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) skb_frag_size_sub(frag, offset); /* all fragments truesize : remove (head size + sk_buff) */ - delta_truesize = skb->truesize - SKB_TRUESIZE(skb_end_pointer(skb) - skb->head); + delta_truesize = skb->truesize - + SKB_TRUESIZE(skb_end_offset(skb)); skb->truesize -= skb->data_len; skb->len -= skb->data_len; -- cgit v1.2.2 From 3a084ddb4bf299a6e898a9a07c89f3917f0713f7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 3 May 2012 
22:37:45 +0000 Subject: net: IP_MULTICAST_IF setsockopt now recognizes struct mreq Until now, struct mreq has not been recognized and it was worked with as with struct in_addr. That means imr_multiaddr was copied to imr_address. So do recognize struct mreq here and copy that correctly. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/ipv4/ip_sockglue.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 51c6c672c8aa..0d11f234d615 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -673,10 +673,15 @@ static int do_ip_setsockopt(struct sock *sk, int level, break; } else { memset(&mreq, 0, sizeof(mreq)); - if (optlen >= sizeof(struct in_addr) && - copy_from_user(&mreq.imr_address, optval, - sizeof(struct in_addr))) - break; + if (optlen >= sizeof(struct ip_mreq)) { + if (copy_from_user(&mreq, optval, + sizeof(struct ip_mreq))) + break; + } else if (optlen >= sizeof(struct in_addr)) { + if (copy_from_user(&mreq.imr_address, optval, + sizeof(struct in_addr))) + break; + } } if (!mreq.imr_ifindex) { -- cgit v1.2.2 From 031d7709f21c778bcb1eed96d790e82b3bee96b5 Mon Sep 17 00:00:00 2001 From: Tony Zelenoff Date: Thu, 8 Mar 2012 23:35:39 +0000 Subject: netfilter: nf_ct_ecache: refactor notifier registration * ret variable initialization removed as useless * similar code strings concatenated and functions code flow became more plain Signed-off-by: Tony Zelenoff Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_ecache.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index b924f3a49a8e..e7be79e640de 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *new) { - int ret = 0; + int ret; struct nf_ct_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); @@ -95,8 +95,7 @@ int nf_conntrack_register_notifier(struct net *net, goto out_unlock; } rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); - mutex_unlock(&nf_ct_ecache_mutex); - return ret; + ret = 0; out_unlock: mutex_unlock(&nf_ct_ecache_mutex); @@ -121,7 +120,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *new) { - int ret = 0; + int ret; struct nf_exp_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); @@ -132,8 +131,7 @@ int nf_ct_expect_register_notifier(struct net *net, goto out_unlock; } rcu_assign_pointer(net->ct.nf_expect_event_cb, new); - mutex_unlock(&nf_ct_ecache_mutex); - return ret; + ret = 0; out_unlock: mutex_unlock(&nf_ct_ecache_mutex); -- cgit v1.2.2 From a9006892643a8f4e885b692de0708bcb35a7d530 Mon Sep 17 00:00:00 2001 From: Eric Leblond Date: Wed, 18 Apr 2012 11:20:41 +0200 Subject: netfilter: nf_ct_helper: allow to disable automatic helper assignment This patch allows you to disable automatic conntrack helper lookup based on TCP/UDP ports, eg. echo 0 > /proc/sys/net/netfilter/nf_conntrack_helper [ Note: flows that already got a helper will keep using it even if automatic helper assignment has been disabled ] Once this behaviour has been disabled, you have to explicitly use the iptables CT target to attach helper to flows. 
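As a purely illustrative sketch (the rule below is an example, not part of this patch, and it assumes the FTP conntrack helper module is loaded), an explicit helper assignment would look like:

  iptables -t raw -A PREROUTING -p tcp --dport 21 -j CT --helper ftp

This attaches the ftp helper to matching flows explicitly, independently of the automatic-assignment setting.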
There are good reasons to stop supporting automatic helper assignment, for further information, please read: http://www.netfilter.org/news.html#2012-04-03 This patch also adds one message to inform that automatic helper assignment is deprecated and it will be removed soon (this is spotted only once, with the first flow that gets a helper attached to make it as less annoying as possible). Signed-off-by: Eric Leblond Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 15 +++-- net/netfilter/nf_conntrack_helper.c | 109 ++++++++++++++++++++++++++++++++---- 2 files changed, 105 insertions(+), 19 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index cf0747c5741f..32c59093146e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1336,7 +1336,6 @@ static void nf_conntrack_cleanup_init_net(void) while (untrack_refs() > 0) schedule(); - nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); #ifdef CONFIG_NF_CONNTRACK_ZONES nf_ct_extend_unregister(&nf_ct_zone_extend); @@ -1354,6 +1353,7 @@ static void nf_conntrack_cleanup_net(struct net *net) } nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); + nf_conntrack_helper_fini(net); nf_conntrack_timeout_fini(net); nf_conntrack_ecache_fini(net); nf_conntrack_tstamp_fini(net); @@ -1504,10 +1504,6 @@ static int nf_conntrack_init_init_net(void) if (ret < 0) goto err_proto; - ret = nf_conntrack_helper_init(); - if (ret < 0) - goto err_helper; - #ifdef CONFIG_NF_CONNTRACK_ZONES ret = nf_ct_extend_register(&nf_ct_zone_extend); if (ret < 0) @@ -1525,10 +1521,8 @@ static int nf_conntrack_init_init_net(void) #ifdef CONFIG_NF_CONNTRACK_ZONES err_extend: - nf_conntrack_helper_fini(); -#endif -err_helper: nf_conntrack_proto_fini(); +#endif err_proto: return ret; } @@ -1589,9 +1583,14 @@ static int nf_conntrack_init_net(struct net *net) ret = nf_conntrack_timeout_init(net); if (ret < 0) goto err_timeout; + ret = nf_conntrack_helper_init(net); + if (ret < 0) + goto err_helper; return 0; +err_helper: + nf_conntrack_timeout_fini(net); err_timeout: nf_conntrack_ecache_fini(net); err_ecache: diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 436b7cb79ba4..317f6e43db87 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -34,6 +34,67 @@ static struct hlist_head *nf_ct_helper_hash __read_mostly; static unsigned int nf_ct_helper_hsize __read_mostly; static unsigned int nf_ct_helper_count __read_mostly; +static bool nf_ct_auto_assign_helper __read_mostly = true; +module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644); +MODULE_PARM_DESC(nf_conntrack_helper, + "Enable automatic conntrack helper assignment (default 1)"); + +#ifdef CONFIG_SYSCTL +static struct ctl_table helper_sysctl_table[] = { + { + .procname = "nf_conntrack_helper", + .data = &init_net.ct.sysctl_auto_assign_helper, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + {} +}; + +static int nf_conntrack_helper_init_sysctl(struct net *net) +{ + struct ctl_table *table; + + table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table), + GFP_KERNEL); + if (!table) + goto out; + + table[0].data = &net->ct.sysctl_auto_assign_helper; + + net->ct.helper_sysctl_header = + register_net_sysctl(net, "net/netfilter", table); + + if (!net->ct.helper_sysctl_header) { + pr_err("nf_conntrack_helper: can't register to sysctl.\n"); + goto out_register; + } + 
return 0; + +out_register: + kfree(table); +out: + return -ENOMEM; +} + +static void nf_conntrack_helper_fini_sysctl(struct net *net) +{ + struct ctl_table *table; + + table = net->ct.helper_sysctl_header->ctl_table_arg; + unregister_net_sysctl_table(net->ct.helper_sysctl_header); + kfree(table); +} +#else +static int nf_conntrack_helper_init_sysctl(struct net *net) +{ + return 0; +} + +static void nf_conntrack_helper_fini_sysctl(struct net *net) +{ +} +#endif /* CONFIG_SYSCTL */ /* Stupid hash, but collision free for the default registrations of the * helpers currently in the kernel. */ @@ -118,6 +179,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, { struct nf_conntrack_helper *helper = NULL; struct nf_conn_help *help; + struct net *net = nf_ct_net(ct); int ret = 0; if (tmpl != NULL) { @@ -127,8 +189,17 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, } help = nfct_help(ct); - if (helper == NULL) + if (net->ct.sysctl_auto_assign_helper && helper == NULL) { helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + if (unlikely(!net->ct.auto_assign_helper_warned && helper)) { + pr_info("nf_conntrack: automatic helper " + "assignment is deprecated and it will " + "be removed soon. Use the iptables CT target " + "to attach helpers instead.\n"); + net->ct.auto_assign_helper_warned = true; + } + } + if (helper == NULL) { if (help) RCU_INIT_POINTER(help->helper, NULL); @@ -315,28 +386,44 @@ static struct nf_ct_ext_type helper_extend __read_mostly = { .id = NF_CT_EXT_HELPER, }; -int nf_conntrack_helper_init(void) +int nf_conntrack_helper_init(struct net *net) { int err; - nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ - nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); - if (!nf_ct_helper_hash) - return -ENOMEM; + net->ct.auto_assign_helper_warned = false; + net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper; - err = nf_ct_extend_register(&helper_extend); + if (net_eq(net, &init_net)) { + nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ + nf_ct_helper_hash = + nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); + if (!nf_ct_helper_hash) + return -ENOMEM; + + err = nf_ct_extend_register(&helper_extend); + if (err < 0) + goto err1; + } + + err = nf_conntrack_helper_init_sysctl(net); if (err < 0) - goto err1; + goto out_sysctl; return 0; +out_sysctl: + if (net_eq(net, &init_net)) + nf_ct_extend_unregister(&helper_extend); err1: nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); return err; } -void nf_conntrack_helper_fini(void) +void nf_conntrack_helper_fini(struct net *net) { - nf_ct_extend_unregister(&helper_extend); - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + nf_conntrack_helper_fini_sysctl(net); + if (net_eq(net, &init_net)) { + nf_ct_extend_unregister(&helper_extend); + nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); + } } -- cgit v1.2.2 From 4981682cc19733f3ca43d3abd81dd4adbc9005d5 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 8 May 2012 19:36:44 +0200 Subject: netfilter: bridge: optionally set indev to vlan if net.bridge.bridge-nf-filter-vlan-tagged sysctl is enabled, bridge netfilter removes the vlan header temporarily and then feeds the packet to ip(6)tables. When the new "bridge-nf-pass-vlan-input-device" sysctl is on (default off), then bridge netfilter will also set the in-interface to the vlan interface; if such an interface exists. 
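As an illustrative sketch (the interface name br0.100 is invented; the knob is registered as "bridge-nf-pass-vlan-input-dev" in the hunk below), the feature would be used roughly like this, together with the existing bridge-nf-filter-vlan-tagged setting:

  echo 1 > /proc/sys/net/bridge/bridge-nf-pass-vlan-input-dev
  iptables -A FORWARD -i br0.100 -j ACCEPT

so that the vlan device on top of the bridge, not the bridge itself, is reported as the input device.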
This is needed to make iptables REDIRECT target work with "vlan-on-top-of-bridge" setups and to allow use of "iptables -i" to match the vlan device name. Also update Documentation with current brnf default settings. Signed-off-by: Florian Westphal Acked-by: Bart De Schuymer Signed-off-by: Pablo Neira Ayuso --- net/bridge/br_netfilter.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 53f083686ae4..dce55d4ee83b 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -54,12 +54,14 @@ static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; +static int brnf_pass_vlan_indev __read_mostly = 0; #else #define brnf_call_iptables 1 #define brnf_call_ip6tables 1 #define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 +#define brnf_pass_vlan_indev 0 #endif #define IS_IP(skb) \ @@ -503,6 +505,19 @@ bridged_dnat: return 0; } +static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev) +{ + struct net_device *vlan, *br; + + br = bridge_parent(dev); + if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb)) + return br; + + vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK); + + return vlan ? vlan : br; +} + /* Some common code for IPv4/IPv6 */ static struct net_device *setup_pre_routing(struct sk_buff *skb) { @@ -515,7 +530,7 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb) nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; nf_bridge->physindev = skb->dev; - skb->dev = bridge_parent(skb->dev); + skb->dev = brnf_get_logical_dev(skb, skb->dev); if (skb->protocol == htons(ETH_P_8021Q)) nf_bridge->mask |= BRNF_8021Q; else if (skb->protocol == htons(ETH_P_PPP_SES)) @@ -774,7 +789,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, else skb->protocol = htons(ETH_P_IPV6); - NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, + NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent, br_nf_forward_finish); return NF_STOLEN; @@ -1002,6 +1017,13 @@ static ctl_table brnf_table[] = { .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, + { + .procname = "bridge-nf-pass-vlan-input-dev", + .data = &brnf_pass_vlan_indev, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = brnf_sysctl_call_tables, + }, { } }; #endif -- cgit v1.2.2 From 41cff6d5c924eb51704dd6928687fd0494e6c744 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 13 Apr 2012 16:49:37 +0300 Subject: ipvs: timeout tables do not need GFP_ATOMIC allocation They are called only on initialization. 
Signed-off-by: Julian Anastasov Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_proto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index fdc82ad9cc0e..ca1647618081 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -199,7 +199,7 @@ void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags) int * ip_vs_create_timeout_table(int *table, int size) { - return kmemdup(table, size, GFP_ATOMIC); + return kmemdup(table, size, GFP_KERNEL); } -- cgit v1.2.2 From 748d845ca9cf0e7c7692ec45e4d8cb3a90b31793 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 13 Apr 2012 16:49:40 +0300 Subject: ipvs: LBLC scheduler does not need GFP_ATOMIC allocation on init Schedulers are initialized and bound to services only on commands. Signed-off-by: Julian Anastasov Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_lblc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 9b0de9a0e08e..df646ccf08a7 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -342,7 +342,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) /* * Allocate the ip_vs_lblc_table for this service */ - tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); + tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); if (tbl == NULL) return -ENOMEM; -- cgit v1.2.2 From 4beddbe38cb402ff509efa9dc27d3e9e188902cd Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 13 Apr 2012 16:49:39 +0300 Subject: ipvs: DH scheduler does not need GFP_ATOMIC allocation Schedulers are initialized and bound to services only on commands. Signed-off-by: Julian Anastasov Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_dh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index 1a53a7a2fff0..8b7dca9ea422 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -149,7 +149,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc) /* allocate the DH table for this service */ tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, - GFP_ATOMIC); + GFP_KERNEL); if (tbl == NULL) return -ENOMEM; -- cgit v1.2.2 From 4f2a94dcb65bcdf20d91d5bffd29b9c836559d17 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 13 Apr 2012 16:49:42 +0300 Subject: ipvs: WRR scheduler does not need GFP_ATOMIC allocation Schedulers are initialized and bound to services only on commands. 
Signed-off-by: Julian Anastasov Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_wrr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index fd0d4e09876a..231be7dd547a 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c @@ -84,7 +84,7 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc) /* * Allocate the mark variable for WRR scheduling */ - mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC); + mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL); if (mark == NULL) return -ENOMEM; -- cgit v1.2.2 From 45d4e71a39c3274f982d73688cbee2a4b286c3e3 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 13 Apr 2012 16:49:41 +0300 Subject: ipvs: LBLCR scheduler does not need GFP_ATOMIC allocation on init Schedulers are initialized and bound to services only on commands. Signed-off-by: Julian Anastasov Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_lblcr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 9dcd39a48897..570e31ea427a 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -511,7 +511,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) /* * Allocate the ip_vs_lblcr_table for this service */ - tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); + tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); if (tbl == NULL) return -ENOMEM; -- cgit v1.2.2 From d6318f08e8d0b95960f74280cdaa7f0e00fd604c Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 13 Apr 2012 16:49:38 +0300 Subject: ipvs: SH scheduler does not need GFP_ATOMIC allocation Schedulers are initialized and bound to services only on commands. Signed-off-by: Julian Anastasov Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_sh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 91e97ee049be..05126521743e 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -162,7 +162,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc) /* allocate the SH table for this service */ tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, - GFP_ATOMIC); + GFP_KERNEL); if (tbl == NULL) return -ENOMEM; -- cgit v1.2.2 From 9615e61e6f23f18fc192ed44735725bb5b500bad Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Sat, 14 Apr 2012 12:37:47 -0400 Subject: ipvs: use GFP_KERNEL allocation where possible Use GFP_KERNEL instead of GFP_ATOMIC when registering an ipvs protocol. This is safe since it will always run from a process context. 
Signed-off-by: Sasha Levin Acked-by: Julian Anastasov Signed-off-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_proto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index ca1647618081..e91c8982dfac 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -68,7 +68,7 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) struct netns_ipvs *ipvs = net_ipvs(net); unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); struct ip_vs_proto_data *pd = - kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); + kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL); if (!pd) return -ENOMEM; -- cgit v1.2.2 From 82cfc062781f29164b151de851f3076b84175fec Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 24 Apr 2012 23:46:35 +0300 Subject: ipvs: ignore IP_VS_CONN_F_NOOUTPUT in backup server As IP_VS_CONN_F_NOOUTPUT is derived from the forwarding method we should get it from conn_flags just like we do it for IP_VS_CONN_F_FWD_MASK bits when binding to real server. Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 4a09b7873003..f562e630d35a 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -567,7 +567,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) conn_flags &= ~IP_VS_CONN_F_INACTIVE; /* connections inherit forwarding method from dest */ - cp->flags &= ~IP_VS_CONN_F_FWD_MASK; + cp->flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT); } cp->flags |= conn_flags; cp->dest = dest; -- cgit v1.2.2 From 06611f82cc0d8b957d6399f11f2799683ad002c3 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 24 Apr 2012 23:46:36 +0300 Subject: ipvs: remove check for IP_VS_CONN_F_SYNC from ip_vs_bind_dest As the IP_VS_CONN_F_INACTIVE bit is properly set in cp->flags for all kind of connections we do not need to add special checks for synced connections when updating the activeconns/inactconns counters for first time. Now logic will look just like in ip_vs_unbind_dest. 
Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index f562e630d35a..1c1bb309a955 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -585,11 +585,11 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) /* Update the connection counters */ if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { - /* It is a normal connection, so increase the inactive - connection counter because it is in TCP SYNRECV - state (inactive) or other protocol inacive state */ - if ((cp->flags & IP_VS_CONN_F_SYNC) && - (!(cp->flags & IP_VS_CONN_F_INACTIVE))) + /* It is a normal connection, so modify the counters + * according to the flags, later the protocol can + * update them on state change + */ + if (!(cp->flags & IP_VS_CONN_F_INACTIVE)) atomic_inc(&dest->activeconns); else atomic_inc(&dest->inactconns); -- cgit v1.2.2 From 882a844bd5b3ffa35e059f21ee920cc113985a89 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 24 Apr 2012 23:46:37 +0300 Subject: ipvs: fix ip_vs_try_bind_dest to rebind app and transmitter Initially, when the synced connection is created we use the forwarding method provided by master but once we bind to destination it can be changed. As result, we must update the application and the transmitter. As ip_vs_try_bind_dest is called always for connections that require dest binding, there is no need to validate the cp and dest pointers. Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 1c1bb309a955..fd74f881d04a 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -613,14 +613,33 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) { struct ip_vs_dest *dest; - if ((cp) && (!cp->dest)) { - dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, - cp->dport, &cp->vaddr, cp->vport, - cp->protocol, cp->fwmark, cp->flags); + dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, + cp->dport, &cp->vaddr, cp->vport, + cp->protocol, cp->fwmark, cp->flags); + if (dest) { + struct ip_vs_proto_data *pd; + + /* Applications work depending on the forwarding method + * but better to reassign them always when binding dest */ + if (cp->app) + ip_vs_unbind_app(cp); + ip_vs_bind_dest(cp, dest); - return dest; - } else - return NULL; + + /* Update its packet transmitter */ + cp->packet_xmit = NULL; +#ifdef CONFIG_IP_VS_IPV6 + if (cp->af == AF_INET6) + ip_vs_bind_xmit_v6(cp); + else +#endif + ip_vs_bind_xmit(cp); + + pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol); + if (pd && atomic_read(&pd->appcnt)) + ip_vs_bind_app(cp, pd->pp); + } + return dest; } -- cgit v1.2.2 From cdcc5e905d59026fbf2e7f74f9cc834203b6207b Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 24 Apr 2012 23:46:38 +0300 Subject: ipvs: always update some of the flags bits in backup As the goal is to mirror the inactconns/activeconns counters in the backup server, make sure the cp->flags are updated even if cp is still not bound to dest. If cp->flags are not updated ip_vs_bind_dest will rely only on the initial flags when updating the counters. 
To avoid mistakes and complicated checks for protocol state rely only on the IP_VS_CONN_F_INACTIVE bit when updating the counters. Signed-off-by: Julian Anastasov Tested-by: Aleksey Chudov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_sync.c | 65 +++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 42 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index bf5e538af67b..d2df694405f1 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -731,9 +731,30 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, else cp = ip_vs_ct_in_get(param); - if (cp && param->pe_data) /* Free pe_data */ + if (cp) { + /* Free pe_data */ kfree(param->pe_data); - if (!cp) { + + dest = cp->dest; + if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && + !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { + if (flags & IP_VS_CONN_F_INACTIVE) { + atomic_dec(&dest->activeconns); + atomic_inc(&dest->inactconns); + } else { + atomic_inc(&dest->activeconns); + atomic_dec(&dest->inactconns); + } + } + flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; + flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; + cp->flags = flags; + if (!dest) { + dest = ip_vs_try_bind_dest(cp); + if (dest) + atomic_dec(&dest->refcnt); + } + } else { /* * Find the appropriate destination for the connection. * If it is not found the connection will remain unbound @@ -742,18 +763,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, param->vport, protocol, fwmark, flags); - /* Set the approprite ativity flag */ - if (protocol == IPPROTO_TCP) { - if (state != IP_VS_TCP_S_ESTABLISHED) - flags |= IP_VS_CONN_F_INACTIVE; - else - flags &= ~IP_VS_CONN_F_INACTIVE; - } else if (protocol == IPPROTO_SCTP) { - if (state != IP_VS_SCTP_S_ESTABLISHED) - flags |= IP_VS_CONN_F_INACTIVE; - else - flags &= ~IP_VS_CONN_F_INACTIVE; - } cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); if (dest) atomic_dec(&dest->refcnt); @@ -763,34 +772,6 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); return; } - } else if (!cp->dest) { - dest = ip_vs_try_bind_dest(cp); - if (dest) - atomic_dec(&dest->refcnt); - } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) && - (cp->state != state)) { - /* update active/inactive flag for the connection */ - dest = cp->dest; - if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && - (state != IP_VS_TCP_S_ESTABLISHED)) { - atomic_dec(&dest->activeconns); - atomic_inc(&dest->inactconns); - cp->flags |= IP_VS_CONN_F_INACTIVE; - } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && - (state == IP_VS_TCP_S_ESTABLISHED)) { - atomic_inc(&dest->activeconns); - atomic_dec(&dest->inactconns); - cp->flags &= ~IP_VS_CONN_F_INACTIVE; - } - } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) && - (cp->state != state)) { - dest = cp->dest; - if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && - (state != IP_VS_SCTP_S_ESTABLISHED)) { - atomic_dec(&dest->activeconns); - atomic_inc(&dest->inactconns); - cp->flags &= ~IP_VS_CONN_F_INACTIVE; - } } if (opt) -- cgit v1.2.2 From 1c003b1580e20ff9f500846677303a695b1837cc Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 8 May 2012 19:39:49 +0200 Subject: ipvs: wakeup master thread High rate of sync messages in master can lead to overflowing the socket buffer and dropping the messages. 
Fixed sleep of 1 second without wakeup events is not suitable for loaded masters, Use delayed_work to schedule sending for queued messages and limit the delay to IPVS_SYNC_SEND_DELAY (20ms). This will reduce the rate of wakeups but to avoid sending long bursts we wakeup the master thread after IPVS_SYNC_WAKEUP_RATE (8) messages. Add hard limit for the queued messages before sending by using "sync_qlen_max" sysctl var. It defaults to 1/32 of the memory pages but actually represents number of messages. It will protect us from allocating large parts of memory when the sending rate is lower than the queuing rate. As suggested by Pablo, add new sysctl var "sync_sock_size" to configure the SNDBUF (master) or RCVBUF (slave) socket limit. Default value is 0 (preserve system defaults). Change the master thread to detect and block on SNDBUF overflow, so that we do not drop messages when the socket limit is low but the sync_qlen_max limit is not reached. On ENOBUFS or other errors just drop the messages. Change master thread to enter TASK_INTERRUPTIBLE state early, so that we do not miss wakeups due to messages or kthread_should_stop event. Thanks to Pablo Neira Ayuso for his valuable feedback! Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_ctl.c | 16 +++++ net/netfilter/ipvs/ip_vs_sync.c | 149 +++++++++++++++++++++++++++++++--------- 2 files changed, 133 insertions(+), 32 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 37b91996bfba..bd3827ec25c9 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1717,6 +1717,18 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = &proc_do_sync_mode, }, + { + .procname = "sync_qlen_max", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sync_sock_size", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "cache_bypass", .maxlen = sizeof(int), @@ -3655,6 +3667,10 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_snat_reroute; ipvs->sysctl_sync_ver = 1; tbl[idx++].data = &ipvs->sysctl_sync_ver; + ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32; + tbl[idx++].data = &ipvs->sysctl_sync_qlen_max; + ipvs->sysctl_sync_sock_size = 0; + tbl[idx++].data = &ipvs->sysctl_sync_sock_size; tbl[idx++].data = &ipvs->sysctl_cache_bypass; tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn; tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index d2df694405f1..b3235b230139 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -307,11 +307,15 @@ static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs) spin_lock_bh(&ipvs->sync_lock); if (list_empty(&ipvs->sync_queue)) { sb = NULL; + __set_current_state(TASK_INTERRUPTIBLE); } else { sb = list_entry(ipvs->sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); + ipvs->sync_queue_len--; + if (!ipvs->sync_queue_len) + ipvs->sync_queue_delay = 0; } spin_unlock_bh(&ipvs->sync_lock); @@ -358,9 +362,16 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs) struct ip_vs_sync_buff *sb = ipvs->sync_buff; spin_lock(&ipvs->sync_lock); - if (ipvs->sync_state & IP_VS_STATE_MASTER) + if (ipvs->sync_state & IP_VS_STATE_MASTER && + ipvs->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { + if 
(!ipvs->sync_queue_len) + schedule_delayed_work(&ipvs->master_wakeup_work, + max(IPVS_SYNC_SEND_DELAY, 1)); + ipvs->sync_queue_len++; list_add_tail(&sb->list, &ipvs->sync_queue); - else + if ((++ipvs->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) + wake_up_process(ipvs->master_thread); + } else ip_vs_sync_buff_release(sb); spin_unlock(&ipvs->sync_lock); } @@ -379,6 +390,7 @@ get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) { sb = ipvs->sync_buff; ipvs->sync_buff = NULL; + __set_current_state(TASK_RUNNING); } else sb = NULL; spin_unlock_bh(&ipvs->sync_buff_lock); @@ -392,26 +404,23 @@ get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) void ip_vs_sync_switch_mode(struct net *net, int mode) { struct netns_ipvs *ipvs = net_ipvs(net); + struct ip_vs_sync_buff *sb; + spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) - return; - if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff) - return; + goto unlock; + sb = ipvs->sync_buff; + if (mode == sysctl_sync_ver(ipvs) || !sb) + goto unlock; - spin_lock_bh(&ipvs->sync_buff_lock); /* Buffer empty ? then let buf_create do the job */ - if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) { - kfree(ipvs->sync_buff); + if (sb->mesg->size <= sizeof(struct ip_vs_sync_mesg)) { + ip_vs_sync_buff_release(sb); ipvs->sync_buff = NULL; - } else { - spin_lock_bh(&ipvs->sync_lock); - if (ipvs->sync_state & IP_VS_STATE_MASTER) - list_add_tail(&ipvs->sync_buff->list, - &ipvs->sync_queue); - else - ip_vs_sync_buff_release(ipvs->sync_buff); - spin_unlock_bh(&ipvs->sync_lock); - } + } else + sb_queue_tail(ipvs); + +unlock: spin_unlock_bh(&ipvs->sync_buff_lock); } @@ -1129,6 +1138,28 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer, } +/* + * Setup sndbuf (mode=1) or rcvbuf (mode=0) + */ +static void set_sock_size(struct sock *sk, int mode, int val) +{ + /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ + /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ + lock_sock(sk); + if (mode) { + val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, + sysctl_wmem_max); + sk->sk_sndbuf = val * 2; + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + } else { + val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, + sysctl_rmem_max); + sk->sk_rcvbuf = val * 2; + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + } + release_sock(sk); +} + /* * Setup loopback of outgoing multicasts on a sending socket */ @@ -1305,6 +1336,9 @@ static struct socket *make_send_sock(struct net *net) set_mcast_loop(sock->sk, 0); set_mcast_ttl(sock->sk, 1); + result = sysctl_sync_sock_size(ipvs); + if (result > 0) + set_sock_size(sock->sk, 1, result); result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); if (result < 0) { @@ -1350,6 +1384,9 @@ static struct socket *make_receive_sock(struct net *net) sk_change_net(sock->sk, net); /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = SK_CAN_REUSE; + result = sysctl_sync_sock_size(ipvs); + if (result > 0) + set_sock_size(sock->sk, 0, result); result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, sizeof(struct sockaddr)); @@ -1392,18 +1429,22 @@ ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) return len; } -static void +static int ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) { int msize; + int ret; msize = msg->size; /* Put size in network byte order */ msg->size = htons(msg->size); - if (ip_vs_send_async(sock, 
(char *)msg, msize) != msize) - pr_err("ip_vs_send_async error\n"); + ret = ip_vs_send_async(sock, (char *)msg, msize); + if (ret >= 0 || ret == -EAGAIN) + return ret; + pr_err("ip_vs_send_async error %d\n", ret); + return 0; } static int @@ -1428,36 +1469,75 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) return len; } +/* Wakeup the master thread for sending */ +static void master_wakeup_work_handler(struct work_struct *work) +{ + struct netns_ipvs *ipvs = container_of(work, struct netns_ipvs, + master_wakeup_work.work); + + spin_lock_bh(&ipvs->sync_lock); + if (ipvs->sync_queue_len && + ipvs->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { + ipvs->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; + wake_up_process(ipvs->master_thread); + } + spin_unlock_bh(&ipvs->sync_lock); +} + +/* Get next buffer to send */ +static inline struct ip_vs_sync_buff * +next_sync_buff(struct netns_ipvs *ipvs) +{ + struct ip_vs_sync_buff *sb; + + sb = sb_dequeue(ipvs); + if (sb) + return sb; + /* Do not delay entries in buffer for more than 2 seconds */ + return get_curr_sync_buff(ipvs, 2 * HZ); +} static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); + struct sock *sk = tinfo->sock->sk; struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " "syncid = %d\n", ipvs->master_mcast_ifn, ipvs->master_syncid); - while (!kthread_should_stop()) { - while ((sb = sb_dequeue(ipvs))) { - ip_vs_send_sync_msg(tinfo->sock, sb->mesg); - ip_vs_sync_buff_release(sb); + for (;;) { + sb = next_sync_buff(ipvs); + if (unlikely(kthread_should_stop())) + break; + if (!sb) { + schedule_timeout(IPVS_SYNC_CHECK_PERIOD); + continue; } - - /* check if entries stay in ipvs->sync_buff for 2 seconds */ - sb = get_curr_sync_buff(ipvs, 2 * HZ); - if (sb) { - ip_vs_send_sync_msg(tinfo->sock, sb->mesg); - ip_vs_sync_buff_release(sb); + while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { + int ret = 0; + + __wait_event_interruptible(*sk_sleep(sk), + sock_writeable(sk) || + kthread_should_stop(), + ret); + if (unlikely(kthread_should_stop())) + goto done; } - - schedule_timeout_interruptible(HZ); + ip_vs_sync_buff_release(sb); } +done: + __set_current_state(TASK_RUNNING); + if (sb) + ip_vs_sync_buff_release(sb); + /* clean up the sync_buff queue */ while ((sb = sb_dequeue(ipvs))) ip_vs_sync_buff_release(sb); + __set_current_state(TASK_RUNNING); /* clean up the current sync_buff */ sb = get_curr_sync_buff(ipvs, 0); @@ -1538,6 +1618,10 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) realtask = &ipvs->master_thread; name = "ipvs_master:%d"; threadfn = sync_thread_master; + ipvs->sync_queue_len = 0; + ipvs->sync_queue_delay = 0; + INIT_DELAYED_WORK(&ipvs->master_wakeup_work, + master_wakeup_work_handler); sock = make_send_sock(net); } else if (state == IP_VS_STATE_BACKUP) { if (ipvs->backup_thread) @@ -1623,6 +1707,7 @@ int stop_sync_thread(struct net *net, int state) spin_lock_bh(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; spin_unlock_bh(&ipvs->sync_lock); + cancel_delayed_work_sync(&ipvs->master_wakeup_work); retc = kthread_stop(ipvs->master_thread); ipvs->master_thread = NULL; } else if (state == IP_VS_STATE_BACKUP) { -- cgit v1.2.2 From 749c42b620a9511782bc38d0a88702a42434529e Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 24 Apr 2012 23:46:40 +0300 Subject: ipvs: reduce sync rate with time thresholds Add two new sysctl vars to control the sync 
rate with the main idea to reduce the rate for connection templates because currently it depends on the packet rate for controlled connections. This mechanism should be useful also for normal connections with high traffic. sync_refresh_period: in seconds, difference in reported connection timer that triggers new sync message. It can be used to avoid sync messages for the specified period (or half of the connection timeout if it is lower) if connection state is not changed from last sync. sync_retries: integer, 0..3, defines sync retries with period of sync_refresh_period/8. Useful to protect against loss of sync messages. Allow sysctl_sync_threshold to be used with sysctl_sync_period=0, so that only single sync message is sent if sync_refresh_period is also 0. Add new field "sync_endtime" in connection structure to hold the reported time when connection expires. The 2 lowest bits will represent the retry count. As the sysctl_sync_period now can be 0 use ACCESS_ONCE to avoid division by zero. Special thanks to Aleksey Chudov for being patient with me, for his extensive reports and helping in all tests. Signed-off-by: Julian Anastasov Tested-by: Aleksey Chudov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 7 ++- net/netfilter/ipvs/ip_vs_core.c | 30 +--------- net/netfilter/ipvs/ip_vs_ctl.c | 25 ++++++++- net/netfilter/ipvs/ip_vs_sync.c | 121 ++++++++++++++++++++++++++++++++++------ 4 files changed, 137 insertions(+), 46 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index fd74f881d04a..4f3205def28f 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -762,7 +762,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct) static void ip_vs_conn_expire(unsigned long data) { struct ip_vs_conn *cp = (struct ip_vs_conn *)data; - struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); + struct net *net = ip_vs_conn_net(cp); + struct netns_ipvs *ipvs = net_ipvs(net); cp->timeout = 60*HZ; @@ -827,6 +828,9 @@ static void ip_vs_conn_expire(unsigned long data) atomic_read(&cp->refcnt)-1, atomic_read(&cp->n_control)); + if (ipvs->sync_state & IP_VS_STATE_MASTER) + ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs)); + ip_vs_conn_put(cp); } @@ -900,6 +904,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, /* Set its state and timeout */ cp->state = 0; cp->timeout = 3*HZ; + cp->sync_endtime = jiffies & ~3UL; /* Bind its packet transmitter */ #ifdef CONFIG_IP_VS_IPV6 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index c8f36b96f44f..a54b018c6eea 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1613,34 +1613,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) else pkts = atomic_add_return(1, &cp->in_pkts); - if ((ipvs->sync_state & IP_VS_STATE_MASTER) && - cp->protocol == IPPROTO_SCTP) { - if ((cp->state == IP_VS_SCTP_S_ESTABLISHED && - (pkts % sysctl_sync_period(ipvs) - == sysctl_sync_threshold(ipvs))) || - (cp->old_state != cp->state && - ((cp->state == IP_VS_SCTP_S_CLOSED) || - (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) || - (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) { - ip_vs_sync_conn(net, cp); - goto out; - } - } - - /* Keep this block last: TCP and others with pp->num_states <= 1 */ - else if ((ipvs->sync_state & IP_VS_STATE_MASTER) && - (((cp->protocol != IPPROTO_TCP || - cp->state == IP_VS_TCP_S_ESTABLISHED) && - (pkts % sysctl_sync_period(ipvs) - == sysctl_sync_threshold(ipvs))) || - ((cp->protocol == 
IPPROTO_TCP) && (cp->old_state != cp->state) && - ((cp->state == IP_VS_TCP_S_FIN_WAIT) || - (cp->state == IP_VS_TCP_S_CLOSE) || - (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || - (cp->state == IP_VS_TCP_S_TIME_WAIT))))) - ip_vs_sync_conn(net, cp); -out: - cp->old_state = cp->state; + if (ipvs->sync_state & IP_VS_STATE_MASTER) + ip_vs_sync_conn(net, cp, pkts); ip_vs_conn_put(cp); return ret; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index bd3827ec25c9..a77b9bd433aa 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1599,6 +1599,10 @@ static int ip_vs_zero_all(struct net *net) } #ifdef CONFIG_SYSCTL + +static int zero; +static int three = 3; + static int proc_do_defense_mode(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -1632,7 +1636,8 @@ proc_do_sync_threshold(ctl_table *table, int write, memcpy(val, valp, sizeof(val)); rc = proc_dointvec(table, write, buffer, lenp, ppos); - if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) { + if (write && (valp[0] < 0 || valp[1] < 0 || + (valp[0] >= valp[1] && valp[1]))) { /* Restore the correct value */ memcpy(valp, val, sizeof(val)); } @@ -1754,6 +1759,20 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_do_sync_threshold, }, + { + .procname = "sync_refresh_period", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, + { + .procname = "sync_retries", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &three, + }, { .procname = "nat_icmp_send", .maxlen = sizeof(int), @@ -3678,6 +3697,10 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); + ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; + tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; + ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3); + tbl[idx++].data = &ipvs->sysctl_sync_retries; tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index b3235b230139..8d6a4219e904 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -451,11 +451,94 @@ ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) return sb; } +/* Check if conn should be synced. + * pkts: conn packets, use sysctl_sync_threshold to avoid packet check + * - (1) sync_refresh_period: reduce sync rate. 
Additionally, retry + * sync_retries times with period of sync_refresh_period/8 + * - (2) if both sync_refresh_period and sync_period are 0 send sync only + * for state changes or only once when pkts matches sync_threshold + * - (3) templates: rate can be reduced only with sync_refresh_period or + * with (2) + */ +static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, + struct ip_vs_conn *cp, int pkts) +{ + unsigned long orig = ACCESS_ONCE(cp->sync_endtime); + unsigned long now = jiffies; + unsigned long n = (now + cp->timeout) & ~3UL; + unsigned int sync_refresh_period; + int sync_period; + int force; + + /* Check if we sync in current state */ + if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) + force = 0; + else if (likely(cp->protocol == IPPROTO_TCP)) { + if (!((1 << cp->state) & + ((1 << IP_VS_TCP_S_ESTABLISHED) | + (1 << IP_VS_TCP_S_FIN_WAIT) | + (1 << IP_VS_TCP_S_CLOSE) | + (1 << IP_VS_TCP_S_CLOSE_WAIT) | + (1 << IP_VS_TCP_S_TIME_WAIT)))) + return 0; + force = cp->state != cp->old_state; + if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) + goto set; + } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { + if (!((1 << cp->state) & + ((1 << IP_VS_SCTP_S_ESTABLISHED) | + (1 << IP_VS_SCTP_S_CLOSED) | + (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) | + (1 << IP_VS_SCTP_S_SHUT_ACK_SER)))) + return 0; + force = cp->state != cp->old_state; + if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) + goto set; + } else { + /* UDP or another protocol with single state */ + force = 0; + } + + sync_refresh_period = sysctl_sync_refresh_period(ipvs); + if (sync_refresh_period > 0) { + long diff = n - orig; + long min_diff = max(cp->timeout >> 1, 10UL * HZ); + + /* Avoid sync if difference is below sync_refresh_period + * and below the half timeout. + */ + if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { + int retries = orig & 3; + + if (retries >= sysctl_sync_retries(ipvs)) + return 0; + if (time_before(now, orig - cp->timeout + + (sync_refresh_period >> 3))) + return 0; + n |= retries + 1; + } + } + sync_period = sysctl_sync_period(ipvs); + if (sync_period > 0) { + if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && + pkts % sync_period != sysctl_sync_threshold(ipvs)) + return 0; + } else if (sync_refresh_period <= 0 && + pkts != sysctl_sync_threshold(ipvs)) + return 0; + +set: + cp->old_state = cp->state; + n = cmpxchg(&cp->sync_endtime, orig, n); + return n == orig || force; +} + /* * Version 0 , could be switched in by sys_ctl. * Add an ip_vs_conn information into the current sync_buff. 
*/ -void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) +static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, + int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg_v0 *m; @@ -468,6 +551,9 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) if (cp->flags & IP_VS_CONN_F_ONE_PACKET) return; + if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) + return; + spin_lock(&ipvs->sync_buff_lock); if (!ipvs->sync_buff) { ipvs->sync_buff = @@ -513,8 +599,14 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) spin_unlock(&ipvs->sync_buff_lock); /* synchronize its controller if it has */ - if (cp->control) - ip_vs_sync_conn(net, cp->control); + cp = cp->control; + if (cp) { + if (cp->flags & IP_VS_CONN_F_TEMPLATE) + pkts = atomic_add_return(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); + ip_vs_sync_conn(net, cp->control, pkts); + } } /* @@ -522,7 +614,7 @@ void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp) * Called by ip_vs_in. * Sending Version 1 messages */ -void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp) +void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m; @@ -532,13 +624,16 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp) /* Handle old version of the protocol */ if (sysctl_sync_ver(ipvs) == 0) { - ip_vs_sync_conn_v0(net, cp); + ip_vs_sync_conn_v0(net, cp, pkts); return; } /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) goto control; sloop: + if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) + goto control; + /* Sanity checks */ pe_name_len = 0; if (cp->pe_data_len) { @@ -653,16 +748,10 @@ control: cp = cp->control; if (!cp) return; - /* - * Reduce sync rate for templates - * i.e only increment in_pkts for Templates. - */ - if (cp->flags & IP_VS_CONN_F_TEMPLATE) { - int pkts = atomic_add_return(1, &cp->in_pkts); - - if (pkts % sysctl_sync_period(ipvs) != 1) - return; - } + if (cp->flags & IP_VS_CONN_F_TEMPLATE) + pkts = atomic_add_return(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); goto sloop; } @@ -1494,7 +1583,7 @@ next_sync_buff(struct netns_ipvs *ipvs) if (sb) return sb; /* Do not delay entries in buffer for more than 2 seconds */ - return get_curr_sync_buff(ipvs, 2 * HZ); + return get_curr_sync_buff(ipvs, IPVS_SYNC_FLUSH_TIME); } static int sync_thread_master(void *data) -- cgit v1.2.2 From f73181c8288fc38747ec4f0f3e8a9052ab785cd5 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 8 May 2012 19:40:30 +0200 Subject: ipvs: add support for sync threads Allow master and backup servers to use many threads for sync traffic. Add sysctl var "sync_ports" to define the number of threads. Every thread will use single UDP port, thread 0 will use the default port 8848 while last thread will use port 8848+sync_ports-1. The sync traffic for connections is scheduled to many master threads based on the cp address but one connection is always assigned to same thread to avoid reordering of the sync messages. Remove ip_vs_sync_switch_mode because this check for sync mode change is still risky. Instead, check for mode change under sync_buff_lock. Make sure the backup socks do not block on reading. Special thanks to Aleksey Chudov for helping in all tests. 
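In essence, each connection is hashed to a fixed sync thread, so its sync messages always leave through the same thread and the same UDP port. A minimal sketch of that selection, mirroring the select_master_thread_id() helper introduced by this patch (threads_mask is sync_ports - 1):

    /* Map a connection to one of the master sync threads.  Shifting away
     * the low pointer bits (always zero for objects of this size) and
     * masking with threads_mask keeps a given connection on the same
     * thread, so its sync messages are never reordered across threads.
     */
    static inline int select_master_thread_id(struct netns_ipvs *ipvs,
                                              struct ip_vs_conn *cp)
    {
        return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask;
    }

Masking with sync_ports - 1 only works when sync_ports is a power of two, which is why the new proc_do_sync_ports handler rejects any other value.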
Signed-off-by: Julian Anastasov Tested-by: Aleksey Chudov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 7 + net/netfilter/ipvs/ip_vs_ctl.c | 29 ++- net/netfilter/ipvs/ip_vs_sync.c | 401 +++++++++++++++++++++++++--------------- 3 files changed, 280 insertions(+), 157 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 4f3205def28f..c7edf2022c3e 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -619,12 +619,19 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) if (dest) { struct ip_vs_proto_data *pd; + spin_lock(&cp->lock); + if (cp->dest) { + spin_unlock(&cp->lock); + return dest; + } + /* Applications work depending on the forwarding method * but better to reassign them always when binding dest */ if (cp->app) ip_vs_unbind_app(cp); ip_vs_bind_dest(cp, dest); + spin_unlock(&cp->lock); /* Update its packet transmitter */ cp->packet_xmit = NULL; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index a77b9bd433aa..dd811b8dd97c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1657,9 +1657,24 @@ proc_do_sync_mode(ctl_table *table, int write, if ((*valp < 0) || (*valp > 1)) { /* Restore the correct value */ *valp = val; - } else { - struct net *net = current->nsproxy->net_ns; - ip_vs_sync_switch_mode(net, val); + } + } + return rc; +} + +static int +proc_do_sync_ports(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int *valp = table->data; + int val = *valp; + int rc; + + rc = proc_dointvec(table, write, buffer, lenp, ppos); + if (write && (*valp != val)) { + if (*valp < 1 || !is_power_of_2(*valp)) { + /* Restore the correct value */ + *valp = val; } } return rc; @@ -1722,6 +1737,12 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = &proc_do_sync_mode, }, + { + .procname = "sync_ports", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_do_sync_ports, + }, { .procname = "sync_qlen_max", .maxlen = sizeof(int), @@ -3686,6 +3707,8 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_snat_reroute; ipvs->sysctl_sync_ver = 1; tbl[idx++].data = &ipvs->sysctl_sync_ver; + ipvs->sysctl_sync_ports = 1; + tbl[idx++].data = &ipvs->sysctl_sync_ports; ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32; tbl[idx++].data = &ipvs->sysctl_sync_qlen_max; ipvs->sysctl_sync_sock_size = 0; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 8d6a4219e904..effa10c9e4e3 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -196,6 +196,7 @@ struct ip_vs_sync_thread_data { struct net *net; struct socket *sock; char *buf; + int id; }; /* Version 0 definition of packet sizes */ @@ -271,13 +272,6 @@ struct ip_vs_sync_buff { unsigned char *end; }; -/* multicast addr */ -static struct sockaddr_in mcast_addr = { - .sin_family = AF_INET, - .sin_port = cpu_to_be16(IP_VS_SYNC_PORT), - .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), -}; - /* * Copy of struct ip_vs_seq * From unaligned network order to aligned host order @@ -300,22 +294,22 @@ static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) put_unaligned_be32(ho->previous_delta, &no->previous_delta); } -static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs) +static inline struct ip_vs_sync_buff * +sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { 
struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_lock); - if (list_empty(&ipvs->sync_queue)) { + if (list_empty(&ms->sync_queue)) { sb = NULL; __set_current_state(TASK_INTERRUPTIBLE); } else { - sb = list_entry(ipvs->sync_queue.next, - struct ip_vs_sync_buff, + sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); - ipvs->sync_queue_len--; - if (!ipvs->sync_queue_len) - ipvs->sync_queue_delay = 0; + ms->sync_queue_len--; + if (!ms->sync_queue_len) + ms->sync_queue_delay = 0; } spin_unlock_bh(&ipvs->sync_lock); @@ -338,7 +332,7 @@ ip_vs_sync_buff_create(struct netns_ipvs *ipvs) kfree(sb); return NULL; } - sb->mesg->reserved = 0; /* old nr_conns i.e. must be zeo now */ + sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */ sb->mesg->version = SYNC_PROTO_VER; sb->mesg->syncid = ipvs->master_syncid; sb->mesg->size = sizeof(struct ip_vs_sync_mesg); @@ -357,20 +351,21 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) kfree(sb); } -static inline void sb_queue_tail(struct netns_ipvs *ipvs) +static inline void sb_queue_tail(struct netns_ipvs *ipvs, + struct ipvs_master_sync_state *ms) { - struct ip_vs_sync_buff *sb = ipvs->sync_buff; + struct ip_vs_sync_buff *sb = ms->sync_buff; spin_lock(&ipvs->sync_lock); if (ipvs->sync_state & IP_VS_STATE_MASTER && - ipvs->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { - if (!ipvs->sync_queue_len) - schedule_delayed_work(&ipvs->master_wakeup_work, + ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { + if (!ms->sync_queue_len) + schedule_delayed_work(&ms->master_wakeup_work, max(IPVS_SYNC_SEND_DELAY, 1)); - ipvs->sync_queue_len++; - list_add_tail(&sb->list, &ipvs->sync_queue); - if ((++ipvs->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) - wake_up_process(ipvs->master_thread); + ms->sync_queue_len++; + list_add_tail(&sb->list, &ms->sync_queue); + if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) + wake_up_process(ms->master_thread); } else ip_vs_sync_buff_release(sb); spin_unlock(&ipvs->sync_lock); @@ -381,15 +376,15 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs) * than the specified time or the specified time is zero. */ static inline struct ip_vs_sync_buff * -get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) +get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, + unsigned long time) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_buff_lock); - if (ipvs->sync_buff && - time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) { - sb = ipvs->sync_buff; - ipvs->sync_buff = NULL; + sb = ms->sync_buff; + if (sb && time_after_eq(jiffies - sb->firstuse, time)) { + ms->sync_buff = NULL; __set_current_state(TASK_RUNNING); } else sb = NULL; @@ -397,31 +392,10 @@ get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time) return sb; } -/* - * Switch mode from sending version 0 or 1 - * - must handle sync_buf - */ -void ip_vs_sync_switch_mode(struct net *net, int mode) +static inline int +select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) { - struct netns_ipvs *ipvs = net_ipvs(net); - struct ip_vs_sync_buff *sb; - - spin_lock_bh(&ipvs->sync_buff_lock); - if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) - goto unlock; - sb = ipvs->sync_buff; - if (mode == sysctl_sync_ver(ipvs) || !sb) - goto unlock; - - /* Buffer empty ? 
then let buf_create do the job */ - if (sb->mesg->size <= sizeof(struct ip_vs_sync_mesg)) { - ip_vs_sync_buff_release(sb); - ipvs->sync_buff = NULL; - } else - sb_queue_tail(ipvs); - -unlock: - spin_unlock_bh(&ipvs->sync_buff_lock); + return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; } /* @@ -543,6 +517,9 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg_v0 *m; struct ip_vs_sync_conn_v0 *s; + struct ip_vs_sync_buff *buff; + struct ipvs_master_sync_state *ms; + int id; int len; if (unlikely(cp->af != AF_INET)) @@ -555,20 +532,37 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, return; spin_lock(&ipvs->sync_buff_lock); - if (!ipvs->sync_buff) { - ipvs->sync_buff = - ip_vs_sync_buff_create_v0(ipvs); - if (!ipvs->sync_buff) { + if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { + spin_unlock(&ipvs->sync_buff_lock); + return; + } + + id = select_master_thread_id(ipvs, cp); + ms = &ipvs->ms[id]; + buff = ms->sync_buff; + if (buff) { + m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; + /* Send buffer if it is for v1 */ + if (!m->nr_conns) { + sb_queue_tail(ipvs, ms); + ms->sync_buff = NULL; + buff = NULL; + } + } + if (!buff) { + buff = ip_vs_sync_buff_create_v0(ipvs); + if (!buff) { spin_unlock(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } + ms->sync_buff = buff; } len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : SIMPLE_CONN_SIZE; - m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg; - s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head; + m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; + s = (struct ip_vs_sync_conn_v0 *) buff->head; /* copy members */ s->reserved = 0; @@ -589,12 +583,12 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, m->nr_conns++; m->size += len; - ipvs->sync_buff->head += len; + buff->head += len; /* check if there is a space for next one */ - if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) { - sb_queue_tail(ipvs); - ipvs->sync_buff = NULL; + if (buff->head + FULL_CONN_SIZE > buff->end) { + sb_queue_tail(ipvs, ms); + ms->sync_buff = NULL; } spin_unlock(&ipvs->sync_buff_lock); @@ -619,6 +613,9 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m; union ip_vs_sync_conn *s; + struct ip_vs_sync_buff *buff; + struct ipvs_master_sync_state *ms; + int id; __u8 *p; unsigned int len, pe_name_len, pad; @@ -645,6 +642,13 @@ sloop: } spin_lock(&ipvs->sync_buff_lock); + if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { + spin_unlock(&ipvs->sync_buff_lock); + return; + } + + id = select_master_thread_id(ipvs, cp); + ms = &ipvs->ms[id]; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) @@ -663,27 +667,32 @@ sloop: /* check if there is a space for this one */ pad = 0; - if (ipvs->sync_buff) { - pad = (4 - (size_t)ipvs->sync_buff->head) & 3; - if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) { - sb_queue_tail(ipvs); - ipvs->sync_buff = NULL; + buff = ms->sync_buff; + if (buff) { + m = buff->mesg; + pad = (4 - (size_t) buff->head) & 3; + /* Send buffer if it is for v0 */ + if (buff->head + len + pad > buff->end || m->reserved) { + sb_queue_tail(ipvs, ms); + ms->sync_buff = NULL; + buff = NULL; pad = 0; } } - if (!ipvs->sync_buff) { - ipvs->sync_buff = ip_vs_sync_buff_create(ipvs); - if (!ipvs->sync_buff) { + if (!buff) { + buff = ip_vs_sync_buff_create(ipvs); + if (!buff) { 
spin_unlock(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } + ms->sync_buff = buff; + m = buff->mesg; } - m = ipvs->sync_buff->mesg; - p = ipvs->sync_buff->head; - ipvs->sync_buff->head += pad + len; + p = buff->head; + buff->head += pad + len; m->size += pad + len; /* Add ev. padding from prev. sync_conn */ while (pad--) @@ -834,6 +843,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, kfree(param->pe_data); dest = cp->dest; + spin_lock(&cp->lock); if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { if (flags & IP_VS_CONN_F_INACTIVE) { @@ -847,6 +857,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; cp->flags = flags; + spin_unlock(&cp->lock); if (!dest) { dest = ip_vs_try_bind_dest(cp); if (dest) @@ -1399,9 +1410,15 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname) /* * Set up sending multicast socket over UDP */ -static struct socket *make_send_sock(struct net *net) +static struct socket *make_send_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); + /* multicast addr */ + struct sockaddr_in mcast_addr = { + .sin_family = AF_INET, + .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), + .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), + }; struct socket *sock; int result; @@ -1453,9 +1470,15 @@ error: /* * Set up receiving multicast socket over UDP */ -static struct socket *make_receive_sock(struct net *net) +static struct socket *make_receive_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); + /* multicast addr */ + struct sockaddr_in mcast_addr = { + .sin_family = AF_INET, + .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), + .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), + }; struct socket *sock; int result; @@ -1549,10 +1572,10 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) iov.iov_base = buffer; iov.iov_len = (size_t)buflen; - len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0); + len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT); if (len < 0) - return -1; + return len; LeaveFunction(7); return len; @@ -1561,44 +1584,47 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) /* Wakeup the master thread for sending */ static void master_wakeup_work_handler(struct work_struct *work) { - struct netns_ipvs *ipvs = container_of(work, struct netns_ipvs, - master_wakeup_work.work); + struct ipvs_master_sync_state *ms = + container_of(work, struct ipvs_master_sync_state, + master_wakeup_work.work); + struct netns_ipvs *ipvs = ms->ipvs; spin_lock_bh(&ipvs->sync_lock); - if (ipvs->sync_queue_len && - ipvs->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { - ipvs->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; - wake_up_process(ipvs->master_thread); + if (ms->sync_queue_len && + ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { + ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; + wake_up_process(ms->master_thread); } spin_unlock_bh(&ipvs->sync_lock); } /* Get next buffer to send */ static inline struct ip_vs_sync_buff * -next_sync_buff(struct netns_ipvs *ipvs) +next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; - sb = sb_dequeue(ipvs); + sb = sb_dequeue(ipvs, ms); if (sb) return sb; /* Do not delay entries in buffer for more than 2 seconds */ - return get_curr_sync_buff(ipvs, IPVS_SYNC_FLUSH_TIME); + return 
get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); } static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); + struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; struct sock *sk = tinfo->sock->sk; struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " - "syncid = %d\n", - ipvs->master_mcast_ifn, ipvs->master_syncid); + "syncid = %d, id = %d\n", + ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id); for (;;) { - sb = next_sync_buff(ipvs); + sb = next_sync_buff(ipvs, ms); if (unlikely(kthread_should_stop())) break; if (!sb) { @@ -1624,12 +1650,12 @@ done: ip_vs_sync_buff_release(sb); /* clean up the sync_buff queue */ - while ((sb = sb_dequeue(ipvs))) + while ((sb = sb_dequeue(ipvs, ms))) ip_vs_sync_buff_release(sb); __set_current_state(TASK_RUNNING); /* clean up the current sync_buff */ - sb = get_curr_sync_buff(ipvs, 0); + sb = get_curr_sync_buff(ipvs, ms, 0); if (sb) ip_vs_sync_buff_release(sb); @@ -1648,8 +1674,8 @@ static int sync_thread_backup(void *data) int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " - "syncid = %d\n", - ipvs->backup_mcast_ifn, ipvs->backup_syncid); + "syncid = %d, id = %d\n", + ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id); while (!kthread_should_stop()) { wait_event_interruptible(*sk_sleep(tinfo->sock->sk), @@ -1661,7 +1687,8 @@ static int sync_thread_backup(void *data) len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->recv_mesg_maxlen); if (len <= 0) { - pr_err("receiving message error\n"); + if (len != -EAGAIN) + pr_err("receiving message error\n"); break; } @@ -1685,90 +1712,140 @@ static int sync_thread_backup(void *data) int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) { struct ip_vs_sync_thread_data *tinfo; - struct task_struct **realtask, *task; + struct task_struct **array = NULL, *task; struct socket *sock; struct netns_ipvs *ipvs = net_ipvs(net); - char *name, *buf = NULL; + char *name; int (*threadfn)(void *data); + int id, count; int result = -ENOMEM; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); + if (!ipvs->sync_state) { + count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); + ipvs->threads_mask = count - 1; + } else + count = ipvs->threads_mask + 1; if (state == IP_VS_STATE_MASTER) { - if (ipvs->master_thread) + if (ipvs->ms) return -EEXIST; strlcpy(ipvs->master_mcast_ifn, mcast_ifn, sizeof(ipvs->master_mcast_ifn)); ipvs->master_syncid = syncid; - realtask = &ipvs->master_thread; - name = "ipvs_master:%d"; + name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; - ipvs->sync_queue_len = 0; - ipvs->sync_queue_delay = 0; - INIT_DELAYED_WORK(&ipvs->master_wakeup_work, - master_wakeup_work_handler); - sock = make_send_sock(net); } else if (state == IP_VS_STATE_BACKUP) { - if (ipvs->backup_thread) + if (ipvs->backup_threads) return -EEXIST; strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, sizeof(ipvs->backup_mcast_ifn)); ipvs->backup_syncid = syncid; - realtask = &ipvs->backup_thread; - name = "ipvs_backup:%d"; + name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; - sock = make_receive_sock(net); } else { return -EINVAL; } - if (IS_ERR(sock)) { - result = PTR_ERR(sock); - goto out; - } + if (state == IP_VS_STATE_MASTER) { + struct ipvs_master_sync_state *ms; - set_sync_mesg_maxlen(net, state); - if (state == IP_VS_STATE_BACKUP) { - buf = 
kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL); - if (!buf) - goto outsocket; + ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); + if (!ipvs->ms) + goto out; + ms = ipvs->ms; + for (id = 0; id < count; id++, ms++) { + INIT_LIST_HEAD(&ms->sync_queue); + ms->sync_queue_len = 0; + ms->sync_queue_delay = 0; + INIT_DELAYED_WORK(&ms->master_wakeup_work, + master_wakeup_work_handler); + ms->ipvs = ipvs; + } + } else { + array = kzalloc(count * sizeof(struct task_struct *), + GFP_KERNEL); + if (!array) + goto out; } + set_sync_mesg_maxlen(net, state); - tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); - if (!tinfo) - goto outbuf; - - tinfo->net = net; - tinfo->sock = sock; - tinfo->buf = buf; + tinfo = NULL; + for (id = 0; id < count; id++) { + if (state == IP_VS_STATE_MASTER) + sock = make_send_sock(net, id); + else + sock = make_receive_sock(net, id); + if (IS_ERR(sock)) { + result = PTR_ERR(sock); + goto outtinfo; + } + tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); + if (!tinfo) + goto outsocket; + tinfo->net = net; + tinfo->sock = sock; + if (state == IP_VS_STATE_BACKUP) { + tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen, + GFP_KERNEL); + if (!tinfo->buf) + goto outtinfo; + } + tinfo->id = id; - task = kthread_run(threadfn, tinfo, name, ipvs->gen); - if (IS_ERR(task)) { - result = PTR_ERR(task); - goto outtinfo; + task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); + if (IS_ERR(task)) { + result = PTR_ERR(task); + goto outtinfo; + } + tinfo = NULL; + if (state == IP_VS_STATE_MASTER) + ipvs->ms[id].master_thread = task; + else + array[id] = task; } /* mark as active */ - *realtask = task; + + if (state == IP_VS_STATE_BACKUP) + ipvs->backup_threads = array; + spin_lock_bh(&ipvs->sync_buff_lock); ipvs->sync_state |= state; + spin_unlock_bh(&ipvs->sync_buff_lock); /* increase the module use count */ ip_vs_use_count_inc(); return 0; -outtinfo: - kfree(tinfo); -outbuf: - kfree(buf); outsocket: sk_release_kernel(sock->sk); + +outtinfo: + if (tinfo) { + sk_release_kernel(tinfo->sock->sk); + kfree(tinfo->buf); + kfree(tinfo); + } + count = id; + while (count-- > 0) { + if (state == IP_VS_STATE_MASTER) + kthread_stop(ipvs->ms[count].master_thread); + else + kthread_stop(array[count]); + } + kfree(array); + out: + if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { + kfree(ipvs->ms); + ipvs->ms = NULL; + } return result; } @@ -1776,39 +1853,60 @@ out: int stop_sync_thread(struct net *net, int state) { struct netns_ipvs *ipvs = net_ipvs(net); + struct task_struct **array; + int id; int retc = -EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); if (state == IP_VS_STATE_MASTER) { - if (!ipvs->master_thread) + if (!ipvs->ms) return -ESRCH; - pr_info("stopping master sync thread %d ...\n", - task_pid_nr(ipvs->master_thread)); - /* * The lock synchronizes with sb_queue_tail(), so that we don't * add sync buffers to the queue, when we are already in * progress of stopping the master sync daemon. 
*/ - spin_lock_bh(&ipvs->sync_lock); + spin_lock_bh(&ipvs->sync_buff_lock); + spin_lock(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; - spin_unlock_bh(&ipvs->sync_lock); - cancel_delayed_work_sync(&ipvs->master_wakeup_work); - retc = kthread_stop(ipvs->master_thread); - ipvs->master_thread = NULL; + spin_unlock(&ipvs->sync_lock); + spin_unlock_bh(&ipvs->sync_buff_lock); + + retc = 0; + for (id = ipvs->threads_mask; id >= 0; id--) { + struct ipvs_master_sync_state *ms = &ipvs->ms[id]; + int ret; + + pr_info("stopping master sync thread %d ...\n", + task_pid_nr(ms->master_thread)); + cancel_delayed_work_sync(&ms->master_wakeup_work); + ret = kthread_stop(ms->master_thread); + if (retc >= 0) + retc = ret; + } + kfree(ipvs->ms); + ipvs->ms = NULL; } else if (state == IP_VS_STATE_BACKUP) { - if (!ipvs->backup_thread) + if (!ipvs->backup_threads) return -ESRCH; - pr_info("stopping backup sync thread %d ...\n", - task_pid_nr(ipvs->backup_thread)); - ipvs->sync_state &= ~IP_VS_STATE_BACKUP; - retc = kthread_stop(ipvs->backup_thread); - ipvs->backup_thread = NULL; + array = ipvs->backup_threads; + retc = 0; + for (id = ipvs->threads_mask; id >= 0; id--) { + int ret; + + pr_info("stopping backup sync thread %d ...\n", + task_pid_nr(array[id])); + ret = kthread_stop(array[id]); + if (retc >= 0) + retc = ret; + } + kfree(array); + ipvs->backup_threads = NULL; } /* decrease the module use count */ @@ -1825,13 +1923,8 @@ int __net_init ip_vs_sync_net_init(struct net *net) struct netns_ipvs *ipvs = net_ipvs(net); __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); - INIT_LIST_HEAD(&ipvs->sync_queue); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); - - ipvs->sync_mcast_addr.sin_family = AF_INET; - ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT); - ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP); return 0; } -- cgit v1.2.2 From 6b324dbfc3dc13f0a7e236d3529c31d6bc4edbfe Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 8 May 2012 09:28:19 +0200 Subject: ipvs: optimize the use of flags in ip_vs_bind_dest cp->flags is marked volatile but ip_vs_bind_dest can safely modify the flags, so save some CPU cycles by using temp variable. Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_conn.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index c7edf2022c3e..1548df9a7524 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -548,6 +548,7 @@ static inline void ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) { unsigned int conn_flags; + __u32 flags; /* if dest is NULL, then return directly */ if (!dest) @@ -559,17 +560,19 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) conn_flags = atomic_read(&dest->conn_flags); if (cp->protocol != IPPROTO_UDP) conn_flags &= ~IP_VS_CONN_F_ONE_PACKET; + flags = cp->flags; /* Bind with the destination and its corresponding transmitter */ - if (cp->flags & IP_VS_CONN_F_SYNC) { + if (flags & IP_VS_CONN_F_SYNC) { /* if the connection is not template and is created * by sync, preserve the activity flag. 
*/ - if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) + if (!(flags & IP_VS_CONN_F_TEMPLATE)) conn_flags &= ~IP_VS_CONN_F_INACTIVE; /* connections inherit forwarding method from dest */ - cp->flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT); + flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT); } - cp->flags |= conn_flags; + flags |= conn_flags; + cp->flags = flags; cp->dest = dest; IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " @@ -584,12 +587,12 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) atomic_read(&dest->refcnt)); /* Update the connection counters */ - if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { + if (!(flags & IP_VS_CONN_F_TEMPLATE)) { /* It is a normal connection, so modify the counters * according to the flags, later the protocol can * update them on state change */ - if (!(cp->flags & IP_VS_CONN_F_INACTIVE)) + if (!(flags & IP_VS_CONN_F_INACTIVE)) atomic_inc(&dest->activeconns); else atomic_inc(&dest->inactconns); -- cgit v1.2.2 From d5cce2087455f4d17b11db881ca51a6965be8de0 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Thu, 26 Apr 2012 11:17:28 -0700 Subject: ipvs: ip_vs_ftp: local functions should not be exposed globally Functions not referenced outside of a source file should be marked static to prevent it from being exposed globally. This quiets the sparse warnings: warning: symbol 'ip_vs_ftp_init' was not declared. Should it be static? Signed-off-by: H Hartley Sweeten Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_ftp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 510f2b5a5855..b20b29c903ef 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -485,7 +485,7 @@ static struct pernet_operations ip_vs_ftp_ops = { .exit = __ip_vs_ftp_exit, }; -int __init ip_vs_ftp_init(void) +static int __init ip_vs_ftp_init(void) { int rv; -- cgit v1.2.2 From 068d522067294987b16b3971f1aab2141eba4c44 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Thu, 26 Apr 2012 11:26:15 -0700 Subject: ipvs: ip_vs_proto: local functions should not be exposed globally Functions not referenced outside of a source file should be marked static to prevent it from being exposed globally. This quiets the sparse warnings: warning: symbol '__ipvs_proto_data_get' was not declared. Should it be static? Signed-off-by: H Hartley Sweeten Signed-off-by: Simon Horman --- net/netfilter/ipvs/ip_vs_proto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index e91c8982dfac..50d82186da87 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -156,7 +156,7 @@ EXPORT_SYMBOL(ip_vs_proto_get); /* * get ip_vs_protocol object data by netns and proto */ -struct ip_vs_proto_data * +static struct ip_vs_proto_data * __ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) { struct ip_vs_proto_data *pd; -- cgit v1.2.2 From 6d8ebc8a27e1b187abfb06dd79b35a393aa9f2a2 Mon Sep 17 00:00:00 2001 From: Hans Schillstrom Date: Mon, 30 Apr 2012 08:13:50 +0200 Subject: net: export sysctl_[r|w]mem_max symbols needed by ip_vs_sync To build ip_vs as a module sysctl_rmem_max and sysctl_wmem_max needs to be exported. The dependency was added by "ipvs: wakeup master thread" patch. Signed-off-by: Hans Schillstrom Signed-off-by: Simon Horman Acked-by: David S. 
Miller Signed-off-by: Pablo Neira Ayuso --- net/core/sock.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index b8c818e69c23..26ed27fb2bfb 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -259,7 +259,9 @@ static struct lock_class_key af_callback_keys[AF_MAX]; /* Run time adjustable parameters. */ __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; +EXPORT_SYMBOL(sysctl_wmem_max); __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; +EXPORT_SYMBOL(sysctl_rmem_max); __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; -- cgit v1.2.2 From 9768e1ace458fa4ebf88bc3943fd8fb77113ed9c Mon Sep 17 00:00:00 2001 From: Kelvie Wong Date: Wed, 2 May 2012 14:39:24 +0000 Subject: netfilter: nf_ct_expect: partially implement ctnetlink_change_expect This refreshes the "timeout" attribute in existing expectations if one is given. The use case for this would be for userspace helpers to extend the lifetime of the expectation when requested, as this is not possible right now without deleting/recreating the expectation. I use this specifically for forwarding DCERPC traffic through: DCERPC has a port mapper daemon that chooses a (seemingly) random port for future traffic to go to. We expect this traffic (with a reasonable timeout), but sometimes the port mapper will tell the client to continue using the same port. This allows us to extend the expectation accordingly. Signed-off-by: Kelvie Wong Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_netlink.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 462ec2dbe561..6f4b00a8fc73 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2080,7 +2080,15 @@ static int ctnetlink_change_expect(struct nf_conntrack_expect *x, const struct nlattr * const cda[]) { - return -EOPNOTSUPP; + if (cda[CTA_EXPECT_TIMEOUT]) { + if (!del_timer(&x->timeout)) + return -ETIME; + + x->timeout.expires = jiffies + + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; + add_timer(&x->timeout); + } + return 0; } static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { -- cgit v1.2.2 From 6714cf5465d2803a21c6a46c1ea747795a8889fa Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 3 May 2012 02:17:45 +0000 Subject: netfilter: nf_conntrack: fix explicit helper attachment and NAT Explicit helper attachment via the CT target is broken with NAT if non-standard ports are used. This problem was hidden behind the automatic helper assignment routine. Thus, it becomes more noticeable now that we can disable the automatic helper assignment with Eric Leblond's: 9e8ac5a netfilter: nf_ct_helper: allow to disable automatic helper assignment Basically, nf_conntrack_alter_reply asks for looking up the helper up if NAT is enabled. Unfortunately, we don't have the conntrack template at that point anymore. Since we don't want to rely on the automatic helper assignment, we can skip the second look-up and stick to the helper that was attached by iptables. With the CT target, the user is in full control of helper attachment, thus, the policy is to trust what the user explicitly configures via iptables (no automatic magic anymore). Interestingly, this bug was hidden by the automatic helper look-up code. But it can be easily trigger if you attach the helper in a non-standard port, eg. 
iptables -I PREROUTING -t raw -p tcp --dport 8888 \ -j CT --helper ftp And you disabled the automatic helper assignment. I added the IPS_HELPER_BIT that allows us to differenciate between a helper that has been explicitly attached and those that have been automatically assigned. I didn't come up with a better solution (having backward compatibility in mind). Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_helper.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 317f6e43db87..4fa2ff961f5a 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -182,10 +182,21 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, struct net *net = nf_ct_net(ct); int ret = 0; + /* We already got a helper explicitly attached. The function + * nf_conntrack_alter_reply - in case NAT is in use - asks for looking + * the helper up again. Since now the user is in full control of + * making consistent helper configurations, skip this automatic + * re-lookup, otherwise we'll lose the helper. + */ + if (test_bit(IPS_HELPER_BIT, &ct->status)) + return 0; + if (tmpl != NULL) { help = nfct_help(tmpl); - if (help != NULL) + if (help != NULL) { helper = help->helper; + set_bit(IPS_HELPER_BIT, &ct->status); + } } help = nfct_help(ct); -- cgit v1.2.2 From d16cf20e2f2f13411eece7f7fb72c17d141c4a84 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 8 May 2012 19:45:28 +0200 Subject: netfilter: remove ip_queue support This patch removes ip_queue support which was marked as obsolete years ago. The nfnetlink_queue modules provides more advanced user-space packet queueing mechanism. This patch also removes capability code included in SELinux that refers to ip_queue. Otherwise, we break compilation. Several warning has been sent regarding this to the mailing list in the past month without anyone rising the hand to stop this with some strong argument. Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/Makefile | 3 - net/ipv4/netfilter/ip_queue.c | 639 ---------------------------------------- net/ipv6/netfilter/Kconfig | 22 -- net/ipv6/netfilter/Makefile | 1 - net/ipv6/netfilter/ip6_queue.c | 641 ----------------------------------------- 5 files changed, 1306 deletions(-) delete mode 100644 net/ipv4/netfilter/ip_queue.c delete mode 100644 net/ipv6/netfilter/ip6_queue.c (limited to 'net') diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 240b68469a7a..c20674dc9452 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -66,6 +66,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o # just filtering instance of ARP tables for now obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o - -obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o - diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c deleted file mode 100644 index 09775a1e1348..000000000000 --- a/net/ipv4/netfilter/ip_queue.c +++ /dev/null @@ -1,639 +0,0 @@ -/* - * This is a module which is used for queueing IPv4 packets and - * communicating with userspace via netlink. - * - * (C) 2000-2002 James Morris - * (C) 2003-2005 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define IPQ_QMAX_DEFAULT 1024 -#define IPQ_PROC_FS_NAME "ip_queue" -#define NET_IPQ_QMAX 2088 -#define NET_IPQ_QMAX_NAME "ip_queue_maxlen" - -typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long); - -static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; -static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; -static DEFINE_SPINLOCK(queue_lock); -static int peer_pid __read_mostly; -static unsigned int copy_range __read_mostly; -static unsigned int queue_total; -static unsigned int queue_dropped = 0; -static unsigned int queue_user_dropped = 0; -static struct sock *ipqnl __read_mostly; -static LIST_HEAD(queue_list); -static DEFINE_MUTEX(ipqnl_mutex); - -static inline void -__ipq_enqueue_entry(struct nf_queue_entry *entry) -{ - list_add_tail(&entry->list, &queue_list); - queue_total++; -} - -static inline int -__ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status = 0; - - switch(mode) { - case IPQ_COPY_NONE: - case IPQ_COPY_META: - copy_mode = mode; - copy_range = 0; - break; - - case IPQ_COPY_PACKET: - if (range > 0xFFFF) - range = 0xFFFF; - copy_range = range; - copy_mode = mode; - break; - - default: - status = -EINVAL; - - } - return status; -} - -static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data); - -static inline void -__ipq_reset(void) -{ - peer_pid = 0; - net_disable_timestamp(); - __ipq_set_mode(IPQ_COPY_NONE, 0); - __ipq_flush(NULL, 0); -} - -static struct nf_queue_entry * -ipq_find_dequeue_entry(unsigned long id) -{ - struct nf_queue_entry *entry = NULL, *i; - - spin_lock_bh(&queue_lock); - - list_for_each_entry(i, &queue_list, list) { - if ((unsigned long)i == id) { - entry = i; - break; - } - } - - if (entry) { - list_del(&entry->list); - queue_total--; - } - - spin_unlock_bh(&queue_lock); - return entry; -} - -static void -__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - struct nf_queue_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &queue_list, list) { - if (!cmpfn || cmpfn(entry, data)) { - list_del(&entry->list); - queue_total--; - nf_reinject(entry, NF_DROP); - } - } -} - -static void -ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - spin_lock_bh(&queue_lock); - __ipq_flush(cmpfn, data); - spin_unlock_bh(&queue_lock); -} - -static struct sk_buff * -ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) -{ - sk_buff_data_t old_tail; - size_t size = 0; - size_t data_len = 0; - struct sk_buff *skb; - struct ipq_packet_msg *pmsg; - struct nlmsghdr *nlh; - struct timeval tv; - - switch (ACCESS_ONCE(copy_mode)) { - case IPQ_COPY_META: - case IPQ_COPY_NONE: - size = NLMSG_SPACE(sizeof(*pmsg)); - break; - - case IPQ_COPY_PACKET: - if (entry->skb->ip_summed == CHECKSUM_PARTIAL && - (*errp = skb_checksum_help(entry->skb))) - return NULL; - - data_len = ACCESS_ONCE(copy_range); - if (data_len == 0 || data_len > entry->skb->len) - data_len = entry->skb->len; - - size = NLMSG_SPACE(sizeof(*pmsg) + data_len); - break; - - default: - *errp = -EINVAL; - return NULL; - } - - skb = alloc_skb(size, GFP_ATOMIC); - if (!skb) - goto nlmsg_failure; - - old_tail = skb->tail; - nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); - pmsg = NLMSG_DATA(nlh); - memset(pmsg, 0, sizeof(*pmsg)); - - pmsg->packet_id = (unsigned long )entry; - pmsg->data_len = data_len; - tv 
= ktime_to_timeval(entry->skb->tstamp); - pmsg->timestamp_sec = tv.tv_sec; - pmsg->timestamp_usec = tv.tv_usec; - pmsg->mark = entry->skb->mark; - pmsg->hook = entry->hook; - pmsg->hw_protocol = entry->skb->protocol; - - if (entry->indev) - strcpy(pmsg->indev_name, entry->indev->name); - else - pmsg->indev_name[0] = '\0'; - - if (entry->outdev) - strcpy(pmsg->outdev_name, entry->outdev->name); - else - pmsg->outdev_name[0] = '\0'; - - if (entry->indev && entry->skb->dev && - entry->skb->mac_header != entry->skb->network_header) { - pmsg->hw_type = entry->skb->dev->type; - pmsg->hw_addrlen = dev_parse_header(entry->skb, - pmsg->hw_addr); - } - - if (data_len) - if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len)) - BUG(); - - nlh->nlmsg_len = skb->tail - old_tail; - return skb; - -nlmsg_failure: - kfree_skb(skb); - *errp = -EINVAL; - printk(KERN_ERR "ip_queue: error creating packet message\n"); - return NULL; -} - -static int -ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) -{ - int status = -EINVAL; - struct sk_buff *nskb; - - if (copy_mode == IPQ_COPY_NONE) - return -EAGAIN; - - nskb = ipq_build_packet_message(entry, &status); - if (nskb == NULL) - return status; - - spin_lock_bh(&queue_lock); - - if (!peer_pid) - goto err_out_free_nskb; - - if (queue_total >= queue_maxlen) { - queue_dropped++; - status = -ENOSPC; - if (net_ratelimit()) - printk (KERN_WARNING "ip_queue: full at %d entries, " - "dropping packets(s). Dropped: %d\n", queue_total, - queue_dropped); - goto err_out_free_nskb; - } - - /* netlink_unicast will either free the nskb or attach it to a socket */ - status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT); - if (status < 0) { - queue_user_dropped++; - goto err_out_unlock; - } - - __ipq_enqueue_entry(entry); - - spin_unlock_bh(&queue_lock); - return status; - -err_out_free_nskb: - kfree_skb(nskb); - -err_out_unlock: - spin_unlock_bh(&queue_lock); - return status; -} - -static int -ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e) -{ - int diff; - struct iphdr *user_iph = (struct iphdr *)v->payload; - struct sk_buff *nskb; - - if (v->data_len < sizeof(*user_iph)) - return 0; - diff = v->data_len - e->skb->len; - if (diff < 0) { - if (pskb_trim(e->skb, v->data_len)) - return -ENOMEM; - } else if (diff > 0) { - if (v->data_len > 0xFFFF) - return -EINVAL; - if (diff > skb_tailroom(e->skb)) { - nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), - diff, GFP_ATOMIC); - if (!nskb) { - printk(KERN_WARNING "ip_queue: error " - "in mangle, dropping packet\n"); - return -ENOMEM; - } - kfree_skb(e->skb); - e->skb = nskb; - } - skb_put(e->skb, diff); - } - if (!skb_make_writable(e->skb, v->data_len)) - return -ENOMEM; - skb_copy_to_linear_data(e->skb, v->payload, v->data_len); - e->skb->ip_summed = CHECKSUM_NONE; - - return 0; -} - -static int -ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) -{ - struct nf_queue_entry *entry; - - if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) - return -EINVAL; - - entry = ipq_find_dequeue_entry(vmsg->id); - if (entry == NULL) - return -ENOENT; - else { - int verdict = vmsg->value; - - if (vmsg->data_len && vmsg->data_len == len) - if (ipq_mangle_ipv4(vmsg, entry) < 0) - verdict = NF_DROP; - - nf_reinject(entry, verdict); - return 0; - } -} - -static int -ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status; - - spin_lock_bh(&queue_lock); - status = __ipq_set_mode(mode, range); - spin_unlock_bh(&queue_lock); - return status; -} - -static int 
-ipq_receive_peer(struct ipq_peer_msg *pmsg, - unsigned char type, unsigned int len) -{ - int status = 0; - - if (len < sizeof(*pmsg)) - return -EINVAL; - - switch (type) { - case IPQM_MODE: - status = ipq_set_mode(pmsg->msg.mode.value, - pmsg->msg.mode.range); - break; - - case IPQM_VERDICT: - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; - default: - status = -EINVAL; - } - return status; -} - -static int -dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) -{ - if (entry->indev) - if (entry->indev->ifindex == ifindex) - return 1; - if (entry->outdev) - if (entry->outdev->ifindex == ifindex) - return 1; -#ifdef CONFIG_BRIDGE_NETFILTER - if (entry->skb->nf_bridge) { - if (entry->skb->nf_bridge->physindev && - entry->skb->nf_bridge->physindev->ifindex == ifindex) - return 1; - if (entry->skb->nf_bridge->physoutdev && - entry->skb->nf_bridge->physoutdev->ifindex == ifindex) - return 1; - } -#endif - return 0; -} - -static void -ipq_dev_drop(int ifindex) -{ - ipq_flush(dev_cmp, ifindex); -} - -#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) - -static inline void -__ipq_rcv_skb(struct sk_buff *skb) -{ - int status, type, pid, flags; - unsigned int nlmsglen, skblen; - struct nlmsghdr *nlh; - bool enable_timestamp = false; - - skblen = skb->len; - if (skblen < sizeof(*nlh)) - return; - - nlh = nlmsg_hdr(skb); - nlmsglen = nlh->nlmsg_len; - if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen) - return; - - pid = nlh->nlmsg_pid; - flags = nlh->nlmsg_flags; - - if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI) - RCV_SKB_FAIL(-EINVAL); - - if (flags & MSG_TRUNC) - RCV_SKB_FAIL(-ECOMM); - - type = nlh->nlmsg_type; - if (type < NLMSG_NOOP || type >= IPQM_MAX) - RCV_SKB_FAIL(-EINVAL); - - if (type <= IPQM_BASE) - return; - - if (!capable(CAP_NET_ADMIN)) - RCV_SKB_FAIL(-EPERM); - - spin_lock_bh(&queue_lock); - - if (peer_pid) { - if (peer_pid != pid) { - spin_unlock_bh(&queue_lock); - RCV_SKB_FAIL(-EBUSY); - } - } else { - enable_timestamp = true; - peer_pid = pid; - } - - spin_unlock_bh(&queue_lock); - if (enable_timestamp) - net_enable_timestamp(); - status = ipq_receive_peer(NLMSG_DATA(nlh), type, - nlmsglen - NLMSG_LENGTH(0)); - if (status < 0) - RCV_SKB_FAIL(status); - - if (flags & NLM_F_ACK) - netlink_ack(skb, nlh, 0); -} - -static void -ipq_rcv_skb(struct sk_buff *skb) -{ - mutex_lock(&ipqnl_mutex); - __ipq_rcv_skb(skb); - mutex_unlock(&ipqnl_mutex); -} - -static int -ipq_rcv_dev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct net_device *dev = ptr; - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - /* Drop any packets associated with the downed device */ - if (event == NETDEV_DOWN) - ipq_dev_drop(dev->ifindex); - return NOTIFY_DONE; -} - -static struct notifier_block ipq_dev_notifier = { - .notifier_call = ipq_rcv_dev_event, -}; - -static int -ipq_rcv_nl_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netlink_notify *n = ptr; - - if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) { - spin_lock_bh(&queue_lock); - if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) - __ipq_reset(); - spin_unlock_bh(&queue_lock); - } - return NOTIFY_DONE; -} - -static struct notifier_block ipq_nl_notifier = { - .notifier_call = ipq_rcv_nl_event, -}; - -#ifdef CONFIG_SYSCTL -static struct ctl_table_header *ipq_sysctl_header; - -static ctl_table ipq_table[] = { - { - .procname = NET_IPQ_QMAX_NAME, - .data = &queue_maxlen, - 
.maxlen = sizeof(queue_maxlen), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { } -}; -#endif - -#ifdef CONFIG_PROC_FS -static int ip_queue_show(struct seq_file *m, void *v) -{ - spin_lock_bh(&queue_lock); - - seq_printf(m, - "Peer PID : %d\n" - "Copy mode : %hu\n" - "Copy range : %u\n" - "Queue length : %u\n" - "Queue max. length : %u\n" - "Queue dropped : %u\n" - "Netlink dropped : %u\n", - peer_pid, - copy_mode, - copy_range, - queue_total, - queue_maxlen, - queue_dropped, - queue_user_dropped); - - spin_unlock_bh(&queue_lock); - return 0; -} - -static int ip_queue_open(struct inode *inode, struct file *file) -{ - return single_open(file, ip_queue_show, NULL); -} - -static const struct file_operations ip_queue_proc_fops = { - .open = ip_queue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; -#endif - -static const struct nf_queue_handler nfqh = { - .name = "ip_queue", - .outfn = &ipq_enqueue_packet, -}; - -static int __init ip_queue_init(void) -{ - int status = -ENOMEM; - struct proc_dir_entry *proc __maybe_unused; - - netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0, - ipq_rcv_skb, NULL, THIS_MODULE); - if (ipqnl == NULL) { - printk(KERN_ERR "ip_queue: failed to create netlink socket\n"); - goto cleanup_netlink_notifier; - } - -#ifdef CONFIG_PROC_FS - proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net, - &ip_queue_proc_fops); - if (!proc) { - printk(KERN_ERR "ip_queue: failed to create proc entry\n"); - goto cleanup_ipqnl; - } -#endif - register_netdevice_notifier(&ipq_dev_notifier); -#ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv4", ipq_table); -#endif - status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh); - if (status < 0) { - printk(KERN_ERR "ip_queue: failed to register queue handler\n"); - goto cleanup_sysctl; - } - return status; - -cleanup_sysctl: -#ifdef CONFIG_SYSCTL - unregister_net_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); -cleanup_ipqnl: __maybe_unused - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - -cleanup_netlink_notifier: - netlink_unregister_notifier(&ipq_nl_notifier); - return status; -} - -static void __exit ip_queue_fini(void) -{ - nf_unregister_queue_handlers(&nfqh); - - ipq_flush(NULL, 0); - -#ifdef CONFIG_SYSCTL - unregister_net_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); - - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - - netlink_unregister_notifier(&ipq_nl_notifier); -} - -MODULE_DESCRIPTION("IPv4 packet queue handler"); -MODULE_AUTHOR("James Morris "); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL); - -module_init(ip_queue_init); -module_exit(ip_queue_fini); diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index d33cddd16fbb..10135342799e 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -25,28 +25,6 @@ config NF_CONNTRACK_IPV6 To compile it as a module, choose M here. If unsure, say N. 
-config IP6_NF_QUEUE - tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)" - depends on INET && IPV6 && NETFILTER - depends on NETFILTER_ADVANCED - ---help--- - - This option adds a queue handler to the kernel for IPv6 - packets which enables users to receive the filtered packets - with QUEUE target using libipq. - - This option enables the old IPv6-only "ip6_queue" implementation - which has been obsoleted by the new "nfnetlink_queue" code (see - CONFIG_NETFILTER_NETLINK_QUEUE). - - (C) Fernando Anton 2001 - IPv64 Project - Work based in IPv64 draft by Arturo Azcorra. - Universidad Carlos III de Madrid - Universidad Politecnica de Alcala de Henares - email: . - - To compile it as a module, choose M here. If unsure, say N. - config IP6_NF_IPTABLES tristate "IP6 tables support (required for filtering)" depends on INET && IPV6 diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index d4dfd0a21097..534d3f216f7b 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o -obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c deleted file mode 100644 index 3ca9303b3a19..000000000000 --- a/net/ipv6/netfilter/ip6_queue.c +++ /dev/null @@ -1,641 +0,0 @@ -/* - * This is a module which is used for queueing IPv6 packets and - * communicating with userspace via netlink. - * - * (C) 2001 Fernando Anton, this code is GPL. - * IPv64 Project - Work based in IPv64 draft by Arturo Azcorra. - * Universidad Carlos III de Madrid - Leganes (Madrid) - Spain - * Universidad Politecnica de Alcala de Henares - Alcala de H. (Madrid) - Spain - * email: fanton@it.uc3m.es - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define IPQ_QMAX_DEFAULT 1024 -#define IPQ_PROC_FS_NAME "ip6_queue" -#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen" - -typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long); - -static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; -static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; -static DEFINE_SPINLOCK(queue_lock); -static int peer_pid __read_mostly; -static unsigned int copy_range __read_mostly; -static unsigned int queue_total; -static unsigned int queue_dropped = 0; -static unsigned int queue_user_dropped = 0; -static struct sock *ipqnl __read_mostly; -static LIST_HEAD(queue_list); -static DEFINE_MUTEX(ipqnl_mutex); - -static inline void -__ipq_enqueue_entry(struct nf_queue_entry *entry) -{ - list_add_tail(&entry->list, &queue_list); - queue_total++; -} - -static inline int -__ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status = 0; - - switch(mode) { - case IPQ_COPY_NONE: - case IPQ_COPY_META: - copy_mode = mode; - copy_range = 0; - break; - - case IPQ_COPY_PACKET: - if (range > 0xFFFF) - range = 0xFFFF; - copy_range = range; - copy_mode = mode; - break; - - default: - status = -EINVAL; - - } - return status; -} - -static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data); - -static inline void -__ipq_reset(void) -{ - peer_pid = 0; - net_disable_timestamp(); - __ipq_set_mode(IPQ_COPY_NONE, 0); - __ipq_flush(NULL, 0); -} - -static struct nf_queue_entry * -ipq_find_dequeue_entry(unsigned long id) -{ - struct nf_queue_entry *entry = NULL, *i; - - spin_lock_bh(&queue_lock); - - list_for_each_entry(i, &queue_list, list) { - if ((unsigned long)i == id) { - entry = i; - break; - } - } - - if (entry) { - list_del(&entry->list); - queue_total--; - } - - spin_unlock_bh(&queue_lock); - return entry; -} - -static void -__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - struct nf_queue_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &queue_list, list) { - if (!cmpfn || cmpfn(entry, data)) { - list_del(&entry->list); - queue_total--; - nf_reinject(entry, NF_DROP); - } - } -} - -static void -ipq_flush(ipq_cmpfn cmpfn, unsigned long data) -{ - spin_lock_bh(&queue_lock); - __ipq_flush(cmpfn, data); - spin_unlock_bh(&queue_lock); -} - -static struct sk_buff * -ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) -{ - sk_buff_data_t old_tail; - size_t size = 0; - size_t data_len = 0; - struct sk_buff *skb; - struct ipq_packet_msg *pmsg; - struct nlmsghdr *nlh; - struct timeval tv; - - switch (ACCESS_ONCE(copy_mode)) { - case IPQ_COPY_META: - case IPQ_COPY_NONE: - size = NLMSG_SPACE(sizeof(*pmsg)); - break; - - case IPQ_COPY_PACKET: - if (entry->skb->ip_summed == CHECKSUM_PARTIAL && - (*errp = skb_checksum_help(entry->skb))) - return NULL; - - data_len = ACCESS_ONCE(copy_range); - if (data_len == 0 || data_len > entry->skb->len) - data_len = entry->skb->len; - - size = NLMSG_SPACE(sizeof(*pmsg) + data_len); - break; - - default: - *errp = -EINVAL; - return NULL; - } - - skb = alloc_skb(size, GFP_ATOMIC); - if (!skb) - goto nlmsg_failure; - - old_tail = skb->tail; - nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); - pmsg = NLMSG_DATA(nlh); - memset(pmsg, 0, sizeof(*pmsg)); - - pmsg->packet_id = (unsigned long )entry; - pmsg->data_len = data_len; - tv = 
ktime_to_timeval(entry->skb->tstamp); - pmsg->timestamp_sec = tv.tv_sec; - pmsg->timestamp_usec = tv.tv_usec; - pmsg->mark = entry->skb->mark; - pmsg->hook = entry->hook; - pmsg->hw_protocol = entry->skb->protocol; - - if (entry->indev) - strcpy(pmsg->indev_name, entry->indev->name); - else - pmsg->indev_name[0] = '\0'; - - if (entry->outdev) - strcpy(pmsg->outdev_name, entry->outdev->name); - else - pmsg->outdev_name[0] = '\0'; - - if (entry->indev && entry->skb->dev && - entry->skb->mac_header != entry->skb->network_header) { - pmsg->hw_type = entry->skb->dev->type; - pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); - } - - if (data_len) - if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len)) - BUG(); - - nlh->nlmsg_len = skb->tail - old_tail; - return skb; - -nlmsg_failure: - kfree_skb(skb); - *errp = -EINVAL; - printk(KERN_ERR "ip6_queue: error creating packet message\n"); - return NULL; -} - -static int -ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) -{ - int status = -EINVAL; - struct sk_buff *nskb; - - if (copy_mode == IPQ_COPY_NONE) - return -EAGAIN; - - nskb = ipq_build_packet_message(entry, &status); - if (nskb == NULL) - return status; - - spin_lock_bh(&queue_lock); - - if (!peer_pid) - goto err_out_free_nskb; - - if (queue_total >= queue_maxlen) { - queue_dropped++; - status = -ENOSPC; - if (net_ratelimit()) - printk (KERN_WARNING "ip6_queue: fill at %d entries, " - "dropping packet(s). Dropped: %d\n", queue_total, - queue_dropped); - goto err_out_free_nskb; - } - - /* netlink_unicast will either free the nskb or attach it to a socket */ - status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT); - if (status < 0) { - queue_user_dropped++; - goto err_out_unlock; - } - - __ipq_enqueue_entry(entry); - - spin_unlock_bh(&queue_lock); - return status; - -err_out_free_nskb: - kfree_skb(nskb); - -err_out_unlock: - spin_unlock_bh(&queue_lock); - return status; -} - -static int -ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e) -{ - int diff; - struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload; - struct sk_buff *nskb; - - if (v->data_len < sizeof(*user_iph)) - return 0; - diff = v->data_len - e->skb->len; - if (diff < 0) { - if (pskb_trim(e->skb, v->data_len)) - return -ENOMEM; - } else if (diff > 0) { - if (v->data_len > 0xFFFF) - return -EINVAL; - if (diff > skb_tailroom(e->skb)) { - nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), - diff, GFP_ATOMIC); - if (!nskb) { - printk(KERN_WARNING "ip6_queue: OOM " - "in mangle, dropping packet\n"); - return -ENOMEM; - } - kfree_skb(e->skb); - e->skb = nskb; - } - skb_put(e->skb, diff); - } - if (!skb_make_writable(e->skb, v->data_len)) - return -ENOMEM; - skb_copy_to_linear_data(e->skb, v->payload, v->data_len); - e->skb->ip_summed = CHECKSUM_NONE; - - return 0; -} - -static int -ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) -{ - struct nf_queue_entry *entry; - - if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) - return -EINVAL; - - entry = ipq_find_dequeue_entry(vmsg->id); - if (entry == NULL) - return -ENOENT; - else { - int verdict = vmsg->value; - - if (vmsg->data_len && vmsg->data_len == len) - if (ipq_mangle_ipv6(vmsg, entry) < 0) - verdict = NF_DROP; - - nf_reinject(entry, verdict); - return 0; - } -} - -static int -ipq_set_mode(unsigned char mode, unsigned int range) -{ - int status; - - spin_lock_bh(&queue_lock); - status = __ipq_set_mode(mode, range); - spin_unlock_bh(&queue_lock); - return status; -} - -static int 
-ipq_receive_peer(struct ipq_peer_msg *pmsg, - unsigned char type, unsigned int len) -{ - int status = 0; - - if (len < sizeof(*pmsg)) - return -EINVAL; - - switch (type) { - case IPQM_MODE: - status = ipq_set_mode(pmsg->msg.mode.value, - pmsg->msg.mode.range); - break; - - case IPQM_VERDICT: - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; - default: - status = -EINVAL; - } - return status; -} - -static int -dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) -{ - if (entry->indev) - if (entry->indev->ifindex == ifindex) - return 1; - - if (entry->outdev) - if (entry->outdev->ifindex == ifindex) - return 1; -#ifdef CONFIG_BRIDGE_NETFILTER - if (entry->skb->nf_bridge) { - if (entry->skb->nf_bridge->physindev && - entry->skb->nf_bridge->physindev->ifindex == ifindex) - return 1; - if (entry->skb->nf_bridge->physoutdev && - entry->skb->nf_bridge->physoutdev->ifindex == ifindex) - return 1; - } -#endif - return 0; -} - -static void -ipq_dev_drop(int ifindex) -{ - ipq_flush(dev_cmp, ifindex); -} - -#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) - -static inline void -__ipq_rcv_skb(struct sk_buff *skb) -{ - int status, type, pid, flags; - unsigned int nlmsglen, skblen; - struct nlmsghdr *nlh; - bool enable_timestamp = false; - - skblen = skb->len; - if (skblen < sizeof(*nlh)) - return; - - nlh = nlmsg_hdr(skb); - nlmsglen = nlh->nlmsg_len; - if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen) - return; - - pid = nlh->nlmsg_pid; - flags = nlh->nlmsg_flags; - - if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI) - RCV_SKB_FAIL(-EINVAL); - - if (flags & MSG_TRUNC) - RCV_SKB_FAIL(-ECOMM); - - type = nlh->nlmsg_type; - if (type < NLMSG_NOOP || type >= IPQM_MAX) - RCV_SKB_FAIL(-EINVAL); - - if (type <= IPQM_BASE) - return; - - if (!capable(CAP_NET_ADMIN)) - RCV_SKB_FAIL(-EPERM); - - spin_lock_bh(&queue_lock); - - if (peer_pid) { - if (peer_pid != pid) { - spin_unlock_bh(&queue_lock); - RCV_SKB_FAIL(-EBUSY); - } - } else { - enable_timestamp = true; - peer_pid = pid; - } - - spin_unlock_bh(&queue_lock); - if (enable_timestamp) - net_enable_timestamp(); - - status = ipq_receive_peer(NLMSG_DATA(nlh), type, - nlmsglen - NLMSG_LENGTH(0)); - if (status < 0) - RCV_SKB_FAIL(status); - - if (flags & NLM_F_ACK) - netlink_ack(skb, nlh, 0); -} - -static void -ipq_rcv_skb(struct sk_buff *skb) -{ - mutex_lock(&ipqnl_mutex); - __ipq_rcv_skb(skb); - mutex_unlock(&ipqnl_mutex); -} - -static int -ipq_rcv_dev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct net_device *dev = ptr; - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - /* Drop any packets associated with the downed device */ - if (event == NETDEV_DOWN) - ipq_dev_drop(dev->ifindex); - return NOTIFY_DONE; -} - -static struct notifier_block ipq_dev_notifier = { - .notifier_call = ipq_rcv_dev_event, -}; - -static int -ipq_rcv_nl_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netlink_notify *n = ptr; - - if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) { - spin_lock_bh(&queue_lock); - if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) - __ipq_reset(); - spin_unlock_bh(&queue_lock); - } - return NOTIFY_DONE; -} - -static struct notifier_block ipq_nl_notifier = { - .notifier_call = ipq_rcv_nl_event, -}; - -#ifdef CONFIG_SYSCTL -static struct ctl_table_header *ipq_sysctl_header; - -static ctl_table ipq_table[] = { - { - .procname = NET_IPQ_QMAX_NAME, - .data = &queue_maxlen, - 
.maxlen = sizeof(queue_maxlen), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { } -}; -#endif - -#ifdef CONFIG_PROC_FS -static int ip6_queue_show(struct seq_file *m, void *v) -{ - spin_lock_bh(&queue_lock); - - seq_printf(m, - "Peer PID : %d\n" - "Copy mode : %hu\n" - "Copy range : %u\n" - "Queue length : %u\n" - "Queue max. length : %u\n" - "Queue dropped : %u\n" - "Netfilter dropped : %u\n", - peer_pid, - copy_mode, - copy_range, - queue_total, - queue_maxlen, - queue_dropped, - queue_user_dropped); - - spin_unlock_bh(&queue_lock); - return 0; -} - -static int ip6_queue_open(struct inode *inode, struct file *file) -{ - return single_open(file, ip6_queue_show, NULL); -} - -static const struct file_operations ip6_queue_proc_fops = { - .open = ip6_queue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; -#endif - -static const struct nf_queue_handler nfqh = { - .name = "ip6_queue", - .outfn = &ipq_enqueue_packet, -}; - -static int __init ip6_queue_init(void) -{ - int status = -ENOMEM; - struct proc_dir_entry *proc __maybe_unused; - - netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0, - ipq_rcv_skb, NULL, THIS_MODULE); - if (ipqnl == NULL) { - printk(KERN_ERR "ip6_queue: failed to create netlink socket\n"); - goto cleanup_netlink_notifier; - } - -#ifdef CONFIG_PROC_FS - proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net, - &ip6_queue_proc_fops); - if (!proc) { - printk(KERN_ERR "ip6_queue: failed to create proc entry\n"); - goto cleanup_ipqnl; - } -#endif - register_netdevice_notifier(&ipq_dev_notifier); -#ifdef CONFIG_SYSCTL - ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv6", ipq_table); -#endif - status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh); - if (status < 0) { - printk(KERN_ERR "ip6_queue: failed to register queue handler\n"); - goto cleanup_sysctl; - } - return status; - -cleanup_sysctl: -#ifdef CONFIG_SYSCTL - unregister_net_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); - -cleanup_ipqnl: __maybe_unused - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - -cleanup_netlink_notifier: - netlink_unregister_notifier(&ipq_nl_notifier); - return status; -} - -static void __exit ip6_queue_fini(void) -{ - nf_unregister_queue_handlers(&nfqh); - - ipq_flush(NULL, 0); - -#ifdef CONFIG_SYSCTL - unregister_net_sysctl_table(ipq_sysctl_header); -#endif - unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(&init_net, IPQ_PROC_FS_NAME); - - netlink_kernel_release(ipqnl); - mutex_lock(&ipqnl_mutex); - mutex_unlock(&ipqnl_mutex); - - netlink_unregister_notifier(&ipq_nl_notifier); -} - -MODULE_DESCRIPTION("IPv6 packet queue handler"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW); - -module_init(ip6_queue_init); -module_exit(ip6_queue_fini); -- cgit v1.2.2 From 91a0099c190107821292d3114c396ac858ad723e Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Wed, 25 Apr 2012 20:24:24 +0530 Subject: mac80211: fix rate control update on 2040 bss change The rate control update is never called on a 20/40 BSS change. The station should update its rate control on receiving a beacon with a different HT mode in the HT operation IE. Not doing so leads to sending frames at higher (HT40) rates while the AP is operating in a lower mode (HT20). Signed-off-by: Rajkumar Manoharan Signed-off-by: John W. 
Linville --- net/mac80211/mlme.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 03f93f958fa4..dbd4bd92c018 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -210,7 +210,7 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, disable_40 = true; if (sta && (!reconfig || - (disable_40 != !!(sta->sta.ht_cap.cap & + (disable_40 != !(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) { if (disable_40) -- cgit v1.2.2 From e87278e730b11e9852fe0fe967908ef5a4e6e6a0 Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Thu, 26 Apr 2012 15:01:06 -0700 Subject: mac80211: insert mesh peer after init Drivers need the station rate info when inserting a new sta_info. The patch "mac80211: refactor mesh peer initialization" wrongly assumed the rate info could be applied after insertion. After further review, this is clearly not the case. This fixes a regression where HT parameters were not applied before inserting the sta_info, causing performance degradation. Signed-off-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/mesh_plink.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 1ff2a5c63e43..f4124d7c556c 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -102,9 +102,6 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, set_sta_flag(sta, WLAN_STA_WME); - if (sta_info_insert(sta)) - return NULL; - return sta; } @@ -281,6 +278,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband; u32 rates, basic_rates = 0; struct sta_info *sta; + bool insert = false; sband = local->hw.wiphy->bands[band]; rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates); @@ -290,6 +288,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, sta = mesh_plink_alloc(sdata, addr); if (!sta) return NULL; + insert = true; } spin_lock_bh(&sta->lock); @@ -306,6 +305,9 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, rate_control_rate_init(sta); spin_unlock_bh(&sta->lock); + if (insert && sta_info_insert(sta)) + return NULL; + return sta; } -- cgit v1.2.2 From c7d258288f2ad11eaa9656f6f9f48272a102fd1b Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Thu, 26 Apr 2012 15:01:07 -0700 Subject: mac80211: don't transmit 40MHz frames to 20MHz peer If a mesh peer indicates it is operating as 20MHz-only in its HT operation IE, have the rate control algorithm respect this by disabling the equivalent bit in the ieee80211_sta HT capabilities. Signed-off-by: Thomas Pedersen Signed-off-by: John W. 
Linville --- net/mac80211/mesh_plink.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net') diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index f4124d7c556c..6209327840f7 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -302,6 +302,12 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, else memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); + if (elems->ht_operation) + if (!(elems->ht_operation->ht_param & + IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) + sta->sta.ht_cap.cap &= + ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + rate_control_rate_init(sta); spin_unlock_bh(&sta->lock); -- cgit v1.2.2 From d61992182e41e1beec0507fd7bce4ba1face12d6 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Mon, 23 Apr 2012 12:50:29 -0700 Subject: cfg80211: Add framework to support ethtool stats. Signed-off-by: Ben Greear Signed-off-by: John W. Linville --- net/wireless/ethtool.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'net') diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c index 9bde4d1d3e9b..7eecdf40cf80 100644 --- a/net/wireless/ethtool.c +++ b/net/wireless/ethtool.c @@ -68,6 +68,32 @@ static int cfg80211_set_ringparam(struct net_device *dev, return -ENOTSUPP; } +static int cfg80211_get_sset_count(struct net_device *dev, int sset) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + if (rdev->ops->get_et_sset_count) + return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset); + return -EOPNOTSUPP; +} + +static void cfg80211_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + if (rdev->ops->get_et_stats) + rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data); +} + +static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + if (rdev->ops->get_et_strings) + rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data); +} + const struct ethtool_ops cfg80211_ethtool_ops = { .get_drvinfo = cfg80211_get_drvinfo, .get_regs_len = cfg80211_get_regs_len, @@ -75,4 +101,7 @@ const struct ethtool_ops cfg80211_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ringparam = cfg80211_get_ringparam, .set_ringparam = cfg80211_set_ringparam, + .get_strings = cfg80211_get_strings, + .get_ethtool_stats = cfg80211_get_stats, + .get_sset_count = cfg80211_get_sset_count, }; -- cgit v1.2.2 From b1ab79255c539ebe740baa89f8a44ab139381e1c Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Mon, 23 Apr 2012 12:50:30 -0700 Subject: mac80211: Support getting sta_info stats via ethtool. This lets ethtool print out stats related to stations connected to the interface. Does not yet get stats from the underlying driver. Signed-off-by: Ben Greear Signed-off-by: John W. 
Linville --- net/mac80211/cfg.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 70b2af2315a6..31023ca17575 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -450,6 +450,94 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); } +static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "rx_bytes", "wep_weak_iv_count", + "rx_duplicates", "rx_fragments", "rx_dropped", + "tx_packets", "tx_bytes", "tx_fragments", + "tx_filtered", "tx_retry_failed", "tx_retries", + "beacon_loss" +}; +#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats) + +static int ieee80211_get_et_sset_count(struct wiphy *wiphy, + struct net_device *dev, + int sset) +{ + if (sset == ETH_SS_STATS) + return STA_STATS_LEN; + + return -EOPNOTSUPP; +} + +static void ieee80211_get_et_stats(struct wiphy *wiphy, + struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + struct ieee80211_local *local = sdata->local; + int i; + + memset(data, 0, sizeof(u64) * STA_STATS_LEN); + +#define ADD_STA_STATS(sta) \ + do { \ + data[i++] += sta->rx_packets; \ + data[i++] += sta->rx_bytes; \ + data[i++] += sta->wep_weak_iv_count; \ + data[i++] += sta->num_duplicates; \ + data[i++] += sta->rx_fragments; \ + data[i++] += sta->rx_dropped; \ + \ + data[i++] += sta->tx_packets; \ + data[i++] += sta->tx_bytes; \ + data[i++] += sta->tx_fragments; \ + data[i++] += sta->tx_filtered_count; \ + data[i++] += sta->tx_retry_failed; \ + data[i++] += sta->tx_retry_count; \ + data[i++] += sta->beacon_loss_count; \ + } while (0) + + /* For Managed stations, find the single station based on BSSID + * and use that. For interface types, iterate through all available + * stations and add stats for any station that is assigned to this + * network device. + */ + + rcu_read_lock(); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid); + if (sta && !WARN_ON(sta->sdata->dev != dev)) { + i = 0; + ADD_STA_STATS(sta); + BUG_ON(i != STA_STATS_LEN); + } + } else { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + /* Make sure this station belongs to the proper dev */ + if (sta->sdata->dev != dev) + continue; + + i = 0; + ADD_STA_STATS(sta); + BUG_ON(i != STA_STATS_LEN); + } + } + + rcu_read_unlock(); +} + +static void ieee80211_get_et_strings(struct wiphy *wiphy, + struct net_device *dev, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) { + int sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); + memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats); + } +} static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) @@ -2794,4 +2882,7 @@ struct cfg80211_ops mac80211_config_ops = { #ifdef CONFIG_PM .set_wakeup = ieee80211_set_wakeup, #endif + .get_et_sset_count = ieee80211_get_et_sset_count, + .get_et_stats = ieee80211_get_et_stats, + .get_et_strings = ieee80211_get_et_strings, }; -- cgit v1.2.2 From e352114fd62f6d568ca0cb18f589cb8df710cf02 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Mon, 23 Apr 2012 12:50:31 -0700 Subject: mac80211: Framework to get wifi-driver stats via ethtool. This adds hooks to call into the driver to get additional stats for the ethtool API. 
Signed-off-by: Ben Greear Signed-off-by: John W. Linville --- net/mac80211/cfg.c | 19 ++++++++++++++++--- net/mac80211/driver-ops.h | 37 +++++++++++++++++++++++++++++++++++++ net/mac80211/driver-trace.h | 15 +++++++++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 31023ca17575..a38b26730652 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -463,10 +463,17 @@ static int ieee80211_get_et_sset_count(struct wiphy *wiphy, struct net_device *dev, int sset) { + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int rv = 0; + if (sset == ETH_SS_STATS) - return STA_STATS_LEN; + rv += STA_STATS_LEN; - return -EOPNOTSUPP; + rv += drv_get_et_sset_count(sdata, sset); + + if (rv == 0) + return -EOPNOTSUPP; + return rv; } static void ieee80211_get_et_stats(struct wiphy *wiphy, @@ -527,16 +534,22 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, } rcu_read_unlock(); + + drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); } static void ieee80211_get_et_strings(struct wiphy *wiphy, struct net_device *dev, u32 sset, u8 *data) { + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int sz_sta_stats = 0; + if (sset == ETH_SS_STATS) { - int sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); + sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats); } + drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); } static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 4a0e559cb26b..6d33a0c743ab 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -35,6 +35,43 @@ static inline void drv_tx_frags(struct ieee80211_local *local, local->ops->tx_frags(&local->hw, vif, sta, skbs); } +static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, + u32 sset, u8 *data) +{ + struct ieee80211_local *local = sdata->local; + if (local->ops->get_et_strings) { + trace_drv_get_et_strings(local, sset); + local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data); + trace_drv_return_void(local); + } +} + +static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata, + struct ethtool_stats *stats, + u64 *data) +{ + struct ieee80211_local *local = sdata->local; + if (local->ops->get_et_stats) { + trace_drv_get_et_stats(local); + local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data); + trace_drv_return_void(local); + } +} + +static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, + int sset) +{ + struct ieee80211_local *local = sdata->local; + int rv = 0; + if (local->ops->get_et_sset_count) { + trace_drv_get_et_sset_count(local, sset); + rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif, + sset); + trace_drv_return_int(local, rv); + } + return rv; +} + static inline int drv_start(struct ieee80211_local *local) { int ret; diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 7c0754bed61b..6de00b2c268c 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h @@ -161,6 +161,21 @@ DEFINE_EVENT(local_only_evt, drv_start, TP_ARGS(local) ); +DEFINE_EVENT(local_u32_evt, drv_get_et_strings, + TP_PROTO(struct ieee80211_local *local, u32 sset), + TP_ARGS(local, sset) +); + +DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count, + TP_PROTO(struct ieee80211_local *local, u32 sset), + TP_ARGS(local, sset) +); + 
+DEFINE_EVENT(local_only_evt, drv_get_et_stats, + TP_PROTO(struct ieee80211_local *local), + TP_ARGS(local) +); + DEFINE_EVENT(local_only_evt, drv_suspend, TP_PROTO(struct ieee80211_local *local), TP_ARGS(local) -- cgit v1.2.2 From 3073a7c20cea0b7a9946fe61f09d43aa61deb9ea Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Mon, 23 Apr 2012 12:50:32 -0700 Subject: mac80211: Add more ethtools stats: survey, rates, etc The signal and noise are forced to be positive since ethtool deals in unsigned 64-bit values and this number should be human readable. This gives easy access to some of the data formerly exposed in the deprecated /proc/net/wireless file. Signed-off-by: Ben Greear Signed-off-by: John W. Linville --- net/mac80211/cfg.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 81 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a38b26730652..39b1fffb24f4 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -455,7 +455,9 @@ static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { "rx_duplicates", "rx_fragments", "rx_dropped", "tx_packets", "tx_bytes", "tx_fragments", "tx_filtered", "tx_retry_failed", "tx_retries", - "beacon_loss" + "beacon_loss", "sta_state", "txrate", "rxrate", "signal", + "channel", "noise", "ch_time", "ch_time_busy", + "ch_time_ext_busy", "ch_time_rx", "ch_time_tx" }; #define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats) @@ -484,7 +486,10 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; struct ieee80211_local *local = sdata->local; - int i; + struct station_info sinfo; + struct survey_info survey; + int i, q; +#define STA_STATS_SURVEY_LEN 7 memset(data, 0, sizeof(u64) * STA_STATS_LEN); @@ -516,11 +521,30 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, if (sdata->vif.type == NL80211_IFTYPE_STATION) { sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid); - if (sta && !WARN_ON(sta->sdata->dev != dev)) { - i = 0; - ADD_STA_STATS(sta); - BUG_ON(i != STA_STATS_LEN); - } + + if (!(sta && !WARN_ON(sta->sdata->dev != dev))) + goto do_survey; + + i = 0; + ADD_STA_STATS(sta); + + data[i++] = sta->sta_state; + + sinfo.filled = 0; + sta_set_sinfo(sta, &sinfo); + + if (sinfo.filled | STATION_INFO_TX_BITRATE) + data[i] = 100000 * + cfg80211_calculate_bitrate(&sinfo.txrate); + i++; + if (sinfo.filled | STATION_INFO_RX_BITRATE) + data[i] = 100000 * + cfg80211_calculate_bitrate(&sinfo.rxrate); + i++; + + if (sinfo.filled | STATION_INFO_SIGNAL_AVG) + data[i] = (u8)sinfo.signal_avg; + i++; } else { list_for_each_entry_rcu(sta, &local->sta_list, list) { /* Make sure this station belongs to the proper dev */ @@ -529,12 +553,61 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, i = 0; ADD_STA_STATS(sta); - BUG_ON(i != STA_STATS_LEN); } } +do_survey: + i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; + /* Get survey stats for current channel */ + q = 0; + while (true) { + survey.filled = 0; + if (drv_get_survey(local, q, &survey) != 0) { + survey.filled = 0; + break; + } + + if (survey.channel && + (local->oper_channel->center_freq == + survey.channel->center_freq)) + break; + q++; + } + + if (survey.filled) + data[i++] = survey.channel->center_freq; + else + data[i++] = 0; + if (survey.filled & SURVEY_INFO_NOISE_DBM) + data[i++] = (u8)survey.noise; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME) + data[i++] = survey.channel_time; + else + data[i++] = 
-1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY) + data[i++] = survey.channel_time_busy; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) + data[i++] = survey.channel_time_ext_busy; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX) + data[i++] = survey.channel_time_rx; + else + data[i++] = -1LL; + if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX) + data[i++] = survey.channel_time_tx; + else + data[i++] = -1LL; + rcu_read_unlock(); + if (WARN_ON(i != STA_STATS_LEN)) + return; + drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); } -- cgit v1.2.2 From ef9456a85dabe2222a4cb80fe8eef6361170c55d Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 30 Apr 2012 10:23:36 +0300 Subject: cfg80211: fix BSS comparison Since the BSS table is organized in a RB tree, the BSSs need to be comparable. This means that we must define a < and > operator to the BSS object. compare_ethr_addr isn't enough since it returns only a binary value. Since Felix's cfg80211: use compare_ether_addr on MAC addresses instead of memcmp Because of the constant size and guaranteed 16 bit alignment, the inline compare_ether_addr function is much cheaper than calling memcmp. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville The BSS table is corrupted: rb_find_bss can't find the bss. As a result BSSes are duplicated in the BSS table, and we get stuck while probing an AP before associating (in STA mode). Change-Id: I85928756f4328028230832c1565ece7f412f3843 CC: Felix Fietkau Signed-off-by: Emmanuel Grumbach Acked-by: Felix Fietkau Signed-off-by: John W. Linville --- net/wireless/scan.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 1442bb68a3f3..9dee87c0358c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -378,7 +378,11 @@ static int cmp_bss_core(struct cfg80211_bss *a, b->len_information_elements); } - return compare_ether_addr(a->bssid, b->bssid); + /* + * we can't use compare_ether_addr here since we need a < > operator. + * The binary return value of compare_ether_addr isn't enough + */ + return memcmp(a->bssid, b->bssid, sizeof(a->bssid)); } static int cmp_bss(struct cfg80211_bss *a, -- cgit v1.2.2 From 431e31542383b71bc5f2642572a1e6ef07f1bb87 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 30 Apr 2012 14:20:29 -0700 Subject: mac80211: Advertise HT protection mode in IEs Signed-off-by: Ashok Nagarajan Reviewed-by: Thomas Pedersen Signed-off-by: John W. 
Linville --- net/mac80211/ibss.c | 2 +- net/mac80211/ieee80211_i.h | 3 ++- net/mac80211/mesh.c | 3 ++- net/mac80211/util.c | 9 +++------ 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 61cd391c32a3..bb1a3e62a66a 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -164,7 +164,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, - chan, channel_type); + chan, channel_type, 0); } if (local->hw.queues >= IEEE80211_NUM_ACS) { diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6cd89d414f22..ae046b52d5e2 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1497,7 +1497,8 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap); u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type); + enum nl80211_channel_type channel_type, + u16 prot_mode); /* internal work items */ void ieee80211_work_init(struct ieee80211_local *local); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 598a96a3a051..8a952e04690a 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -396,7 +396,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb, return -ENOMEM; pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); - ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type); + ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type, + sdata->vif.bss_conf.ht_operation_mode); return 0; } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d9a747d387f0..22f2216b397e 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1663,7 +1663,8 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, struct ieee80211_channel *channel, - enum nl80211_channel_type channel_type) + enum nl80211_channel_type channel_type, + u16 prot_mode) { struct ieee80211_ht_operation *ht_oper; /* Build HT Information */ @@ -1689,11 +1690,7 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, channel_type != NL80211_CHAN_HT20) ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; - /* - * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and - * RIFS Mode are reserved in IBSS mode, therefore keep them at 0 - */ - ht_oper->operation_mode = 0x0000; + ht_oper->operation_mode = cpu_to_le16(prot_mode); ht_oper->stbc_param = 0x0000; /* It seems that Basic MCS set and Supported MCS set -- cgit v1.2.2 From 57aac7c51c07ca7a2361477f352af422259301bd Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 30 Apr 2012 14:20:30 -0700 Subject: mac80211: Implement HT mixed protection mode Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for HT mesh STA in a MBSS. Three HT protection modes are supported for now: non-HT mixed mode - is selected if any non-HT peers are present in our MBSS. 20MHz-protection mode - is selected if all peers in our 20/40MHz MBSS support HT and atleast one HT20 peer is present. no-protection mode - is selected otherwise. This is a limited implementation of 9.23.3.5, which only considers mesh peers when determining the HT protection mode. Station's channel_type needs to be maintained. 
Signed-off-by: Ashok Nagarajan Reviewed-by: Thomas Pedersen Signed-off-by: John W. Linville --- net/mac80211/mesh_plink.c | 83 +++++++++++++++++++++++++++++++++++++++++++---- net/mac80211/sta_info.h | 1 + 2 files changed, 77 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 6209327840f7..2e0ae7310697 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -105,6 +105,66 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, return sta; } +/** mesh_set_ht_prot_mode - set correct HT protection mode + * + * Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for + * HT mesh STA in a MBSS. Three HT protection modes are supported for now, + * non-HT mixed mode, 20MHz-protection and no-protection mode. non-HT mixed + * mode is selected if any non-HT peers are present in our MBSS. + * 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support + * HT and atleast one HT20 peer is present. Otherwise no-protection mode is + * selected. + */ +static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u32 changed = 0; + u16 ht_opmode; + bool non_ht_sta = false, ht20_sta = false; + + if (local->_oper_channel_type == NL80211_CHAN_NO_HT) + return 0; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata == sta->sdata && + sta->plink_state == NL80211_PLINK_ESTAB) { + switch (sta->ch_type) { + case NL80211_CHAN_NO_HT: + mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present", + sdata->vif.addr, sta->sta.addr); + non_ht_sta = true; + goto out; + case NL80211_CHAN_HT20: + mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present", + sdata->vif.addr, sta->sta.addr); + ht20_sta = true; + default: + break; + } + } + } +out: + rcu_read_unlock(); + + if (non_ht_sta) + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; + else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20) + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; + else + ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { + sdata->vif.bss_conf.ht_operation_mode = ht_opmode; + changed = BSS_CHANGED_HT; + mpl_dbg("mesh_plink %pM: protection mode changed to %d", + sdata->vif.addr, ht_opmode); + } + + return changed; +} + /** * __mesh_plink_deactivate - deactivate mesh peer link * @@ -302,11 +362,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, else memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); - if (elems->ht_operation) + if (elems->ht_operation) { if (!(elems->ht_operation->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + sta->ch_type = + ieee80211_ht_oper_to_channel_type(elems->ht_operation); + } rate_control_rate_init(sta); spin_unlock_bh(&sta->lock); @@ -495,9 +558,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m enum plink_event event; enum ieee80211_self_protected_actioncode ftype; size_t baselen; - bool deactivated, matches_local = true; + bool matches_local = true; u8 ie_len; u8 *baseaddr; + u32 changed = 0; __le16 plid, llid, reason; #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG static const char *mplstates[] = { @@ -783,7 +847,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m sta->plink_state = NL80211_PLINK_ESTAB; 
spin_unlock_bh(&sta->lock); mesh_plink_inc_estab_count(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= BSS_CHANGED_BEACON; mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); break; @@ -818,7 +883,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m sta->plink_state = NL80211_PLINK_ESTAB; spin_unlock_bh(&sta->lock); mesh_plink_inc_estab_count(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= BSS_CHANGED_BEACON; mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); mesh_plink_frame_tx(sdata, @@ -836,13 +902,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case CLS_ACPT: reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE); sta->reason = reason; - deactivated = __mesh_plink_deactivate(sta); + __mesh_plink_deactivate(sta); sta->plink_state = NL80211_PLINK_HOLDING; llid = sta->llid; mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); spin_unlock_bh(&sta->lock); - if (deactivated) - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + changed |= mesh_set_ht_prot_mode(sdata); + changed |= BSS_CHANGED_BEACON; mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, sta->sta.addr, llid, plid, reason); break; @@ -889,4 +955,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m } rcu_read_unlock(); + + if (changed) + ieee80211_bss_info_change_notify(sdata, changed); } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index f75f5d9ac06d..663dc90c4e31 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -362,6 +362,7 @@ struct sta_info { struct timer_list plink_timer; s64 t_offset; s64 t_offset_setpoint; + enum nl80211_channel_type ch_type; #endif #ifdef CONFIG_MAC80211_DEBUGFS -- cgit v1.2.2 From b91e64aad2c78b0477b9eb3a26335668b4032002 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 30 Apr 2012 14:20:31 -0700 Subject: mac80211: Allow nonHT/HT peering in mesh Now that we have protection enabled, allow non-HT and HT20 stations to peer with HT40+/- stations. Peering is still disallowed for HT40+/- mismatch. Signed-off-by: Ashok Nagarajan Reviewed-by: Thomas Pedersen Signed-off-by: John W. 
Linville --- net/mac80211/mesh.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 8a952e04690a..0fabb8bae9fb 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -76,6 +76,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; u32 basic_rates = 0; + enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT; /* * As support for each feature is added, check for matching @@ -102,10 +103,15 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, if (sdata->vif.bss_conf.basic_rates != basic_rates) goto mismatch; - /* disallow peering with mismatched channel types for now */ + if (ie->ht_operation) + sta_channel_type = + ieee80211_ht_oper_to_channel_type(ie->ht_operation); + + /* Disallow HT40+/- mismatch */ if (ie->ht_operation && - (local->_oper_channel_type != - ieee80211_ht_oper_to_channel_type(ie->ht_operation))) + local->_oper_channel_type > NL80211_CHAN_HT20 && + sta_channel_type > NL80211_CHAN_HT20 && + local->_oper_channel_type != sta_channel_type) goto mismatch; return true; -- cgit v1.2.2 From 70c33eaae79e53f9e48324736c0cb85534d3f093 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 30 Apr 2012 14:20:32 -0700 Subject: {nl,cfg,mac}80211: Allow user to see/configure HT protection mode This patch introduces a new mesh configuration parameter "ht_opmode" and will allow user to check the current HT protection mode selected. Users could configure the protection mode by the command "iw mesh_iface set mesh_param mesh_ht_protection_mode=2". The default protection mode of mesh is set to non-HT mixed mode. Signed-off-by: Ashok Nagarajan Reviewed-by: Thomas Pedersen Signed-off-by: John W. 
Linville --- net/mac80211/cfg.c | 5 +++++ net/mac80211/mesh.c | 3 +++ net/mac80211/mesh_plink.c | 1 + net/wireless/mesh.c | 1 + net/wireless/nl80211.c | 7 ++++++- 5 files changed, 16 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 39b1fffb24f4..0221270c0ddf 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1538,6 +1538,11 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, return -ENOTSUPP; conf->rssi_threshold = nconf->rssi_threshold; } + if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) { + conf->ht_opmode = nconf->ht_opmode; + sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode; + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); + } return 0; } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 0fabb8bae9fb..0a21e4e55f43 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -595,12 +595,15 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); ieee80211_queue_work(&local->hw, &sdata->work); + sdata->vif.bss_conf.ht_operation_mode = + ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sdata->local, sdata->local->hw.conf.channel->band); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); } diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 2e0ae7310697..8cc8461b48a0 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -157,6 +157,7 @@ out: if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { sdata->vif.bss_conf.ht_operation_mode = ht_opmode; + sdata->u.mesh.mshcfg.ht_opmode = ht_opmode; changed = BSS_CHANGED_HT; mpl_dbg("mesh_plink %pM: protection mode changed to %d", sdata->vif.addr, ht_opmode); diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 8c747fa9319b..2749cb86b462 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -61,6 +61,7 @@ const struct mesh_config default_mesh_config = { .dot11MeshGateAnnouncementProtocol = false, .dot11MeshForwarding = true, .rssi_threshold = MESH_RSSI_THRESHOLD, + .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED, }; const struct mesh_setup default_mesh_setup = { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d5005c59c472..b67b1114e25a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3390,7 +3390,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, cur_params.dot11MeshForwarding) || nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, - cur_params.rssi_threshold)) + cur_params.rssi_threshold) || + nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, + cur_params.ht_opmode)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -3426,6 +3428,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 }, [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 }, [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32}, + [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16}, }; static const struct nla_policy @@ -3523,6 +3526,8 @@ do {\ mask, NL80211_MESHCONF_FORWARDING, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32); + 
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, + mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16); if (mask_out) *mask_out = mask; -- cgit v1.2.2 From 84018f55ab883f03d41ec3c9ac7f0cc80830b20f Mon Sep 17 00:00:00 2001 From: Hans Schillstrom Date: Mon, 23 Apr 2012 03:35:26 +0000 Subject: netfilter: ip6_tables: add flags parameter to ipv6_find_hdr() This patch adds the flags parameter to ipv6_find_hdr. This flags allows us to: * know if this is a fragment. * stop at the AH header, so the information contained in that header can be used for some specific packet handling. This patch also adds the offset parameter for inspection of one inner IPv6 header that is contained in error messages. Signed-off-by: Hans Schillstrom Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/ip6_tables.c | 36 +++++++++++++++++++++++++++++++----- net/ipv6/netfilter/ip6t_ah.c | 4 ++-- net/ipv6/netfilter/ip6t_frag.c | 4 ++-- net/ipv6/netfilter/ip6t_hbh.c | 4 ++-- net/ipv6/netfilter/ip6t_rt.c | 4 ++-- net/netfilter/xt_TPROXY.c | 4 ++-- net/netfilter/xt_socket.c | 4 ++-- 7 files changed, 43 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index d4e350f72bbb..308bdd651230 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -133,7 +133,7 @@ ip6_packet_match(const struct sk_buff *skb, int protohdr; unsigned short _frag_off; - protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off); + protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL); if (protohdr < 0) { if (_frag_off == 0) *hotdrop = true; @@ -362,6 +362,7 @@ ip6t_do_table(struct sk_buff *skb, const struct xt_entry_match *ematch; IP_NF_ASSERT(e); + acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: @@ -2278,6 +2279,10 @@ static void __exit ip6_tables_fini(void) * if target < 0. "last header" is transport protocol header, ESP, or * "No next header". * + * Note that *offset is used as input/output parameter. an if it is not zero, + * then it must be a valid offset to an inner IPv6 header. This can be used + * to explore inner IPv6 header, eg. ICMPv6 error messages. + * * If target header is found, its offset is set in *offset and return protocol * number. Otherwise, return -1. * @@ -2289,17 +2294,33 @@ static void __exit ip6_tables_fini(void) * *offset is meaningless and fragment offset is stored in *fragoff if fragoff * isn't NULL. * + * if flags is not NULL and it's a fragment, then the frag flag IP6T_FH_F_FRAG + * will be set. If it's an AH header, the IP6T_FH_F_AUTH flag is set and + * target < 0, then this function will stop at the AH header. 
*/ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff) + int target, unsigned short *fragoff, int *flags) { unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); u8 nexthdr = ipv6_hdr(skb)->nexthdr; - unsigned int len = skb->len - start; + unsigned int len; if (fragoff) *fragoff = 0; + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + while (nexthdr != target) { struct ipv6_opt_hdr _hdr, *hp; unsigned int hdrlen; @@ -2316,6 +2337,9 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, if (nexthdr == NEXTHDR_FRAGMENT) { unsigned short _frag_off; __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= IP6T_FH_F_FRAG; fp = skb_header_pointer(skb, start+offsetof(struct frag_hdr, frag_off), @@ -2336,9 +2360,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, return -ENOENT; } hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0)) + break; hdrlen = (hp->hdrlen + 2) << 2; - else + } else hdrlen = ipv6_optlen(hp); nexthdr = hp->nexthdr; diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c index 89cccc5a9c92..04099ab7d2e3 100644 --- a/net/ipv6/netfilter/ip6t_ah.c +++ b/net/ipv6/netfilter/ip6t_ah.c @@ -41,11 +41,11 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par) struct ip_auth_hdr _ah; const struct ip_auth_hdr *ah; const struct ip6t_ah *ahinfo = par->matchinfo; - unsigned int ptr; + unsigned int ptr = 0; unsigned int hdrlen = 0; int err; - err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL); + err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index eda898fda6ca..3b5735e56bfe 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c @@ -40,10 +40,10 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par) struct frag_hdr _frag; const struct frag_hdr *fh; const struct ip6t_frag *fraginfo = par->matchinfo; - unsigned int ptr; + unsigned int ptr = 0; int err; - err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); + err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 59df051eaef6..01df142bb027 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c @@ -50,7 +50,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) const struct ipv6_opt_hdr *oh; const struct ip6t_opts *optinfo = par->matchinfo; unsigned int temp; - unsigned int ptr; + unsigned int ptr = 0; unsigned int hdrlen = 0; bool ret = false; u8 _opttype; @@ -62,7 +62,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) err = ipv6_find_hdr(skb, &ptr, (par->match == &hbh_mt6_reg[0]) ? 
- NEXTHDR_HOP : NEXTHDR_DEST, NULL); + NEXTHDR_HOP : NEXTHDR_DEST, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index d8488c50a8e0..2c99b94eeca3 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c @@ -42,14 +42,14 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) const struct ipv6_rt_hdr *rh; const struct ip6t_rt *rtinfo = par->matchinfo; unsigned int temp; - unsigned int ptr; + unsigned int ptr = 0; unsigned int hdrlen = 0; bool ret = false; struct in6_addr _addr; const struct in6_addr *ap; int err; - err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL); + err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 35a959a096e0..146033a86de8 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -282,10 +282,10 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) struct sock *sk; const struct in6_addr *laddr; __be16 lport; - int thoff; + int thoff = 0; int tproto; - tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); + tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 72bb07f57f97..9ea482d08cf7 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -263,10 +263,10 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) struct sock *sk; struct in6_addr *daddr, *saddr; __be16 dport, sport; - int thoff, tproto; + int thoff = 0, tproto; const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; - tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); + tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; -- cgit v1.2.2 From cf308a1fae432f315989e2da6878bfaa3daa22b1 Mon Sep 17 00:00:00 2001 From: Hans Schillstrom Date: Wed, 2 May 2012 07:49:47 +0000 Subject: netfilter: add xt_hmark target for hash-based skb marking The target allows you to create rules in the "raw" and "mangle" tables which set the skbuff mark by means of hash calculation within a given range. The nfmark can influence the routing method (see "Use netfilter MARK value as routing key") and can also be used by other subsystems to change their behaviour. [ Part of this patch has been refactorized and modified by Pablo Neira Ayuso ] Signed-off-by: Hans Schillstrom Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 15 ++ net/netfilter/Makefile | 1 + net/netfilter/xt_HMARK.c | 362 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 378 insertions(+) create mode 100644 net/netfilter/xt_HMARK.c (limited to 'net') diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 0c6f67e8f2e5..209c1ed43368 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -509,6 +509,21 @@ config NETFILTER_XT_TARGET_HL since you can easily create immortal packets that loop forever on the network. +config NETFILTER_XT_TARGET_HMARK + tristate '"HMARK" target support' + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) + depends on NETFILTER_ADVANCED + ---help--- + This option adds the "HMARK" target. 
+ + The target allows you to create rules in the "raw" and "mangle" tables + which set the skbuff mark by means of hash calculation within a given + range. The nfmark can influence the routing method (see "Use netfilter + MARK value as routing key") and can also be used by other subsystems to + change their behaviour. + + To compile it as a module, choose M here. If unsure, say N. + config NETFILTER_XT_TARGET_IDLETIMER tristate "IDLETIMER target support" depends on NETFILTER_ADVANCED diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index ca3676586f51..4e7960cc7b97 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o +obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c new file mode 100644 index 000000000000..32fbd735d02b --- /dev/null +++ b/net/netfilter/xt_HMARK.c @@ -0,0 +1,362 @@ +/* + * xt_HMARK - Netfilter module to set mark by means of hashing + * + * (C) 2012 by Hans Schillstrom + * (C) 2012 by Pablo Neira Ayuso + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include + +#include +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include +#endif +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +#include +#include +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hans Schillstrom "); +MODULE_DESCRIPTION("Xtables: packet marking using hash calculation"); +MODULE_ALIAS("ipt_HMARK"); +MODULE_ALIAS("ip6t_HMARK"); + +struct hmark_tuple { + u32 src; + u32 dst; + union hmark_ports uports; + uint8_t proto; +}; + +static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) +{ + return (addr32[0] & mask[0]) ^ + (addr32[1] & mask[1]) ^ + (addr32[2] & mask[2]) ^ + (addr32[3] & mask[3]); +} + +static inline u32 +hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) +{ + switch (l3num) { + case AF_INET: + return *addr32 & *mask; + case AF_INET6: + return hmark_addr6_mask(addr32, mask); + } + return 0; +} + +static int +hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, + const struct xt_hmark_info *info) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + struct nf_conntrack_tuple *otuple; + struct nf_conntrack_tuple *rtuple; + + if (ct == NULL || nf_ct_is_untracked(ct)) + return -1; + + otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; + rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; + + t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, + info->src_mask.all); + t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, + info->dst_mask.all); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) + return 0; + + t->proto = nf_ct_protonum(ct); + if (t->proto != IPPROTO_ICMP) { + t->uports.p16.src = otuple->src.u.all; + t->uports.p16.dst = rtuple->src.u.all; + t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | + info->port_set.v32; + if (t->uports.p16.dst < t->uports.p16.src) + swap(t->uports.p16.dst, 
t->uports.p16.src); + } + + return 0; +#else + return -1; +#endif +} + +static inline u32 +hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) +{ + u32 hash; + + if (t->dst < t->src) + swap(t->src, t->dst); + + hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); + hash = hash ^ (t->proto & info->proto_mask); + + return (hash % info->hmodulus) + info->hoffset; +} + +static void +hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, + struct hmark_tuple *t, const struct xt_hmark_info *info) +{ + int protoff; + + protoff = proto_ports_offset(t->proto); + if (protoff < 0) + return; + + nhoff += protoff; + if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) + return; + + t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | + info->port_set.v32; + + if (t->uports.p16.dst < t->uports.p16.src) + swap(t->uports.p16.dst, t->uports.p16.src); +} + +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +static int get_inner6_hdr(const struct sk_buff *skb, int *offset) +{ + struct icmp6hdr *icmp6h, _ih6; + + icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6); + if (icmp6h == NULL) + return 0; + + if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) { + *offset += sizeof(struct icmp6hdr); + return 1; + } + return 0; +} + +static int +hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, + const struct xt_hmark_info *info) +{ + struct ipv6hdr *ip6, _ip6; + int flag = IP6T_FH_F_AUTH; + unsigned int nhoff = 0; + u16 fragoff = 0; + int nexthdr; + + ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb)); + nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); + if (nexthdr < 0) + return 0; + /* No need to check for icmp errors on fragments */ + if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) + goto noicmp; + /* Use inner header in case of ICMP errors */ + if (get_inner6_hdr(skb, &nhoff)) { + ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6); + if (ip6 == NULL) + return -1; + /* If AH present, use SPI like in ESP. */ + flag = IP6T_FH_F_AUTH; + nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); + if (nexthdr < 0) + return -1; + } +noicmp: + t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); + t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) + return 0; + + t->proto = nexthdr; + if (t->proto == IPPROTO_ICMPV6) + return 0; + + if (flag & IP6T_FH_F_FRAG) + return 0; + + hmark_set_tuple_ports(skb, nhoff, t, info); + return 0; +} + +static unsigned int +hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct xt_hmark_info *info = par->targinfo; + struct hmark_tuple t; + + memset(&t, 0, sizeof(struct hmark_tuple)); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { + if (hmark_ct_set_htuple(skb, &t, info) < 0) + return XT_CONTINUE; + } else { + if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0) + return XT_CONTINUE; + } + + skb->mark = hmark_hash(&t, info); + return XT_CONTINUE; +} +#endif + +static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff) +{ + const struct icmphdr *icmph; + struct icmphdr _ih; + + /* Not enough header? */ + icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih); + if (icmph == NULL && icmph->type > NR_ICMP_TYPES) + return 0; + + /* Error message? 
*/ + if (icmph->type != ICMP_DEST_UNREACH && + icmph->type != ICMP_SOURCE_QUENCH && + icmph->type != ICMP_TIME_EXCEEDED && + icmph->type != ICMP_PARAMETERPROB && + icmph->type != ICMP_REDIRECT) + return 0; + + *nhoff += iphsz + sizeof(_ih); + return 1; +} + +static int +hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, + const struct xt_hmark_info *info) +{ + struct iphdr *ip, _ip; + int nhoff = skb_network_offset(skb); + + ip = (struct iphdr *) (skb->data + nhoff); + if (ip->protocol == IPPROTO_ICMP) { + /* Use inner header in case of ICMP errors */ + if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) { + ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip); + if (ip == NULL) + return -1; + } + } + + t->src = (__force u32) ip->saddr; + t->dst = (__force u32) ip->daddr; + + t->src &= info->src_mask.ip; + t->dst &= info->dst_mask.ip; + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) + return 0; + + t->proto = ip->protocol; + + /* ICMP has no ports, skip */ + if (t->proto == IPPROTO_ICMP) + return 0; + + /* follow-up fragments don't contain ports, skip all fragments */ + if (ip->frag_off & htons(IP_MF | IP_OFFSET)) + return 0; + + hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info); + + return 0; +} + +static unsigned int +hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct xt_hmark_info *info = par->targinfo; + struct hmark_tuple t; + + memset(&t, 0, sizeof(struct hmark_tuple)); + + if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { + if (hmark_ct_set_htuple(skb, &t, info) < 0) + return XT_CONTINUE; + } else { + if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0) + return XT_CONTINUE; + } + + skb->mark = hmark_hash(&t, info); + return XT_CONTINUE; +} + +static int hmark_tg_check(const struct xt_tgchk_param *par) +{ + const struct xt_hmark_info *info = par->targinfo; + + if (!info->hmodulus) { + pr_info("xt_HMARK: hash modulus can't be zero\n"); + return -EINVAL; + } + if (info->proto_mask && + (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { + pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); + return -EINVAL; + } + if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && + (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | + XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { + pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n"); + return -EINVAL; + } + if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && + (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | + XT_HMARK_FLAG(XT_HMARK_DPORT)))) { + pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); + return -EINVAL; + } + return 0; +} + +static struct xt_target hmark_tg_reg[] __read_mostly = { + { + .name = "HMARK", + .family = NFPROTO_IPV4, + .target = hmark_tg_v4, + .targetsize = sizeof(struct xt_hmark_info), + .checkentry = hmark_tg_check, + .me = THIS_MODULE, + }, +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) + { + .name = "HMARK", + .family = NFPROTO_IPV6, + .target = hmark_tg_v6, + .targetsize = sizeof(struct xt_hmark_info), + .checkentry = hmark_tg_check, + .me = THIS_MODULE, + }, +#endif +}; + +static int __init hmark_tg_init(void) +{ + return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); +} + +static void __exit hmark_tg_exit(void) +{ + xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); +} + +module_init(hmark_tg_init); +module_exit(hmark_tg_exit); -- cgit v1.2.2 From 7a909ac70f6b0823d9f23a43f19598d4b57ac901 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 7 May 2012 10:51:43 +0000 Subject: netfilter: limit, hashlimit: 
avoid duplicated inline credit_cap can be set to credit, which avoids inlining user2credits twice. Also, remove inline keyword and let compiler decide. old: 684 192 0 876 36c net/netfilter/xt_limit.o 4927 344 32 5303 14b7 net/netfilter/xt_hashlimit.o now: 668 192 0 860 35c net/netfilter/xt_limit.o 4793 344 32 5169 1431 net/netfilter/xt_hashlimit.o Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_hashlimit.c | 8 +++----- net/netfilter/xt_limit.c | 5 ++--- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index d95f9c963cde..2195eb0727a3 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -389,8 +389,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) /* Precision saver. */ -static inline u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -400,7 +399,7 @@ user2credits(u_int32_t user) return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } -static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) +static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) { dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; if (dh->rateinfo.credit > dh->rateinfo.credit_cap) @@ -535,8 +534,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) dh->rateinfo.prev = jiffies; dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); - dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * - hinfo->cfg.burst); + dh->rateinfo.credit_cap = dh->rateinfo.credit; dh->rateinfo.cost = user2credits(hinfo->cfg.avg); } else { /* update expiration timeout */ diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 32b7a579a032..5c22ce8ab309 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) } /* Precision saver. */ -static u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -123,7 +122,7 @@ static int limit_mt_check(const struct xt_mtchk_param *par) 128. */ priv->prev = jiffies; priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ - r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ + r->credit_cap = priv->credit; /* Credits full. */ r->cost = user2credits(r->avg); } return 0; -- cgit v1.2.2 From 817e076f61bca3d0270af60632d1fe07cd4919f1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 7 May 2012 10:51:44 +0000 Subject: netfilter: hashlimit: move rateinfo initialization to helper followup patch would bloat main match function too much. 
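For context on what these helpers feed, the limit/hashlimit matches implement a token bucket: credits refill at a fixed rate per jiffy, are clamped to a burst cap, and each matching packet spends a fixed cost. A stand-alone sketch of that arithmetic (a simplified model with assumed names, not the kernel implementation):

	#include <stdbool.h>
	#include <stdint.h>

	struct bucket {
		uint32_t credit;	/* tokens currently available */
		uint32_t credit_cap;	/* burst ceiling, equal to the initial credit */
		uint32_t cost;		/* tokens spent per packet */
		unsigned long prev;	/* time of the last refill, in jiffies */
	};

	static bool bucket_allow(struct bucket *b, unsigned long now,
				 uint32_t credits_per_jiffy)
	{
		/* refill in proportion to elapsed time, clamp to the burst cap */
		b->credit += (now - b->prev) * credits_per_jiffy;
		if (b->credit > b->credit_cap)
			b->credit = b->credit_cap;
		b->prev = now;

		/* a packet passes while enough credit remains */
		if (b->credit >= b->cost) {
			b->credit -= b->cost;
			return true;
		}
		return false;
	}

This mirrors rateinfo_recalc() plus the credit check in the match functions; the rateinfo_init() helper introduced below simply seeds credit, credit_cap and cost in one place instead of open-coding them at every hash-table insertion.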
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_hashlimit.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 2195eb0727a3..b6bbd0630e5f 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -407,6 +407,15 @@ static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) dh->rateinfo.prev = now; } +static void rateinfo_init(struct dsthash_ent *dh, + struct xt_hashlimit_htable *hinfo) +{ + dh->rateinfo.prev = jiffies; + dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); + dh->rateinfo.cost = user2credits(hinfo->cfg.avg); + dh->rateinfo.credit_cap = dh->rateinfo.credit; +} + static inline __be32 maskl(__be32 a, unsigned int l) { return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0; @@ -531,11 +540,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) goto hotdrop; } dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); - dh->rateinfo.prev = jiffies; - dh->rateinfo.credit = user2credits(hinfo->cfg.avg * - hinfo->cfg.burst); - dh->rateinfo.credit_cap = dh->rateinfo.credit; - dh->rateinfo.cost = user2credits(hinfo->cfg.avg); + rateinfo_init(dh, hinfo); } else { /* update expiration timeout */ dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); -- cgit v1.2.2 From 0197dee7d3182bb6b6a21955860dfa14fa022d84 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 7 May 2012 10:51:45 +0000 Subject: netfilter: hashlimit: byte-based limit mode can be used e.g. for ingress traffic policing or to detect when a host/port consumes more bandwidth than expected. This is done by optionally making cost to mean "cost per 16-byte-chunk-of-data" instead of "cost per packet". Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_hashlimit.c | 116 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 97 insertions(+), 19 deletions(-) (limited to 'net') diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index b6bbd0630e5f..d0424f9621f2 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -388,6 +388,18 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) +/* in byte mode, the lowest possible rate is one packet/second. + * credit_cap is used as a counter that tells us how many times we can + * refill the "credits available" counter when it becomes empty. + */ +#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ) +#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES) + +static u32 xt_hashlimit_len_to_chunks(u32 len) +{ + return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1; +} + /* Precision saver. 
*/ static u32 user2credits(u32 user) { @@ -399,21 +411,53 @@ static u32 user2credits(u32 user) return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } -static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) +static u32 user2credits_byte(u32 user) { - dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; - if (dh->rateinfo.credit > dh->rateinfo.credit_cap) - dh->rateinfo.credit = dh->rateinfo.credit_cap; + u64 us = user; + us *= HZ * CREDITS_PER_JIFFY_BYTES; + return (u32) (us >> 32); +} + +static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode) +{ + unsigned long delta = now - dh->rateinfo.prev; + u32 cap; + + if (delta == 0) + return; + dh->rateinfo.prev = now; + + if (mode & XT_HASHLIMIT_BYTES) { + u32 tmp = dh->rateinfo.credit; + dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta; + cap = CREDITS_PER_JIFFY_BYTES * HZ; + if (tmp >= dh->rateinfo.credit) {/* overflow */ + dh->rateinfo.credit = cap; + return; + } + } else { + dh->rateinfo.credit += delta * CREDITS_PER_JIFFY; + cap = dh->rateinfo.credit_cap; + } + if (dh->rateinfo.credit > cap) + dh->rateinfo.credit = cap; } static void rateinfo_init(struct dsthash_ent *dh, struct xt_hashlimit_htable *hinfo) { dh->rateinfo.prev = jiffies; - dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); - dh->rateinfo.cost = user2credits(hinfo->cfg.avg); - dh->rateinfo.credit_cap = dh->rateinfo.credit; + if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) { + dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ; + dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg); + dh->rateinfo.credit_cap = hinfo->cfg.burst; + } else { + dh->rateinfo.credit = user2credits(hinfo->cfg.avg * + hinfo->cfg.burst); + dh->rateinfo.cost = user2credits(hinfo->cfg.avg); + dh->rateinfo.credit_cap = dh->rateinfo.credit; + } } static inline __be32 maskl(__be32 a, unsigned int l) @@ -519,6 +563,21 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, return 0; } +static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh) +{ + u64 tmp = xt_hashlimit_len_to_chunks(len); + tmp = tmp * dh->rateinfo.cost; + + if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ)) + tmp = CREDITS_PER_JIFFY_BYTES * HZ; + + if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) { + dh->rateinfo.credit_cap--; + dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ; + } + return (u32) tmp; +} + static bool hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) { @@ -527,6 +586,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) unsigned long now = jiffies; struct dsthash_ent *dh; struct dsthash_dst dst; + u32 cost; if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) goto hotdrop; @@ -544,12 +604,17 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) } else { /* update expiration timeout */ dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); - rateinfo_recalc(dh, now); + rateinfo_recalc(dh, now, hinfo->cfg.mode); } - if (dh->rateinfo.credit >= dh->rateinfo.cost) { + if (info->cfg.mode & XT_HASHLIMIT_BYTES) + cost = hashlimit_byte_cost(skb->len, dh); + else + cost = dh->rateinfo.cost; + + if (dh->rateinfo.credit >= cost) { /* below the limit */ - dh->rateinfo.credit -= dh->rateinfo.cost; + dh->rateinfo.credit -= cost; spin_unlock(&dh->lock); rcu_read_unlock_bh(); return !(info->cfg.mode & XT_HASHLIMIT_INVERT); @@ -571,14 +636,6 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par) struct xt_hashlimit_mtinfo1 *info = par->matchinfo; int 
ret; - /* Check for overflow. */ - if (info->cfg.burst == 0 || - user2credits(info->cfg.avg * info->cfg.burst) < - user2credits(info->cfg.avg)) { - pr_info("overflow, try lower: %u/%u\n", - info->cfg.avg, info->cfg.burst); - return -ERANGE; - } if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) return -EINVAL; if (info->name[sizeof(info->name)-1] != '\0') @@ -591,6 +648,26 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par) return -EINVAL; } + if (info->cfg.mode >= XT_HASHLIMIT_MAX) { + pr_info("Unknown mode mask %X, kernel too old?\n", + info->cfg.mode); + return -EINVAL; + } + + /* Check for overflow. */ + if (info->cfg.mode & XT_HASHLIMIT_BYTES) { + if (user2credits_byte(info->cfg.avg) == 0) { + pr_info("overflow, rate too high: %u\n", info->cfg.avg); + return -EINVAL; + } + } else if (info->cfg.burst == 0 || + user2credits(info->cfg.avg * info->cfg.burst) < + user2credits(info->cfg.avg)) { + pr_info("overflow, try lower: %u/%u\n", + info->cfg.avg, info->cfg.burst); + return -ERANGE; + } + mutex_lock(&hashlimit_mutex); info->hinfo = htable_find_get(net, info->name, par->family); if (info->hinfo == NULL) { @@ -683,10 +760,11 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { int res; + const struct xt_hashlimit_htable *ht = s->private; spin_lock(&ent->lock); /* recalculate to show accurate numbers */ - rateinfo_recalc(ent, jiffies); + rateinfo_recalc(ent, jiffies, ht->cfg.mode); switch (family) { case NFPROTO_IPV4: -- cgit v1.2.2 From 28b29801b95de20405b2a11945cb6dfc7c6c84f9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:46 +0000 Subject: 802: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/802/stp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/802/stp.c b/net/802/stp.c index 15540b7323cd..2c40ba0ec116 100644 --- a/net/802/stp.c +++ b/net/802/stp.c @@ -46,7 +46,7 @@ static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev, proto = rcu_dereference(garp_protos[eh->h_dest[5] - GARP_ADDR_MIN]); if (proto && - compare_ether_addr(eh->h_dest, proto->group_address)) + !ether_addr_equal(eh->h_dest, proto->group_address)) goto err; } else proto = rcu_dereference(stp_proto); -- cgit v1.2.2 From 53a2b3a18d7ec8fc026bbcb59ed722f780b5abcc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:47 +0000 Subject: 8021q: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. 
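The polarity flip in the cocci rules below follows from the two helpers' return conventions: compare_ether_addr() is memcmp-like (zero means "equal"), while ether_addr_equal() returns a bool that is true on equality. A simplified model of the pair (not the actual <linux/etherdevice.h> implementations, which compare 16-bit words for efficiency):

	#include <stdbool.h>
	#include <string.h>

	/* memcmp-like convention: zero means the addresses are equal */
	static int compare_ether_addr_model(const unsigned char *a,
					    const unsigned char *b)
	{
		return memcmp(a, b, 6 /* ETH_ALEN */) != 0;
	}

	/* boolean convention: true means the addresses are equal */
	static bool ether_addr_equal_model(const unsigned char *a,
					   const unsigned char *b)
	{
		return memcmp(a, b, 6 /* ETH_ALEN */) == 0;
	}

Hence !compare_ether_addr(a, b) and compare_ether_addr(a, b) == 0 both become ether_addr_equal(a, b), and the remaining rules handle the negated forms.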
Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/8021q/vlan.c | 10 +++++----- net/8021q/vlan_core.c | 3 +-- net/8021q/vlan_dev.c | 10 +++++----- 3 files changed, 11 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index efea35b02e7f..6089f0cf23b4 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -266,19 +266,19 @@ static void vlan_sync_address(struct net_device *dev, struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); /* May be called without an actual change */ - if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) + if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) return; /* vlan address was different from the old address and is equal to * the new address */ - if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && - !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) + if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_del(dev, vlandev->dev_addr); /* vlan address was equal to the old address and is different from * the new address */ - if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && - compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) + if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4d39d802be2c..8ca533c95de0 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -31,8 +31,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) /* Our lower layer thinks this is not local, let's make sure. * This allows the VLAN to have a different MAC than the * underlying device, and still route correctly. 
*/ - if (!compare_ether_addr(eth_hdr(skb)->h_dest, - vlan_dev->dev_addr)) + if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) skb->pkt_type = PACKET_HOST; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9988d4abb372..eaf5f21b0ef7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -277,7 +277,7 @@ static int vlan_dev_open(struct net_device *dev) !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; @@ -307,7 +307,7 @@ clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); del_unicast: - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); @@ -326,7 +326,7 @@ static int vlan_dev_stop(struct net_device *dev) if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); netif_carrier_off(dev); @@ -345,13 +345,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p) if (!(dev->flags & IFF_UP)) goto out; - if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { + if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) { err = dev_uc_add(real_dev, addr->sa_data); if (err < 0) return err; } - if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: -- cgit v1.2.2 From 171fe5ef140571c7465e625e9cd9fdf3340ad20e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:48 +0000 Subject: bridge: netfilter: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Acked-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/bridge/netfilter/ebt_stp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 5b33a2e634a6..071d87214dde 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c @@ -164,8 +164,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par) !(info->bitmask & EBT_STP_MASK)) return -EINVAL; /* Make sure the match only receives stp frames */ - if (compare_ether_addr(e->destmac, bridge_ula) || - compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) + if (!ether_addr_equal(e->destmac, bridge_ula) || + !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC)) return -EINVAL; return 0; -- cgit v1.2.2 From 9a7b6ef9b9938a1f5cb91ccc0b713b9443edc79f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:49 +0000 Subject: bridge: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/bridge/br_device.c | 2 +- net/bridge/br_fdb.c | 14 +++++++------- net/bridge/br_input.c | 2 +- net/bridge/br_stp_bpdu.c | 2 +- net/bridge/br_stp_if.c | 11 +++++------ 5 files changed, 15 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d6e5929458b1..929e48aed444 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -170,7 +170,7 @@ static int br_set_mac_address(struct net_device *dev, void *p) return -EADDRNOTAVAIL; spin_lock_bh(&br->lock); - if (compare_ether_addr(dev->dev_addr, addr->sa_data)) { + if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); br_fdb_change_mac_address(br, addr->sa_data); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 5945c54bc2de..d21f32383517 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -107,8 +107,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) struct net_bridge_port *op; list_for_each_entry(op, &br->port_list, list) { if (op != p && - !compare_ether_addr(op->dev->dev_addr, - f->addr.addr)) { + ether_addr_equal(op->dev->dev_addr, + f->addr.addr)) { f->dst = op; goto insert; } @@ -214,8 +214,8 @@ void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *op; list_for_each_entry(op, &br->port_list, list) { if (op != p && - !compare_ether_addr(op->dev->dev_addr, - f->addr.addr)) { + ether_addr_equal(op->dev->dev_addr, + f->addr.addr)) { f->dst = op; goto skip_delete; } @@ -237,7 +237,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) { - if 
(!compare_ether_addr(fdb->addr.addr, addr)) { + if (ether_addr_equal(fdb->addr.addr, addr)) { if (unlikely(has_expired(br, fdb))) break; return fdb; @@ -331,7 +331,7 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry(fdb, h, head, hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) + if (ether_addr_equal(fdb->addr.addr, addr)) return fdb; } return NULL; @@ -344,7 +344,7 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, struct net_bridge_fdb_entry *fdb; hlist_for_each_entry_rcu(fdb, h, head, hlist) { - if (!compare_ether_addr(fdb->addr.addr, addr)) + if (ether_addr_equal(fdb->addr.addr, addr)) return fdb; } return NULL; diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 5a31731be4d0..76f15fda0212 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -216,7 +216,7 @@ forward: } /* fall through */ case BR_STATE_LEARNING: - if (!compare_ether_addr(p->br->dev->dev_addr, dest)) + if (ether_addr_equal(p->br->dev->dev_addr, dest)) skb->pkt_type = PACKET_HOST; NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index e16aade51ae0..fd30a6022dea 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -167,7 +167,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, if (p->state == BR_STATE_DISABLED) goto out; - if (compare_ether_addr(dest, br->group_addr) != 0) + if (!ether_addr_equal(dest, br->group_addr)) goto out; buf = skb_pull(skb, 3); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index f494496373d6..9d5a414a3943 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -178,7 +178,7 @@ void br_stp_set_enabled(struct net_bridge *br, unsigned long val) /* called under bridge lock */ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) { - /* should be aligned on 2 bytes for compare_ether_addr() */ + /* should be aligned on 2 bytes for ether_addr_equal() */ unsigned short oldaddr_aligned[ETH_ALEN >> 1]; unsigned char *oldaddr = (unsigned char *)oldaddr_aligned; struct net_bridge_port *p; @@ -191,12 +191,11 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) memcpy(br->dev->dev_addr, addr, ETH_ALEN); list_for_each_entry(p, &br->port_list, list) { - if (!compare_ether_addr(p->designated_bridge.addr, oldaddr)) + if (ether_addr_equal(p->designated_bridge.addr, oldaddr)) memcpy(p->designated_bridge.addr, addr, ETH_ALEN); - if (!compare_ether_addr(p->designated_root.addr, oldaddr)) + if (ether_addr_equal(p->designated_root.addr, oldaddr)) memcpy(p->designated_root.addr, addr, ETH_ALEN); - } br_configuration_update(br); @@ -205,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) br_become_root_bridge(br); } -/* should be aligned on 2 bytes for compare_ether_addr() */ +/* should be aligned on 2 bytes for ether_addr_equal() */ static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; /* called under bridge lock */ @@ -227,7 +226,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br) } - if (compare_ether_addr(br->bridge_id.addr, addr) == 0) + if (ether_addr_equal(br->bridge_id.addr, addr)) return false; /* no change */ br_stp_change_bridge_id(br, addr); -- cgit v1.2.2 From 150238ebb4a1ad2c3d09aea754720ad2e898c59f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:50 +0000 Subject: atm: Convert compare_ether_addr 
to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/atm/lec.c | 6 +++--- net/atm/mpc.c | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/atm/lec.c b/net/atm/lec.c index f1964caa0f83..bb35cb76b3af 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1255,7 +1255,7 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, struct sk_buff *skb; struct lec_priv *priv = netdev_priv(dev); - if (compare_ether_addr(lan_dst, dev->dev_addr)) + if (!ether_addr_equal(lan_dst, dev->dev_addr)) return 0; /* not our mac address */ kfree(priv->tlvs); /* NULL if there was no previous association */ @@ -1662,7 +1662,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; hlist_for_each_entry(entry, node, head, next) { - if (!compare_ether_addr(mac_addr, entry->mac_addr)) + if (ether_addr_equal(mac_addr, entry->mac_addr)) return entry; } return NULL; @@ -1849,7 +1849,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, case 1: return priv->mcast_vcc; case 2: /* LANE2 wants arp for multicast addresses */ - if (!compare_ether_addr(mac_to_find, bus_mac)) + if (ether_addr_equal(mac_to_find, bus_mac)) return priv->mcast_vcc; break; default: diff --git a/net/atm/mpc.c b/net/atm/mpc.c index aa972409f093..d4cc1be5c364 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -592,8 +592,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb, goto non_ip; while (i < mpc->number_of_mps_macs) { - if (!compare_ether_addr(eth->h_dest, - (mpc->mps_macs + i*ETH_ALEN))) + if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN)) if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */ return NETDEV_TX_OK; i++; -- cgit v1.2.2 From c47fc9814ca15cc075f1f09e8c069b041f2ea397 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:51 +0000 Subject: bluetooth: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/bluetooth/bnep/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index a779ec703323..88884d1d95fd 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -69,7 +69,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst) BT_DBG(""); list_for_each_entry(s, &bnep_session_list, list) - if (!compare_ether_addr(dst, s->eh.h_source)) + if (ether_addr_equal(dst, s->eh.h_source)) return s; return NULL; @@ -422,10 +422,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) iv[il++] = (struct kvec) { &type, 1 }; len++; - if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source)) + if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source)) type |= 0x01; - if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest)) + if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest)) type |= 0x02; if (type) -- cgit v1.2.2 From b203ca39126bad99583c908be587df067820a1ea Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:52 +0000 Subject: mac80211: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/mac80211/cfg.c | 2 +- net/mac80211/ibss.c | 12 ++++++------ net/mac80211/ieee80211_i.h | 2 +- net/mac80211/iface.c | 2 +- net/mac80211/mesh.c | 4 ++-- net/mac80211/mesh_hwmp.c | 14 +++++++------- net/mac80211/mesh_pathtbl.c | 12 ++++++------ net/mac80211/mlme.c | 29 +++++++++++++---------------- net/mac80211/rx.c | 35 ++++++++++++++++------------------- net/mac80211/scan.c | 2 +- net/mac80211/sta_info.c | 8 ++++---- net/mac80211/status.c | 2 +- net/mac80211/tx.c | 11 +++++------ 13 files changed, 64 insertions(+), 71 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 70b2af2315a6..7e6781f8c57e 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -919,7 +919,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, } else sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (compare_ether_addr(mac, sdata->vif.addr) == 0) + if (ether_addr_equal(mac, sdata->vif.addr)) return -EINVAL; if (is_multicast_ether_addr(mac)) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 61cd391c32a3..d307d3b3e32a 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -66,7 +66,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, skb_reset_tail_pointer(skb); skb_reserve(skb, sdata->local->hw.extra_tx_headroom); - if (compare_ether_addr(ifibss->bssid, bssid)) + if (!ether_addr_equal(ifibss->bssid, bssid)) sta_info_flush(sdata->local, sdata); /* if merging, indicate to driver that we leave the old IBSS */ @@ -315,7 +315,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, return NULL; } - if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) { + if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); return NULL; } @@ -401,7 +401,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, return; if (sdata->vif.type == NL80211_IFTYPE_ADHOC && - compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) { + ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); @@ -506,7 +506,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, goto put_bss; /* same BSSID */ - if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0) + if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) goto put_bss; if (rx_status->flag & RX_FLAG_MACTIME_MPDU) { @@ -591,7 +591,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) return; - if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) + if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) return; sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); @@ -829,7 +829,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; - if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 && + if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) && !is_broadcast_ether_addr(mgmt->bssid)) return; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6cd89d414f22..fabee974bf6d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1195,7 +1195,7 @@ static inline struct ieee80211_local *hw_to_local( static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) { - return compare_ether_addr(raddr, addr) == 0 || + return ether_addr_equal(raddr, addr) || is_broadcast_ether_addr(raddr); } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 
3e05a8bfddf0..856237c5c1f8 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -127,7 +127,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, * The remaining checks are only performed for interfaces * with the same MAC address. */ - if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr)) continue; /* diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 598a96a3a051..775627166e48 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -209,7 +209,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, kmem_cache_free(rm_cache, p); --entries; } else if ((seqnum == p->seqnum) && - (compare_ether_addr(sa, p->sa) == 0)) + (ether_addr_equal(sa, p->sa))) return -1; } @@ -639,7 +639,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, /* ignore ProbeResp to foreign address */ if (stype == IEEE80211_STYPE_PROBE_RESP && - compare_ether_addr(mgmt->da, sdata->vif.addr)) + !ether_addr_equal(mgmt->da, sdata->vif.addr)) return; baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 503016f58631..27e0c2f06795 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -422,7 +422,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, new_metric = MAX_METRIC; exp_time = TU_TO_EXP_TIME(orig_lifetime); - if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) { + if (ether_addr_equal(orig_addr, sdata->vif.addr)) { /* This MP is the originator, we are not interested in this * frame, except for updating transmitter's path info. */ @@ -472,7 +472,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, /* Update and check transmitter routing info */ ta = mgmt->sa; - if (compare_ether_addr(orig_addr, ta) == 0) + if (ether_addr_equal(orig_addr, ta)) fresh_info = false; else { fresh_info = true; @@ -533,7 +533,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, mhwmp_dbg("received PREQ from %pM", orig_addr); - if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) { + if (ether_addr_equal(target_addr, sdata->vif.addr)) { mhwmp_dbg("PREQ is for us"); forward = false; reply = true; @@ -631,7 +631,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem)); orig_addr = PREP_IE_ORIG_ADDR(prep_elem); - if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) + if (ether_addr_equal(orig_addr, sdata->vif.addr)) /* destination, no forwarding required */ return; @@ -709,7 +709,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, spin_lock_bh(&mpath->state_lock); sta = next_hop_deref_protected(mpath); if (mpath->flags & MESH_PATH_ACTIVE && - compare_ether_addr(ta, sta->sta.addr) == 0 && + ether_addr_equal(ta, sta->sta.addr) && (!(mpath->flags & MESH_PATH_SN_VALID) || SN_GT(target_sn, mpath->sn))) { mpath->flags &= ~MESH_PATH_ACTIVE; @@ -756,7 +756,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, metric = le32_to_cpu(rann->rann_metric); /* Ignore our own RANNs */ - if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) + if (ether_addr_equal(orig_addr, sdata->vif.addr)) return; mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)", @@ -1099,7 +1099,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, if (time_after(jiffies, mpath->exp_time - 
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && - !compare_ether_addr(sdata->vif.addr, hdr->addr4) && + ether_addr_equal(sdata->vif.addr, hdr->addr4) && !(mpath->flags & MESH_PATH_RESOLVING) && !(mpath->flags & MESH_PATH_FIXED)) mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index baa6096c66b4..b39224d8255c 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -348,7 +348,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst, hlist_for_each_entry_rcu(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(dst, mpath->dst) == 0) { + ether_addr_equal(dst, mpath->dst)) { if (MPATH_EXPIRED(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; @@ -517,7 +517,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) int err = 0; u32 hash_idx; - if (compare_ether_addr(dst, sdata->vif.addr) == 0) + if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return -ENOTSUPP; @@ -561,7 +561,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(dst, mpath->dst) == 0) + ether_addr_equal(dst, mpath->dst)) goto err_exists; } @@ -652,7 +652,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) int err = 0; u32 hash_idx; - if (compare_ether_addr(dst, sdata->vif.addr) == 0) + if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return -ENOTSUPP; @@ -690,7 +690,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(dst, mpath->dst) == 0) + ether_addr_equal(dst, mpath->dst)) goto err_exists; } @@ -884,7 +884,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) hlist_for_each_entry(node, n, bucket, list) { mpath = node->mpath; if (mpath->sdata == sdata && - compare_ether_addr(addr, mpath->dst) == 0) { + ether_addr_equal(addr, mpath->dst)) { __mesh_path_del(tbl, node); goto enddel; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 03f93f958fa4..b1c617fdabd6 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1776,7 +1776,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN); - if (compare_ether_addr(bssid, mgmt->bssid)) + if (!ether_addr_equal(bssid, mgmt->bssid)) return RX_MGMT_NONE; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); @@ -1853,7 +1853,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, return RX_MGMT_NONE; if (!ifmgd->associated || - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid)) + !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return RX_MGMT_NONE; bssid = ifmgd->associated->bssid; @@ -1886,7 +1886,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, return RX_MGMT_NONE; if (!ifmgd->associated || - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid)) + !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return RX_MGMT_NONE; reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); @@ -2113,7 +2113,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, if (!assoc_data) return RX_MGMT_NONE; - if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid)) + if 
(!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) return RX_MGMT_NONE; /* @@ -2193,8 +2193,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, bool need_ps = false; if (sdata->u.mgd.associated && - compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid) - == 0) { + ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) { bss = (void *)sdata->u.mgd.associated->priv; /* not previously set so we may need to recalc */ need_ps = !bss->dtim_period; @@ -2249,7 +2248,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ASSERT_MGD_MTX(ifmgd); - if (compare_ether_addr(mgmt->da, sdata->vif.addr)) + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; @@ -2262,12 +2261,11 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); if (ifmgd->associated && - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0) + ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) ieee80211_reset_ap_probe(sdata); if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies && - compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid) - == 0) { + ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) { /* got probe response, continue with auth */ printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name); ifmgd->auth_data->tries = 0; @@ -2324,8 +2322,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, return; if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && - compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid) - == 0) { + ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); @@ -2340,7 +2337,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, } if (!ifmgd->associated || - compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid)) + !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) return; bssid = ifmgd->associated->bssid; @@ -3166,7 +3163,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, return err; } } else - WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, cbss->bssid)); + WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid)); return 0; } @@ -3306,7 +3303,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, bool match; /* keep sta info, bssid if matching */ - match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0; + match = ether_addr_equal(ifmgd->bssid, req->bss->bssid); ieee80211_destroy_auth_data(sdata, match); } @@ -3466,7 +3463,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, sdata->name, req->bssid, req->reason_code); if (ifmgd->associated && - compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0) + ether_addr_equal(ifmgd->associated->bssid, req->bssid)) ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, req->reason_code, true, frame_buf); else diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d5ac02fe37ff..26bfd6fcaa48 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -492,12 +492,12 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) if (ieee80211_has_tods(hdr->frame_control) || !ieee80211_has_fromds(hdr->frame_control)) return RX_DROP_MONITOR; - if (compare_ether_addr(hdr->addr3, dev_addr) == 0) + if (ether_addr_equal(hdr->addr3, dev_addr)) return 
RX_DROP_MONITOR; } else { if (!ieee80211_has_a4(hdr->frame_control)) return RX_DROP_MONITOR; - if (compare_ether_addr(hdr->addr4, dev_addr) == 0) + if (ether_addr_equal(hdr->addr4, dev_addr)) return RX_DROP_MONITOR; } } @@ -1275,7 +1275,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, NL80211_IFTYPE_ADHOC); - if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) { + if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) { sta->last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control)) { sta->last_rx_rate_idx = status->rate_idx; @@ -1438,8 +1438,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, */ if (((hdr->frame_control ^ f_hdr->frame_control) & cpu_to_le16(IEEE80211_FCTL_FTYPE)) || - compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || - compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) + !ether_addr_equal(hdr->addr1, f_hdr->addr1) || + !ether_addr_equal(hdr->addr2, f_hdr->addr2)) continue; if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { @@ -1925,7 +1925,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) mpp_path_add(proxied_addr, mpp_addr, sdata); } else { spin_lock_bh(&mppath->state_lock); - if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) + if (!ether_addr_equal(mppath->mpp, mpp_addr)) memcpy(mppath->mpp, mpp_addr, ETH_ALEN); spin_unlock_bh(&mppath->state_lock); } @@ -1934,7 +1934,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) /* Frame has reached destination. Don't forward */ if (!is_multicast_ether_addr(hdr->addr1) && - compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) + ether_addr_equal(sdata->vif.addr, hdr->addr3)) return RX_CONTINUE; q = ieee80211_select_queue_80211(local, skb, hdr); @@ -2122,13 +2122,13 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb; struct ieee80211_mgmt *resp; - if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { /* Not to own unicast address */ return; } - if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || - compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { + if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || + !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { /* Not from the current AP or not associated yet. 
*/ return; } @@ -2338,7 +2338,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) if (sdata->vif.type != NL80211_IFTYPE_STATION) break; - if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid)) + if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) break; goto queue; @@ -2772,7 +2772,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, if (!bssid && !sdata->u.mgd.use_4addr) return 0; if (!multicast && - compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { if (!(sdata->dev->flags & IFF_PROMISC) || sdata->u.mgd.use_4addr) return 0; @@ -2790,8 +2790,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, return 0; status->rx_flags &= ~IEEE80211_RX_RA_MATCH; } else if (!multicast && - compare_ether_addr(sdata->vif.addr, - hdr->addr1) != 0) { + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { if (!(sdata->dev->flags & IFF_PROMISC)) return 0; status->rx_flags &= ~IEEE80211_RX_RA_MATCH; @@ -2807,8 +2806,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, break; case NL80211_IFTYPE_MESH_POINT: if (!multicast && - compare_ether_addr(sdata->vif.addr, - hdr->addr1) != 0) { + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { if (!(sdata->dev->flags & IFF_PROMISC)) return 0; @@ -2818,8 +2816,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: if (!bssid) { - if (compare_ether_addr(sdata->vif.addr, - hdr->addr1)) + if (!ether_addr_equal(sdata->vif.addr, hdr->addr1)) return 0; } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { @@ -2841,7 +2838,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, case NL80211_IFTYPE_WDS: if (bssid || !ieee80211_is_data(hdr->frame_control)) return 0; - if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) + if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2)) return 0; break; default: diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 8282284f835c..169da0742c81 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -194,7 +194,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) presp = ieee80211_is_probe_resp(fc); if (presp) { /* ignore ProbeResp to foreign address */ - if (compare_ether_addr(mgmt->da, sdata->vif.addr)) + if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return RX_DROP_MONITOR; presp = true; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 97a9d6639fb9..f5b1638fbf80 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -102,7 +102,7 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, lockdep_is_held(&local->sta_mtx)); while (sta) { if (sta->sdata == sdata && - compare_ether_addr(sta->sta.addr, addr) == 0) + ether_addr_equal(sta->sta.addr, addr)) break; sta = rcu_dereference_check(sta->hnext, lockdep_is_held(&local->sta_mtx)); @@ -125,7 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, while (sta) { if ((sta->sdata == sdata || (sta->sdata->bss && sta->sdata->bss == sdata->bss)) && - compare_ether_addr(sta->sta.addr, addr) == 0) + ether_addr_equal(sta->sta.addr, addr)) break; sta = rcu_dereference_check(sta->hnext, lockdep_is_held(&local->sta_mtx)); @@ -302,7 +302,7 @@ static int sta_info_insert_check(struct sta_info *sta) if (unlikely(!ieee80211_sdata_running(sdata))) return -ENETDOWN; - if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 || + if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) 
|| is_multicast_ether_addr(sta->sta.addr))) return -EINVAL; @@ -912,7 +912,7 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, */ for_each_sta_info(hw_to_local(hw), addr, sta, nxt) { if (localaddr && - compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0) + !ether_addr_equal(sta->sdata->vif.addr, localaddr)) continue; if (!sta->uploaded) return NULL; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 05f257aa2e08..28cfa981cfb1 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -384,7 +384,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) for_each_sta_info(local, hdr->addr1, sta, tmp) { /* skip wrong virtual interface */ - if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr)) + if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) continue; if (info->flags & IEEE80211_TX_STATUS_EOSP) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index d67d36f57d78..e03abc78ee53 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1665,7 +1665,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; - if (compare_ether_addr(payload, rfc1042_header) == 0) + if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } @@ -1698,7 +1698,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN || tmp_sdata->vif.type == NL80211_IFTYPE_WDS) continue; - if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) { + if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) { sdata = tmp_sdata; break; } @@ -1815,9 +1815,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, * is being proxied by a portal (i.e. portal address * differs from proxied address) */ - if (compare_ether_addr(sdata->vif.addr, - skb->data + ETH_ALEN) == 0 && - !(mppath && compare_ether_addr(mppath->mpp, skb->data))) { + if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) && + !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, skb->data, skb->data + ETH_ALEN); rcu_read_unlock(); @@ -1964,7 +1963,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && !is_multicast_ether_addr(hdr.addr1) && !authorized && (cpu_to_be16(ethertype) != sdata->control_port_protocol || - compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) { + !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "%s: dropped frame to %pM" -- cgit v1.2.2 From 3bc7945e2642bcea92b0fd6a718565f45061439f Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:53 +0000 Subject: mac80211: Convert compare_ether_addr to ether_addr_equal by hand spatch/coccinelle isn't perfect. It doesn't understand __aligned(x) and doesn't convert functions it can't parse. Convert the remaining compare_ether_addr uses. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/mac80211/rx.c | 4 ++-- net/mac80211/sta_info.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 26bfd6fcaa48..d722c40c7eca 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1714,8 +1714,8 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) * of whether the frame was encrypted or not. */ if (ehdr->h_proto == rx->sdata->control_port_protocol && - (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || - compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) + (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || + ether_addr_equal(ehdr->h_dest, pae_group_addr))) return true; if (ieee80211_802_1x_port_control(rx) || diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index f75f5d9ac06d..e920b229fb8c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -501,7 +501,7 @@ void for_each_sta_info_type_check(struct ieee80211_local *local, nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \ ) \ /* compare address and run code only if it matches */ \ - if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0) + if (ether_addr_equal(_sta->sta.addr, (_addr))) /* * Get STA info by index, BROKEN! -- cgit v1.2.2 From 8561cf99785faf73cccb9f417170d8b5844bf496 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:54 +0000 Subject: netfilter: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 4 ++-- net/netfilter/xt_mac.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 0bb16c469a89..d7eaf10edb6d 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -111,7 +111,7 @@ bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags) return -EAGAIN; case MAC_FILLED: return data->ether == NULL || - compare_ether_addr(data->ether, elem->ether) == 0; + ether_addr_equal(data->ether, elem->ether); } return 0; } @@ -225,7 +225,7 @@ bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) return -EAGAIN; case MAC_FILLED: return (data->ether == NULL || - compare_ether_addr(data->ether, elem->ether) == 0) && + ether_addr_equal(data->ether, elem->ether)) && !bitmap_expired(map, data->id); } return 0; diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c index 8160f6b1435d..d5b4fd4f91ed 100644 --- a/net/netfilter/xt_mac.c +++ b/net/netfilter/xt_mac.c @@ -36,7 +36,7 @@ static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par) return false; if (skb_mac_header(skb) + ETH_HLEN > skb->data) return false; - ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0; + ret = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr); ret ^= info->invert; return ret; } -- cgit v1.2.2 From ac422d3cc25351819ec80b7e2852fd2c600c317c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:55 +0000 Subject: wireless: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. I removed a conversion from scan.c/cmp_bss_core that appears to be a sorting function. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/wireless/ibss.c | 2 +- net/wireless/mlme.c | 31 +++++++++++++++---------------- net/wireless/scan.c | 2 +- net/wireless/util.c | 5 ++--- net/wireless/wext-sme.c | 2 +- net/wireless/wext-spy.c | 2 +- 6 files changed, 21 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 30f20fe4a5fe..d2a19b0ff71f 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -473,7 +473,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, /* fixed already - and no change */ if (wdev->wext.ibss.bssid && bssid && - compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) + ether_addr_equal(bssid, wdev->wext.ibss.bssid)) return 0; wdev_lock(wdev); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 6801d96bc224..eb90988bbd36 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -101,7 +101,7 @@ void __cfg80211_send_deauth(struct net_device *dev, ASSERT_WDEV_LOCK(wdev); if (wdev->current_bss && - compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; @@ -116,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev, reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); - from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0; + from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, @@ -155,7 +155,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, return; if (wdev->current_bss && - compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_sme_disassoc(dev, wdev->current_bss); cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); @@ -166,7 +166,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0; + from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } EXPORT_SYMBOL(__cfg80211_send_disassoc); @@ -286,7 +286,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, return -EINVAL; if (wdev->current_bss && - compare_ether_addr(bssid, wdev->current_bss->pub.bssid) == 0) + ether_addr_equal(bssid, wdev->current_bss->pub.bssid)) return -EALREADY; memset(&req, 0, sizeof(req)); @@ -363,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, memset(&req, 0, sizeof(req)); if (wdev->current_bss && prev_bssid && - compare_ether_addr(wdev->current_bss->pub.bssid, prev_bssid) == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) { /* * Trying to reassociate: Allow this to proceed and let the old * association to be dropped when the new one is completed. 
@@ -447,8 +447,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, if (local_state_change) { if (wdev->current_bss && - compare_ether_addr(wdev->current_bss->pub.bssid, bssid) - == 0) { + ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; @@ -497,7 +496,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, req.local_state_change = local_state_change; req.ie = ie; req.ie_len = ie_len; - if (compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) + if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) req.bss = &wdev->current_bss->pub; else return -ENOTCONN; @@ -760,8 +759,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, break; } - if (compare_ether_addr(wdev->current_bss->pub.bssid, - mgmt->bssid)) { + if (!ether_addr_equal(wdev->current_bss->pub.bssid, + mgmt->bssid)) { err = -ENOTCONN; break; } @@ -774,8 +773,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, break; /* for station, check that DA is the AP */ - if (compare_ether_addr(wdev->current_bss->pub.bssid, - mgmt->da)) { + if (!ether_addr_equal(wdev->current_bss->pub.bssid, + mgmt->da)) { err = -ENOTCONN; break; } @@ -783,11 +782,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP_VLAN: - if (compare_ether_addr(mgmt->bssid, dev->dev_addr)) + if (!ether_addr_equal(mgmt->bssid, dev->dev_addr)) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: - if (compare_ether_addr(mgmt->sa, mgmt->bssid)) { + if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) { err = -EINVAL; break; } @@ -806,7 +805,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, return err; } - if (compare_ether_addr(mgmt->sa, dev->dev_addr) != 0) + if (!ether_addr_equal(mgmt->sa, dev->dev_addr)) return -EINVAL; /* Transmit the Action frame as requested by user space */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 1442bb68a3f3..26d96d7ffba8 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -281,7 +281,7 @@ static bool is_bss(struct cfg80211_bss *a, { const u8 *ssidie; - if (bssid && compare_ether_addr(a->bssid, bssid)) + if (bssid && !ether_addr_equal(a->bssid, bssid)) return false; if (!ssid) diff --git a/net/wireless/util.c b/net/wireless/util.c index 6cba00173a2f..3ba366f51b07 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -609,10 +609,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, payload = frame->data; ethertype = (payload[6] << 8) | payload[7]; - if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + if (likely((ether_addr_equal(payload, rfc1042_header) && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || - compare_ether_addr(payload, - bridge_tunnel_header) == 0)) { + ether_addr_equal(payload, bridge_tunnel_header))) { /* remove RFC1042 or Bridge-Tunnel * encapsulation and replace EtherType */ skb_pull(frame, 6); diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 7c01c2f3b6cf..7decbd357d51 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -276,7 +276,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, /* fixed already - and no change */ if (wdev->wext.connect.bssid && bssid && - compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) + ether_addr_equal(bssid, wdev->wext.connect.bssid)) goto out; err = 
__cfg80211_disconnect(rdev, dev, diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c index 5d643a548feb..33bef22e44e9 100644 --- a/net/wireless/wext-spy.c +++ b/net/wireless/wext-spy.c @@ -203,7 +203,7 @@ void wireless_spy_update(struct net_device * dev, /* Update all records that match */ for (i = 0; i < spydata->spy_number; i++) - if (!compare_ether_addr(address, spydata->spy_address[i])) { + if (ether_addr_equal(address, spydata->spy_address[i])) { memcpy(&(spydata->spy_stat[i]), wstats, sizeof(struct iw_quality)); match = i; -- cgit v1.2.2 From 4c764729aba365796c09d988f53043891f822acb Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:56 +0000 Subject: wireless: Convert compare_ether_addr to ether_addr_equal by hand spatch/coccinelle isn't perfect. It doesn't understand __aligned(x) and doesn't convert functions it can't parse. Convert the remaining compare_ether_addr uses. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/wireless/util.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/wireless/util.c b/net/wireless/util.c index 3ba366f51b07..1cd255892a43 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -370,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, iftype != NL80211_IFTYPE_P2P_CLIENT && iftype != NL80211_IFTYPE_MESH_POINT) || (is_multicast_ether_addr(dst) && - !compare_ether_addr(src, addr))) + ether_addr_equal(src, addr))) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { struct ieee80211s_hdr *meshdr = @@ -398,9 +398,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; - if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + if (likely((ether_addr_equal(payload, rfc1042_header) && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || - compare_ether_addr(payload, bridge_tunnel_header) == 0)) { + ether_addr_equal(payload, bridge_tunnel_header))) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(skb, hdrlen + 6); -- cgit v1.2.2 From 8feedbb4a710784d2858acba5c90e903e93e36eb Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 May 2012 18:56:57 +0000 Subject: dsa: Convert compare_ether_addr to ether_addr_equal Use the new bool function ether_addr_equal to add some clarity and reduce the likelihood for misuse of compare_ether_addr for sorting. Done via cocci script: $ cat compare_ether_addr.cocci @@ expression a,b; @@ - !compare_ether_addr(a, b) + ether_addr_equal(a, b) @@ expression a,b; @@ - compare_ether_addr(a, b) + !ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) == 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !ether_addr_equal(a, b) != 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) == 0 + !ether_addr_equal(a, b) @@ expression a,b; @@ - ether_addr_equal(a, b) != 0 + ether_addr_equal(a, b) @@ expression a,b; @@ - !!ether_addr_equal(a, b) + ether_addr_equal(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/dsa/slave.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 56cf9b8e1c7c..e32083d5d8f8 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -66,7 +66,7 @@ static int dsa_slave_open(struct net_device *dev) if (!(master->flags & IFF_UP)) return -ENETDOWN; - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) { err = dev_uc_add(master, dev->dev_addr); if (err < 0) goto out; @@ -89,7 +89,7 @@ clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(master, -1); del_unicast: - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); out: return err; @@ -107,7 +107,7 @@ static int dsa_slave_close(struct net_device *dev) if (dev->flags & IFF_PROMISC) dev_set_promiscuity(master, -1); - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); return 0; @@ -146,13 +146,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) if (!(dev->flags & IFF_UP)) goto out; - if (compare_ether_addr(addr->sa_data, master->dev_addr)) { + if (!ether_addr_equal(addr->sa_data, master->dev_addr)) { err = dev_uc_add(master, addr->sa_data); if (err < 0) return err; } - if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); out: -- cgit v1.2.2 From 081d094eaab894ae5a517fde56179dfe67773ff0 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 12 Apr 2012 00:42:12 +0000 Subject: ethtool: Split ethtool_get_eeprom() to allow for additional EEPROM accessors We want to support reading module (SFP+, XFP, ...) EEPROMs as well as NIC EEPROMs. They will need a different command number and driver operation, but the structure and arguments will be the same and so we can share most of the code here. 
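The pattern this commit introduces is that the bounds checking and the chunked copy loop are written once and parameterized by an accessor callback plus the device's total EEPROM length, so NIC and module EEPROM reads differ only in the getter they pass in. The following is a minimal, compile-only sketch of that shape with illustrative names, not the kernel code itself:

#include <stddef.h>

struct eeprom_req {
	size_t offset;		/* where to start reading */
	size_t len;		/* how many bytes the caller wants */
};

/* Accessor supplied per EEPROM source (NIC flash, SFP+ module, ...). */
typedef int (*eeprom_getter)(const struct eeprom_req *req, unsigned char *out);

/* Shared helper: validates the request once, then defers to the getter. */
static int read_any_eeprom(const struct eeprom_req *req, unsigned char *out,
			   eeprom_getter getter, size_t total_len)
{
	if (req->len == 0 || req->offset + req->len > total_len)
		return -1;		/* reject out-of-range requests centrally */
	return getter(req, out);	/* only this call differs per source */
}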
Signed-off-by: Ben Hutchings --- net/core/ethtool.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index beacdd93cd8f..ca7698fd24d4 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -751,18 +751,17 @@ static int ethtool_get_link(struct net_device *dev, char __user *useraddr) return 0; } -static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) +static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr, + int (*getter)(struct net_device *, + struct ethtool_eeprom *, u8 *), + u32 total_len) { struct ethtool_eeprom eeprom; - const struct ethtool_ops *ops = dev->ethtool_ops; void __user *userbuf = useraddr + sizeof(eeprom); u32 bytes_remaining; u8 *data; int ret = 0; - if (!ops->get_eeprom || !ops->get_eeprom_len) - return -EOPNOTSUPP; - if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) return -EFAULT; @@ -771,7 +770,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) return -EINVAL; /* Check for exceeding total eeprom len */ - if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + if (eeprom.offset + eeprom.len > total_len) return -EINVAL; data = kmalloc(PAGE_SIZE, GFP_USER); @@ -782,7 +781,7 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) while (bytes_remaining > 0) { eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); - ret = ops->get_eeprom(dev, &eeprom, data); + ret = getter(dev, &eeprom, data); if (ret) break; if (copy_to_user(userbuf, data, eeprom.len)) { @@ -803,6 +802,17 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) return ret; } +static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, + ops->get_eeprom_len(dev)); +} + static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) { struct ethtool_eeprom eeprom; -- cgit v1.2.2 From 41c3cb6d20f0252308e9796fa4f3dacb4960de91 Mon Sep 17 00:00:00 2001 From: Stuart Hodgson Date: Thu, 19 Apr 2012 09:44:42 +0100 Subject: ethtool: Extend the ethtool API to obtain plugin module eeprom data ETHTOOL_GMODULEINFO returns a new struct ethtool_modinfo that will return the type and size of plug-in module eeprom (such as SFP+) for parsing by userland program. 
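To make the intended userland flow concrete, here is a rough sketch of querying the new command through the usual SIOCETHTOOL ioctl path; the interface name is a placeholder and the ethtool_modinfo field names (cmd, type, eeprom_len) follow the description above, so treat them as assumptions rather than a copy of the final UAPI header.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_modinfo modinfo;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket works for SIOCETHTOOL */

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	memset(&modinfo, 0, sizeof(modinfo));
	modinfo.cmd = ETHTOOL_GMODULEINFO;
	ifr.ifr_data = (char *)&modinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("module EEPROM type %u, %u bytes\n",
		       modinfo.type, modinfo.eeprom_len);
	close(fd);
	return 0;
}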
ETHTOOL_GMODULEEEPROM returns the raw eeprom information using the existing ethtool_eeprom structture to return the data Signed-off-by: Stuart Hodgson Signed-off-by: Ben Hutchings --- net/core/ethtool.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'net') diff --git a/net/core/ethtool.c b/net/core/ethtool.c index ca7698fd24d4..9c2afb480270 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1335,6 +1335,47 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) return err; } +static int ethtool_get_module_info(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_module_info) + return -EOPNOTSUPP; + + if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) + return -EFAULT; + + ret = ops->get_module_info(dev, &modinfo); + if (ret) + return ret; + + if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) + return -EFAULT; + + return 0; +} + +static int ethtool_get_module_eeprom(struct net_device *dev, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->get_module_info || !ops->get_module_eeprom) + return -EOPNOTSUPP; + + ret = ops->get_module_info(dev, &modinfo); + if (ret) + return ret; + + return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom, + modinfo.eeprom_len); +} + /* The main entry point in this file. Called from net/core/dev.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -1559,6 +1600,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GET_TS_INFO: rc = ethtool_get_ts_info(dev, useraddr); break; + case ETHTOOL_GMODULEINFO: + rc = ethtool_get_module_info(dev, useraddr); + break; + case ETHTOOL_GMODULEEEPROM: + rc = ethtool_get_module_eeprom(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } -- cgit v1.2.2 From 292e8d8c853889140ed77b7b37c66979b13080ae Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 10 May 2012 01:49:41 +0000 Subject: tcp: Move rcvq sending to tcp_input.c It actually works on the input queue and will use its read mem routines, thus it's better to have in in the tcp_input.c file. Signed-off-by: Pavel Emelyanov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 33 --------------------------------- net/ipv4/tcp_input.c | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 565406287f6f..86e2cf2ff770 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -978,39 +978,6 @@ static inline int select_size(const struct sock *sk, bool sg) return tmp; } -static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) -{ - struct sk_buff *skb; - struct tcphdr *th; - bool fragstolen; - - skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); - if (!skb) - goto err; - - th = (struct tcphdr *)skb_put(skb, sizeof(*th)); - skb_reset_transport_header(skb); - memset(th, 0, sizeof(*th)); - - if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) - goto err_free; - - TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; - TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; - TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; - - if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { - WARN_ON_ONCE(fragstolen); /* should not happen */ - __kfree_skb(skb); - } - return size; - -err_free: - kfree_skb(skb); -err: - return -ENOMEM; -} - int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eb58b94301ec..7c6c99dcc962 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4746,7 +4746,7 @@ end: skb_set_owner_r(skb, sk); } -int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, +static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, bool *fragstolen) { int eaten; @@ -4763,6 +4763,39 @@ int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, return eaten; } +int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) +{ + struct sk_buff *skb; + struct tcphdr *th; + bool fragstolen; + + skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); + if (!skb) + goto err; + + th = (struct tcphdr *)skb_put(skb, sizeof(*th)); + skb_reset_transport_header(skb); + memset(th, 0, sizeof(*th)); + + if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) + goto err_free; + + TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; + TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; + TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; + + if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { + WARN_ON_ONCE(fragstolen); /* should not happen */ + __kfree_skb(skb); + } + return size; + +err_free: + kfree_skb(skb); +err: + return -ENOMEM; +} + static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); -- cgit v1.2.2 From 3c961afed4d4e766b66092e7af8c8e8005053505 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 10 May 2012 01:50:01 +0000 Subject: tcp: Schedule rmem for rcvq repair send As noted by Eric, no checks are performed on the data size we're putting in the read queue during repair. Thus, validate the given data size with the common rmem management routine. Signed-off-by: Pavel Emelyanov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 7c6c99dcc962..164659f2d636 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4769,6 +4769,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) struct tcphdr *th; bool fragstolen; + if (tcp_try_rmem_schedule(sk, size + sizeof(*th))) + goto err; + skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); if (!skb) goto err; -- cgit v1.2.2 From 1070b1b831404455d79d15fe94ae9216fb5f8ab4 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Thu, 10 May 2012 01:50:20 +0000 Subject: tcp: Out-line tcp_try_rmem_schedule As proposed by Eric, make the tcp_input.o thinner. add/remove: 1/1 grow/shrink: 1/4 up/down: 868/-1329 (-461) function old new delta tcp_try_rmem_schedule - 864 +864 tcp_ack 4811 4815 +4 tcp_validate_incoming 817 815 -2 tcp_collapse 860 858 -2 tcp_send_rcvq 555 353 -202 tcp_data_queue 3435 3033 -402 tcp_prune_queue 721 - -721 Signed-off-by: Pavel Emelyanov Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 164659f2d636..b99ada27a136 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4511,7 +4511,7 @@ static void tcp_ofo_queue(struct sock *sk) static int tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); -static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) +static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) { if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, size)) { -- cgit v1.2.2 From 38d40b3f4e223336422b7e87cb483e758ef87e3a Mon Sep 17 00:00:00 2001 From: James Chapman Date: Wed, 9 May 2012 23:43:08 +0000 Subject: l2tp: fix reorder timeout recovery When L2TP data packet reordering is enabled, packets are held in a queue while waiting for out-of-sequence packets. If a packet gets lost, packets will be held until the reorder timeout expires, when we are supposed to then advance to the sequence number of the next packet but we don't currently do so. As a result, the data channel is stuck because we are waiting for a packet that will never arrive - all packets age out and none are passed. The fix is to add a flag to the session context, which is set when the reorder timeout expires and tells the receive code to reset the next expected sequence number to that of the next packet in the queue. Tested in a production L2TP network with Starent and Nortel L2TP gear. Signed-off-by: James Chapman Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 9 +++++++++ net/l2tp/l2tp_core.h | 1 + 2 files changed, 10 insertions(+) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 456b52d8f6d8..d1ab3a236cca 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -428,6 +428,7 @@ start: session->name, L2TP_SKB_CB(skb)->ns, L2TP_SKB_CB(skb)->length, session->nr, skb_queue_len(&session->reorder_q)); + session->reorder_skip = 1; __skb_unlink(skb, &session->reorder_q); kfree_skb(skb); if (session->deref) @@ -436,6 +437,14 @@ start: } if (L2TP_SKB_CB(skb)->has_seq) { + if (session->reorder_skip) { + PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, + "%s: advancing nr to next pkt: %u -> %u", + session->name, session->nr, + L2TP_SKB_CB(skb)->ns); + session->reorder_skip = 0; + session->nr = L2TP_SKB_CB(skb)->ns; + } if (L2TP_SKB_CB(skb)->ns != session->nr) { PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, "%s: holding oos pkt %u len %d, " diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 0bf60fc88bb7..90026341a1e5 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -123,6 +123,7 @@ struct l2tp_session { * categories */ int reorder_timeout; /* configured reorder timeout * (in jiffies) */ + int reorder_skip; /* set if skip to next nr */ int mtu; int mru; enum l2tp_pwtype pwtype; -- cgit v1.2.2 From d301e3256866bfd3ae3093aeb43d3ca9570d758e Mon Sep 17 00:00:00 2001 From: James Chapman Date: Wed, 9 May 2012 23:43:09 +0000 Subject: l2tp: fix data packet sequence number handling If enabled, L2TP data packets have sequence numbers which a receiver can use to drop out of sequence frames or try to reorder them. The first frame has sequence number 0, but the L2TP code currently expects it to be 1. This results in the first data frame being handled as out of sequence. This one-line patch fixes the problem. Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d1ab3a236cca..0d6aedc3a0ce 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1762,7 +1762,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn session->session_id = session_id; session->peer_session_id = peer_session_id; - session->nr = 1; + session->nr = 0; sprintf(&session->name[0], "sess %u/%u", tunnel->tunnel_id, session->session_id); -- cgit v1.2.2 From a6700db17980972199e61c06be535a79e1b0b4e6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 9 May 2012 17:04:04 +0000 Subject: net, drivers/net: Convert compare_ether_addr_64bits to ether_addr_equal_64bits Use the new bool function ether_addr_equal_64bits to add some clarity and reduce the likelihood for misuse of compare_ether_addr_64bits for sorting. 
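Since the same conversion pattern recurs across this series, a small stand-alone illustration of why the bool helper reads better follows; the two functions below are simplified stand-ins, not the real <linux/etherdevice.h> implementations. compare_ether_addr() follows the memcmp convention of returning 0 on a match, so every caller needs a leading '!' that is easy to drop, and its nonzero result is not an ordering, which is exactly the misuse-for-sorting hazard mentioned above.

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* memcmp-style: 0 means "same address", nonzero means "different". */
static int compare_ether_addr_sketch(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) != 0;
}

/* bool-style: true means "same address", which matches how callers read it. */
static bool ether_addr_equal_sketch(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

/* The cocci rules used in this series rewrite, for example:
 *	!compare_ether_addr(a, b)	->	ether_addr_equal(a, b)
 *	compare_ether_addr(a, b)	->	!ether_addr_equal(a, b)
 */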
Done via cocci script: $ cat compare_ether_addr_64bits.cocci @@ expression a,b; @@ - !compare_ether_addr_64bits(a, b) + ether_addr_equal_64bits(a, b) @@ expression a,b; @@ - compare_ether_addr_64bits(a, b) + !ether_addr_equal_64bits(a, b) @@ expression a,b; @@ - !ether_addr_equal_64bits(a, b) == 0 + ether_addr_equal_64bits(a, b) @@ expression a,b; @@ - !ether_addr_equal_64bits(a, b) != 0 + !ether_addr_equal_64bits(a, b) @@ expression a,b; @@ - ether_addr_equal_64bits(a, b) == 0 + !ether_addr_equal_64bits(a, b) @@ expression a,b; @@ - ether_addr_equal_64bits(a, b) != 0 + ether_addr_equal_64bits(a, b) @@ expression a,b; @@ - !!ether_addr_equal_64bits(a, b) + ether_addr_equal_64bits(a, b) Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/ethernet/eth.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 5889a6c38a10..36e58800a9e3 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -164,7 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) eth = eth_hdr(skb); if (unlikely(is_multicast_ether_addr(eth->h_dest))) { - if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) + if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; @@ -179,7 +179,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) */ else if (1 /*dev->flags&IFF_PROMISC */ ) { - if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr))) + if (unlikely(!ether_addr_equal_64bits(eth->h_dest, + dev->dev_addr))) skb->pkt_type = PACKET_OTHERHOST; } -- cgit v1.2.2 From 2dd875ff31ac7ff42d6fc7d7f78ac6c0635439f5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 10 May 2012 05:36:34 +0000 Subject: net_sched: update bstats in dequeue() Class bytes/packets stats can be misleading because they are updated in enqueue() while packet might be dropped later. We already fixed all qdiscs but sch_atm. This patch makes the final cleanup. class rate estimators can now match qdisc ones. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/sch_atm.c | 4 ++-- net/sched/sch_drr.c | 4 ++-- net/sched/sch_hfsc.c | 2 +- net/sched/sch_htb.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index a77a4fbc069a..8522a4793374 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -423,8 +423,6 @@ drop: __maybe_unused } return ret; } - qdisc_bstats_update(sch, skb); - bstats_update(&flow->bstats, skb); /* * Okay, this may seem weird. We pretend we've dropped the packet if * it goes via ATM. 
The reason for this is that the outer qdisc @@ -472,6 +470,8 @@ static void sch_atm_dequeue(unsigned long data) if (unlikely(!skb)) break; + qdisc_bstats_update(sch, skb); + bstats_update(&flow->bstats, skb); pr_debug("atm_tc_dequeue: sending on class %p\n", flow); /* remove any LL header somebody else has attached */ skb_pull(skb, skb_network_offset(skb)); diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index c2189879359b..9ce0b4fe23ff 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -376,8 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl->deficit = cl->quantum; } - bstats_update(&cl->bstats, skb); - sch->q.qlen++; return err; } @@ -403,6 +401,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) skb = qdisc_dequeue_peeked(cl->qdisc); if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); + + bstats_update(&cl->bstats, skb); qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 8db3e2c72827..6c2ec4510540 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1609,7 +1609,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (cl->qdisc->q.qlen == 1) set_active(cl, qdisc_pkt_len(skb)); - bstats_update(&cl->bstats, skb); sch->q.qlen++; return NET_XMIT_SUCCESS; @@ -1657,6 +1656,7 @@ hfsc_dequeue(struct Qdisc *sch) return NULL; } + bstats_update(&cl->bstats, skb); update_vf(cl, qdisc_pkt_len(skb), cur_time); if (realtime) cl->cl_cumul += qdisc_pkt_len(skb); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index acae5b0e3849..9d75b7761313 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } return ret; } else { - bstats_update(&cl->bstats, skb); htb_activate(q, cl); } @@ -835,6 +834,7 @@ next: } while (cl != start); if (likely(skb != NULL)) { + bstats_update(&cl->bstats, skb); cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb); if (cl->un.leaf.deficit[level] < 0) { cl->un.leaf.deficit[level] += cl->quantum; -- cgit v1.2.2 From 76e3cc126bb223013a6b9a0e2a51238d1ef2e409 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 10 May 2012 07:51:25 +0000 Subject: codel: Controlled Delay AQM An implementation of CoDel AQM, from Kathleen Nichols and Van Jacobson. http://queue.acm.org/detail.cfm?id=2209336 This AQM main input is no longer queue size in bytes or packets, but the delay packets stay in (FIFO) queue. As we don't have infinite memory, we still can drop packets in enqueue() in case of massive load, but mean of CoDel is to drop packets in dequeue(), using a control law based on two simple parameters : target : target sojourn time (default 5ms) interval : width of moving time window (default 100ms) Based on initial work from Dave Taht. Refactored to help future codel inclusion as a plugin for other linux qdisc (FQ_CODEL, ...), like RED. include/net/codel.h contains codel algorithm as close as possible than Kathleen reference. net/sched/sch_codel.c contains the linux qdisc specific glue. Separate structures permit a memory efficient implementation of fq_codel (to be sent as a separate work) : Each flow has its own struct codel_vars. timestamps are taken at enqueue() time with 1024 ns precision, allowing a range of 2199 seconds in queue, and 100Gb links support. iproute2 uses usec as base unit. Selected packets are dropped, unless ECN is enabled and packets can get ECN mark instead. 
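For readers unfamiliar with the control law mentioned above, a rough sketch: once the packet sojourn time has stayed above target for a full interval, CoDel enters a dropping state and spaces successive drops at interval / sqrt(count), so the drop rate ramps up gently while the queue keeps staying too long. The plain floating-point version below is only for illustration; the in-kernel include/net/codel.h code computes the same schedule in fixed point.

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double interval_ms = 100.0;	/* default interval from above */
	unsigned int count;

	/* Spacing between consecutive drops while staying in dropping state. */
	for (count = 1; count <= 5; count++)
		printf("after drop %u, next drop in %.1f ms\n",
		       count, interval_ms / sqrt((double)count));
	return 0;
}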
Tested from 2Mb to 10Gb speeds with no particular problems, on ixgbe and tg3 drivers (BQL enabled). Usage: tc qdisc ... codel [ limit PACKETS ] [ target TIME ] [ interval TIME ] [ ecn ] qdisc codel 10: parent 1:1 limit 2000p target 3.0ms interval 60.0ms ecn Sent 13347099587 bytes 8815805 pkt (dropped 0, overlimits 0 requeues 0) rate 202365Kbit 16708pps backlog 113550b 75p requeues 0 count 116 lastcount 98 ldelay 4.3ms dropping drop_next 816us maxpacket 1514 ecn_mark 84399 drop_overlimit 0 CoDel must be seen as a base module, and should be used keeping in mind there is still a FIFO queue. So a typical setup will probably need a hierarchy of several qdiscs and packet classifiers to be able to meet whatever constraints a user might have. One possible example would be to use fq_codel, which combines Fair Queueing and CoDel, in replacement of sfq / sfq_red. Signed-off-by: Eric Dumazet Signed-off-by: Dave Taht Cc: Kathleen Nichols Cc: Van Jacobson Cc: Tom Herbert Cc: Matt Mathis Cc: Yuchung Cheng Cc: Stephen Hemminger Signed-off-by: David S. Miller --- net/sched/Kconfig | 11 ++ net/sched/Makefile | 1 + net/sched/sch_codel.c | 275 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 287 insertions(+) create mode 100644 net/sched/sch_codel.c (limited to 'net') diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 75b58f81d53d..fadd2522053d 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -250,6 +250,17 @@ config NET_SCH_QFQ If unsure, say N. +config NET_SCH_CODEL + tristate "Controlled Delay AQM (CODEL)" + help + Say Y here if you want to use the Controlled Delay (CODEL) + packet scheduling algorithm. + + To compile this driver as a module, choose M here: the module + will be called sch_codel. + + If unsure, say N. + config NET_SCH_INGRESS tristate "Ingress Qdisc" depends on NET_CLS_ACT diff --git a/net/sched/Makefile b/net/sched/Makefile index 8cdf4e2b51d3..30fab03b8516 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o +obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c new file mode 100644 index 000000000000..b4a1a81e757e --- /dev/null +++ b/net/sched/sch_codel.c @@ -0,0 +1,275 @@ +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols + * Copyright (C) 2011-2012 Van Jacobson + * + * Implemented on linux by : + * Copyright (C) 2012 Michael D. Taht + * Copyright (C) 2012 Eric Dumazet + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DEFAULT_CODEL_LIMIT 1000 + +struct codel_sched_data { + struct codel_params params; + struct codel_vars vars; + struct codel_stats stats; + u32 drop_overlimit; +}; + +/* This is the specific function called from codel_dequeue() + * to dequeue a packet from queue. Note: backlog is handled in + * codel, we dont need to reduce it here. + */ +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) +{ + struct sk_buff *skb = __skb_dequeue(&sch->q); + + prefetch(&skb->end); /* we'll need skb_shinfo() */ + return skb; +} + +static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) +{ + struct codel_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, + dequeue, &sch->qstats.backlog); + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, + * or HTB crashes. Defer it for next round. 
+ */ + if (q->stats.drop_count && sch->q.qlen) { + qdisc_tree_decrease_qlen(sch, q->stats.drop_count); + q->stats.drop_count = 0; + } + if (skb) + qdisc_bstats_update(sch, skb); + return skb; +} + +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct codel_sched_data *q; + + if (likely(qdisc_qlen(sch) < sch->limit)) { + codel_set_enqueue_time(skb); + return qdisc_enqueue_tail(skb, sch); + } + q = qdisc_priv(sch); + q->drop_overlimit++; + return qdisc_drop(skb, sch); +} + +static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = { + [TCA_CODEL_TARGET] = { .type = NLA_U32 }, + [TCA_CODEL_LIMIT] = { .type = NLA_U32 }, + [TCA_CODEL_INTERVAL] = { .type = NLA_U32 }, + [TCA_CODEL_ECN] = { .type = NLA_U32 }, +}; + +static int codel_change(struct Qdisc *sch, struct nlattr *opt) +{ + struct codel_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_CODEL_MAX + 1]; + unsigned int qlen; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy); + if (err < 0) + return err; + + sch_tree_lock(sch); + + if (tb[TCA_CODEL_TARGET]) { + u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]); + + q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_CODEL_INTERVAL]) { + u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]); + + q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_CODEL_LIMIT]) + sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]); + + if (tb[TCA_CODEL_ECN]) + q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]); + + qlen = sch->q.qlen; + while (sch->q.qlen > sch->limit) { + struct sk_buff *skb = __skb_dequeue(&sch->q); + + sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_drop(skb, sch); + } + qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); + + sch_tree_unlock(sch); + return 0; +} + +static int codel_init(struct Qdisc *sch, struct nlattr *opt) +{ + struct codel_sched_data *q = qdisc_priv(sch); + + sch->limit = DEFAULT_CODEL_LIMIT; + + codel_params_init(&q->params); + codel_vars_init(&q->vars); + codel_stats_init(&q->stats); + + if (opt) { + int err = codel_change(sch, opt); + + if (err) + return err; + } + + if (sch->limit >= 1) + sch->flags |= TCQ_F_CAN_BYPASS; + else + sch->flags &= ~TCQ_F_CAN_BYPASS; + + return 0; +} + +static int codel_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct codel_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (opts == NULL) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_CODEL_TARGET, + codel_time_to_us(q->params.target)) || + nla_put_u32(skb, TCA_CODEL_LIMIT, + sch->limit) || + nla_put_u32(skb, TCA_CODEL_INTERVAL, + codel_time_to_us(q->params.interval)) || + nla_put_u32(skb, TCA_CODEL_ECN, + q->params.ecn)) + goto nla_put_failure; + + return nla_nest_end(skb, opts); + +nla_put_failure: + nla_nest_cancel(skb, opts); + return -1; +} + +static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + const struct codel_sched_data *q = qdisc_priv(sch); + struct tc_codel_xstats st = { + .maxpacket = q->stats.maxpacket, + .count = q->vars.count, + .lastcount = q->vars.lastcount, + .drop_overlimit = q->drop_overlimit, + .ldelay = codel_time_to_us(q->vars.ldelay), + .dropping = q->vars.dropping, + .ecn_mark = q->stats.ecn_mark, + }; + + if (q->vars.dropping) { + codel_tdiff_t delta = q->vars.drop_next - codel_get_time(); + + if (delta >= 0) + st.drop_next = codel_time_to_us(delta); + else + st.drop_next = -codel_time_to_us(-delta); + } + + return 
gnet_stats_copy_app(d, &st, sizeof(st)); +} + +static void codel_reset(struct Qdisc *sch) +{ + struct codel_sched_data *q = qdisc_priv(sch); + + qdisc_reset_queue(sch); + codel_vars_init(&q->vars); +} + +static struct Qdisc_ops codel_qdisc_ops __read_mostly = { + .id = "codel", + .priv_size = sizeof(struct codel_sched_data), + + .enqueue = codel_qdisc_enqueue, + .dequeue = codel_qdisc_dequeue, + .peek = qdisc_peek_dequeued, + .init = codel_init, + .reset = codel_reset, + .change = codel_change, + .dump = codel_dump, + .dump_stats = codel_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init codel_module_init(void) +{ + return register_qdisc(&codel_qdisc_ops); +} + +static void __exit codel_module_exit(void) +{ + unregister_qdisc(&codel_qdisc_ops); +} + +module_init(codel_module_init) +module_exit(codel_module_exit) + +MODULE_DESCRIPTION("Controlled Delay queue discipline"); +MODULE_AUTHOR("Dave Taht"); +MODULE_AUTHOR("Eric Dumazet"); +MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.2 From 06a4c1c55dbe5d9f7a708e8f1a52fd2ac8e5874f Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Thu, 10 May 2012 03:25:52 +0000 Subject: 6lowpan: IPv6 link local address According to the RFC4944 (Transmission of IPv6 Packets over IEEE 802.15.4 Networks), chapter 7: The IPv6 link-local address [RFC4291] for an IEEE 802.15.4 interface is formed by appending the Interface Identifier, as defined above, to the prefix FE80::/64. 10 bits 54 bits 64 bits +----------+-----------------------+----------------------------+ |1111111010| (zeros) | Interface Identifier | +----------+-----------------------+----------------------------+ This patch adds IPv6 address generation support for the 6lowpan interfaces. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e3b3421f8dad..8b7f100a4b45 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -66,6 +66,7 @@ #include #include +#include #include #include #include @@ -1514,6 +1515,14 @@ static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev) return 0; } +static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev) +{ + if (dev->addr_len != IEEE802154_ADDR_LEN) + return -1; + memcpy(eui, dev->dev_addr, 8); + return 0; +} + static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) { /* XXX: inherit EUI-64 from other interface -- yoshfuji */ @@ -1577,6 +1586,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) return addrconf_ifid_sit(eui, dev); case ARPHRD_IPGRE: return addrconf_ifid_gre(eui, dev); + case ARPHRD_IEEE802154: + return addrconf_ifid_eui64(eui, dev); } return -1; } @@ -2438,7 +2449,8 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_FDDI) && (dev->type != ARPHRD_IEEE802_TR) && (dev->type != ARPHRD_ARCNET) && - (dev->type != ARPHRD_INFINIBAND)) { + (dev->type != ARPHRD_INFINIBAND) && + (dev->type != ARPHRD_IEEE802154)) { /* Alas, we support only Ethernet autoconfiguration. */ return; } -- cgit v1.2.2 From 9205cc521ec74bd510857a464d4ac4edee949bfd Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Mon, 27 Feb 2012 11:29:53 +0100 Subject: batman-adv: fix wrong dhcp option list browsing In is_type_dhcprequest(), while parsing a DHCP message, if the entry we found in the option list is neither a padding nor the dhcp-type, we have to ignore it and jump as many bytes as its length + 1. 
The "+ 1" byte is given by the subtype field itself that has to be jumped too. Reported-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/gateway_client.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 6f9b9b78f77d..47f7186dcefc 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -558,10 +558,10 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) p++; /* ...and then we jump over the data */ - if (pkt_len < *p) + if (pkt_len < 1 + (*p)) goto out; - pkt_len -= *p; - p += (*p); + pkt_len -= 1 + (*p); + p += 1 + (*p); } } out: -- cgit v1.2.2 From 75cd33f86396c446f84c4bb620be70c36a2a54f6 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Thu, 1 Mar 2012 15:35:16 +0800 Subject: batman-adv: introduce is_single_hop_neigh variable to increase readability Signed-off-by: Marek Lindner Acked-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 8b2db2e76c7e..cd8f473c1bd0 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -480,7 +480,8 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, static void bat_iv_ogm_forward(struct orig_node *orig_node, const struct ethhdr *ethhdr, struct batman_ogm_packet *batman_ogm_packet, - int directlink, struct hard_iface *if_incoming) + bool is_single_hop_neigh, + struct hard_iface *if_incoming) { struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct neigh_node *router; @@ -533,7 +534,7 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, /* switch of primaries first hop flag when forwarding */ batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; - if (directlink) + if (is_single_hop_neigh) batman_ogm_packet->flags |= DIRECTLINK; else batman_ogm_packet->flags &= ~DIRECTLINK; @@ -918,7 +919,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, struct neigh_node *orig_neigh_router = NULL; int has_directlink_flag; int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; - int is_broadcast = 0, is_bidirectional, is_single_hop_neigh; + int is_broadcast = 0, is_bidirectional; + bool is_single_hop_neigh = false; int is_duplicate; uint32_t if_incoming_seqno; @@ -942,8 +944,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); - is_single_hop_neigh = (compare_eth(ethhdr->h_source, - batman_ogm_packet->orig) ? 
1 : 0); + if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig)) + is_single_hop_neigh = true; bat_dbg(DBG_BATMAN, bat_priv, "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", @@ -1114,7 +1116,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, /* mark direct link on incoming interface */ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - 1, if_incoming); + is_single_hop_neigh, if_incoming); bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); @@ -1137,7 +1139,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast originator packet\n"); bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - 0, if_incoming); + is_single_hop_neigh, if_incoming); out_neigh: if ((orig_neigh_node) && (!is_single_hop_neigh)) -- cgit v1.2.2 From ffa995e036bef734ea40cbbccda574d1df3a8a58 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Thu, 1 Mar 2012 15:35:17 +0800 Subject: batman-adv: introduce packet type handler array for incoming packets The packet handler array replaces the growing switch statement, thus dealing with incoming packets in a more efficient way. It also adds to possibility to register packet handlers on the fly. Signed-off-by: Marek Lindner Acked-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/hard-interface.c | 113 ------------------------------------- net/batman-adv/main.c | 121 ++++++++++++++++++++++++++++++++++++++++ net/batman-adv/main.h | 6 ++ 3 files changed, 127 insertions(+), 113 deletions(-) (limited to 'net') diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 47c79d724ba3..95f869c7ca04 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -32,12 +32,6 @@ #include - -static int batman_skb_recv(struct sk_buff *skb, - struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev); - void hardif_free_rcu(struct rcu_head *rcu) { struct hard_iface *hard_iface; @@ -551,113 +545,6 @@ out: return NOTIFY_DONE; } -/* incoming packets with the batman ethertype received on any active hard - * interface */ -static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev) -{ - struct bat_priv *bat_priv; - struct batman_ogm_packet *batman_ogm_packet; - struct hard_iface *hard_iface; - int ret; - - hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); - skb = skb_share_check(skb, GFP_ATOMIC); - - /* skb was released by skb_share_check() */ - if (!skb) - goto err_out; - - /* packet should hold at least type and version */ - if (unlikely(!pskb_may_pull(skb, 2))) - goto err_free; - - /* expect a valid ethernet header here. 
*/ - if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) - goto err_free; - - if (!hard_iface->soft_iface) - goto err_free; - - bat_priv = netdev_priv(hard_iface->soft_iface); - - if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) - goto err_free; - - /* discard frames on not active interfaces */ - if (hard_iface->if_status != IF_ACTIVE) - goto err_free; - - batman_ogm_packet = (struct batman_ogm_packet *)skb->data; - - if (batman_ogm_packet->header.version != COMPAT_VERSION) { - bat_dbg(DBG_BATMAN, bat_priv, - "Drop packet: incompatible batman version (%i)\n", - batman_ogm_packet->header.version); - goto err_free; - } - - /* all receive handlers return whether they received or reused - * the supplied skb. if not, we have to free the skb. */ - - switch (batman_ogm_packet->header.packet_type) { - /* batman originator packet */ - case BAT_IV_OGM: - ret = recv_bat_ogm_packet(skb, hard_iface); - break; - - /* batman icmp packet */ - case BAT_ICMP: - ret = recv_icmp_packet(skb, hard_iface); - break; - - /* unicast packet */ - case BAT_UNICAST: - ret = recv_unicast_packet(skb, hard_iface); - break; - - /* fragmented unicast packet */ - case BAT_UNICAST_FRAG: - ret = recv_ucast_frag_packet(skb, hard_iface); - break; - - /* broadcast packet */ - case BAT_BCAST: - ret = recv_bcast_packet(skb, hard_iface); - break; - - /* vis packet */ - case BAT_VIS: - ret = recv_vis_packet(skb, hard_iface); - break; - /* Translation table query (request or response) */ - case BAT_TT_QUERY: - ret = recv_tt_query(skb, hard_iface); - break; - /* Roaming advertisement */ - case BAT_ROAM_ADV: - ret = recv_roam_adv(skb, hard_iface); - break; - default: - ret = NET_RX_DROP; - } - - if (ret == NET_RX_DROP) - kfree_skb(skb); - - /* return NET_RX_SUCCESS in any case as we - * most probably dropped the packet for - * routing-logical reasons. 
*/ - - return NET_RX_SUCCESS; - -err_free: - kfree_skb(skb); -err_out: - return NET_RX_DROP; -} - /* This function returns true if the interface represented by ifindex is a * 802.11 wireless device */ bool is_wifi_iface(int ifindex) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 791327219531..d19b93575d56 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -39,6 +39,7 @@ /* List manipulations on hardif_list have to be rtnl_lock()'ed, * list traversals just rcu-locked */ struct list_head hardif_list; +static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *); char bat_routing_algo[20] = "BATMAN IV"; static struct hlist_head bat_algo_list; @@ -46,11 +47,15 @@ unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct workqueue_struct *bat_event_workqueue; +static void recv_handler_init(void); + static int __init batman_init(void) { INIT_LIST_HEAD(&hardif_list); INIT_HLIST_HEAD(&bat_algo_list); + recv_handler_init(); + bat_iv_init(); /* the name should not be longer than 10 chars - see @@ -179,6 +184,122 @@ int is_my_mac(const uint8_t *addr) return 0; } +static int recv_unhandled_packet(struct sk_buff *skb, + struct hard_iface *recv_if) +{ + return NET_RX_DROP; +} + +/* incoming packets with the batman ethertype received on any active hard + * interface + */ +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) +{ + struct bat_priv *bat_priv; + struct batman_ogm_packet *batman_ogm_packet; + struct hard_iface *hard_iface; + uint8_t idx; + int ret; + + hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); + skb = skb_share_check(skb, GFP_ATOMIC); + + /* skb was released by skb_share_check() */ + if (!skb) + goto err_out; + + /* packet should hold at least type and version */ + if (unlikely(!pskb_may_pull(skb, 2))) + goto err_free; + + /* expect a valid ethernet header here. */ + if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) + goto err_free; + + if (!hard_iface->soft_iface) + goto err_free; + + bat_priv = netdev_priv(hard_iface->soft_iface); + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto err_free; + + /* discard frames on not active interfaces */ + if (hard_iface->if_status != IF_ACTIVE) + goto err_free; + + batman_ogm_packet = (struct batman_ogm_packet *)skb->data; + + if (batman_ogm_packet->header.version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_ogm_packet->header.version); + goto err_free; + } + + /* all receive handlers return whether they received or reused + * the supplied skb. if not, we have to free the skb. + */ + idx = batman_ogm_packet->header.packet_type; + ret = (*recv_packet_handler[idx])(skb, hard_iface); + + if (ret == NET_RX_DROP) + kfree_skb(skb); + + /* return NET_RX_SUCCESS in any case as we + * most probably dropped the packet for + * routing-logical reasons. 
+ */ + return NET_RX_SUCCESS; + +err_free: + kfree_skb(skb); +err_out: + return NET_RX_DROP; +} + +static void recv_handler_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++) + recv_packet_handler[i] = recv_unhandled_packet; + + /* batman originator packet */ + recv_packet_handler[BAT_IV_OGM] = recv_bat_ogm_packet; + /* batman icmp packet */ + recv_packet_handler[BAT_ICMP] = recv_icmp_packet; + /* unicast packet */ + recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; + /* fragmented unicast packet */ + recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet; + /* broadcast packet */ + recv_packet_handler[BAT_BCAST] = recv_bcast_packet; + /* vis packet */ + recv_packet_handler[BAT_VIS] = recv_vis_packet; + /* Translation table query (request or response) */ + recv_packet_handler[BAT_TT_QUERY] = recv_tt_query; + /* Roaming advertisement */ + recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv; +} + +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)) +{ + if (recv_packet_handler[packet_type] != &recv_unhandled_packet) + return -EBUSY; + + recv_packet_handler[packet_type] = recv_handler; + return 0; +} + +void recv_handler_unregister(uint8_t packet_type) +{ + recv_packet_handler[packet_type] = recv_unhandled_packet; +} + static struct bat_algo_ops *bat_algo_get(char *name) { struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index d9832acf558d..fd83acd48b28 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -155,6 +155,12 @@ void mesh_free(struct net_device *soft_iface); void inc_module_count(void); void dec_module_count(void); int is_my_mac(const uint8_t *addr); +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev); +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)); +void recv_handler_unregister(uint8_t packet_type); int bat_algo_register(struct bat_algo_ops *bat_algo_ops); int bat_algo_select(struct bat_priv *bat_priv, char *name); int bat_algo_seq_print_text(struct seq_file *seq, void *offset); -- cgit v1.2.2 From c3e29312c8c27d403f91522711ce9a290c7571c9 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sun, 4 Mar 2012 16:56:25 +0800 Subject: batman-adv: register batman ogm receive function during protocol init The B.A.T.M.A.N. IV OGM receive function still was hard-coded although it is a routing protocol specific function. This patch takes advantage of the dynamic packet handler registration to remove the hard-coded function calls. 
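The two handler-array patches above reduce to a classic dispatch-table idiom: a 256-entry array of receive callbacks, a default handler that drops unknown types, and register/unregister helpers so a protocol can claim a packet type at init time and give it back if later setup fails. The stand-alone sketch below mirrors that pattern in outline only; the handler signature, the table size and the EXAMPLE_OGM_TYPE constant are simplified stand-ins rather than the kernel's definitions.

```c
/* Minimal user-space sketch of the dynamic packet-handler table.
 * Types and constants are illustrative stand-ins, not kernel code. */
#include <stdio.h>
#include <errno.h>

struct pkt;                                   /* opaque packet, detail omitted */
typedef int (*recv_fn)(struct pkt *p);

#define NUM_PACKET_TYPES 256
#define EXAMPLE_OGM_TYPE 0x01                 /* hypothetical packet type value */

static int recv_unhandled(struct pkt *p) { (void)p; return -1; /* drop */ }
static int recv_example_ogm(struct pkt *p) { (void)p; return 0; /* consumed */ }

static recv_fn handlers[NUM_PACKET_TYPES];

static void handlers_init(void)
{
	for (int i = 0; i < NUM_PACKET_TYPES; i++)
		handlers[i] = recv_unhandled;  /* default: unknown types are dropped */
}

static int handler_register(unsigned char type, recv_fn fn)
{
	if (handlers[type] != recv_unhandled)
		return -EBUSY;                 /* slot already claimed */
	handlers[type] = fn;
	return 0;
}

static void handler_unregister(unsigned char type)
{
	handlers[type] = recv_unhandled;
}

int main(void)
{
	handlers_init();

	/* protocol init: claim the packet type first, then finish the rest
	 * of the setup; on failure, unregister again (the same ordering
	 * bat_iv_init() adopts in the diff that follows). */
	if (handler_register(EXAMPLE_OGM_TYPE, recv_example_ogm) < 0)
		return 1;

	printf("type %#x handled\n", EXAMPLE_OGM_TYPE);
	handler_unregister(EXAMPLE_OGM_TYPE);
	return 0;
}
```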
Signed-off-by: Marek Lindner Acked-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 31 +++++++++++++++++++++++++++---- net/batman-adv/main.c | 5 +---- net/batman-adv/routing.c | 22 ++++++++++------------ net/batman-adv/routing.h | 4 +++- net/batman-adv/types.h | 3 --- 5 files changed, 41 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index cd8f473c1bd0..e0aaf8c87d65 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1155,13 +1155,18 @@ out: orig_node_free_ref(orig_node); } -static void bat_iv_ogm_receive(struct hard_iface *if_incoming, - struct sk_buff *skb) +static int bat_iv_ogm_receive(struct sk_buff *skb, + struct hard_iface *if_incoming) { struct batman_ogm_packet *batman_ogm_packet; struct ethhdr *ethhdr; int buff_pos = 0, packet_len; unsigned char *tt_buff, *packet_buff; + bool ret; + + ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); + if (!ret) + return NET_RX_DROP; packet_len = skb_headlen(skb); ethhdr = (struct ethhdr *)skb_mac_header(skb); @@ -1187,6 +1192,9 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming, (packet_buff + buff_pos); } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, batman_ogm_packet->tt_num_changes)); + + kfree_skb(skb); + return NET_RX_SUCCESS; } static struct bat_algo_ops batman_iv __read_mostly = { @@ -1197,10 +1205,25 @@ static struct bat_algo_ops batman_iv __read_mostly = { .bat_ogm_update_mac = bat_iv_ogm_update_mac, .bat_ogm_schedule = bat_iv_ogm_schedule, .bat_ogm_emit = bat_iv_ogm_emit, - .bat_ogm_receive = bat_iv_ogm_receive, }; int __init bat_iv_init(void) { - return bat_algo_register(&batman_iv); + int ret; + + /* batman originator packet */ + ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); + if (ret < 0) + goto out; + + ret = bat_algo_register(&batman_iv); + if (ret < 0) + goto handler_unregister; + + goto out; + +handler_unregister: + recv_handler_unregister(BAT_IV_OGM); +out: + return ret; } diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index d19b93575d56..f80c4474127c 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -266,8 +266,6 @@ static void recv_handler_init(void) for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++) recv_packet_handler[i] = recv_unhandled_packet; - /* batman originator packet */ - recv_packet_handler[BAT_IV_OGM] = recv_bat_ogm_packet; /* batman icmp packet */ recv_packet_handler[BAT_ICMP] = recv_icmp_packet; /* unicast packet */ @@ -334,8 +332,7 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops) !bat_algo_ops->bat_primary_iface_set || !bat_algo_ops->bat_ogm_update_mac || !bat_algo_ops->bat_ogm_schedule || - !bat_algo_ops->bat_ogm_emit || - !bat_algo_ops->bat_ogm_receive) { + !bat_algo_ops->bat_ogm_emit) { pr_info("Routing algo '%s' does not implement required ops\n", bat_algo_ops->name); goto out; diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index ff560863bc74..7ed9d8f92916 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -248,37 +248,35 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, return 0; } -int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len) { - struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ - if 
(unlikely(!pskb_may_pull(skb, BATMAN_OGM_HLEN))) - return NET_RX_DROP; + if (unlikely(!pskb_may_pull(skb, header_len))) + return false; ethhdr = (struct ethhdr *)skb_mac_header(skb); /* packet with broadcast indication but unicast recipient */ if (!is_broadcast_ether_addr(ethhdr->h_dest)) - return NET_RX_DROP; + return false; /* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) - return NET_RX_DROP; + return false; /* create a copy of the skb, if needed, to modify it. */ if (skb_cow(skb, 0) < 0) - return NET_RX_DROP; + return false; /* keep skb linear */ if (skb_linearize(skb) < 0) - return NET_RX_DROP; - - bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb); + return false; - kfree_skb(skb); - return NET_RX_SUCCESS; + return true; } static int recv_my_icmp_packet(struct bat_priv *bat_priv, diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 3d729cb17113..d6bbbebb6567 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -23,6 +23,9 @@ #define _NET_BATMAN_ADV_ROUTING_H_ void slide_own_bcast_window(struct hard_iface *hard_iface); +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len); void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node); int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); @@ -30,7 +33,6 @@ int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if); int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); struct neigh_node *find_router(struct bat_priv *bat_priv, diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 2f4848b776a7..50e1895a25c3 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -390,9 +390,6 @@ struct bat_algo_ops { int tt_num_changes); /* send scheduled OGM */ void (*bat_ogm_emit)(struct forw_packet *forw_packet); - /* receive incoming OGM */ - void (*bat_ogm_receive)(struct hard_iface *if_incoming, - struct sk_buff *skb); }; #endif /* _NET_BATMAN_ADV_TYPES_H_ */ -- cgit v1.2.2 From d7b2a97e03ad40c7986d3c2707b0b5ba79a63884 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Thu, 1 Mar 2012 15:35:19 +0800 Subject: batman-adv: rename last_valid to last_seen Signed-off-by: Marek Lindner Acked-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 8 ++++---- net/batman-adv/originator.c | 16 ++++++++-------- net/batman-adv/types.h | 8 ++++---- 3 files changed, 16 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index e0aaf8c87d65..8652a7536b15 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -651,7 +651,7 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, rcu_read_unlock(); orig_node->flags = batman_ogm_packet->flags; - neigh_node->last_valid = jiffies; + neigh_node->last_seen = jiffies; spin_lock_bh(&neigh_node->tq_lock); ring_buffer_set(neigh_node->tq_recv, @@ -772,11 +772,11 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, if (!neigh_node) goto out; - /* if orig_node is direct neighbor update neigh_node 
last_valid */ + /* if orig_node is direct neighbor update neigh_node last_seen */ if (orig_node == orig_neigh_node) - neigh_node->last_valid = jiffies; + neigh_node->last_seen = jiffies; - orig_node->last_valid = jiffies; + orig_node->last_seen = jiffies; /* find packet count of corresponding one hop neighbor */ spin_lock_bh(&orig_node->ogm_cnt_lock); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index ce4969885894..21c1f83a2af3 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -283,7 +283,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, hlist_for_each_entry_safe(neigh_node, node, node_tmp, &orig_node->neigh_list, list) { - if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) || + if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) || (neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { @@ -300,9 +300,9 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, neigh_node->if_incoming->net_dev->name); else bat_dbg(DBG_BATMAN, bat_priv, - "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n", + "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %lu\n", orig_node->orig, neigh_node->addr, - (neigh_node->last_valid / HZ)); + (neigh_node->last_seen / HZ)); neigh_purged = true; @@ -325,10 +325,10 @@ static bool purge_orig_node(struct bat_priv *bat_priv, { struct neigh_node *best_neigh_node; - if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) { + if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) { bat_dbg(DBG_BATMAN, bat_priv, - "Originator timeout: originator %pM, last_valid %lu\n", - orig_node->orig, (orig_node->last_valid / HZ)); + "Originator timeout: originator %pM, last_seen %lu\n", + orig_node->orig, (orig_node->last_seen / HZ)); return true; } else { if (purge_orig_neighbors(bat_priv, orig_node, @@ -446,9 +446,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) goto next; last_seen_secs = jiffies_to_msecs(jiffies - - orig_node->last_valid) / 1000; + orig_node->last_seen) / 1000; last_seen_msecs = jiffies_to_msecs(jiffies - - orig_node->last_valid) % 1000; + orig_node->last_seen) % 1000; seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", orig_node->orig, last_seen_secs, diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 50e1895a25c3..9fa8b73387ec 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -52,7 +52,7 @@ struct hard_iface { /** * orig_node - structure for orig_list maintaining nodes of mesh * @primary_addr: hosts primary interface address - * @last_valid: when last packet from this node was received + * @last_seen: when last packet from this node was received * @bcast_seqno_reset: time when the broadcast seqno window was reset * @batman_seqno_reset: time when the batman seqno window was reset * @gw_flags: flags related to gateway class @@ -70,7 +70,7 @@ struct orig_node { struct neigh_node __rcu *router; /* rcu protected pointer */ unsigned long *bcast_own; uint8_t *bcast_own_sum; - unsigned long last_valid; + unsigned long last_seen; unsigned long bcast_seqno_reset; unsigned long batman_seqno_reset; uint8_t gw_flags; @@ -120,7 +120,7 @@ struct gw_node { /** * neigh_node - * @last_valid: when last packet via this neighbor was received + * @last_seen: when last packet via this neighbor was received */ struct neigh_node { struct hlist_node list; @@ -131,7 +131,7 @@ struct neigh_node { uint8_t 
tq_avg; uint8_t last_ttl; struct list_head bonding_list; - unsigned long last_valid; + unsigned long last_seen; DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE); atomic_t refcount; struct rcu_head rcu; -- cgit v1.2.2 From 0b0094e000840115b5baece2293c5fb1aab4fded Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Thu, 1 Mar 2012 15:35:20 +0800 Subject: batman-adv: replace HZ calculations with jiffies_to_msecs() Signed-off-by: Marek Lindner Acked-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_debugfs.c | 4 ++-- net/batman-adv/originator.c | 15 ++++++++++----- net/batman-adv/send.c | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c index 916380c73ab7..3b588f86d770 100644 --- a/net/batman-adv/bat_debugfs.c +++ b/net/batman-adv/bat_debugfs.c @@ -83,8 +83,8 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) va_start(args, fmt); vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); - fdebug_log(bat_priv->debug_log, "[%10lu] %s", - (jiffies / HZ), tmp_log_buf); + fdebug_log(bat_priv->debug_log, "[%10u] %s", + jiffies_to_msecs(jiffies), tmp_log_buf); va_end(args); return 0; diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 21c1f83a2af3..962636b039b2 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -35,7 +35,8 @@ static void purge_orig(struct work_struct *work); static void start_purge_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); - queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); + queue_delayed_work(bat_event_workqueue, + &bat_priv->orig_work, msecs_to_jiffies(1000)); } /* returns 1 if they are the same originator */ @@ -274,6 +275,7 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, struct hlist_node *node, *node_tmp; struct neigh_node *neigh_node; bool neigh_purged = false; + unsigned long last_seen; *best_neigh_node = NULL; @@ -288,6 +290,8 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { + last_seen = neigh_node->last_seen; + if ((neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == @@ -300,9 +304,9 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, neigh_node->if_incoming->net_dev->name); else bat_dbg(DBG_BATMAN, bat_priv, - "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %lu\n", + "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", orig_node->orig, neigh_node->addr, - (neigh_node->last_seen / HZ)); + jiffies_to_msecs(last_seen)); neigh_purged = true; @@ -327,8 +331,9 @@ static bool purge_orig_node(struct bat_priv *bat_priv, if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) { bat_dbg(DBG_BATMAN, bat_priv, - "Originator timeout: originator %pM, last_seen %lu\n", - orig_node->orig, (orig_node->last_seen / HZ)); + "Originator timeout: originator %pM, last_seen %u\n", + orig_node->orig, + jiffies_to_msecs(orig_node->last_seen)); return true; } else { if (purge_orig_neighbors(bat_priv, orig_node, diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 7c66b6121fa6..8e74d9763be3 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -292,7 +292,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work) /* if we still have some more bcasts to send */ if 
(forw_packet->num_packets < 3) { _add_bcast_packet_to_list(bat_priv, forw_packet, - ((5 * HZ) / 1000)); + msecs_to_jiffies(5)); return; } -- cgit v1.2.2 From 7ae8b2852f946c71fdbd910156baa605a4ae3cee Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Thu, 1 Mar 2012 15:35:21 +0800 Subject: batman-adv: split neigh_new function into generic and batman iv specific parts Signed-off-by: Marek Lindner Acked-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 40 ++++++++++++++++++++++++++++++++++------ net/batman-adv/originator.c | 28 +++++++++++----------------- net/batman-adv/originator.h | 7 +++---- 3 files changed, 48 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 8652a7536b15..ae0a08c391ea 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -30,6 +30,32 @@ #include "send.h" #include "bat_algo.h" +static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + struct orig_node *orig_node, + struct orig_node *orig_neigh, + uint32_t seqno) +{ + struct neigh_node *neigh_node; + + neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno); + if (!neigh_node) + goto out; + + INIT_LIST_HEAD(&neigh_node->bonding_list); + spin_lock_init(&neigh_node->tq_lock); + + neigh_node->orig_node = orig_neigh; + neigh_node->if_incoming = hard_iface; + + spin_lock_bh(&orig_node->neigh_list_lock); + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + spin_unlock_bh(&orig_node->neigh_list_lock); + +out: + return neigh_node; +} + static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; @@ -638,8 +664,9 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if (!orig_tmp) goto unlock; - neigh_node = create_neighbor(orig_node, orig_tmp, - ethhdr->h_source, if_incoming); + neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, + orig_node, orig_tmp, + batman_ogm_packet->seqno); orig_node_free_ref(orig_tmp); if (!neigh_node) @@ -764,10 +791,11 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, rcu_read_unlock(); if (!neigh_node) - neigh_node = create_neighbor(orig_neigh_node, - orig_neigh_node, - orig_neigh_node->orig, - if_incoming); + neigh_node = bat_iv_ogm_neigh_new(if_incoming, + orig_neigh_node->orig, + orig_neigh_node, + orig_neigh_node, + batman_ogm_packet->seqno); if (!neigh_node) goto out; diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 962636b039b2..f4b62011ca3f 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -85,35 +85,29 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node) return router; } -struct neigh_node *create_neighbor(struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const uint8_t *neigh, - struct hard_iface *if_incoming) +struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + uint32_t seqno) { - struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct neigh_node *neigh_node; - bat_dbg(DBG_BATMAN, bat_priv, - "Creating new last-hop neighbor of originator\n"); - neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); if (!neigh_node) - return NULL; + goto out; INIT_HLIST_NODE(&neigh_node->list); - INIT_LIST_HEAD(&neigh_node->bonding_list); - spin_lock_init(&neigh_node->tq_lock); - 
memcpy(neigh_node->addr, neigh, ETH_ALEN); - neigh_node->orig_node = orig_neigh_node; - neigh_node->if_incoming = if_incoming; + memcpy(neigh_node->addr, neigh_addr, ETH_ALEN); /* extra reference for return */ atomic_set(&neigh_node->refcount, 2); - spin_lock_bh(&orig_node->neigh_list_lock); - hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); - spin_unlock_bh(&orig_node->neigh_list_lock); + bat_dbg(DBG_BATMAN, bat_priv, + "Creating new neighbor %pM, initial seqno %d\n", + neigh_addr, seqno); + +out: return neigh_node; } diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 3fe2eda85652..f74d0d693359 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -29,10 +29,9 @@ void originator_free(struct bat_priv *bat_priv); void purge_orig_ref(struct bat_priv *bat_priv); void orig_node_free_ref(struct orig_node *orig_node); struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); -struct neigh_node *create_neighbor(struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const uint8_t *neigh, - struct hard_iface *if_incoming); +struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + uint32_t seqno); void neigh_node_free_ref(struct neigh_node *neigh_node); struct neigh_node *orig_node_get_router(struct orig_node *orig_node); int orig_seq_print_text(struct seq_file *seq, void *offset); -- cgit v1.2.2 From edbf12ba56c4578f62a1357e005bcf11c2c38a16 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sun, 11 Mar 2012 06:17:49 +0800 Subject: batman-adv: ignore protocol packets if the interface did not enable this protocol Reported-by: Simon Wunderlich Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index ae0a08c391ea..37e368d05937 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1186,6 +1186,7 @@ out: static int bat_iv_ogm_receive(struct sk_buff *skb, struct hard_iface *if_incoming) { + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batman_ogm_packet *batman_ogm_packet; struct ethhdr *ethhdr; int buff_pos = 0, packet_len; @@ -1196,6 +1197,12 @@ static int bat_iv_ogm_receive(struct sk_buff *skb, if (!ret) return NET_RX_DROP; + /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface + * that does not have B.A.T.M.A.N. IV enabled ? 
+ */ + if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit) + return NET_RX_DROP; + packet_len = skb_headlen(skb); ethhdr = (struct ethhdr *)skb_mac_header(skb); packet_buff = skb->data; -- cgit v1.2.2 From c32293983d836ed6cbc5e8b58cb8cd10b26a774e Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sun, 11 Mar 2012 06:17:50 +0800 Subject: batman-adv: refactoring API: find generalized name for bat_ogm_update_mac callback Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 18 +++++++++--------- net/batman-adv/hard-interface.c | 4 ++-- net/batman-adv/main.c | 2 +- net/batman-adv/types.h | 6 ++++-- 4 files changed, 16 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 37e368d05937..9074e743ad61 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -93,24 +93,24 @@ static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) hard_iface->packet_buff = NULL; } -static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) +static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; - batman_ogm_packet->header.ttl = TTL; + memcpy(batman_ogm_packet->orig, + hard_iface->net_dev->dev_addr, ETH_ALEN); + memcpy(batman_ogm_packet->prev_sender, + hard_iface->net_dev->dev_addr, ETH_ALEN); } -static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface) +static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) { struct batman_ogm_packet *batman_ogm_packet; batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; - memcpy(batman_ogm_packet->orig, - hard_iface->net_dev->dev_addr, ETH_ALEN); - memcpy(batman_ogm_packet->prev_sender, - hard_iface->net_dev->dev_addr, ETH_ALEN); + batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; + batman_ogm_packet->header.ttl = TTL; } /* when do we schedule our own ogm to be sent */ @@ -1236,8 +1236,8 @@ static struct bat_algo_ops batman_iv __read_mostly = { .name = "BATMAN IV", .bat_iface_enable = bat_iv_ogm_iface_enable, .bat_iface_disable = bat_iv_ogm_iface_disable, + .bat_iface_update_mac = bat_iv_ogm_iface_update_mac, .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, - .bat_ogm_update_mac = bat_iv_ogm_update_mac, .bat_ogm_schedule = bat_iv_ogm_schedule, .bat_ogm_emit = bat_iv_ogm_emit, }; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 95f869c7ca04..0b84bb1b62c4 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -228,7 +228,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface) bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); hard_iface->if_status = IF_TO_BE_ACTIVATED; /** @@ -524,7 +524,7 @@ static int hard_if_event(struct notifier_block *this, check_known_mac_addr(hard_iface->net_dev); bat_priv = netdev_priv(hard_iface->soft_iface); - bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); primary_if = primary_if_get_selected(bat_priv); if (!primary_if) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index f80c4474127c..083a2993efe4 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -329,8 +329,8 @@ int 
bat_algo_register(struct bat_algo_ops *bat_algo_ops) /* all algorithms must implement all ops (for now) */ if (!bat_algo_ops->bat_iface_enable || !bat_algo_ops->bat_iface_disable || + !bat_algo_ops->bat_iface_update_mac || !bat_algo_ops->bat_primary_iface_set || - !bat_algo_ops->bat_ogm_update_mac || !bat_algo_ops->bat_ogm_schedule || !bat_algo_ops->bat_ogm_emit) { pr_info("Routing algo '%s' does not implement required ops\n", diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 9fa8b73387ec..66a3750aa9e7 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -381,10 +381,12 @@ struct bat_algo_ops { int (*bat_iface_enable)(struct hard_iface *hard_iface); /* de-init routing info when hard-interface is disabled */ void (*bat_iface_disable)(struct hard_iface *hard_iface); + /* (re-)init mac addresses of the protocol information + * belonging to this hard-interface + */ + void (*bat_iface_update_mac)(struct hard_iface *hard_iface); /* called when primary interface is selected / changed */ void (*bat_primary_iface_set)(struct hard_iface *hard_iface); - /* init mac addresses of the OGM belonging to this hard-interface */ - void (*bat_ogm_update_mac)(struct hard_iface *hard_iface); /* prepare a new outgoing OGM for the send queue */ void (*bat_ogm_schedule)(struct hard_iface *hard_iface, int tt_num_changes); -- cgit v1.2.2 From f245c38ba74e1433294b5f41c9d6eb3e97b88e5b Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sun, 11 Mar 2012 06:17:51 +0800 Subject: batman-adv: rename sysfs macros to reflect the soft-interface dependency Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_sysfs.c | 57 +++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 2c816883ca13..913299d03f4a 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -63,7 +63,7 @@ struct bat_attribute bat_attr_##_name = { \ .store = _store, \ }; -#define BAT_ATTR_STORE_BOOL(_name, _post_func) \ +#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ char *buff, size_t count) \ { \ @@ -73,9 +73,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ &bat_priv->_name, net_dev); \ } -#define BAT_ATTR_SHOW_BOOL(_name) \ -ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ - char *buff) \ +#define BAT_ATTR_SIF_SHOW_BOOL(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ { \ struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ return sprintf(buff, "%s\n", \ @@ -83,16 +83,17 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ "disabled" : "enabled"); \ } \ -/* Use this, if you are going to turn a [name] in bat_priv on or off */ -#define BAT_ATTR_BOOL(_name, _mode, _post_func) \ - static BAT_ATTR_STORE_BOOL(_name, _post_func) \ - static BAT_ATTR_SHOW_BOOL(_name) \ +/* Use this, if you are going to turn a [name] in the soft-interface + * (bat_priv) on or off */ +#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \ + static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ + static BAT_ATTR_SIF_SHOW_BOOL(_name) \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) -#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ +#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ - 
char *buff, size_t count) \ + char *buff, size_t count) \ { \ struct net_device *net_dev = kobj_to_netdev(kobj); \ struct bat_priv *bat_priv = netdev_priv(net_dev); \ @@ -100,19 +101,19 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ attr, &bat_priv->_name, net_dev); \ } -#define BAT_ATTR_SHOW_UINT(_name) \ -ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ - char *buff) \ +#define BAT_ATTR_SIF_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ { \ struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \ } \ -/* Use this, if you are going to set [name] in bat_priv to unsigned integer - * values only */ -#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \ - static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ - static BAT_ATTR_SHOW_UINT(_name) \ +/* Use this, if you are going to set [name] in the soft-interface + * (bat_priv) to an unsigned integer value */ +#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_SIF_SHOW_UINT(_name) \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) @@ -384,24 +385,24 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, return gw_bandwidth_set(net_dev, buff, count); } -BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); -BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); #ifdef CONFIG_BATMAN_ADV_BLA -BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); #endif -BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); -BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); +BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL); static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); -BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); -BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); -BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, - post_gw_deselect); +BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); +BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); +BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, + post_gw_deselect); static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, store_gw_bwidth); #ifdef CONFIG_BATMAN_ADV_DEBUG -BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); +BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); #endif static struct bat_attribute *mesh_attrs[] = { -- cgit v1.2.2 From 9d853f624691776232a675768bcdb239d7b837ea Mon Sep 17 00:00:00 2001 From: Linus Luessing Date: Sun, 11 Mar 2012 06:17:52 +0800 Subject: batman-adv: Adding hard_iface specific sysfs wrapper macros for UINT This allows us to easily add a sysfs parameter for an unsigned int later, which is not for a batman mesh interface (e.g. bat0), but for a common interface instead. It allows reading and writing an atomic_t in hard_iface (instead of bat_priv compared to the mesh variant). 
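Stripped of the sysfs plumbing, what each of these UINT wrappers generates is a bounded store with an optional post-change hook plus a matching show. The user-space sketch below restates just that pattern under simplified assumptions; the attribute name example_value, the bounds and the post_change() hook are hypothetical stand-ins and do not come from the patch.

```c
/* User-space restatement of the bounded store/show pattern behind the
 * *_UINT sysfs wrappers.  All names and bounds here are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

static atomic_uint example_value;             /* stands in for the wrapped atomic_t */

static void post_change(void)                 /* optional _post_func hook */
{
	printf("value changed, rerun dependent setup\n");
}

static int store_uint(const char *buff, unsigned long min, unsigned long max,
		      void (*post_func)(void), atomic_uint *attr)
{
	char *end;
	unsigned long val = strtoul(buff, &end, 10);

	if (end == buff || val < min || val > max)
		return -1;                    /* reject malformed or out-of-range input */

	atomic_store(attr, (unsigned int)val);
	if (post_func)
		post_func();
	return 0;
}

static void show_uint(char *buff, size_t len, atomic_uint *attr)
{
	snprintf(buff, len, "%u\n", atomic_load(attr));
}

int main(void)
{
	char out[16];

	atomic_store(&example_value, 10);
	store_uint("42", 1, 255, post_change, &example_value);
	show_uint(out, sizeof(out), &example_value);
	fputs(out, stdout);
	return 0;
}
```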
Developed by Linus during a 6 months trainee study period in Ascom (Switzerland) AG. Signed-off-by: Linus Luessing Signed-off-by: Marek Lindner --- net/batman-adv/bat_sysfs.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'net') diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 913299d03f4a..5bc7b66d32dc 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -117,6 +117,49 @@ ssize_t show_##_name(struct kobject *kobj, \ static BAT_ATTR(_name, _mode, show_##_name, store_##_name) +#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ +ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff, size_t count) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ + ssize_t length; \ + \ + if (!hard_iface) \ + return 0; \ + \ + length = __store_uint_attr(buff, count, _min, _max, _post_func, \ + attr, &hard_iface->_name, net_dev); \ + \ + hardif_free_ref(hard_iface); \ + return length; \ +} + +#define BAT_ATTR_HIF_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, \ + struct attribute *attr, char *buff) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ + ssize_t length; \ + \ + if (!hard_iface) \ + return 0; \ + \ + length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\ + \ + hardif_free_ref(hard_iface); \ + return length; \ +} + +/* Use this, if you are going to set [name] in hard_iface to an + * unsigned integer value*/ +#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_HIF_SHOW_UINT(_name) \ + static BAT_ATTR(_name, _mode, show_##_name, store_##_name) + + static int store_bool_attr(char *buff, size_t count, struct net_device *net_dev, const char *attr_name, atomic_t *attr) -- cgit v1.2.2 From 13b2541b11b1df346805f0869c843635ceb0229f Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sun, 11 Mar 2012 06:17:53 +0800 Subject: batman-adv: avoid temporary routing loops by being strict on forwarded OGMs batman-adv would forward OGMs from non-besthops while replacing the the TQ and TTL values with the values from the best hop. In certain corner cases this leads to a temporary routing loop. This patch changes this behavior: Only packets from best next hops are forwarded - TQ and TTL values won't be replaced anymore. However, the protocol needs to rebroadcast OGMs from single hop neighbors regardless of whether or not they are the best hop. To handle this case a new flag is introduced to alert neighboring nodes about the forwarded OGM that is not from my best next hop. It is to be discarded by all nodes except for the one originating the OGM. 
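The forwarding rule described above condenses to a three-way decision. The sketch below is an illustration of that rule only: the two boolean inputs mirror the is_from_best_next_hop and is_single_hop_neigh variables in the diff that follows, and everything else is invented for the example.

```c
/* Stand-alone restatement of the stricter OGM forwarding rule; the real
 * code operates on struct batman_ogm_packet (see the diff below). */
#include <stdio.h>
#include <stdbool.h>

enum fwd_action {
	FWD_FORWARD,          /* rebroadcast as usual */
	FWD_FORWARD_MARKED,   /* rebroadcast with NOT_BEST_NEXT_HOP set */
	FWD_DROP,
};

static enum fwd_action ogm_forward_decision(bool is_from_best_next_hop,
					    bool is_single_hop_neigh)
{
	if (is_from_best_next_hop)
		return FWD_FORWARD;

	/* Not from our best next hop: keep it alive only so neighbors can
	 * still measure link quality, flagged so that every node except
	 * the OGM's originator discards it. */
	if (is_single_hop_neigh)
		return FWD_FORWARD_MARKED;

	return FWD_DROP;
}

int main(void)
{
	printf("%d %d %d\n",
	       ogm_forward_decision(true, false),
	       ogm_forward_decision(false, true),
	       ogm_forward_decision(false, false));
	return 0;
}
```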
Signed-off-by: Marek Lindner Acked-by: Daniele Furlan Tested-by: Simon Wunderlich --- net/batman-adv/bat_iv_ogm.c | 60 +++++++++++++++++++++++---------------------- net/batman-adv/packet.h | 1 + 2 files changed, 32 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 9074e743ad61..bafb47370b4c 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -507,11 +507,10 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, const struct ethhdr *ethhdr, struct batman_ogm_packet *batman_ogm_packet, bool is_single_hop_neigh, + bool is_from_best_next_hop, struct hard_iface *if_incoming) { struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); - struct neigh_node *router; - uint8_t in_tq, in_ttl, tq_avg = 0; uint8_t tt_num_changes; if (batman_ogm_packet->header.ttl <= 1) { @@ -519,41 +518,30 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, return; } - router = orig_node_get_router(orig_node); + if (!is_from_best_next_hop) { + /* Mark the forwarded packet when it is not coming from our + * best next hop. We still need to forward the packet for our + * neighbor link quality detection to work in case the packet + * originated from a single hop neighbor. Otherwise we can + * simply drop the ogm. + */ + if (is_single_hop_neigh) + batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP; + else + return; + } - in_tq = batman_ogm_packet->tq; - in_ttl = batman_ogm_packet->header.ttl; tt_num_changes = batman_ogm_packet->tt_num_changes; batman_ogm_packet->header.ttl--; memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); - /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast - * of our best tq value */ - if (router && router->tq_avg != 0) { - - /* rebroadcast ogm of best ranking neighbor as is */ - if (!compare_eth(router->addr, ethhdr->h_source)) { - batman_ogm_packet->tq = router->tq_avg; - - if (router->last_ttl) - batman_ogm_packet->header.ttl = - router->last_ttl - 1; - } - - tq_avg = router->tq_avg; - } - - if (router) - neigh_node_free_ref(router); - /* apply hop penalty */ batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); bat_dbg(DBG_BATMAN, bat_priv, - "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n", - in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1, - batman_ogm_packet->header.ttl); + "Forwarding packet: tq: %i, ttl: %i\n", + batman_ogm_packet->tq, batman_ogm_packet->header.ttl); batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); @@ -949,6 +937,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; int is_broadcast = 0, is_bidirectional; bool is_single_hop_neigh = false; + bool is_from_best_next_hop = false; int is_duplicate; uint32_t if_incoming_seqno; @@ -1070,6 +1059,13 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, return; } + if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all packets not forwarded from " + "the best next hop (sender: %pM)\n", ethhdr->h_source); + return; + } + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); if (!orig_node) return; @@ -1094,6 +1090,10 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, if (router) router_router = orig_node_get_router(router->orig_node); + if ((router && router->tq_avg != 0) && + 
(compare_eth(router->addr, ethhdr->h_source))) + is_from_best_next_hop = true; + /* avoid temporary routing loops */ if (router && router_router && (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && @@ -1144,7 +1144,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, /* mark direct link on incoming interface */ bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - is_single_hop_neigh, if_incoming); + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); @@ -1167,7 +1168,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast originator packet\n"); bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, - is_single_hop_neigh, if_incoming); + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); out_neigh: if ((orig_neigh_node) && (!is_single_hop_neigh)) diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index f54969c61a1e..0ee1af770798 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -39,6 +39,7 @@ enum bat_packettype { #define COMPAT_VERSION 14 enum batman_iv_flags { + NOT_BEST_NEXT_HOP = 1 << 3, PRIMARIES_FIRST_HOP = 1 << 4, VIS_SERVER = 1 << 5, DIRECTLINK = 1 << 6 -- cgit v1.2.2 From fefa53297112f85d99d77374c745e247ba78831c Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sat, 17 Mar 2012 15:28:34 +0800 Subject: batman-adv: fix checkpatch string complaint Regression introduced by: f76d019194e0a88c57371df169ecc979690a04c2 Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index bafb47370b4c..abd10c490fd9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1061,8 +1061,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { bat_dbg(DBG_BATMAN, bat_priv, - "Drop packet: ignoring all packets not forwarded from " - "the best next hop (sender: %pM)\n", ethhdr->h_source); + "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", + ethhdr->h_source); return; } -- cgit v1.2.2 From c64703aace094bd2836bb0755b7faf8b4cbab998 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Wed, 14 Mar 2012 12:57:02 +0100 Subject: batman-adv: update copyright years update copyright years in order to include 2012 Signed-off-by: Antonio Quartulli --- net/batman-adv/bridge_loop_avoidance.c | 2 +- net/batman-adv/bridge_loop_avoidance.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ad394c6496cc..8bf97515a77d 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 B.A.T.M.A.N. contributors: + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 4a8e4fc766bc..e39f93acc28f 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 B.A.T.M.A.N. contributors: + * Copyright (C) 2011-2012 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich * -- cgit v1.2.2 From 35c133a000d54b7e3fe81e8c8e4b8af5878ad6dd Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Wed, 14 Mar 2012 13:03:01 +0100 Subject: batman-adv: add contributor name translation_table.{c,h} have been heavily modified by another contributor and for legal purposes it is better to include his name into the contributor list Signed-off-by: Antonio Quartulli --- net/batman-adv/translation-table.c | 2 +- net/batman-adv/translation-table.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index a38d315d3cd6..2cb46f0bb163 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * - * Marek Lindner, Simon Wunderlich + * Marek Lindner, Simon Wunderlich, Antonio Quartulli * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index bfebe26edd8e..593d1b31217c 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -1,7 +1,7 @@ /* * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * - * Marek Lindner, Simon Wunderlich + * Marek Lindner, Simon Wunderlich, Antonio Quartulli * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public -- cgit v1.2.2 From 647c0c70e8a44e359d1d90d9d067d0b6b611076a Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sat, 5 May 2012 10:13:53 +0000 Subject: net/ipv6/af_inet6.c: checkpatch cleanup af_inet6.c:80: ERROR: do not initialise statics to 0 or NULL af_inet6.c:259: ERROR: spaces required around that '=' (ctx:VxV) af_inet6.c:394: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:412: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:422: ERROR: do not use assignment in if condition af_inet6.c:425: ERROR: do not use assignment in if condition af_inet6.c:433: ERROR: do not use assignment in if condition af_inet6.c:437: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:446: ERROR: spaces required around that '=' (ctx:VxV) af_inet6.c:478: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:485: ERROR: that open brace { should be on the previous line af_inet6.c:485: ERROR: space required before the open parenthesis '(' af_inet6.c:513: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:629: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:647: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:687: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:709: WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable af_inet6.c:1073: ERROR: space required before the open parenthesis '(' Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/ipv6/af_inet6.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0ad046c7ae95..bf8e14659e2d 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -77,7 +77,7 @@ struct ipv6_params ipv6_defaults = { .autoconf = 1, }; -static int disable_ipv6_mod = 0; +static int disable_ipv6_mod; module_param_named(disable, disable_ipv6_mod, int, 0444); MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional"); @@ -256,7 +256,7 @@ out_rcu_unlock: /* bind for INET6 API */ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr; + struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -390,7 +390,6 @@ out_unlock: rcu_read_unlock(); goto out; } - EXPORT_SYMBOL(inet6_bind); int inet6_release(struct socket *sock) @@ -408,7 +407,6 @@ int inet6_release(struct socket *sock) return inet_release(sock); } - EXPORT_SYMBOL(inet6_release); void inet6_destroy_sock(struct sock *sk) @@ -419,10 +417,12 @@ void inet6_destroy_sock(struct sock *sk) /* Release rx options */ - if ((skb = xchg(&np->pktoptions, NULL)) != NULL) + skb = xchg(&np->pktoptions, NULL); + if (skb != NULL) kfree_skb(skb); - if ((skb = xchg(&np->rxpmtu, NULL)) != NULL) + skb = xchg(&np->rxpmtu, NULL); + if (skb != NULL) kfree_skb(skb); /* Free flowlabels */ @@ -430,10 +430,10 @@ void inet6_destroy_sock(struct sock *sk) /* Free tx options */ - if ((opt = xchg(&np->opt, NULL)) != NULL) + opt = xchg(&np->opt, NULL); + if (opt != NULL) sock_kfree_s(sk, opt, opt->tot_len); } - EXPORT_SYMBOL_GPL(inet6_destroy_sock); /* @@ -443,7 +443,7 @@ EXPORT_SYMBOL_GPL(inet6_destroy_sock); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { - struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr; + struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -474,7 +474,6 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, *uaddr_len = sizeof(*sin); return 0; } - EXPORT_SYMBOL(inet6_getname); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) @@ -482,8 +481,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct sock *sk = sock->sk; struct net *net = sock_net(sk); - switch(cmd) - { + switch (cmd) { case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); @@ -509,7 +507,6 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) /*NOTREACHED*/ return 0; } - EXPORT_SYMBOL(inet6_ioctl); const struct proto_ops inet6_stream_ops = { @@ -625,7 +622,6 @@ out_illegal: p->type); goto out; } - EXPORT_SYMBOL(inet6_register_protosw); void @@ -643,7 +639,6 @@ inet6_unregister_protosw(struct inet_protosw *p) synchronize_net(); } } - EXPORT_SYMBOL(inet6_unregister_protosw); int inet6_sk_rebuild_header(struct sock *sk) @@ -683,7 +678,6 @@ int inet6_sk_rebuild_header(struct sock *sk) return 0; } - EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header); int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) @@ -705,7 +699,6 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) } return 0; } - EXPORT_SYMBOL_GPL(ipv6_opt_accepted); static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) @@ -1070,7 
+1063,7 @@ static int __init inet6_init(void) BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); /* Register the socket-side information for inet6_create. */ - for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) + for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r); if (disable_ipv6_mod) { -- cgit v1.2.2 From 4b549a2ef4bef9965d97cbd992ba67930cd3e0fe Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 11 May 2012 09:30:50 +0000 Subject: fq_codel: Fair Queue Codel AQM Fair Queue Codel packet scheduler Principles : - Packets are classified (internal classifier or external) on flows. - This is a Stochastic model (as we use a hash, several flows might be hashed on same slot) - Each flow has a CoDel managed queue. - Flows are linked onto two (Round Robin) lists, so that new flows have priority on old ones. - For a given flow, packets are not reordered (CoDel uses a FIFO) - head drops only. - ECN capability is on by default. - Very low memory footprint (64 bytes per flow) tc qdisc ... fq_codel [ limit PACKETS ] [ flows number ] [ target TIME ] [ interval TIME ] [ noecn ] [ quantum BYTES ] defaults : 1024 flows, 10240 packets limit, quantum : device MTU target : 5ms (CoDel default) interval : 100ms (CoDel default) Impressive results on load : class htb 1:1 root leaf 10: prio 0 quantum 1514 rate 200000Kbit ceil 200000Kbit burst 1475b/8 mpu 0b overhead 0b cburst 1475b/8 mpu 0b overhead 0b level 0 Sent 43304920109 bytes 33063109 pkt (dropped 0, overlimits 0 requeues 0) rate 201691Kbit 28595pps backlog 0b 312p requeues 0 lended: 33063109 borrowed: 0 giants: 0 tokens: -912 ctokens: -912 class fq_codel 10:1735 parent 10: (dropped 1292, overlimits 0 requeues 0) backlog 15140b 10p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 7.1ms class fq_codel 10:4524 parent 10: (dropped 1291, overlimits 0 requeues 0) backlog 16654b 11p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 7.1ms class fq_codel 10:4e74 parent 10: (dropped 1290, overlimits 0 requeues 0) backlog 6056b 4p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 6.4ms dropping drop_next 92.0ms class fq_codel 10:628a parent 10: (dropped 1289, overlimits 0 requeues 0) backlog 7570b 5p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 5.4ms dropping drop_next 90.9ms class fq_codel 10:a4b3 parent 10: (dropped 302, overlimits 0 requeues 0) backlog 16654b 11p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 7.1ms class fq_codel 10:c3c2 parent 10: (dropped 1284, overlimits 0 requeues 0) backlog 13626b 9p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 5.9ms class fq_codel 10:d331 parent 10: (dropped 299, overlimits 0 requeues 0) backlog 15140b 10p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 7.0ms class fq_codel 10:d526 parent 10: (dropped 12160, overlimits 0 requeues 0) backlog 35870b 211p requeues 0 deficit 1508 count 12160 lastcount 1 ldelay 15.3ms dropping drop_next 247us class fq_codel 10:e2c6 parent 10: (dropped 1288, overlimits 0 requeues 0) backlog 15140b 10p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 7.1ms class fq_codel 10:eab5 parent 10: (dropped 1285, overlimits 0 requeues 0) backlog 16654b 11p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 5.9ms class fq_codel 10:f220 parent 10: (dropped 1289, overlimits 0 requeues 0) backlog 15140b 10p requeues 0 deficit 1514 count 1 lastcount 1 ldelay 7.1ms qdisc htb 1: root refcnt 6 r2q 10 default 1 direct_packets_stat 0 ver 3.17 Sent 43331086547 bytes 33092812 pkt (dropped 0, overlimits 66063544 requeues 71) rate 201697Kbit 28602pps backlog 
0b 260p requeues 71 qdisc fq_codel 10: parent 1:1 limit 10240p flows 65536 target 5.0ms interval 100.0ms ecn Sent 43331086547 bytes 33092812 pkt (dropped 949359, overlimits 0 requeues 0) rate 201697Kbit 28602pps backlog 189352b 260p requeues 0 maxpacket 1514 drop_overlimit 0 new_flow_count 5582 ecn_mark 125593 new_flows_len 0 old_flows_len 11 PING 172.30.42.18 (172.30.42.18) 56(84) bytes of data. 64 bytes from 172.30.42.18: icmp_req=1 ttl=64 time=0.227 ms 64 bytes from 172.30.42.18: icmp_req=2 ttl=64 time=0.165 ms 64 bytes from 172.30.42.18: icmp_req=3 ttl=64 time=0.166 ms 64 bytes from 172.30.42.18: icmp_req=4 ttl=64 time=0.151 ms 64 bytes from 172.30.42.18: icmp_req=5 ttl=64 time=0.164 ms 64 bytes from 172.30.42.18: icmp_req=6 ttl=64 time=0.172 ms 64 bytes from 172.30.42.18: icmp_req=7 ttl=64 time=0.175 ms 64 bytes from 172.30.42.18: icmp_req=8 ttl=64 time=0.183 ms 64 bytes from 172.30.42.18: icmp_req=9 ttl=64 time=0.158 ms 64 bytes from 172.30.42.18: icmp_req=10 ttl=64 time=0.200 ms 10 packets transmitted, 10 received, 0% packet loss, time 8999ms rtt min/avg/max/mdev = 0.151/0.176/0.227/0.022 ms Much better than SFQ because of priority given to new flows, and fast path dirtying less cache lines. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/Kconfig | 11 + net/sched/Makefile | 1 + net/sched/sch_fq_codel.c | 624 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 636 insertions(+) create mode 100644 net/sched/sch_fq_codel.c (limited to 'net') diff --git a/net/sched/Kconfig b/net/sched/Kconfig index fadd2522053d..e7a8976bf25c 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -261,6 +261,17 @@ config NET_SCH_CODEL If unsure, say N. +config NET_SCH_FQ_CODEL + tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)" + help + Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL) + packet scheduling algorithm. + + To compile this driver as a module, choose M here: the module + will be called sch_fq_codel. + + If unsure, say N. + config NET_SCH_INGRESS tristate "Ingress Qdisc" depends on NET_CLS_ACT diff --git a/net/sched/Makefile b/net/sched/Makefile index 30fab03b8516..5940a1992f0d 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o +obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c new file mode 100644 index 000000000000..a7b3754d21c8 --- /dev/null +++ b/net/sched/sch_fq_codel.c @@ -0,0 +1,624 @@ +/* + * Fair Queue CoDel discipline + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (C) 2012 Eric Dumazet + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Fair Queue CoDel. + * + * Principles : + * Packets are classified (internal classifier or external) on flows. + * This is a Stochastic model (as we use a hash, several flows + * might be hashed on same slot) + * Each flow has a CoDel managed queue. 
+ * Flows are linked onto two (Round Robin) lists, + * so that new flows have priority on old ones. + * + * For a given flow, packets are not reordered (CoDel uses a FIFO) + * head drops only. + * ECN capability is on by default. + * Low memory footprint (64 bytes per flow) + */ + +struct fq_codel_flow { + struct sk_buff *head; + struct sk_buff *tail; + struct list_head flowchain; + int deficit; + u32 dropped; /* number of drops (or ECN marks) on this flow */ + struct codel_vars cvars; +}; /* please try to keep this structure <= 64 bytes */ + +struct fq_codel_sched_data { + struct tcf_proto *filter_list; /* optional external classifier */ + struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ + u32 *backlogs; /* backlog table [flows_cnt] */ + u32 flows_cnt; /* number of flows */ + u32 perturbation; /* hash perturbation */ + u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ + struct codel_params cparams; + struct codel_stats cstats; + u32 drop_overlimit; + u32 new_flow_count; + + struct list_head new_flows; /* list of new flows */ + struct list_head old_flows; /* list of old flows */ +}; + +static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, + const struct sk_buff *skb) +{ + struct flow_keys keys; + unsigned int hash; + + skb_flow_dissect(skb, &keys); + hash = jhash_3words((__force u32)keys.dst, + (__force u32)keys.src ^ keys.ip_proto, + (__force u32)keys.ports, q->perturbation); + return ((u64)hash * q->flows_cnt) >> 32; +} + +static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, + int *qerr) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct tcf_result res; + int result; + + if (TC_H_MAJ(skb->priority) == sch->handle && + TC_H_MIN(skb->priority) > 0 && + TC_H_MIN(skb->priority) <= q->flows_cnt) + return TC_H_MIN(skb->priority); + + if (!q->filter_list) + return fq_codel_hash(q, skb) + 1; + + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; + result = tc_classify(skb, q->filter_list, &res); + if (result >= 0) { +#ifdef CONFIG_NET_CLS_ACT + switch (result) { + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; + case TC_ACT_SHOT: + return 0; + } +#endif + if (TC_H_MIN(res.classid) <= q->flows_cnt) + return TC_H_MIN(res.classid); + } + return 0; +} + +/* helper functions : might be changed when/if skb use a standard list_head */ + +/* remove one skb from head of slot queue */ +static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) +{ + struct sk_buff *skb = flow->head; + + flow->head = skb->next; + skb->next = NULL; + return skb; +} + +/* add skb to flow queue (tail add) */ +static inline void flow_queue_add(struct fq_codel_flow *flow, + struct sk_buff *skb) +{ + if (flow->head == NULL) + flow->head = skb; + else + flow->tail->next = skb; + flow->tail = skb; + skb->next = NULL; +} + +static unsigned int fq_codel_drop(struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + unsigned int maxbacklog = 0, idx = 0, i, len; + struct fq_codel_flow *flow; + + /* Queue is full! Find the fat flow and drop packet from it. + * This might sound expensive, but with 1024 flows, we scan + * 4KB of memory, and we dont need to handle a complex tree + * in fast path (packet queue/enqueue) with many cache misses. 
+ */ + for (i = 0; i < q->flows_cnt; i++) { + if (q->backlogs[i] > maxbacklog) { + maxbacklog = q->backlogs[i]; + idx = i; + } + } + flow = &q->flows[idx]; + skb = dequeue_head(flow); + len = qdisc_pkt_len(skb); + q->backlogs[idx] -= len; + kfree_skb(skb); + sch->q.qlen--; + sch->qstats.drops++; + sch->qstats.backlog -= len; + flow->dropped++; + return idx; +} + +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + unsigned int idx; + struct fq_codel_flow *flow; + int uninitialized_var(ret); + + idx = fq_codel_classify(skb, sch, &ret); + if (idx == 0) { + if (ret & __NET_XMIT_BYPASS) + sch->qstats.drops++; + kfree_skb(skb); + return ret; + } + idx--; + + codel_set_enqueue_time(skb); + flow = &q->flows[idx]; + flow_queue_add(flow, skb); + q->backlogs[idx] += qdisc_pkt_len(skb); + sch->qstats.backlog += qdisc_pkt_len(skb); + + if (list_empty(&flow->flowchain)) { + list_add_tail(&flow->flowchain, &q->new_flows); + codel_vars_init(&flow->cvars); + q->new_flow_count++; + flow->deficit = q->quantum; + flow->dropped = 0; + } + if (++sch->q.qlen < sch->limit) + return NET_XMIT_SUCCESS; + + q->drop_overlimit++; + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ + if (fq_codel_drop(sch) == idx) + return NET_XMIT_CN; + + /* As we dropped a packet, better let upper stack know this */ + qdisc_tree_decrease_qlen(sch, 1); + return NET_XMIT_SUCCESS; +} + +/* This is the specific function called from codel_dequeue() + * to dequeue a packet from queue. Note: backlog is handled in + * codel, we dont need to reduce it here. + */ +static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) +{ + struct fq_codel_flow *flow; + struct sk_buff *skb = NULL; + + flow = container_of(vars, struct fq_codel_flow, cvars); + if (flow->head) { + skb = dequeue_head(flow); + sch->qstats.backlog -= qdisc_pkt_len(skb); + sch->q.qlen--; + } + return skb; +} + +static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + struct fq_codel_flow *flow; + struct list_head *head; + u32 prev_drop_count, prev_ecn_mark; + +begin: + head = &q->new_flows; + if (list_empty(head)) { + head = &q->old_flows; + if (list_empty(head)) + return NULL; + } + flow = list_first_entry(head, struct fq_codel_flow, flowchain); + + if (flow->deficit <= 0) { + flow->deficit += q->quantum; + list_move_tail(&flow->flowchain, &q->old_flows); + goto begin; + } + + prev_drop_count = q->cstats.drop_count; + prev_ecn_mark = q->cstats.ecn_mark; + + skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, + dequeue, &q->backlogs[flow - q->flows]); + + flow->dropped += q->cstats.drop_count - prev_drop_count; + flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; + + if (!skb) { + /* force a pass through old_flows to prevent starvation */ + if ((head == &q->new_flows) && !list_empty(&q->old_flows)) + list_move_tail(&flow->flowchain, &q->old_flows); + else + list_del_init(&flow->flowchain); + goto begin; + } + qdisc_bstats_update(sch, skb); + flow->deficit -= qdisc_pkt_len(skb); + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, + * or HTB crashes. Defer it for next round. 
+ */ + if (q->cstats.drop_count && sch->q.qlen) { + qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); + q->cstats.drop_count = 0; + } + return skb; +} + +static void fq_codel_reset(struct Qdisc *sch) +{ + struct sk_buff *skb; + + while ((skb = fq_codel_dequeue(sch)) != NULL) + kfree_skb(skb); +} + +static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { + [TCA_FQ_CODEL_TARGET] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_LIMIT] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_ECN] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 }, + [TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 }, +}; + +static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy); + if (err < 0) + return err; + if (tb[TCA_FQ_CODEL_FLOWS]) { + if (q->flows) + return -EINVAL; + q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); + if (!q->flows_cnt || + q->flows_cnt > 65536) + return -EINVAL; + } + sch_tree_lock(sch); + + if (tb[TCA_FQ_CODEL_TARGET]) { + u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]); + + q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_FQ_CODEL_INTERVAL]) { + u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]); + + q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT; + } + + if (tb[TCA_FQ_CODEL_LIMIT]) + sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]); + + if (tb[TCA_FQ_CODEL_ECN]) + q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); + + if (tb[TCA_FQ_CODEL_QUANTUM]) + q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + + while (sch->q.qlen > sch->limit) { + struct sk_buff *skb = fq_codel_dequeue(sch); + + kfree_skb(skb); + q->cstats.drop_count++; + } + qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); + q->cstats.drop_count = 0; + + sch_tree_unlock(sch); + return 0; +} + +static void *fq_codel_zalloc(size_t sz) +{ + void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); + + if (!ptr) + ptr = vzalloc(sz); + return ptr; +} + +static void fq_codel_free(void *addr) +{ + if (addr) { + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); + } +} + +static void fq_codel_destroy(struct Qdisc *sch) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + + tcf_destroy_chain(&q->filter_list); + fq_codel_free(q->backlogs); + fq_codel_free(q->flows); +} + +static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + int i; + + sch->limit = 10*1024; + q->flows_cnt = 1024; + q->quantum = psched_mtu(qdisc_dev(sch)); + q->perturbation = net_random(); + INIT_LIST_HEAD(&q->new_flows); + INIT_LIST_HEAD(&q->old_flows); + codel_params_init(&q->cparams); + codel_stats_init(&q->cstats); + q->cparams.ecn = true; + + if (opt) { + int err = fq_codel_change(sch, opt); + if (err) + return err; + } + + if (!q->flows) { + q->flows = fq_codel_zalloc(q->flows_cnt * + sizeof(struct fq_codel_flow)); + if (!q->flows) + return -ENOMEM; + q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32)); + if (!q->backlogs) { + fq_codel_free(q->flows); + return -ENOMEM; + } + for (i = 0; i < q->flows_cnt; i++) { + struct fq_codel_flow *flow = q->flows + i; + + INIT_LIST_HEAD(&flow->flowchain); + } + } + if (sch->limit >= 1) + sch->flags |= TCQ_F_CAN_BYPASS; + else + sch->flags &= ~TCQ_F_CAN_BYPASS; + return 0; +} + +static int 
fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct nlattr *opts; + + opts = nla_nest_start(skb, TCA_OPTIONS); + if (opts == NULL) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET, + codel_time_to_us(q->cparams.target)) || + nla_put_u32(skb, TCA_FQ_CODEL_LIMIT, + sch->limit) || + nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL, + codel_time_to_us(q->cparams.interval)) || + nla_put_u32(skb, TCA_FQ_CODEL_ECN, + q->cparams.ecn) || + nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM, + q->quantum) || + nla_put_u32(skb, TCA_FQ_CODEL_FLOWS, + q->flows_cnt)) + goto nla_put_failure; + + nla_nest_end(skb, opts); + return skb->len; + +nla_put_failure: + return -1; +} + +static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + struct tc_fq_codel_xstats st = { + .type = TCA_FQ_CODEL_XSTATS_QDISC, + .qdisc_stats.maxpacket = q->cstats.maxpacket, + .qdisc_stats.drop_overlimit = q->drop_overlimit, + .qdisc_stats.ecn_mark = q->cstats.ecn_mark, + .qdisc_stats.new_flow_count = q->new_flow_count, + }; + struct list_head *pos; + + list_for_each(pos, &q->new_flows) + st.qdisc_stats.new_flows_len++; + + list_for_each(pos, &q->old_flows) + st.qdisc_stats.old_flows_len++; + + return gnet_stats_copy_app(d, &st, sizeof(st)); +} + +static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg) +{ + return NULL; +} + +static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid) +{ + return 0; +} + +static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent, + u32 classid) +{ + /* we cannot bypass queue discipline anymore */ + sch->flags &= ~TCQ_F_CAN_BYPASS; + return 0; +} + +static void fq_codel_put(struct Qdisc *q, unsigned long cl) +{ +} + +static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + + if (cl) + return NULL; + return &q->filter_list; +} + +static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + tcm->tcm_handle |= TC_H_MIN(cl); + return 0; +} + +static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + u32 idx = cl - 1; + struct gnet_stats_queue qs = { 0 }; + struct tc_fq_codel_xstats xstats; + + if (idx < q->flows_cnt) { + const struct fq_codel_flow *flow = &q->flows[idx]; + const struct sk_buff *skb = flow->head; + + memset(&xstats, 0, sizeof(xstats)); + xstats.type = TCA_FQ_CODEL_XSTATS_CLASS; + xstats.class_stats.deficit = flow->deficit; + xstats.class_stats.ldelay = + codel_time_to_us(flow->cvars.ldelay); + xstats.class_stats.count = flow->cvars.count; + xstats.class_stats.lastcount = flow->cvars.lastcount; + xstats.class_stats.dropping = flow->cvars.dropping; + if (flow->cvars.dropping) { + codel_tdiff_t delta = flow->cvars.drop_next - + codel_get_time(); + + xstats.class_stats.drop_next = (delta >= 0) ? 
+ codel_time_to_us(delta) : + -codel_time_to_us(-delta); + } + while (skb) { + qs.qlen++; + skb = skb->next; + } + qs.backlog = q->backlogs[idx]; + qs.drops = flow->dropped; + } + if (gnet_stats_copy_queue(d, &qs) < 0) + return -1; + if (idx < q->flows_cnt) + return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); + return 0; +} + +static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + struct fq_codel_sched_data *q = qdisc_priv(sch); + unsigned int i; + + if (arg->stop) + return; + + for (i = 0; i < q->flows_cnt; i++) { + if (list_empty(&q->flows[i].flowchain) || + arg->count < arg->skip) { + arg->count++; + continue; + } + if (arg->fn(sch, i + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } +} + +static const struct Qdisc_class_ops fq_codel_class_ops = { + .leaf = fq_codel_leaf, + .get = fq_codel_get, + .put = fq_codel_put, + .tcf_chain = fq_codel_find_tcf, + .bind_tcf = fq_codel_bind, + .unbind_tcf = fq_codel_put, + .dump = fq_codel_dump_class, + .dump_stats = fq_codel_dump_class_stats, + .walk = fq_codel_walk, +}; + +static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = { + .cl_ops = &fq_codel_class_ops, + .id = "fq_codel", + .priv_size = sizeof(struct fq_codel_sched_data), + .enqueue = fq_codel_enqueue, + .dequeue = fq_codel_dequeue, + .peek = qdisc_peek_dequeued, + .drop = fq_codel_drop, + .init = fq_codel_init, + .reset = fq_codel_reset, + .destroy = fq_codel_destroy, + .change = fq_codel_change, + .dump = fq_codel_dump, + .dump_stats = fq_codel_dump_stats, + .owner = THIS_MODULE, +}; + +static int __init fq_codel_module_init(void) +{ + return register_qdisc(&fq_codel_qdisc_ops); +} + +static void __exit fq_codel_module_exit(void) +{ + unregister_qdisc(&fq_codel_qdisc_ops); +} + +module_init(fq_codel_module_init) +module_exit(fq_codel_module_exit) +MODULE_AUTHOR("Eric Dumazet"); +MODULE_LICENSE("GPL"); -- cgit v1.2.2 From 8710e2613a4819aac44f4aed7e29027ac3eeb683 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Fri, 16 Mar 2012 11:52:31 +0100 Subject: batman-adv: avoid skb_linearise() if not needed Whenever we want to access headers only, we do not need to linearise the whole packet. Instead we can use pskb_may_pull() Signed-off-by: Antonio Quartulli --- net/batman-adv/routing.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 7ed9d8f92916..4c6467db881c 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -916,8 +916,9 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, /* Check whether I have to reroute the packet */ if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { - /* Linearize the skb before accessing it */ - if (skb_linearize(skb) < 0) + /* check if there is enough data before accessing it */ + if (pskb_may_pull(skb, sizeof(struct unicast_packet) + + ETH_HLEN) < 0) return 0; ethhdr = (struct ethhdr *)(skb->data + -- cgit v1.2.2 From 3275e7cc84fb0574e9662e8e74c3b1dab38f7143 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Fri, 16 Mar 2012 18:03:28 +0100 Subject: batman-adv: improve unicast packet (re)routing In case of a client X roaming from a generic node A to another node B, it is possible that a third node C gets A's OGM but not B's. At this point in time, if C wants to send data to X it will send a unicast packet destined to A. The packet header will contain A's last ttvn (C got A's OGM and so it knows it). 
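The fix described below boils down to two small decisions. The following sketch uses the identifiers of the patch itself (see the unicast.c and routing.c hunks further down); it is a condensed illustration of the intent, not the exact in-tree control flow:

	/* Sending node C: client X is flagged as roaming but its new
	 * originator is not known yet, so advertise an older translation
	 * table version number (ttvn - 1) in the unicast header. The stale
	 * ttvn forces the old originator A to re-route the packet.
	 */
	if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	/* Receiving or intermediate node that also lacks an updated route:
	 * do not attempt a reroute; returning 1 from check_unicast_ttvn()
	 * lets the packet keep travelling along the old route.
	 */
	if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
		return 1;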
The packet will travel towards A without being intercepted because the ttvn contained in its header is the newest for A. Once A receives the packet, A's state will not report that it is in a "roaming phase" (because, after a roaming, once A sends out its OGM, all the changes are committed and the node is no longer considered to be in the roaming state) and the ttvn carried by the packet will match A's own. Therefore A has no reason to try to alter the packet's route and it will drop the packet, because the destination client is not there anymore. However, C is well aware that its routing information towards client X is outdated, as it received an OGM from A saying that the client roamed away. Building on this detail, this patch introduces a small change in behaviour: as long as C does not know the new location of client X, it will forward the traffic to the client's last known location using ttvn-1 of the destination. By carrying an older ttvn the packet forces node A to re-route it. Intermediate nodes are also allowed to update the packet's destination as long as they have the information about the client's new location. Signed-off-by: Antonio Quartulli --- net/batman-adv/routing.c | 7 +++++++ net/batman-adv/translation-table.c | 19 +++++++++++++++++++ net/batman-adv/translation-table.h | 2 ++ net/batman-adv/unicast.c | 8 ++++++++ 4 files changed, 36 insertions(+) (limited to 'net') diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 4c6467db881c..b1824bba09b3 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -923,6 +923,13 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, ethhdr = (struct ethhdr *)(skb->data + sizeof(struct unicast_packet)); + + /* we don't have an updated route for this client, so we should + * not try to reroute the packet!! + */ + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + return 1; + orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); if (!orig_node) { diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 2cb46f0bb163..b3fb597c79b5 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -2117,3 +2117,22 @@ request_table: } } } + +/* returns true whether we know that the client has moved from its old + * originator to another one.
This entry is kept is still kept for consistency + * purposes + */ +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) +{ + struct tt_global_entry *tt_global_entry; + bool ret = false; + + tt_global_entry = tt_global_hash_find(bat_priv, addr); + if (!tt_global_entry) + goto out; + + ret = tt_global_entry->common.flags & TT_CLIENT_ROAM; + tt_global_entry_free_ref(tt_global_entry); +out: + return ret; +} diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index 593d1b31217c..c43374dc364d 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -53,5 +53,7 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const unsigned char *tt_buff, uint8_t tt_num_changes, uint8_t ttvn, uint16_t tt_crc); +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr); + #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index 676f6a626b2c..74175c210858 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -331,6 +331,14 @@ find_router: unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); + /* inform the destination node that we are still missing a correct route + * for this client. The destination will receive this packet and will + * try to reroute it because the ttvn contained in the header is less + * than the current one + */ + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + unicast_packet->ttvn = unicast_packet->ttvn - 1; + if (atomic_read(&bat_priv->fragmentation) && data_len + sizeof(*unicast_packet) > neigh_node->if_incoming->net_dev->mtu) { -- cgit v1.2.2 From e3b0d0dea6e044283dff1c0852b20c98eb41a7f1 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sat, 17 Mar 2012 15:28:32 +0800 Subject: batman-adv: prepare lq_update_lock to be shared among different protocols Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/bat_iv_ogm.c | 9 ++++----- net/batman-adv/originator.c | 1 + net/batman-adv/types.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index abd10c490fd9..dc53798ebb47 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -43,7 +43,6 @@ static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface, goto out; INIT_LIST_HEAD(&neigh_node->bonding_list); - spin_lock_init(&neigh_node->tq_lock); neigh_node->orig_node = orig_neigh; neigh_node->if_incoming = hard_iface; @@ -637,12 +636,12 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if (is_duplicate) continue; - spin_lock_bh(&tmp_neigh_node->tq_lock); + spin_lock_bh(&tmp_neigh_node->lq_update_lock); ring_buffer_set(tmp_neigh_node->tq_recv, &tmp_neigh_node->tq_index, 0); tmp_neigh_node->tq_avg = ring_buffer_avg(tmp_neigh_node->tq_recv); - spin_unlock_bh(&tmp_neigh_node->tq_lock); + spin_unlock_bh(&tmp_neigh_node->lq_update_lock); } if (!neigh_node) { @@ -668,12 +667,12 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, orig_node->flags = batman_ogm_packet->flags; neigh_node->last_seen = jiffies; - spin_lock_bh(&neigh_node->tq_lock); + spin_lock_bh(&neigh_node->lq_update_lock); ring_buffer_set(neigh_node->tq_recv, &neigh_node->tq_index, batman_ogm_packet->tq); neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); - 
spin_unlock_bh(&neigh_node->tq_lock); + spin_unlock_bh(&neigh_node->lq_update_lock); if (!is_duplicate) { orig_node->last_ttl = batman_ogm_packet->header.ttl; diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index f4b62011ca3f..41147942ba53 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -99,6 +99,7 @@ struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, INIT_HLIST_NODE(&neigh_node->list); memcpy(neigh_node->addr, neigh_addr, ETH_ALEN); + spin_lock_init(&neigh_node->lq_update_lock); /* extra reference for return */ atomic_set(&neigh_node->refcount, 2); diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 66a3750aa9e7..61308e8016ff 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -137,7 +137,7 @@ struct neigh_node { struct rcu_head rcu; struct orig_node *orig_node; struct hard_iface *if_incoming; - spinlock_t tq_lock; /* protects: tq_recv, tq_index */ + spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */ }; #ifdef CONFIG_BATMAN_ADV_BLA -- cgit v1.2.2 From 8c7bf248a318444accbe0c2c5db15bd727661606 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Sat, 17 Mar 2012 15:28:33 +0800 Subject: batman-adv: refactor window_protected to avoid unnecessary return statement Reported-by: David Laight Signed-off-by: Marek Lindner Signed-off-by: Antonio Quartulli --- net/batman-adv/routing.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index b1824bba09b3..840e2c64a301 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -234,17 +234,14 @@ int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, { if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { - if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) { - - *last_reset = jiffies; - bat_dbg(DBG_BATMAN, bat_priv, - "old packet received, start protection\n"); - - return 0; - } else { + if (!has_timed_out(*last_reset, RESET_PROTECTION_MS)) return 1; - } + + *last_reset = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "old packet received, start protection\n"); } + return 0; } -- cgit v1.2.2 From 679695813c0e29ecab666210752c9c0b4dd9f01c Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 26 Mar 2012 16:22:45 +0200 Subject: batman-adv: use shorter pr_warn instead of pr_warning Signed-off-by: Sven Eckelmann Signed-off-by: Antonio Quartulli --- net/batman-adv/hard-interface.c | 6 +++--- net/batman-adv/send.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 0b84bb1b62c4..dc334fa89847 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -173,9 +173,9 @@ static void check_known_mac_addr(const struct net_device *net_dev) net_dev->dev_addr)) continue; - pr_warning("The newly added mac address (%pM) already exists on: %s\n", - net_dev->dev_addr, hard_iface->net_dev->name); - pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); + pr_warn("The newly added mac address (%pM) already exists on: %s\n", + net_dev->dev_addr, hard_iface->net_dev->name); + pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); } rcu_read_unlock(); } diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 8e74d9763be3..f47299f22c68 100644 --- a/net/batman-adv/send.c +++ 
b/net/batman-adv/send.c @@ -45,8 +45,8 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, goto send_skb_err; if (!(hard_iface->net_dev->flags & IFF_UP)) { - pr_warning("Interface %s is not up - can't send packet via that interface!\n", - hard_iface->net_dev->name); + pr_warn("Interface %s is not up - can't send packet via that interface!\n", + hard_iface->net_dev->name); goto send_skb_err; } -- cgit v1.2.2 From e01572654a43329ae9ed0708931f577b5e0e6731 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Fri, 30 Mar 2012 18:44:09 +0200 Subject: batman-adv: Start new development cycle Signed-off-by: Sven Eckelmann Signed-off-by: Antonio Quartulli --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index fd83acd48b28..f4a3ec003479 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -28,7 +28,7 @@ #define DRIVER_DEVICE "batman-adv" #ifndef SOURCE_VERSION -#define SOURCE_VERSION "2012.1.0" +#define SOURCE_VERSION "2012.2.0" #endif /* B.A.T.M.A.N. parameters */ -- cgit v1.2.2 From 521251f2f5fa16747cc21e71580e404af855d140 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Mon, 16 Jan 2012 00:36:58 +0100 Subject: batman-adv: unset the TT_CLIENT_PENDING flag if the new local entry already exists When trying to add a new tt_local_entry, if such entry already exists, we have to ensure that the TT_CLIENT_PENDING flag is not set, otherwise the entry will be deleted soon. Reported-by: Simon Wunderlich Signed-off-by: Antonio Quartulli --- net/batman-adv/translation-table.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index b3fb597c79b5..a66c2dcd1088 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -206,6 +206,8 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, if (tt_local_entry) { tt_local_entry->last_seen = jiffies; + /* possibly unset the TT_CLIENT_PENDING flag */ + tt_local_entry->common.flags &= ~TT_CLIENT_PENDING; goto out; } -- cgit v1.2.2 From ce5b4b977127ee20c3f9c3fd3637cd3796f649f5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 14 May 2012 09:47:05 +0000 Subject: net/codel: Add missing #include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit m68k allmodconfig: net/sched/sch_codel.c: In function ‘dequeue’: net/sched/sch_codel.c:70: error: implicit declaration of function ‘prefetch’ make[1]: *** [net/sched/sch_codel.o] Error 1 Signed-off-by: Geert Uytterhoeven Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/sched/sch_codel.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index b4a1a81e757e..213ef60bced8 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include -- cgit v1.2.2 From 669d67bf777def468970f2dcba1537edf3b2d329 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 14 May 2012 11:57:06 +0000 Subject: net: codel: fix build errors Fix the following build error: net/sched/sch_fq_codel.c: In function 'fq_codel_dump_stats': net/sched/sch_fq_codel.c:464:3: error: unknown field 'qdisc_stats' specified in initializer net/sched/sch_fq_codel.c:464:3: warning: missing braces around initializer net/sched/sch_fq_codel.c:464:3: warning: (near initialization for 'st.') net/sched/sch_fq_codel.c:465:3: error: unknown field 'qdisc_stats' specified in initializer net/sched/sch_fq_codel.c:465:3: warning: excess elements in struct initializer net/sched/sch_fq_codel.c:465:3: warning: (near initialization for 'st') net/sched/sch_fq_codel.c:466:3: error: unknown field 'qdisc_stats' specified in initializer net/sched/sch_fq_codel.c:466:3: warning: excess elements in struct initializer net/sched/sch_fq_codel.c:466:3: warning: (near initialization for 'st') net/sched/sch_fq_codel.c:467:3: error: unknown field 'qdisc_stats' specified in initializer net/sched/sch_fq_codel.c:467:3: warning: excess elements in struct initializer net/sched/sch_fq_codel.c:467:3: warning: (near initialization for 'st') make[1]: *** [net/sched/sch_fq_codel.o] Error 1 Signed-off-by: Sasha Levin Signed-off-by: David S. Miller --- net/sched/sch_fq_codel.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index a7b3754d21c8..337ff204f272 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -461,13 +461,14 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d) struct fq_codel_sched_data *q = qdisc_priv(sch); struct tc_fq_codel_xstats st = { .type = TCA_FQ_CODEL_XSTATS_QDISC, - .qdisc_stats.maxpacket = q->cstats.maxpacket, - .qdisc_stats.drop_overlimit = q->drop_overlimit, - .qdisc_stats.ecn_mark = q->cstats.ecn_mark, - .qdisc_stats.new_flow_count = q->new_flow_count, }; struct list_head *pos; + st.qdisc_stats.maxpacket = q->cstats.maxpacket; + st.qdisc_stats.drop_overlimit = q->drop_overlimit; + st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; + st.qdisc_stats.new_flow_count = q->new_flow_count; + list_for_each(pos, &q->new_flows) st.qdisc_stats.new_flows_len++; -- cgit v1.2.2 From 7e1525249814acfd293d579abcb6462767643a8a Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 15 May 2012 01:57:44 +0000 Subject: xfrm: make xfrm_algo.c a module By making this a standalone config option (auto-selected as needed), selecting CRYPTO from here rather than from XFRM (which is boolean) allows the core crypto code to become a module again even when XFRM=y. Signed-off-by: Jan Beulich Signed-off-by: David S. 
Miller --- net/ipv4/Kconfig | 4 ++-- net/ipv6/Kconfig | 4 ++-- net/xfrm/Kconfig | 13 +++++++++---- net/xfrm/Makefile | 3 ++- net/xfrm/xfrm_algo.c | 2 ++ 5 files changed, 17 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index d183262943d9..2c8febd3ebda 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -312,7 +312,7 @@ config SYN_COOKIES config INET_AH tristate "IP: AH transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_HMAC select CRYPTO_MD5 @@ -324,7 +324,7 @@ config INET_AH config INET_ESP tristate "IP: ESP transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_AUTHENC select CRYPTO_HMAC diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 36d7437ac054..5728695b5449 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -69,7 +69,7 @@ config IPV6_OPTIMISTIC_DAD config INET6_AH tristate "IPv6: AH transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_HMAC select CRYPTO_MD5 @@ -81,7 +81,7 @@ config INET6_AH config INET6_ESP tristate "IPv6: ESP transformation" - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_AUTHENC select CRYPTO_HMAC diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index 6d081674515f..ce90b8d92365 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -3,12 +3,17 @@ # config XFRM bool - select CRYPTO depends on NET +config XFRM_ALGO + tristate + select XFRM + select CRYPTO + config XFRM_USER tristate "Transformation user configuration interface" - depends on INET && XFRM + depends on INET + select XFRM_ALGO ---help--- Support for Transformation(XFRM) user configuration interface like IPsec used by native Linux tools. @@ -48,13 +53,13 @@ config XFRM_STATISTICS config XFRM_IPCOMP tristate - select XFRM + select XFRM_ALGO select CRYPTO select CRYPTO_DEFLATE config NET_KEY tristate "PF_KEY sockets" - select XFRM + select XFRM_ALGO ---help--- PF_KEYv2 socket family, compatible to KAME ones. They are required if you are going to use IPsec tools ported diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile index aa429eefe919..c0e961983f17 100644 --- a/net/xfrm/Makefile +++ b/net/xfrm/Makefile @@ -3,8 +3,9 @@ # obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ - xfrm_input.o xfrm_output.o xfrm_algo.o \ + xfrm_input.o xfrm_output.o \ xfrm_sysctl.o xfrm_replay.o obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o +obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o obj-$(CONFIG_XFRM_USER) += xfrm_user.o obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 791ab2e77f3f..ecd6d8d8a66c 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -752,3 +752,5 @@ void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) } EXPORT_SYMBOL_GPL(pskb_put); #endif + +MODULE_LICENSE("GPL"); -- cgit v1.2.2 From 71b697fcc7b0a88da5106dd3671d732d0b4360d1 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 15 May 2012 02:00:44 +0000 Subject: xfrm_algo: drop an unnecessary inclusion For several releases, this has not been needed anymore, as no helper functions declared in net/ah.h get implemented by xfrm_algo.c anymore. Signed-off-by: Jan Beulich Signed-off-by: David S. 
Miller --- net/xfrm/xfrm_algo.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index ecd6d8d8a66c..4ce2d93162c1 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -15,9 +15,6 @@ #include #include #include -#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) -#include -#endif #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) #include #endif -- cgit v1.2.2 From e87cc4728f0e2fb663e592a1141742b1d6c63256 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 13 May 2012 21:56:26 +0000 Subject: net: Convert net_ratelimit uses to net__ratelimited Standardize the net core ratelimited logging functions. Coalesce formats, align arguments. Change a printk then vprintk sequence to use printf extension %pV. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/atm/ioctl.c | 8 ++--- net/caif/caif_socket.c | 10 +++--- net/core/dev.c | 30 +++++++--------- net/core/pktgen.c | 7 ++-- net/core/rtnetlink.c | 8 ++--- net/core/skbuff.c | 11 +++--- net/decnet/dn_fib.c | 5 ++- net/decnet/dn_neigh.c | 22 +++++------- net/decnet/dn_nsp_in.c | 11 +++--- net/decnet/dn_nsp_out.c | 5 ++- net/decnet/dn_route.c | 15 +++----- net/decnet/dn_table.c | 4 +-- net/decnet/netfilter/dn_rtmsg.c | 3 +- net/ipv4/icmp.c | 20 +++++------ net/ipv4/ip_fragment.c | 3 +- net/ipv4/ip_input.c | 13 ++++--- net/ipv4/ip_options.c | 6 ++-- net/ipv4/ip_output.c | 4 +-- net/ipv4/ipconfig.c | 11 +++--- net/ipv4/ipmr.c | 3 +- net/ipv4/netfilter/arp_tables.c | 5 ++- net/ipv4/netfilter/ip_tables.c | 3 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 3 +- net/ipv4/netfilter/nf_nat_h323.c | 26 +++++--------- net/ipv4/netfilter/nf_nat_snmp_basic.c | 8 ++--- net/ipv4/route.c | 40 +++++++++------------ net/ipv4/tcp.c | 14 ++++---- net/ipv4/tcp_input.c | 7 ++-- net/ipv4/tcp_ipv4.c | 11 +++--- net/ipv4/tcp_output.c | 3 +- net/ipv6/addrconf.c | 17 ++++----- net/ipv6/ah6.c | 4 +-- net/ipv6/ip6_output.c | 3 +- net/ipv6/ip6_tunnel.c | 33 ++++++----------- net/ipv6/ip6mr.c | 3 +- net/ipv6/netfilter/ip6_tables.c | 3 +- net/ipv6/netfilter/ip6t_REJECT.c | 6 ++-- net/ipv6/netfilter/ip6table_mangle.c | 3 +- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 3 +- net/ipv6/netfilter/nf_conntrack_reasm.c | 7 ++-- net/ipv6/reassembly.c | 6 ++-- net/ipv6/route.c | 12 ++----- net/ipv6/sit.c | 6 ++-- net/ipv6/tcp_ipv6.c | 10 +++--- net/llc/af_llc.c | 7 ++-- net/mac80211/agg-rx.c | 13 +++---- net/mac80211/ht.c | 8 ++--- net/mac80211/ibss.c | 10 +++--- net/mac80211/mlme.c | 12 +++---- net/mac80211/rx.c | 11 +++--- net/mac80211/tx.c | 17 ++++----- net/netfilter/nf_conntrack_amanda.c | 3 +- net/netfilter/nf_conntrack_core.c | 5 +-- net/netfilter/nf_conntrack_expect.c | 4 +-- net/netfilter/nf_conntrack_h323_main.c | 9 ++--- net/netfilter/nf_conntrack_irc.c | 8 ++--- net/netfilter/nfnetlink_queue.c | 9 ++--- net/netfilter/xt_TCPMSS.c | 10 +++--- net/netfilter/xt_hashlimit.c | 3 +- net/openvswitch/vport-netdev.c | 6 ++-- net/sched/act_ipt.c | 7 ++-- net/sched/act_mirred.c | 5 ++- net/sched/cls_u32.c | 3 +- net/sched/ematch.c | 4 +-- net/sched/sch_api.c | 10 +++--- net/sched/sch_generic.c | 11 +++--- net/sched/sch_gred.c | 12 +++---- net/sctp/sm_sideeffect.c | 5 ++- net/sctp/sm_statefuns.c | 18 +++++----- net/sctp/socket.c | 6 ++-- net/socket.c | 3 +- net/sunrpc/svc.c | 18 +++++----- net/sunrpc/svc_xprt.c | 13 +++---- net/sunrpc/svcsock.c | 28 
++++++--------- net/wireless/lib80211_crypt_ccmp.c | 33 ++++++----------- net/wireless/lib80211_crypt_tkip.c | 50 +++++++++----------------- 76 files changed, 306 insertions(+), 490 deletions(-) (limited to 'net') diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index 62dc8bfe6fe7..bbd3b639992e 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -97,9 +97,8 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, error = sock_get_timestampns(sk, argp); goto done; case ATM_SETSC: - if (net_ratelimit()) - pr_warning("ATM_SETSC is obsolete; used by %s:%d\n", - current->comm, task_pid_nr(current)); + net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n", + current->comm, task_pid_nr(current)); error = 0; goto done; case ATMSIGD_CTRL: @@ -123,8 +122,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, work for 32-bit userspace. TBH I don't really want to think about it at all. dwmw2. */ if (compat) { - if (net_ratelimit()) - pr_warning("32-bit task cannot be atmsigd\n"); + net_warn_ratelimited("32-bit task cannot be atmsigd\n"); error = -EINVAL; goto done; } diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 0dccdb3c7d26..fb8944355264 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -131,10 +131,9 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { - if (net_ratelimit()) - pr_debug("sending flow OFF (queue len = %d %d)\n", - atomic_read(&cf_sk->sk.sk_rmem_alloc), - sk_rcvbuf_lowwater(cf_sk)); + net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n", + atomic_read(&cf_sk->sk.sk_rmem_alloc), + sk_rcvbuf_lowwater(cf_sk)); set_rx_flow_off(cf_sk); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } @@ -144,8 +143,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return err; if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { set_rx_flow_off(cf_sk); - if (net_ratelimit()) - pr_debug("sending flow OFF due to rmem_schedule\n"); + net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } skb->dev = NULL; diff --git a/net/core/dev.c b/net/core/dev.c index a2be59fe6ab8..3dd853998d38 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1673,10 +1673,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) if (skb_network_header(skb2) < skb2->data || skb2->network_header > skb2->tail) { - if (net_ratelimit()) - pr_crit("protocol %04x is buggy, dev %s\n", - ntohs(skb2->protocol), - dev->name); + net_crit_ratelimited("protocol %04x is buggy, dev %s\n", + ntohs(skb2->protocol), + dev->name); skb_reset_network_header(skb2); } @@ -2343,11 +2342,9 @@ EXPORT_SYMBOL(__skb_tx_hash); static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { - if (net_ratelimit()) { - pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n", - dev->name, queue_index, - dev->real_num_tx_queues); - } + net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", + dev->name, queue_index, + dev->real_num_tx_queues); return 0; } return queue_index; @@ -2589,17 +2586,15 @@ int dev_queue_xmit(struct sk_buff *skb) } } HARD_TX_UNLOCK(dev, txq); - if (net_ratelimit()) - pr_crit("Virtual device %s asks to queue packet!\n", - dev->name); + net_crit_ratelimited("Virtual device %s asks to queue packet!\n", + dev->name); } 
else { /* Recursion is detected! It is possible, * unfortunately */ recursion_alert: - if (net_ratelimit()) - pr_crit("Dead loop on virtual device %s, fix it urgently!\n", - dev->name); + net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", + dev->name); } } @@ -3080,9 +3075,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) struct Qdisc *q; if (unlikely(MAX_RED_LOOP < ttl++)) { - if (net_ratelimit()) - pr_warn("Redir loop detected Dropping packet (%d->%d)\n", - skb->skb_iif, dev->ifindex); + net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n", + skb->skb_iif, dev->ifindex); return TC_ACT_SHOT; } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index ffb5d382f241..33912573959d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2934,8 +2934,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, if (datalen < sizeof(struct pktgen_hdr)) { datalen = sizeof(struct pktgen_hdr); - if (net_ratelimit()) - pr_info("increased datalen to %d\n", datalen); + net_info_ratelimited("increased datalen to %d\n", datalen); } udph->source = htons(pkt_dev->cur_udp_src); @@ -3365,8 +3364,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) pkt_dev->errors++; break; default: /* Drivers are not supposed to return other values! */ - if (net_ratelimit()) - pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret); + net_info_ratelimited("%s xmit error: %d\n", + pkt_dev->odevname, ret); pkt_dev->errors++; /* fallthru */ case NETDEV_TX_LOCKED: diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b442d35bbc8b..21318d15bbc3 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1524,11 +1524,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, err = 0; errout: - if (err < 0 && modified && net_ratelimit()) - printk(KERN_WARNING "A link change request failed with " - "some changes committed already. Interface %s may " - "have been left with an inconsistent configuration, " - "please check.\n", dev->name); + if (err < 0 && modified) + net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", + dev->name); if (send_addr_notify) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2c35da818ef9..2a1871942317 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3299,10 +3299,8 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) { if (unlikely(start > skb_headlen(skb)) || unlikely((int)start + off > skb_headlen(skb) - 2)) { - if (net_ratelimit()) - printk(KERN_WARNING - "bad partial csum: csum=%u/%u len=%u\n", - start, off, skb_headlen(skb)); + net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", + start, off, skb_headlen(skb)); return false; } skb->ip_summed = CHECKSUM_PARTIAL; @@ -3314,8 +3312,7 @@ EXPORT_SYMBOL_GPL(skb_partial_csum_set); void __skb_warn_lro_forwarding(const struct sk_buff *skb) { - if (net_ratelimit()) - pr_warning("%s: received packets cannot be forwarded" - " while LRO is enabled\n", skb->dev->name); + net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", + skb->dev->name); } EXPORT_SYMBOL(__skb_warn_lro_forwarding); diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 65a8cd7891fe..7eaf98799729 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -438,9 +438,8 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn res->fi = NULL; return 1; default: - if (net_ratelimit()) - printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", - type); + net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", + type); res->fi = NULL; return -EINVAL; } diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index ee7013f24fca..ac90f658586c 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -162,8 +162,8 @@ static int dn_neigh_construct(struct neighbour *neigh) else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK)) dn_dn2eth(neigh->ha, dn->addr); else { - if (net_ratelimit()) - printk(KERN_DEBUG "Trying to create neigh for hw %d\n", dev->type); + net_dbg_ratelimited("Trying to create neigh for hw %d\n", + dev->type); return -EINVAL; } @@ -236,15 +236,13 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) if (skb_headroom(skb) < headroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); if (skb2 == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "dn_long_output: no memory\n"); + net_crit_ratelimited("dn_long_output: no memory\n"); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; - if (net_ratelimit()) - printk(KERN_INFO "dn_long_output: Increasing headroom\n"); + net_info_ratelimited("dn_long_output: Increasing headroom\n"); } data = skb_push(skb, sizeof(struct dn_long_packet) + 3); @@ -281,15 +279,13 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) if (skb_headroom(skb) < headroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); if (skb2 == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "dn_short_output: no memory\n"); + net_crit_ratelimited("dn_short_output: no memory\n"); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; - if (net_ratelimit()) - printk(KERN_INFO "dn_short_output: Increasing headroom\n"); + net_info_ratelimited("dn_short_output: Increasing headroom\n"); } data = skb_push(skb, sizeof(struct dn_short_packet) + 2); @@ -322,15 +318,13 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) if 
(skb_headroom(skb) < headroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); if (skb2 == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "dn_phase3_output: no memory\n"); + net_crit_ratelimited("dn_phase3_output: no memory\n"); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; - if (net_ratelimit()) - printk(KERN_INFO "dn_phase3_output: Increasing headroom\n"); + net_info_ratelimited("dn_phase3_output: Increasing headroom\n"); } data = skb_push(skb, sizeof(struct dn_short_packet) + 2); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 58084f37151e..c344163e6ac0 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -80,12 +80,15 @@ extern int decnet_log_martians; static void dn_log_martian(struct sk_buff *skb, const char *msg) { - if (decnet_log_martians && net_ratelimit()) { + if (decnet_log_martians) { char *devname = skb->dev ? skb->dev->name : "???"; struct dn_skb_cb *cb = DN_SKB_CB(skb); - printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", - msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst), - le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port)); + net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", + msg, devname, + le16_to_cpu(cb->src), + le16_to_cpu(cb->dst), + le16_to_cpu(cb->src_port), + le16_to_cpu(cb->dst_port)); } } diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index b952f88d9c1f..564a6ad13ce7 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -1,4 +1,3 @@ - /* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket @@ -554,8 +553,8 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, unsigned char *msg; if ((dst == NULL) || (rem == 0)) { - if (net_ratelimit()) - printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst); + net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! 
Please report this to SteveW@ACM.org rem=%u dst=%p\n", + le16_to_cpu(rem), dst); return; } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 7e1f8788da19..586302e557ad 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -748,8 +748,7 @@ static int dn_output(struct sk_buff *skb) dn_to_neigh_output); error: - if (net_ratelimit()) - printk(KERN_DEBUG "dn_output: This should not happen\n"); + net_dbg_ratelimited("dn_output: This should not happen\n"); kfree_skb(skb); @@ -807,12 +806,10 @@ drop: */ static int dn_rt_bug(struct sk_buff *skb) { - if (net_ratelimit()) { - struct dn_skb_cb *cb = DN_SKB_CB(skb); + struct dn_skb_cb *cb = DN_SKB_CB(skb); - printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n", - le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); - } + net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", + le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); kfree_skb(skb); @@ -1327,9 +1324,7 @@ static int dn_route_input_slow(struct sk_buff *skb) out_dev = DN_FIB_RES_DEV(res); if (out_dev == NULL) { - if (net_ratelimit()) - printk(KERN_CRIT "Bug in dn_route_input_slow() " - "No output device\n"); + net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n"); goto e_inval; } dev_hold(out_dev); diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index a9a62f225a6b..650f3380c98a 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -836,8 +836,8 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create) if (!create) return NULL; - if (in_interrupt() && net_ratelimit()) { - printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); + if (in_interrupt()) { + net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n"); return NULL; } diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 1531135130db..44b890936fc0 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -57,8 +57,7 @@ nlmsg_failure: if (skb) kfree_skb(skb); *errp = -ENOMEM; - if (net_ratelimit()) - printk(KERN_ERR "dn_rtmsg: error creating netlink message\n"); + net_err_ratelimited("dn_rtmsg: error creating netlink message\n"); return NULL; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 2cb2bf845641..c75efbdc71cb 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -713,11 +713,10 @@ static void icmp_unreach(struct sk_buff *skb) if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { - if (net_ratelimit()) - pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", - &ip_hdr(skb)->saddr, - icmph->type, icmph->code, - &iph->daddr, skb->dev->name); + net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", + &ip_hdr(skb)->saddr, + icmph->type, icmph->code, + &iph->daddr, skb->dev->name); goto out; } @@ -906,8 +905,7 @@ out_err: static void icmp_address(struct sk_buff *skb) { #if 0 - if (net_ratelimit()) - printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n"); + net_dbg_ratelimited("a guy asks for address mask. 
Who is it?\n"); #endif } @@ -943,10 +941,10 @@ static void icmp_address_reply(struct sk_buff *skb) inet_ifa_match(ip_hdr(skb)->saddr, ifa)) break; } - if (!ifa && net_ratelimit()) { - pr_info("Wrong address mask %pI4 from %s/%pI4\n", - mp, dev->name, &ip_hdr(skb)->saddr); - } + if (!ifa) + net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n", + mp, + dev->name, &ip_hdr(skb)->saddr); } } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 71e5c328176c..9f9bd139335f 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -644,8 +644,7 @@ out_nomem: err = -ENOMEM; goto out_fail; out_oversize: - if (net_ratelimit()) - pr_info("Oversized IP packet from %pI4\n", &qp->saddr); + net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 26eccc5bab1c..8590144ca330 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -210,9 +210,8 @@ static int ip_local_deliver_finish(struct sk_buff *skb) int ret; if (!net_eq(net, &init_net) && !ipprot->netns_ok) { - if (net_ratelimit()) - printk("%s: proto %d isn't netns-ready\n", - __func__, protocol); + net_info_ratelimited("%s: proto %d isn't netns-ready\n", + __func__, protocol); kfree_skb(skb); goto out; } @@ -298,10 +297,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb) if (in_dev) { if (!IN_DEV_SOURCE_ROUTE(in_dev)) { - if (IN_DEV_LOG_MARTIANS(in_dev) && - net_ratelimit()) - pr_info("source route option %pI4 -> %pI4\n", - &iph->saddr, &iph->daddr); + if (IN_DEV_LOG_MARTIANS(in_dev)) + net_info_ratelimited("source route option %pI4 -> %pI4\n", + &iph->saddr, + &iph->daddr); goto drop; } } diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 95722ed0e5bb..708b99494e23 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -578,8 +578,10 @@ void ip_forward_options(struct sk_buff *skb) ip_hdr(skb)->daddr = opt->nexthop; ip_rt_get_source(&optptr[srrptr-1], skb, rt); optptr[2] = srrptr+4; - } else if (net_ratelimit()) - pr_crit("%s(): Argh! Destination lost!\n", __func__); + } else { + net_crit_ratelimited("%s(): Argh! Destination lost!\n", + __func__); + } if (opt->ts_needaddr) { optptr = raw + opt->ts; ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4910176d24ed..451f97c42eb4 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -214,8 +214,8 @@ static inline int ip_finish_output2(struct sk_buff *skb) } rcu_read_unlock(); - if (net_ratelimit()) - printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n"); + net_dbg_ratelimited("%s: No header cache and no neighbour!\n", + __func__); kfree_skb(skb); return -EINVAL; } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index f267280d8709..24a3df9890e1 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -955,8 +955,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Fragments are not supported */ if (ip_is_fragment(h)) { - if (net_ratelimit()) - pr_err("DHCP/BOOTP: Ignoring fragmented reply\n"); + net_err_ratelimited("DHCP/BOOTP: Ignoring fragmented reply\n"); goto drop; } @@ -1004,16 +1003,14 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Is it a reply to our BOOTP request? 
*/ if (b->op != BOOTP_REPLY || b->xid != d->xid) { - if (net_ratelimit()) - pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n", - b->op, b->xid); + net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n", + b->op, b->xid); goto drop_unlock; } /* Is it a reply for the device we are configuring? */ if (b->xid != ic_dev_xid) { - if (net_ratelimit()) - pr_err("DHCP/BOOTP: Ignoring delayed packet\n"); + net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n"); goto drop_unlock; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5bef604ac0fa..a9e519ad6db5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -949,8 +949,7 @@ static int ipmr_cache_report(struct mr_table *mrt, ret = sock_queue_rcv_skb(mroute_sk, skb); rcu_read_unlock(); if (ret < 0) { - if (net_ratelimit()) - pr_warn("mroute: pending queue full, dropping entries\n"); + net_warn_ratelimited("mroute: pending queue full, dropping entries\n"); kfree_skb(skb); } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index a3935273869f..97e61eadf580 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -221,9 +221,8 @@ static inline int arp_checkentry(const struct arpt_arp *arp) static unsigned int arpt_error(struct sk_buff *skb, const struct xt_action_param *par) { - if (net_ratelimit()) - pr_err("arp_tables: error: '%s'\n", - (const char *)par->targinfo); + net_err_ratelimited("arp_tables: error: '%s'\n", + (const char *)par->targinfo); return NF_DROP; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 585b80f3cc68..170b1fdd6b72 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -153,8 +153,7 @@ ip_checkentry(const struct ipt_ip *ip) static unsigned int ipt_error(struct sk_buff *skb, const struct xt_action_param *par) { - if (net_ratelimit()) - pr_info("error: `%s'\n", (const char *)par->targinfo); + net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index a639967eb727..fe5daea5214d 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -246,8 +246,7 @@ clusterip_hashfn(const struct sk_buff *skb, dport = ports[1]; } } else { - if (net_ratelimit()) - pr_info("unknown protocol %u\n", iph->protocol); + net_info_ratelimited("unknown protocol %u\n", iph->protocol); } switch (config->hash_mode) { diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 82536701e3a3..cad29c121318 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -42,9 +42,7 @@ static int set_addr(struct sk_buff *skb, if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, addroff, sizeof(buf), (char *) &buf, sizeof(buf))) { - if (net_ratelimit()) - pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet" - " error\n"); + net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n"); return -1; } @@ -58,9 +56,7 @@ static int set_addr(struct sk_buff *skb, if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, addroff, sizeof(buf), (char *) &buf, sizeof(buf))) { - if (net_ratelimit()) - pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet" - " error\n"); + net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n"); return -1; } /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy @@ -214,8 +210,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, /* Run out of expectations */ if (i >= 
H323_RTP_CHANNEL_MAX) { - if (net_ratelimit()) - pr_notice("nf_nat_h323: out of expectations\n"); + net_notice_ratelimited("nf_nat_h323: out of expectations\n"); return 0; } @@ -244,8 +239,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_h323: out of RTP ports\n"); + net_notice_ratelimited("nf_nat_h323: out of RTP ports\n"); return 0; } @@ -308,8 +302,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_h323: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_h323: out of TCP ports\n"); return 0; } @@ -365,8 +358,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_q931: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); return 0; } @@ -456,8 +448,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_ras: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_ras: out of TCP ports\n"); return 0; } @@ -545,8 +536,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, } if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - pr_notice("nf_nat_q931: out of TCP ports\n"); + net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); return 0; } diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 2133c30a4a5f..746edec8b86e 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1206,8 +1206,7 @@ static int snmp_translate(struct nf_conn *ct, if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), paylen, &map, &udph->check)) { - if (net_ratelimit()) - printk(KERN_WARNING "bsalg: parser failed\n"); + net_warn_ratelimited("bsalg: parser failed\n"); return NF_DROP; } return NF_ACCEPT; @@ -1241,9 +1240,8 @@ static int help(struct sk_buff *skb, unsigned int protoff, * can mess around with the payload. 
*/ if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) { - if (net_ratelimit()) - printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", - &iph->saddr, &iph->daddr); + net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n", + &iph->saddr, &iph->daddr); return NF_DROP; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5773f5d9e213..42d76441501f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -960,8 +960,7 @@ void rt_cache_flush_batch(struct net *net) static void rt_emergency_hash_rebuild(struct net *net) { - if (net_ratelimit()) - pr_warn("Route hash chain too long!\n"); + net_warn_ratelimited("Route hash chain too long!\n"); rt_cache_invalidate(net); } @@ -1084,8 +1083,7 @@ static int rt_garbage_collect(struct dst_ops *ops) goto out; if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size) goto out; - if (net_ratelimit()) - pr_warn("dst cache overflow\n"); + net_warn_ratelimited("dst cache overflow\n"); RT_CACHE_STAT_INC(gc_dst_overflow); return 1; @@ -1182,8 +1180,7 @@ restart: if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) { int err = rt_bind_neighbour(rt); if (err) { - if (net_ratelimit()) - pr_warn("Neighbour table failure & not caching routes\n"); + net_warn_ratelimited("Neighbour table failure & not caching routes\n"); ip_rt_put(rt); return ERR_PTR(err); } @@ -1299,8 +1296,7 @@ restart: goto restart; } - if (net_ratelimit()) - pr_warn("Neighbour table overflow\n"); + net_warn_ratelimited("Neighbour table overflow\n"); rt_drop(rt); return ERR_PTR(-ENOBUFS); } @@ -1503,11 +1499,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, reject_redirect: #ifdef CONFIG_IP_ROUTE_VERBOSE - if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - pr_info("Redirect from %pI4 on %s about %pI4 ignored\n" - " Advised path = %pI4 -> %pI4\n", - &old_gw, dev->name, &new_gw, - &saddr, &daddr); + if (IN_DEV_LOG_MARTIANS(in_dev)) + net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n" + " Advised path = %pI4 -> %pI4\n", + &old_gw, dev->name, &new_gw, + &saddr, &daddr); #endif ; } @@ -1617,11 +1613,10 @@ void ip_rt_send_redirect(struct sk_buff *skb) ++peer->rate_tokens; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && - peer->rate_tokens == ip_rt_redirect_number && - net_ratelimit()) - pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", - &ip_hdr(skb)->saddr, rt->rt_iif, - &rt->rt_dst, &rt->rt_gateway); + peer->rate_tokens == ip_rt_redirect_number) + net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", + &ip_hdr(skb)->saddr, rt->rt_iif, + &rt->rt_dst, &rt->rt_gateway); #endif } } @@ -2135,8 +2130,7 @@ static int __mkroute_input(struct sk_buff *skb, /* get a working reference to the output device */ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); if (out_dev == NULL) { - if (net_ratelimit()) - pr_crit("Bug in ip_route_input_slow(). Please report.\n"); + net_crit_ratelimited("Bug in ip_route_input_slow(). 
Please report.\n"); return -EINVAL; } @@ -2407,9 +2401,9 @@ no_route: martian_destination: RT_CACHE_STAT_INC(in_martian_dst); #ifdef CONFIG_IP_ROUTE_VERBOSE - if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - pr_warn("martian destination %pI4 from %pI4, dev %s\n", - &daddr, &saddr, dev->name); + if (IN_DEV_LOG_MARTIANS(in_dev)) + net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n", + &daddr, &saddr, dev->name); #endif e_hostunreach: diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 86e2cf2ff770..feb2e25091b1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1745,9 +1745,9 @@ do_prequeue: } if ((flags & MSG_PEEK) && (peek_seq - copied - urg_hole != tp->copied_seq)) { - if (net_ratelimit()) - printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", - current->comm, task_pid_nr(current)); + net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", + current->comm, + task_pid_nr(current)); peek_seq = tp->copied_seq; } continue; @@ -2002,10 +2002,10 @@ bool tcp_check_oom(struct sock *sk, int shift) too_many_orphans = tcp_too_many_orphans(sk, shift); out_of_socket_memory = tcp_out_of_memory(sk); - if (too_many_orphans && net_ratelimit()) - pr_info("too many orphaned sockets\n"); - if (out_of_socket_memory && net_ratelimit()) - pr_info("out of memory -- consider tuning tcp_mem\n"); + if (too_many_orphans) + net_info_ratelimited("too many orphaned sockets\n"); + if (out_of_socket_memory) + net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); return too_many_orphans || out_of_socket_memory; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b99ada27a136..100b242135b1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3952,10 +3952,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { - if (net_ratelimit()) - pr_info("%s: Illegal window scaling value %d >14 received\n", - __func__, - snd_wscale); + net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", + __func__, + snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4ff5e1f70d16..2e76ffb66d7c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1239,12 +1239,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { - if (net_ratelimit()) { - pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", - &iph->saddr, ntohs(th->source), - &iph->daddr, ntohs(th->dest), - genhash ? " tcp_v4_calc_md5_hash failed" : ""); - } + net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", + &iph->saddr, ntohs(th->source), + &iph->daddr, ntohs(th->dest), + genhash ? 
" tcp_v4_calc_md5_hash failed" + : ""); return 1; } return 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d94733009923..7979bfd5d0fb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2181,8 +2181,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) #if FASTRETRANS_DEBUG > 0 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { - if (net_ratelimit()) - printk(KERN_DEBUG "retrans_out leaked.\n"); + net_dbg_ratelimited("retrans_out leaked\n"); } #endif if (!tp->retrans_out) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8b7f100a4b45..4d1d51a0bd26 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1415,9 +1415,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) return; } - if (net_ratelimit()) - printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", - ifp->idev->dev->name, &ifp->addr); + net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n", + ifp->idev->dev->name, &ifp->addr); if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) { struct in6_addr addr; @@ -1847,16 +1846,15 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) prefered_lft = ntohl(pinfo->prefered); if (prefered_lft > valid_lft) { - if (net_ratelimit()) - printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n"); + net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n"); return; } in6_dev = in6_dev_get(dev); if (in6_dev == NULL) { - if (net_ratelimit()) - printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name); + net_dbg_ratelimited("addrconf: device %s not configured\n", + dev->name); return; } @@ -1931,9 +1929,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) } goto ok; } - if (net_ratelimit()) - printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n", - pinfo->prefix_len); + net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n", + pinfo->prefix_len); in6_dev_put(in6_dev); return; diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 2ae79dbeec2f..a9f4156f7c3f 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -189,8 +189,8 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des hao = (struct ipv6_destopt_hao *)&opt[off]; if (hao->length != sizeof(hao->addr)) { - if (net_ratelimit()) - printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length); + net_warn_ratelimited("destopt hao: invalid header length: %u\n", + hao->length); goto bad; } final_addr = hao->addr; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index d8e05af2c4bb..be2264e7dd2d 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -252,8 +252,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, dst->dev, dst_output); } - if (net_ratelimit()) - printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); + net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n"); skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 5df487c81ed9..27fec272d39d 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -460,19 +460,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Path to destination invalid " - "or inactive!\n", t->parms.name); + 
net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", + t->parms.name); rel_msg = 1; break; case ICMPV6_TIME_EXCEED: if ((*code) == ICMPV6_EXC_HOPLIMIT) { - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Too small hop limit or " - "routing loop in tunnel!\n", - t->parms.name); + net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", + t->parms.name); rel_msg = 1; } break; @@ -484,17 +479,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, if (teli && teli == *info - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Too small encapsulation " - "limit or routing loop in " - "tunnel!\n", t->parms.name); + net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", + t->parms.name); rel_msg = 1; } - } else if (net_ratelimit()) { - printk(KERN_WARNING - "%s: Recipient unable to parse tunneled " - "packet!\n ", t->parms.name); + } else { + net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", + t->parms.name); } break; case ICMPV6_PKT_TOOBIG: @@ -919,10 +910,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, if (tdev == dev) { stats->collisions++; - if (net_ratelimit()) - printk(KERN_WARNING - "%s: Local routing loop detected!\n", - t->parms.name); + net_warn_ratelimited("%s: Local routing loop detected!\n", + t->parms.name); goto tx_err_dst_release; } mtu = dst_mtu(dst) - sizeof (*ipv6h); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index efc0098b59dd..ba936e18b61b 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1147,8 +1147,7 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, */ ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); if (ret < 0) { - if (net_ratelimit()) - printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); + net_warn_ratelimited("mroute6: pending queue full, dropping entries\n"); kfree_skb(skb); } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 308bdd651230..d7cb04506c3d 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -181,8 +181,7 @@ ip6_checkentry(const struct ip6t_ip6 *ipv6) static unsigned int ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) { - if (net_ratelimit()) - pr_info("error: `%s'\n", (const char *)par->targinfo); + net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index aad2fa41cf46..fd4fb34c51c7 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -114,8 +114,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) GFP_ATOMIC); if (!nskb) { - if (net_ratelimit()) - pr_debug("cannot alloc skb\n"); + net_dbg_ratelimited("cannot alloc skb\n"); dst_release(dst); return; } @@ -210,8 +209,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par) send_reset(net, skb); break; default: - if (net_ratelimit()) - pr_info("case %u not handled yet\n", reject->with); + net_info_ratelimited("case %u not handled yet\n", reject->with); break; } diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 00d19173db7e..4d782405f125 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -42,8 +42,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) /* root is playing 
with raw sockets. */ if (skb->len < sizeof(struct iphdr) || ip_hdrlen(skb) < sizeof(struct iphdr)) { - if (net_ratelimit()) - pr_warning("ip6t_hook: happy cracking.\n"); + net_warn_ratelimited("ip6t_hook: happy cracking\n"); return NF_ACCEPT; } #endif diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index fe925e492520..3224ef90a21a 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -232,8 +232,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum, { /* root is playing with raw sockets. */ if (skb->len < sizeof(struct ipv6hdr)) { - if (net_ratelimit()) - pr_notice("ipv6_conntrack_local: packet too short\n"); + net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); return NF_ACCEPT; } return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 48a2be1b7c70..c9c78c2e666b 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -444,12 +444,11 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) return head; out_oversize: - if (net_ratelimit()) - printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len); + net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n", + payload_len); goto out_fail; out_oom: - if (net_ratelimit()) - printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n"); + net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n"); out_fail: return NULL; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 54c5d2b704df..f1b86fdc06ad 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -518,12 +518,10 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, return 1; out_oversize: - if (net_ratelimit()) - printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len); + net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: - if (net_ratelimit()) - printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); + net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0aefc36f74c7..e20e32069024 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -794,9 +794,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, goto retry; } - if (net_ratelimit()) - printk(KERN_WARNING - "ipv6: Neighbour table overflow.\n"); + net_warn_ratelimited("ipv6: Neighbour table overflow\n"); dst_free(&rt->dst); return NULL; } @@ -1643,9 +1641,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, rt = ip6_route_redirect(dest, src, saddr, neigh->dev); if (rt == net->ipv6.ip6_null_entry) { - if (net_ratelimit()) - printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop " - "for redirect target\n"); + net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); goto out; } @@ -2106,9 +2102,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, int err; if (!rt) { - if (net_ratelimit()) - pr_warning("IPv6: Maximum number of routes reached," - " consider increasing route/max_size.\n"); + net_warn_ratelimited("IPv6: Maximum number of routes reached, consider increasing route/max_size\n"); return ERR_PTR(-ENOMEM); } diff --git 
a/net/ipv6/sit.c b/net/ipv6/sit.c index e5fef943e30a..a36a09701bff 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -702,8 +702,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); if (neigh == NULL) { - if (net_ratelimit()) - printk(KERN_DEBUG "sit: nexthop == NULL\n"); + net_dbg_ratelimited("sit: nexthop == NULL\n"); goto tx_error; } @@ -732,8 +731,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); if (neigh == NULL) { - if (net_ratelimit()) - printk(KERN_DEBUG "sit: nexthop == NULL\n"); + net_dbg_ratelimited("sit: nexthop == NULL\n"); goto tx_error; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 078d039e8fd2..4cf55ae7bf80 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -723,12 +723,10 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) NULL, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { - if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", - genhash ? "failed" : "mismatch", - &ip6h->saddr, ntohs(th->source), - &ip6h->daddr, ntohs(th->dest)); - } + net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", + genhash ? "failed" : "mismatch", + &ip6h->saddr, ntohs(th->source), + &ip6h->daddr, ntohs(th->dest)); return 1; } return 0; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 17bc85d5b7ba..78424f41cf36 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -806,10 +806,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, sk_wait_data(sk, &timeo); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { - if (net_ratelimit()) - printk(KERN_DEBUG "LLC(%s:%d): Application " - "bug, race in MSG_PEEK.\n", - current->comm, task_pid_nr(current)); + net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", + current->comm, + task_pid_nr(current)); peek_seq = llc->copied_seq; } continue; diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a070d4f460ea..26ddb699d693 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -260,11 +260,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, (buf_size > IEEE80211_MAX_AMPDU_BUF)) { status = WLAN_STATUS_INVALID_QOS_PARAM; #ifdef CONFIG_MAC80211_HT_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "AddBA Req with bad params from " - "%pM on tid %u. policy %d, buffer size %d\n", - mgmt->sa, tid, ba_policy, - buf_size); + net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. 
policy %d, buffer size %d\n", + mgmt->sa, tid, ba_policy, buf_size); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto end_no_lock; } @@ -281,10 +278,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, if (sta->ampdu_mlme.tid_rx[tid]) { #ifdef CONFIG_MAC80211_HT_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "unexpected AddBA Req from " - "%pM on tid %u\n", - mgmt->sa, tid); + net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n", + mgmt->sa, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ /* delete existing Rx BA session on the same tid */ diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 9b603366943c..6f8615c54b22 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -306,10 +306,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; #ifdef CONFIG_MAC80211_HT_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", - mgmt->sa, initiator ? "initiator" : "recipient", tid, - le16_to_cpu(mgmt->u.action.u.delba.reason_code)); + net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n", + mgmt->sa, initiator ? "initiator" : "recipient", + tid, + le16_to_cpu(mgmt->u.action.u.delba.reason_code)); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (initiator == WLAN_BACK_INITIATOR) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index ebafba61c2ff..3ad33a824624 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -303,9 +303,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, * allow new one to be added. */ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", - sdata->name, addr); + net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); rcu_read_lock(); return NULL; } @@ -582,9 +581,8 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, * allow new one to be added. 
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", - sdata->name, addr); + net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); return; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 52cd301245ae..b3b3c264ff66 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1567,9 +1567,9 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, } #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (beacon && net_ratelimit()) - printk(KERN_DEBUG "%s: detected beacon loss from AP " - "- sending probe request\n", sdata->name); + if (beacon) + net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n", + sdata->name); #endif /* @@ -2404,10 +2404,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: cancelling probereq poll due " - "to a received beacon\n", sdata->name); - } + net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", + sdata->name); #endif ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; mutex_lock(&local->iflist_mtx); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d722c40c7eca..8257a09eeed4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1752,9 +1752,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) * local net stack and back to the wireless medium */ xmit_skb = skb_copy(skb, GFP_ATOMIC); - if (!xmit_skb && net_ratelimit()) - printk(KERN_DEBUG "%s: failed to clone " - "multicast frame\n", dev->name); + if (!xmit_skb) + net_dbg_ratelimited("%s: failed to clone multicast frame\n", + dev->name); } else { dsta = sta_info_get(sdata, skb->data); if (dsta) { @@ -1957,9 +1957,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) fwd_skb = skb_copy(skb, GFP_ATOMIC); if (!fwd_skb) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: failed to clone mesh frame\n", - sdata->name); + net_dbg_ratelimited("%s: failed to clone mesh frame\n", + sdata->name); goto out; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e03abc78ee53..5f827a6b0d8d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -413,9 +413,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", - tx->sdata->name); + net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n", + tx->sdata->name); #endif dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); } else @@ -476,10 +475,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "%s: STA %pM TX buffer for " - "AC %d full - dropping oldest frame\n", - tx->sdata->name, sta->sta.addr, ac); + net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n", + tx->sdata->name, sta->sta.addr, ac); #endif dev_kfree_skb(old); } else @@ -1965,10 +1962,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, (cpu_to_be16(ethertype) != sdata->control_port_protocol || !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) { 
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "%s: dropped frame to %pM" - " (unauthorized port)\n", dev->name, - hdr.addr1); + net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", + dev->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 13fd2c55e329..f2de8c55ac50 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c @@ -107,8 +107,7 @@ static int amanda_help(struct sk_buff *skb, /* No data? */ dataoff = protoff + sizeof(struct udphdr); if (dataoff >= skb->len) { - if (net_ratelimit()) - printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len); + net_err_ratelimited("amanda_help: skblen = %u\n", skb->len); return NF_ACCEPT; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 32c59093146e..ac3af97cc468 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -683,10 +683,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone, unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { if (!early_drop(net, hash_bucket(hash, net))) { atomic_dec(&net->ct.count); - if (net_ratelimit()) - printk(KERN_WARNING - "nf_conntrack: table full, dropping" - " packet.\n"); + net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); return ERR_PTR(-ENOMEM); } } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 4147ba3f653c..45cf602a76bc 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -424,9 +424,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) } if (net->ct.expect_count >= nf_ct_expect_max) { - if (net_ratelimit()) - printk(KERN_WARNING - "nf_conntrack: expectation table full\n"); + net_warn_ratelimited("nf_conntrack: expectation table full\n"); ret = -EMFILE; } out: diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 471b054ad002..93c13eb67b3f 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -605,8 +605,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff, drop: spin_unlock_bh(&nf_h323_lock); - if (net_ratelimit()) - pr_info("nf_ct_h245: packet dropped\n"); + net_info_ratelimited("nf_ct_h245: packet dropped\n"); return NF_DROP; } @@ -1156,8 +1155,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff, drop: spin_unlock_bh(&nf_h323_lock); - if (net_ratelimit()) - pr_info("nf_ct_q931: packet dropped\n"); + net_info_ratelimited("nf_ct_q931: packet dropped\n"); return NF_DROP; } @@ -1731,8 +1729,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff, drop: spin_unlock_bh(&nf_h323_lock); - if (net_ratelimit()) - pr_info("nf_ct_ras: packet dropped\n"); + net_info_ratelimited("nf_ct_ras: packet dropped\n"); return NF_DROP; } diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 4f9390b98697..81366c118271 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -185,11 +185,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, tuple = &ct->tuplehash[dir].tuple; if (tuple->src.u3.ip != dcc_ip && tuple->dst.u3.ip != dcc_ip) { - if (net_ratelimit()) - printk(KERN_WARNING - "Forged DCC command from %pI4: %pI4:%u\n", - &tuple->src.u3.ip, - &dcc_ip, dcc_port); + net_warn_ratelimited("Forged DCC command from 
%pI4: %pI4:%u\n", + &tuple->src.u3.ip, + &dcc_ip, dcc_port); continue; } diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 8d6bcf32c0ed..4162437b8361 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -395,8 +395,7 @@ nlmsg_failure: nla_put_failure: if (skb) kfree_skb(skb); - if (net_ratelimit()) - printk(KERN_ERR "nf_queue: error creating packet message\n"); + net_err_ratelimited("nf_queue: error creating packet message\n"); return NULL; } @@ -433,10 +432,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) } if (queue->queue_total >= queue->queue_maxlen) { queue->queue_dropped++; - if (net_ratelimit()) - printk(KERN_WARNING "nf_queue: full at %d entries, " - "dropping packets(s).\n", - queue->queue_total); + net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n", + queue->queue_total); goto err_out_free_nskb; } entry->id = ++queue->id_sequence; diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 190ad37c5cf8..71a266de5fb4 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -67,15 +67,13 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (info->mss == XT_TCPMSS_CLAMP_PMTU) { if (dst_mtu(skb_dst(skb)) <= minlen) { - if (net_ratelimit()) - pr_err("unknown or invalid path-MTU (%u)\n", - dst_mtu(skb_dst(skb))); + net_err_ratelimited("unknown or invalid path-MTU (%u)\n", + dst_mtu(skb_dst(skb))); return -1; } if (in_mtu <= minlen) { - if (net_ratelimit()) - pr_err("unknown or invalid path-MTU (%u)\n", - in_mtu); + net_err_ratelimited("unknown or invalid path-MTU (%u)\n", + in_mtu); return -1; } newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index d0424f9621f2..5d5af1d04fa2 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -171,8 +171,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht, if (ht->cfg.max && ht->count >= ht->cfg.max) { /* FIXME: do something. question is what.. */ - if (net_ratelimit()) - pr_err("max count of %u reached\n", ht->cfg.max); + net_err_ratelimited("max count of %u reached\n", ht->cfg.max); ent = NULL; } else ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 5920bda4ab6b..3fd6c0d88e12 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -157,9 +157,9 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb) int len; if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { - if (net_ratelimit()) - pr_warn("%s: dropped over-mtu packet: %d > %d\n", - ovs_dp_name(vport->dp), packet_length(skb), mtu); + net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", + ovs_dp_name(vport->dp), + packet_length(skb), mtu); goto error; } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 0beba0e5312e..60e281ad0f07 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -1,5 +1,5 @@ /* - * net/sched/ipt.c iptables target interface + * net/sched/ipt.c iptables target interface * *TODO: Add other tables. 
For now we only support the ipv4 table targets * @@ -235,9 +235,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, result = TC_ACT_PIPE; break; default: - if (net_ratelimit()) - pr_notice("tc filter: Bogus netfilter code" - " %d assume ACCEPT\n", ret); + net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n", + ret); result = TC_POLICE_OK; break; } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index d583aea3b3df..fe81cc18e9e0 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -174,9 +174,8 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, } if (!(dev->flags & IFF_UP)) { - if (net_ratelimit()) - pr_notice("tc mirred to Houston: device %s is down\n", - dev->name); + net_notice_ratelimited("tc mirred to Houston: device %s is down\n", + dev->name); goto out; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 591b006a8c5a..d45373fb00b9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -234,8 +234,7 @@ out: return -1; deadloop: - if (net_ratelimit()) - pr_warning("cls_u32: dead loop\n"); + net_warn_ratelimited("cls_u32: dead loop\n"); return -1; } diff --git a/net/sched/ematch.c b/net/sched/ematch.c index aca233c2b848..3a633debb6df 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -537,9 +537,7 @@ pop_stack: return res; stack_overflow: - if (net_ratelimit()) - pr_warning("tc ematch: local stack overflow," - " increase NET_EMATCH_STACK\n"); + net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n"); return -1; } EXPORT_SYMBOL(__tcf_em_tree_match); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index d2daefcc205f..085ce53d570a 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1691,12 +1691,10 @@ reclassify: tp = otp; if (verd++ >= MAX_REC_LOOP) { - if (net_ratelimit()) - pr_notice("%s: packet reclassify loop" - " rule prio %u protocol %02x\n", - tp->q->ops->id, - tp->prio & 0xffff, - ntohs(tp->protocol)); + net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n", + tp->q->ops->id, + tp->prio & 0xffff, + ntohs(tp->protocol)); return TC_ACT_SHOT; } skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 0eb1202c22a6..511323e89cec 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -86,9 +86,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, * deadloop is detected. Return OK to try the next skb. 
*/ kfree_skb(skb); - if (net_ratelimit()) - pr_warning("Dead loop on netdevice %s, fix it urgently!\n", - dev_queue->dev->name); + net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n", + dev_queue->dev->name); ret = qdisc_qlen(q); } else { /* @@ -136,9 +135,9 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, ret = handle_dev_cpu_collision(skb, txq, q); } else { /* Driver returned NETDEV_TX_BUSY - requeue skb */ - if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) - pr_warning("BUG %s code %d qlen %d\n", - dev->name, ret, q->q.qlen); + if (unlikely(ret != NETDEV_TX_BUSY)) + net_warn_ratelimited("BUG %s code %d qlen %d\n", + dev->name, ret, q->q.qlen); ret = dev_requeue_skb(skb, q); } diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index ab620bf90785..e901583e4ea5 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -255,10 +255,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { - if (net_ratelimit()) - pr_warning("GRED: Unable to relocate VQ 0x%x " - "after dequeue, screwing up " - "backlog.\n", tc_index_to_dp(skb)); + net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n", + tc_index_to_dp(skb)); } else { q->backlog -= qdisc_pkt_len(skb); @@ -287,10 +285,8 @@ static unsigned int gred_drop(struct Qdisc *sch) u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { - if (net_ratelimit()) - pr_warning("GRED: Unable to relocate VQ 0x%x " - "while dropping, screwing up " - "backlog.\n", tc_index_to_dp(skb)); + net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n", + tc_index_to_dp(skb)); } else { q->backlog -= len; q->stats.other++; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index fbb374c65945..c96d1a81cf42 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1161,9 +1161,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, break; case SCTP_DISPOSITION_VIOLATION: - if (net_ratelimit()) - pr_err("protocol violation state %d chunkid %d\n", - state, subtype.chunk); + net_err_ratelimited("protocol violation state %d chunkid %d\n", + state, subtype.chunk); break; case SCTP_DISPOSITION_NOT_IMPL: diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index a147b4d307d2..9fca10357350 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1129,17 +1129,15 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, /* This should never happen, but lets log it if so. 
*/ if (unlikely(!link)) { if (from_addr.sa.sa_family == AF_INET6) { - if (net_ratelimit()) - pr_warn("%s association %p could not find address %pI6\n", - __func__, - asoc, - &from_addr.v6.sin6_addr); + net_warn_ratelimited("%s association %p could not find address %pI6\n", + __func__, + asoc, + &from_addr.v6.sin6_addr); } else { - if (net_ratelimit()) - pr_warn("%s association %p could not find address %pI4\n", - __func__, - asoc, - &from_addr.v4.sin_addr.s_addr); + net_warn_ratelimited("%s association %p could not find address %pI4\n", + __func__, + asoc, + &from_addr.v4.sin_addr.s_addr); } return SCTP_DISPOSITION_DISCARD; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 92ba71dfe080..b3b8a8d813eb 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5840,10 +5840,8 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) if (!sctp_sk(sk)->hmac && sctp_hmac_alg) { tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { - if (net_ratelimit()) { - pr_info("failed to load transform for %s: %ld\n", - sctp_hmac_alg, PTR_ERR(tfm)); - } + net_info_ratelimited("failed to load transform for %s: %ld\n", + sctp_hmac_alg, PTR_ERR(tfm)); return -ENOSYS; } sctp_sk(sk)->hmac = tfm; diff --git a/net/socket.c b/net/socket.c index d3aaa4f67a3b..2a2898ce596e 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1234,8 +1234,7 @@ int __sock_create(struct net *net, int family, int type, int protocol, */ sock = sock_alloc(); if (!sock) { - if (net_ratelimit()) - printk(KERN_WARNING "socket: no more sockets\n"); + net_warn_ratelimited("socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 4153846984ac..017c0117d154 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1041,23 +1041,21 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net) * Printk the given error with the address of the client that caused it. */ static __printf(2, 3) -int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) +void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) { + struct va_format vaf; va_list args; - int r; char buf[RPC_MAX_ADDRBUFLEN]; - if (!net_ratelimit()) - return 0; + va_start(args, fmt); - printk(KERN_WARNING "svc: %s: ", - svc_print_addr(rqstp, buf, sizeof(buf))); + vaf.fmt = fmt; + vaf.va = &args; - va_start(args, fmt); - r = vprintk(fmt, args); - va_end(args); + net_warn_ratelimited("svc: %s: %pV", + svc_print_addr(rqstp, buf, sizeof(buf)), &vaf); - return r; + va_end(args); } /* diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 4bda09d7e1a4..b98ee3514912 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -544,14 +544,11 @@ static void svc_check_conn_limits(struct svc_serv *serv) struct svc_xprt *xprt = NULL; spin_lock_bh(&serv->sv_lock); if (!list_empty(&serv->sv_tempsocks)) { - if (net_ratelimit()) { - /* Try to help the admin */ - printk(KERN_NOTICE "%s: too many open " - "connections, consider increasing %s\n", - serv->sv_name, serv->sv_maxconn ? - "the max number of connections." : - "the number of threads."); - } + /* Try to help the admin */ + net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n", + serv->sv_name, serv->sv_maxconn ? + "max number of connections" : + "number of threads"); /* * Always select the oldest connection. 
It's not fair, * but so is life diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index f0132b2e875e..a6de09de5d21 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -617,11 +617,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) rqstp->rq_prot = IPPROTO_UDP; if (!svc_udp_get_dest_address(rqstp, cmh)) { - if (net_ratelimit()) - printk(KERN_WARNING - "svc: received unknown control message %d/%d; " - "dropping RPC reply datagram\n", - cmh->cmsg_level, cmh->cmsg_type); + net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n", + cmh->cmsg_level, cmh->cmsg_type); skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } @@ -871,18 +868,17 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) if (err == -ENOMEM) printk(KERN_WARNING "%s: no more sockets!\n", serv->sv_name); - else if (err != -EAGAIN && net_ratelimit()) - printk(KERN_WARNING "%s: accept failed (err %d)!\n", - serv->sv_name, -err); + else if (err != -EAGAIN) + net_warn_ratelimited("%s: accept failed (err %d)!\n", + serv->sv_name, -err); return NULL; } set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_getpeername(newsock, sin, &slen); if (err < 0) { - if (net_ratelimit()) - printk(KERN_WARNING "%s: peername failed (err %d)!\n", - serv->sv_name, -err); + net_warn_ratelimited("%s: peername failed (err %d)!\n", + serv->sv_name, -err); goto failed; /* aborted connection or whatever */ } @@ -1012,19 +1008,15 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) * bit set in the fragment length header. * But apparently no known nfs clients send fragmented * records. */ - if (net_ratelimit()) - printk(KERN_NOTICE "RPC: multiple fragments " - "per record not supported\n"); + net_notice_ratelimited("RPC: multiple fragments per record not supported\n"); goto err_delete; } svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); if (svsk->sk_reclen > serv->sv_max_mesg) { - if (net_ratelimit()) - printk(KERN_NOTICE "RPC: " - "fragment too large: 0x%08lx\n", - (unsigned long)svsk->sk_reclen); + net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n", + (unsigned long)svsk->sk_reclen); goto err_delete; } } diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c index 755738d26bb4..1526c211db66 100644 --- a/net/wireless/lib80211_crypt_ccmp.c +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -304,10 +304,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: received packet without ExtIV" - " flag from %pM\n", hdr->addr2); - } + net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n", + hdr->addr2); key->dot11RSNAStatsCCMPFormatErrors++; return -2; } @@ -318,11 +316,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -6; } if (!key->key_set) { - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: received packet from %pM" - " with keyid=%d that does not have a configured" - " key\n", hdr->addr2, keyidx); - } + net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n", + hdr->addr2, keyidx); return -3; } @@ -336,15 +331,11 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (ccmp_replay_check(pn, key->rx_pn)) { #ifdef CONFIG_LIB80211_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG 
"CCMP: replay detected: STA=%pM " - "previous PN %02x%02x%02x%02x%02x%02x " - "received PN %02x%02x%02x%02x%02x%02x\n", - hdr->addr2, - key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], - key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], - pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); - } + net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n", + hdr->addr2, + key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], + key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], + pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); #endif key->dot11RSNAStatsCCMPReplays++; return -4; @@ -370,10 +361,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { - if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: decrypt failed: STA=" - "%pM\n", hdr->addr2); - } + net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n", + hdr->addr2); key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index 38734846c19e..d475cfc8568f 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -360,12 +360,9 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) struct scatterlist sg; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { - if (net_ratelimit()) { - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *)skb->data; - printk(KERN_DEBUG ": TKIP countermeasures: dropped " - "TX packet to %pM\n", hdr->addr1); - } + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n", + hdr->addr1); return -1; } @@ -420,10 +417,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) hdr = (struct ieee80211_hdr *)skb->data; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { - if (net_ratelimit()) { - printk(KERN_DEBUG ": TKIP countermeasures: dropped " - "received packet from %pM\n", hdr->addr2); - } + net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n", + hdr->addr2); return -1; } @@ -433,10 +428,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet without ExtIV" - " flag from %pM\n", hdr->addr2); - } + net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n", + hdr->addr2); return -2; } keyidx >>= 6; @@ -446,11 +439,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -6; } if (!tkey->key_set) { - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet from %pM" - " with keyid=%d that does not have a configured" - " key\n", hdr->addr2, keyidx); - } + net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n", + hdr->addr2, keyidx); return -3; } iv16 = (pos[0] << 8) | pos[2]; @@ -459,12 +449,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { #ifdef CONFIG_LIB80211_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" - " previous TSC %08x%04x received TSC " - "%08x%04x\n", hdr->addr2, - tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); - } + net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n", + 
hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, + iv32, iv16); #endif tkey->dot11RSNAStatsTKIPReplays++; return -4; @@ -481,11 +468,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); sg_init_one(&sg, pos, plen + 4); if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { - if (net_ratelimit()) { - printk(KERN_DEBUG ": TKIP: failed to decrypt " - "received packet from %pM\n", - hdr->addr2); - } + net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n", + hdr->addr2); return -7; } @@ -501,10 +485,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) tkey->rx_phase1_done = 0; } #ifdef CONFIG_LIB80211_DEBUG - if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: ICV error detected: STA=" - "%pM\n", hdr->addr2); - } + net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n", + hdr->addr2); #endif tkey->dot11RSNAStatsTKIPICVErrors++; return -5; -- cgit v1.2.2 From bc9b35ad41387379e0b1257b3171da0dca73562d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 15 May 2012 15:04:57 -0400 Subject: xfrm: Convert several xfrm policy match functions to bool. xfrm_selector_match xfrm_sec_ctx_match __xfrm4_selector_match __xfrm6_selector_match Signed-off-by: David S. Miller --- net/xfrm/xfrm_policy.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7661576b6f45..3c87a1c4066f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -56,7 +56,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *xdst); static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, int dir); -static inline int +static inline bool __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi4 *fl4 = &fl->u.ip4; @@ -69,7 +69,7 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); } -static inline int +static inline bool __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi6 *fl6 = &fl->u.ip6; @@ -82,8 +82,8 @@ __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); } -int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, - unsigned short family) +bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, + unsigned short family) { switch (family) { case AF_INET: @@ -91,7 +91,7 @@ int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, case AF_INET6: return __xfrm6_selector_match(sel, fl); } - return 0; + return false; } static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, @@ -877,7 +877,8 @@ static int xfrm_policy_match(const struct xfrm_policy *pol, u8 type, u16 family, int dir) { const struct xfrm_selector *sel = &pol->selector; - int match, ret = -ESRCH; + int ret = -ESRCH; + bool match; if (pol->family != family || (fl->flowi_mark & pol->mark.m) != pol->mark.v || @@ -1006,8 +1007,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, read_lock_bh(&xfrm_policy_lock); if ((pol = sk->sk_policy[dir]) != NULL) { - int match = xfrm_selector_match(&pol->selector, fl, - sk->sk_family); + bool match = xfrm_selector_match(&pol->selector, fl, + sk->sk_family); int err = 0; if (match) { @@ -2767,8 +2768,8 @@ 
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); #endif #ifdef CONFIG_XFRM_MIGRATE -static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, - const struct xfrm_selector *sel_tgt) +static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, + const struct xfrm_selector *sel_tgt) { if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { if (sel_tgt->family == sel_cmp->family && @@ -2778,14 +2779,14 @@ static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, sel_cmp->family) == 0 && sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { - return 1; + return true; } } else { if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { - return 1; + return true; } } - return 0; + return false; } static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, -- cgit v1.2.2 From 60eea6cf2964beea6c38d9050bc3823a93db97e0 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 10 May 2012 16:17:00 -0400 Subject: atm: remove the coupling to token ring support The token ring support is going away, so decouple the atm support from it in advance. Signed-off-by: Paul Gortmaker --- net/atm/lec.c | 138 ++++------------------------------------------------------ net/atm/lec.h | 1 - 2 files changed, 8 insertions(+), 131 deletions(-) (limited to 'net') diff --git a/net/atm/lec.c b/net/atm/lec.c index bb35cb76b3af..a7d172105c99 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -26,11 +26,6 @@ #include #include -/* TokenRing if needed */ -#ifdef CONFIG_TR -#include -#endif - /* And atm device */ #include #include @@ -162,50 +157,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) } #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ -/* - * Modelled after tr_type_trans - * All multicast and ARE or STE frames go to BUS. - * Non source routed frames go by destination address. - * Last hop source routed frames go by destination address. - * Not last hop source routed frames go by _next_ route descriptor. - * Returns pointer to destination MAC address or fills in rdesc - * and returns NULL. - */ -#ifdef CONFIG_TR -static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) -{ - struct trh_hdr *trh; - unsigned int riflen, num_rdsc; - - trh = (struct trh_hdr *)packet; - if (trh->daddr[0] & (uint8_t) 0x80) - return bus_mac; /* multicast */ - - if (trh->saddr[0] & TR_RII) { - riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; - if ((ntohs(trh->rcf) >> 13) != 0) - return bus_mac; /* ARE or STE */ - } else - return trh->daddr; /* not source routed */ - - if (riflen < 6) - return trh->daddr; /* last hop, source routed */ - - /* riflen is 6 or more, packet has more than one route descriptor */ - num_rdsc = (riflen / 2) - 1; - memset(rdesc, 0, ETH_ALEN); - /* offset 4 comes from LAN destination field in LE control frames */ - if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT)) - memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16)); - else { - memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16)); - rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0)); - } - - return NULL; -} -#endif /* CONFIG_TR */ - /* * Open/initialize the netdevice. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. 
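Stepping back to the xfrm change above: converting the selector-match helpers from int to bool is a type-level cleanup, and a minimal standalone sketch of the pattern (hypothetical helper names, not the kernel functions) makes the payoff visible. The return type now states the 0/1 contract that was previously only a convention, and the unknown-case fallthrough becomes an explicit false.

#include <stdbool.h>
#include <stdio.h>

/* Old style: predicate returns int, 0/1 by convention only. */
static int addr_match_old(unsigned int a, unsigned int b, unsigned int mask)
{
        return (a & mask) == (b & mask);   /* evaluates to 0 or 1 */
}

/* New style: the return type spells out that this is a truth value. */
static bool addr_match_new(unsigned int a, unsigned int b, unsigned int mask)
{
        return (a & mask) == (b & mask);   /* true or false */
}

int main(void)
{
        printf("old: %d  new: %d\n",
               addr_match_old(0x0a000001, 0x0a0000ff, 0xffffff00),
               addr_match_new(0x0a000001, 0x0a0000ff, 0xffffff00));
        return 0;
}

Callers then hold the result in a bool rather than an int, which is exactly the shape of the xfrm_policy_match and xfrm_sk_policy_lookup hunks above.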
@@ -257,9 +208,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct lec_arp_table *entry; unsigned char *dst; int min_frame_size; -#ifdef CONFIG_TR - unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ -#endif int is_rdesc; pr_debug("called\n"); @@ -290,24 +238,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, } skb_push(skb, 2); - /* Put le header to place, works for TokenRing too */ + /* Put le header to place */ lec_h = (struct lecdatahdr_8023 *)skb->data; lec_h->le_header = htons(priv->lecid); -#ifdef CONFIG_TR - /* - * Ugly. Use this to realign Token Ring packets for - * e.g. PCA-200E driver. - */ - if (priv->is_trdev) { - skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); - kfree_skb(skb); - if (skb2 == NULL) - return NETDEV_TX_OK; - skb = skb2; - } -#endif - #if DUMP_PACKETS >= 2 #define MAX_DUMP_SKB 99 #elif DUMP_PACKETS >= 1 @@ -321,12 +255,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, #endif /* DUMP_PACKETS >= 1 */ /* Minimum ethernet-frame size */ -#ifdef CONFIG_TR - if (priv->is_trdev) - min_frame_size = LEC_MINIMUM_8025_SIZE; - else -#endif - min_frame_size = LEC_MINIMUM_8023_SIZE; + min_frame_size = LEC_MINIMUM_8023_SIZE; if (skb->len < min_frame_size) { if ((skb->len + skb_tailroom(skb)) < min_frame_size) { skb2 = skb_copy_expand(skb, 0, @@ -345,15 +274,6 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb, /* Send to right vcc */ is_rdesc = 0; dst = lec_h->h_dest; -#ifdef CONFIG_TR - if (priv->is_trdev) { - dst = get_tr_dst(skb->data + 2, rdesc); - if (dst == NULL) { - dst = rdesc; - is_rdesc = 1; - } - } -#endif entry = NULL; vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", @@ -710,12 +630,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) dev_kfree_skb(skb); return; } -#ifdef CONFIG_TR - if (priv->is_trdev) - dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest; - else -#endif - dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; + dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; /* * If this is a Data Direct VCC, and the VCC does not match @@ -723,16 +638,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) */ spin_lock_irqsave(&priv->lec_arp_lock, flags); if (lec_is_data_direct(vcc)) { -#ifdef CONFIG_TR - if (priv->is_trdev) - src = - ((struct lecdatahdr_8025 *)skb->data)-> - h_source; - else -#endif - src = - ((struct lecdatahdr_8023 *)skb->data)-> - h_source; + src = ((struct lecdatahdr_8023 *)skb->data)->h_source; entry = lec_arp_find(priv, src); if (entry && entry->vcc != vcc) { lec_arp_remove(priv, entry); @@ -750,12 +656,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) if (!hlist_empty(&priv->lec_arp_empty_ones)) lec_arp_check_empties(priv, vcc, skb); skb_pull(skb, 2); /* skip lec_id */ -#ifdef CONFIG_TR - if (priv->is_trdev) - skb->protocol = tr_type_trans(skb, dev); - else -#endif - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); @@ -827,27 +728,13 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) i = 0; else i = arg; -#ifdef CONFIG_TR if (arg >= MAX_LEC_ITF) return -EINVAL; -#else /* Reserve the top NUM_TR_DEVS for TR */ - if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS)) - return -EINVAL; -#endif if (!dev_lec[i]) { - int is_trdev, size; - - is_trdev = 0; - if (i >= (MAX_LEC_ITF - NUM_TR_DEVS)) - is_trdev = 1; + int size; size = sizeof(struct lec_priv); 
-#ifdef CONFIG_TR - if (is_trdev) - dev_lec[i] = alloc_trdev(size); - else -#endif - dev_lec[i] = alloc_etherdev(size); + dev_lec[i] = alloc_etherdev(size); if (!dev_lec[i]) return -ENOMEM; dev_lec[i]->netdev_ops = &lec_netdev_ops; @@ -858,7 +745,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) } priv = netdev_priv(dev_lec[i]); - priv->is_trdev = is_trdev; } else { priv = netdev_priv(dev_lec[i]); if (priv->lecd) @@ -2372,15 +2258,7 @@ lec_arp_check_empties(struct lec_priv *priv, struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; - unsigned char *src; -#ifdef CONFIG_TR - struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data; - - if (priv->is_trdev) - src = tr_hdr->h_source; - else -#endif - src = hdr->h_source; + unsigned char *src = hdr->h_source; spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_for_each_entry_safe(entry, node, next, diff --git a/net/atm/lec.h b/net/atm/lec.h index dfc071966463..c730e57de199 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -142,7 +142,6 @@ struct lec_priv { int itfnum; /* e.g. 2 for lec2, 5 for lec5 */ struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */ int is_proxy; /* bridge between ATM and Ethernet */ - int is_trdev; /* Device type, 0 = Ethernet, 1 = TokenRing */ }; struct lec_vcc_priv { -- cgit v1.2.2 From 211ed865108e24697b44bee5daac502ee6bdd4a4 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 10 May 2012 17:14:35 -0400 Subject: net: delete all instances of special processing for token ring We are going to delete the Token ring support. This removes any special processing in the core networking for token ring, (aside from net/tr.c itself), leaving the drivers and remaining tokenring support present but inert. The mass removal of the drivers and net/tr.c will be in a separate commit, so that the history of these files that we still care about won't have the giant deletion tied into their history. 
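Most of the special processing this commit removes is per-hardware-type dispatch: core paths switch on dev->type, and token ring simply had its own case next to Ethernet and FDDI, as the arp.c and ndisc.c hunks below show. A short self-contained sketch of that dispatch shape (hypothetical constants and return values, not the kernel's arp_mc_map) illustrates why dropping one case leaves every remaining path untouched:

#include <stdio.h>

/* Hypothetical hardware-type constants standing in for the ARPHRD_* values. */
enum hw_type { HW_ETHER = 1, HW_FDDI = 774, HW_INFINIBAND = 32 };

/* Map a multicast address for the given link type; unknown types fail.
 * Before the cleanup a token ring case sat alongside these; deleting it
 * removes one label and nothing else. */
static int mc_map(int type)
{
        switch (type) {
        case HW_ETHER:
        case HW_FDDI:
                return 0;       /* shared Ethernet-style mapping */
        case HW_INFINIBAND:
                return 0;       /* its own mapping */
        default:
                return -1;      /* "address not available" in the real code */
        }
}

int main(void)
{
        printf("%d %d\n", mc_map(HW_ETHER), mc_map(99));
        return 0;
}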
Signed-off-by: Paul Gortmaker --- net/802/p8022.c | 3 +-- net/802/tr.c | 1 - net/core/dev.c | 14 ++++++-------- net/ipv4/Kconfig | 4 ++-- net/ipv4/arp.c | 13 +------------ net/ipv4/ipconfig.c | 2 -- net/ipv6/addrconf.c | 2 -- net/ipv6/ndisc.c | 17 ----------------- net/ipx/af_ipx.c | 10 +--------- net/llc/af_llc.c | 3 +-- net/llc/llc_output.c | 3 --- net/llc/llc_sap.c | 4 ---- net/sysctl_net.c | 4 ---- 13 files changed, 12 insertions(+), 68 deletions(-) (limited to 'net') diff --git a/net/802/p8022.c b/net/802/p8022.c index 7f353c4f437a..0bda8de7df51 100644 --- a/net/802/p8022.c +++ b/net/802/p8022.c @@ -1,6 +1,5 @@ /* - * NET3: Support for 802.2 demultiplexing off Ethernet (Token ring - * is kept separate see p8022tr.c) + * NET3: Support for 802.2 demultiplexing off Ethernet * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version diff --git a/net/802/tr.c b/net/802/tr.c index 30a352ed09b1..175243b0d6d8 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -604,7 +604,6 @@ static void tr_setup(struct net_device *dev) dev->header_ops = &tr_header_ops; - dev->type = ARPHRD_IEEE802_TR; dev->hard_header_len = TR_HLEN; dev->mtu = 2000; dev->addr_len = TR_ALEN; diff --git a/net/core/dev.c b/net/core/dev.c index 3dd853998d38..66cae6e975d9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -300,10 +300,9 @@ static const unsigned short netdev_lock_type[] = ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, - ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, - ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, - ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, - ARPHRD_VOID, ARPHRD_NONE}; + ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, + ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, + ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; static const char *const netdev_lock_name[] = {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", @@ -318,10 +317,9 @@ static const char *const netdev_lock_name[] = "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", - "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", - "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", - "_xmit_PHONET_PIPE", "_xmit_IEEE802154", - "_xmit_VOID", "_xmit_NONE"}; + "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", + "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", + "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 2c8febd3ebda..20f1cb5c8aba 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -262,8 +262,8 @@ config ARPD bool "IP: ARP daemon support" ---help--- The kernel maintains an internal cache which maps IP addresses to - hardware addresses on the local network, so that Ethernet/Token Ring/ - etc. frames are sent to the proper address on the physical networking + hardware addresses on the local network, so that Ethernet + frames are sent to the proper address on the physical networking layer. Normally, kernel uses the ARP protocol to resolve these mappings. 
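The net/core/dev.c hunk just above edits a pair of parallel tables: each ARPHRD_* value in netdev_lock_type[] is matched by a "_xmit_*" string at the same index in netdev_lock_name[], so removing the token ring entry from one array without the other would silently shift every following name. A small userspace sketch of that invariant follows; the values are made up and the compile-time length check is an illustration of how the pairing could be enforced, not something the kernel tables carry:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned short lock_type[] = { 1, 15, 19, 32 };
static const char *const lock_name[]   = { "_xmit_ETHER", "_xmit_FRAD",
                                           "_xmit_ATM",   "_xmit_INFINIBAND" };

/* If an entry is removed from one table but not the other, the build
 * fails instead of the names quietly shifting against the types. */
_Static_assert(ARRAY_SIZE(lock_type) == ARRAY_SIZE(lock_name),
               "lock_type and lock_name must stay in sync");

static const char *name_of(unsigned short type)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(lock_type); i++)
                if (lock_type[i] == type)
                        return lock_name[i];
        return "_xmit_NONE";
}

int main(void)
{
        printf("%s\n", name_of(19));
        return 0;
}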
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 373b56bf8f49..5097571cc4b0 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -89,7 +89,6 @@ #include #include #include -#include #include #include #include @@ -193,9 +192,6 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) case ARPHRD_IEEE802: ip_eth_mc_map(addr, haddr); return 0; - case ARPHRD_IEEE802_TR: - ip_tr_mc_map(addr, haddr); - return 0; case ARPHRD_INFINIBAND: ip_ib_mc_map(addr, dev->broadcast, haddr); return 0; @@ -647,12 +643,6 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, arp->ar_hrd = htons(ARPHRD_ETHER); arp->ar_pro = htons(ETH_P_IP); break; -#endif -#if IS_ENABLED(CONFIG_TR) - case ARPHRD_IEEE802_TR: - arp->ar_hrd = htons(ARPHRD_IEEE802); - arp->ar_pro = htons(ETH_P_IP); - break; #endif } @@ -751,11 +741,10 @@ static int arp_process(struct sk_buff *skb) goto out; break; case ARPHRD_ETHER: - case ARPHRD_IEEE802_TR: case ARPHRD_FDDI: case ARPHRD_IEEE802: /* - * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802 + * ETHERNET, and Fibre Channel (which are IEEE 802 * devices, according to RFC 2625) devices will accept ARP * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2). * This is the case also of FDDI, where the RFC 1390 says that diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 24a3df9890e1..430015010e57 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -808,8 +808,6 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d b->op = BOOTP_REQUEST; if (dev->type < 256) /* check for false types */ b->htype = dev->type; - else if (dev->type == ARPHRD_IEEE802_TR) /* fix for token ring */ - b->htype = ARPHRD_IEEE802; else if (dev->type == ARPHRD_FDDI) b->htype = ARPHRD_ETHER; else { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4d1d51a0bd26..f6b5b8ac3cf2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1575,7 +1575,6 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) switch (dev->type) { case ARPHRD_ETHER: case ARPHRD_FDDI: - case ARPHRD_IEEE802_TR: return addrconf_ifid_eui48(eui, dev); case ARPHRD_ARCNET: return addrconf_ifid_arcnet(eui, dev); @@ -2444,7 +2443,6 @@ static void addrconf_dev_config(struct net_device *dev) if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_FDDI) && - (dev->type != ARPHRD_IEEE802_TR) && (dev->type != ARPHRD_ARCNET) && (dev->type != ARPHRD_INFINIBAND) && (dev->type != ARPHRD_IEEE802154)) { diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 35615c6358b8..56963f15bdc5 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -327,9 +327,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, case ARPHRD_FDDI: ipv6_eth_mc_map(addr, buf); return 0; - case ARPHRD_IEEE802_TR: - ipv6_tr_mc_map(addr,buf); - return 0; case ARPHRD_ARCNET: ipv6_arcnet_mc_map(addr, buf); return 0; @@ -795,20 +792,6 @@ static void ndisc_recv_ns(struct sk_buff *skb) if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { if (dad) { - if (dev->type == ARPHRD_IEEE802_TR) { - const unsigned char *sadr; - sadr = skb_mac_header(skb); - if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 && - sadr[9] == dev->dev_addr[1] && - sadr[10] == dev->dev_addr[2] && - sadr[11] == dev->dev_addr[3] && - sadr[12] == dev->dev_addr[4] && - sadr[13] == dev->dev_addr[5]) { - /* looped-back to us */ - goto out; - } - } - /* * We are colliding with another node * who is doing DAD diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 9680226640ef..824d4a3338ae 100644 --- 
a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -983,10 +983,6 @@ static int ipxitf_create(struct ipx_interface_definition *idef) goto out; switch (idef->ipx_dlink_type) { - case IPX_FRAME_TR_8022: - printk(KERN_WARNING "IPX frame type 802.2TR is " - "obsolete Use 802.2 instead.\n"); - /* fall through */ case IPX_FRAME_8022: dlink_type = htons(ETH_P_802_2); datalink = p8022_datalink; @@ -996,10 +992,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef) dlink_type = htons(ETH_P_IPX); datalink = pEII_datalink; break; - } else - printk(KERN_WARNING "IPX frame type EtherII over " - "token-ring is obsolete. Use SNAP " - "instead.\n"); + } /* fall through */ case IPX_FRAME_SNAP: dlink_type = htons(ETH_P_SNAP); @@ -1275,7 +1268,6 @@ const char *ipx_frame_name(__be16 frame) case ETH_P_802_2: rc = "802.2"; break; case ETH_P_SNAP: rc = "SNAP"; break; case ETH_P_802_3: rc = "802.3"; break; - case ETH_P_TR_802_2: rc = "802.2TR"; break; } return rc; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 78424f41cf36..e9440753e16b 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -71,8 +71,7 @@ static inline u16 llc_ui_next_link_no(int sap) */ static inline __be16 llc_proto_type(u16 arphrd) { - return arphrd == ARPHRD_IEEE802_TR ? - htons(ETH_P_TR_802_2) : htons(ETH_P_802_2); + return htons(ETH_P_802_2); } /** diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index b658cba89fdd..2dae8a5df23f 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -14,9 +14,7 @@ */ #include -#include #include -#include #include #include #include @@ -37,7 +35,6 @@ int llc_mac_hdr_init(struct sk_buff *skb, int rc = -EINVAL; switch (skb->dev->type) { - case ARPHRD_IEEE802_TR: case ARPHRD_ETHER: case ARPHRD_LOOPBACK: rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa, diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 94e7fca75b85..7c5073badc73 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -31,10 +31,6 @@ static int llc_mac_header_len(unsigned short devtype) case ARPHRD_ETHER: case ARPHRD_LOOPBACK: return sizeof(struct ethhdr); -#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) - case ARPHRD_IEEE802_TR: - return sizeof(struct trh_hdr); -#endif } return 0; } diff --git a/net/sysctl_net.c b/net/sysctl_net.c index f3e813a8d107..e3a6e37cd1c5 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -26,10 +26,6 @@ #include #endif -#ifdef CONFIG_TR -#include -#endif - static struct ctl_table_set * net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces) { -- cgit v1.2.2 From ee446fd5e6dafee4a16fd1bd345d2571dcfd6f5d Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 9 May 2012 22:41:59 -0400 Subject: tokenring: delete all remaining driver support This represents the mass deletion of the of the tokenring support. It gets rid of: - the net/tr.c which the drivers depended on - the drivers/net component - the Kbuild infrastructure around it - any tokenring related CONFIG_ settings in any defconfigs - the tokenring headers in the include/linux dir - the firmware associated with the tokenring drivers. - any associated token ring documentation. 
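The net/802/tr.c file removed below maintained a learned source-routing (RIF) cache: a 32-way chained hash keyed on the station MAC address, each entry stamped with last_used and swept by a periodic timer so stale routes age out (see rif_hash, tr_add_rif_info and rif_check_expire in the deleted code). A simplified userspace sketch of that structure, with the locking, the kernel timer and the actual routing fields left out and a hypothetical timeout, looks like this:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>

#define TABLE_SIZE 32
#define ENTRY_TIMEOUT 600            /* seconds; the kernel used a sysctl */

struct rif_entry {
        unsigned char addr[6];       /* station MAC address */
        time_t last_used;
        struct rif_entry *next;
};

static struct rif_entry *table[TABLE_SIZE];

/* Fold the 6-byte address into one of 32 buckets, as the deleted
 * rif_hash() did. */
static unsigned int rif_hash(const unsigned char *addr)
{
        unsigned long x = 0;
        int i;

        for (i = 0; i < 6; i++)
                x = (x << 2) ^ addr[i];
        x ^= x >> 8;
        return x & (TABLE_SIZE - 1);
}

/* Learn or refresh an entry; a failed allocation just loses the hint. */
static void rif_learn(const unsigned char *addr)
{
        unsigned int h = rif_hash(addr);
        struct rif_entry *e;

        for (e = table[h]; e; e = e->next)
                if (!memcmp(e->addr, addr, 6)) {
                        e->last_used = time(NULL);
                        return;
                }
        e = malloc(sizeof(*e));
        if (!e)
                return;
        memcpy(e->addr, addr, 6);
        e->last_used = time(NULL);
        e->next = table[h];
        table[h] = e;
}

/* Periodic sweep: drop anything not used within the timeout. */
static void rif_expire(void)
{
        int i;

        for (i = 0; i < TABLE_SIZE; i++) {
                struct rif_entry **pe = &table[i], *e;

                while ((e = *pe) != NULL) {
                        if (time(NULL) - e->last_used > ENTRY_TIMEOUT) {
                                *pe = e->next;
                                free(e);
                        } else {
                                pe = &e->next;
                        }
                }
        }
}

int main(void)
{
        const unsigned char mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

        rif_learn(mac);
        rif_expire();
        printf("bucket %u occupied: %s\n", rif_hash(mac),
               table[rif_hash(mac)] ? "yes" : "no");
        return 0;
}

The real code additionally records the learned route descriptors (the rcf and rseg fields) from incoming frames and exposes the table through /proc/net/tr_rif, all of which goes away with this commit.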
Signed-off-by: Paul Gortmaker --- net/802/Makefile | 1 - net/802/tr.c | 669 ------------------------------------------------------- 2 files changed, 670 deletions(-) delete mode 100644 net/802/tr.c (limited to 'net') diff --git a/net/802/Makefile b/net/802/Makefile index 7893d679910c..a30d6e385aed 100644 --- a/net/802/Makefile +++ b/net/802/Makefile @@ -4,7 +4,6 @@ # Check the p8022 selections against net/core/Makefile. obj-$(CONFIG_LLC) += p8022.o psnap.o -obj-$(CONFIG_TR) += p8022.o psnap.o tr.o obj-$(CONFIG_NET_FC) += fc.o obj-$(CONFIG_FDDI) += fddi.o obj-$(CONFIG_HIPPI) += hippi.o diff --git a/net/802/tr.c b/net/802/tr.c deleted file mode 100644 index 175243b0d6d8..000000000000 --- a/net/802/tr.c +++ /dev/null @@ -1,669 +0,0 @@ -/* - * NET3: Token ring device handling subroutines - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Fixes: 3 Feb 97 Paul Norton Minor routing fixes. - * Added rif table to /proc/net/tr_rif and rif timeout to - * /proc/sys/net/token-ring/rif_timeout. - * 22 Jun 98 Paul Norton Rearranged - * tr_header and tr_type_trans to handle passing IPX SNAP and - * 802.2 through the correct layers. Eliminated tr_reformat. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev); -static void rif_check_expire(unsigned long dummy); - -#define TR_SR_DEBUG 0 - -/* - * Each RIF entry we learn is kept this way - */ - -struct rif_cache { - unsigned char addr[TR_ALEN]; - int iface; - __be16 rcf; - __be16 rseg[8]; - struct rif_cache *next; - unsigned long last_used; - unsigned char local_ring; -}; - -#define RIF_TABLE_SIZE 32 - -/* - * We hash the RIF cache 32 ways. We do after all have to look it - * up a lot. - */ - -static struct rif_cache *rif_table[RIF_TABLE_SIZE]; - -static DEFINE_SPINLOCK(rif_lock); - - -/* - * Garbage disposal timer. - */ - -static struct timer_list rif_timer; - -static int sysctl_tr_rif_timeout = 60*10*HZ; - -static inline unsigned long rif_hash(const unsigned char *addr) -{ - unsigned long x; - - x = addr[0]; - x = (x << 2) ^ addr[1]; - x = (x << 2) ^ addr[2]; - x = (x << 2) ^ addr[3]; - x = (x << 2) ^ addr[4]; - x = (x << 2) ^ addr[5]; - - x ^= x >> 8; - - return x & (RIF_TABLE_SIZE - 1); -} - -/* - * Put the headers on a token ring packet. Token ring source routing - * makes this a little more exciting than on ethernet. - */ - -static int tr_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, - const void *daddr, const void *saddr, unsigned int len) -{ - struct trh_hdr *trh; - int hdr_len; - - /* - * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls - * dev->hard_header directly. 
- */ - if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP) - { - struct trllc *trllc; - - hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc); - trh = (struct trh_hdr *)skb_push(skb, hdr_len); - trllc = (struct trllc *)(trh+1); - trllc->dsap = trllc->ssap = EXTENDED_SAP; - trllc->llc = UI_CMD; - trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00; - trllc->ethertype = htons(type); - } - else - { - hdr_len = sizeof(struct trh_hdr); - trh = (struct trh_hdr *)skb_push(skb, hdr_len); - } - - trh->ac=AC; - trh->fc=LLC_FRAME; - - if(saddr) - memcpy(trh->saddr,saddr,dev->addr_len); - else - memcpy(trh->saddr,dev->dev_addr,dev->addr_len); - - /* - * Build the destination and then source route the frame - */ - - if(daddr) - { - memcpy(trh->daddr,daddr,dev->addr_len); - tr_source_route(skb, trh, dev); - return hdr_len; - } - - return -hdr_len; -} - -/* - * A neighbour discovery of some species (eg arp) has completed. We - * can now send the packet. - */ - -static int tr_rebuild_header(struct sk_buff *skb) -{ - struct trh_hdr *trh=(struct trh_hdr *)skb->data; - struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); - struct net_device *dev = skb->dev; - - /* - * FIXME: We don't yet support IPv6 over token rings - */ - - if(trllc->ethertype != htons(ETH_P_IP)) { - printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype)); - return 0; - } - -#ifdef CONFIG_INET - if(arp_find(trh->daddr, skb)) { - return 1; - } - else -#endif - { - tr_source_route(skb,trh,dev); - return 0; - } -} - -/* - * Some of this is a bit hackish. We intercept RIF information - * used for source routing. We also grab IP directly and don't feed - * it via SNAP. - */ - -__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) -{ - - struct trh_hdr *trh; - struct trllc *trllc; - unsigned int riflen=0; - - skb->dev = dev; - skb_reset_mac_header(skb); - trh = tr_hdr(skb); - - if(trh->saddr[0] & TR_RII) - riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; - - trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); - - skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); - - if(*trh->daddr & 0x80) - { - if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN)) - skb->pkt_type=PACKET_BROADCAST; - else - skb->pkt_type=PACKET_MULTICAST; - } - else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E)) - { - skb->pkt_type=PACKET_MULTICAST; - } - else if(dev->flags & IFF_PROMISC) - { - if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN)) - skb->pkt_type=PACKET_OTHERHOST; - } - - if ((skb->pkt_type != PACKET_BROADCAST) && - (skb->pkt_type != PACKET_MULTICAST)) - tr_add_rif_info(trh,dev) ; - - /* - * Strip the SNAP header from ARP packets since we don't - * pass them through to the 802.2/SNAP layers. - */ - - if (trllc->dsap == EXTENDED_SAP && - (trllc->ethertype == htons(ETH_P_IP) || - trllc->ethertype == htons(ETH_P_IPV6) || - trllc->ethertype == htons(ETH_P_ARP))) - { - skb_pull(skb, sizeof(struct trllc)); - return trllc->ethertype; - } - - return htons(ETH_P_TR_802_2); -} - -/* - * We try to do source routing... 
- */ - -void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh, - struct net_device *dev) -{ - int slack; - unsigned int hash; - struct rif_cache *entry; - unsigned char *olddata; - unsigned long flags; - static const unsigned char mcast_func_addr[] - = {0xC0,0x00,0x00,0x04,0x00,0x00}; - - spin_lock_irqsave(&rif_lock, flags); - - /* - * Broadcasts are single route as stated in RFC 1042 - */ - if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) || - (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) ) - { - trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) - | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); - trh->saddr[0]|=TR_RII; - } - else - { - hash = rif_hash(trh->daddr); - /* - * Walk the hash table and look for an entry - */ - for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next); - - /* - * If we found an entry we can route the frame. - */ - if(entry) - { -#if TR_SR_DEBUG -printk("source routing for %pM\n", trh->daddr); -#endif - if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) - { - trh->rcf=entry->rcf; - memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short)); - trh->rcf^=htons(TR_RCF_DIR_BIT); - trh->rcf&=htons(0x1fff); /* Issam Chehab */ - - trh->saddr[0]|=TR_RII; -#if TR_SR_DEBUG - printk("entry found with rcf %04x\n", entry->rcf); - } - else - { - printk("entry found but without rcf length, local=%02x\n", entry->local_ring); -#endif - } - entry->last_used=jiffies; - } - else - { - /* - * Without the information we simply have to shout - * on the wire. The replies should rapidly clean this - * situation up. - */ - trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) - | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); - trh->saddr[0]|=TR_RII; -#if TR_SR_DEBUG - printk("no entry in rif table found - broadcasting frame\n"); -#endif - } - } - - /* Compress the RIF here so we don't have to do it in the driver(s) */ - if (!(trh->saddr[0] & 0x80)) - slack = 18; - else - slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8); - olddata = skb->data; - spin_unlock_irqrestore(&rif_lock, flags); - - skb_pull(skb, slack); - memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack); -} - -/* - * We have learned some new RIF information for our source - * routing. - */ - -static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) -{ - unsigned int hash, rii_p = 0; - unsigned long flags; - struct rif_cache *entry; - unsigned char saddr0; - - spin_lock_irqsave(&rif_lock, flags); - saddr0 = trh->saddr[0]; - - /* - * Firstly see if the entry exists - */ - - if(trh->saddr[0] & TR_RII) - { - trh->saddr[0]&=0x7f; - if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2) - { - rii_p = 1; - } - } - - hash = rif_hash(trh->saddr); - for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next); - - if(entry==NULL) - { -#if TR_SR_DEBUG - printk("adding rif_entry: addr:%pM rcf:%04X\n", - trh->saddr, ntohs(trh->rcf)); -#endif - /* - * Allocate our new entry. A failure to allocate loses - * use the information. This is harmless. - * - * FIXME: We ought to keep some kind of cache size - * limiting and adjust the timers to suit. 
- */ - entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC); - - if(!entry) - { - printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n"); - spin_unlock_irqrestore(&rif_lock, flags); - return; - } - - memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN); - entry->iface = dev->ifindex; - entry->next=rif_table[hash]; - entry->last_used=jiffies; - rif_table[hash]=entry; - - if (rii_p) - { - entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); - memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); - entry->local_ring = 0; - } - else - { - entry->local_ring = 1; - } - } - else /* Y. Tahara added */ - { - /* - * Update existing entries - */ - if (!entry->local_ring) - if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) && - !(trh->rcf & htons(TR_RCF_BROADCAST_MASK))) - { -#if TR_SR_DEBUG -printk("updating rif_entry: addr:%pM rcf:%04X\n", - trh->saddr, ntohs(trh->rcf)); -#endif - entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); - memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); - } - entry->last_used=jiffies; - } - trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */ - spin_unlock_irqrestore(&rif_lock, flags); -} - -/* - * Scan the cache with a timer and see what we need to throw out. - */ - -static void rif_check_expire(unsigned long dummy) -{ - int i; - unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2; - - spin_lock_irqsave(&rif_lock, flags); - - for(i =0; i < RIF_TABLE_SIZE; i++) { - struct rif_cache *entry, **pentry; - - pentry = rif_table+i; - while((entry=*pentry) != NULL) { - unsigned long expires - = entry->last_used + sysctl_tr_rif_timeout; - - if (time_before_eq(expires, jiffies)) { - *pentry = entry->next; - kfree(entry); - } else { - pentry = &entry->next; - - if (time_before(expires, next_interval)) - next_interval = expires; - } - } - } - - spin_unlock_irqrestore(&rif_lock, flags); - - mod_timer(&rif_timer, next_interval); - -} - -/* - * Generate the /proc/net information for the token ring RIF - * routing. - */ - -#ifdef CONFIG_PROC_FS - -static struct rif_cache *rif_get_idx(loff_t pos) -{ - int i; - struct rif_cache *entry; - loff_t off = 0; - - for(i = 0; i < RIF_TABLE_SIZE; i++) - for(entry = rif_table[i]; entry; entry = entry->next) { - if (off == pos) - return entry; - ++off; - } - - return NULL; -} - -static void *rif_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(&rif_lock) -{ - spin_lock_irq(&rif_lock); - - return *pos ? 
rif_get_idx(*pos - 1) : SEQ_START_TOKEN; -} - -static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - int i; - struct rif_cache *ent = v; - - ++*pos; - - if (v == SEQ_START_TOKEN) { - i = -1; - goto scan; - } - - if (ent->next) - return ent->next; - - i = rif_hash(ent->addr); - scan: - while (++i < RIF_TABLE_SIZE) { - if ((ent = rif_table[i]) != NULL) - return ent; - } - return NULL; -} - -static void rif_seq_stop(struct seq_file *seq, void *v) - __releases(&rif_lock) -{ - spin_unlock_irq(&rif_lock); -} - -static int rif_seq_show(struct seq_file *seq, void *v) -{ - int j, rcf_len, segment, brdgnmb; - struct rif_cache *entry = v; - - if (v == SEQ_START_TOKEN) - seq_puts(seq, - "if TR address TTL rcf routing segments\n"); - else { - struct net_device *dev = dev_get_by_index(&init_net, entry->iface); - long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout) - - (long) jiffies; - - seq_printf(seq, "%s %pM %7li ", - dev?dev->name:"?", - entry->addr, - ttl/HZ); - - if (entry->local_ring) - seq_puts(seq, "local\n"); - else { - - seq_printf(seq, "%04X", ntohs(entry->rcf)); - rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2; - if (rcf_len) - rcf_len >>= 1; - for(j = 1; j < rcf_len; j++) { - if(j==1) { - segment=ntohs(entry->rseg[j-1])>>4; - seq_printf(seq," %03X",segment); - } - - segment=ntohs(entry->rseg[j])>>4; - brdgnmb=ntohs(entry->rseg[j-1])&0x00f; - seq_printf(seq,"-%01X-%03X",brdgnmb,segment); - } - seq_putc(seq, '\n'); - } - - if (dev) - dev_put(dev); - } - return 0; -} - - -static const struct seq_operations rif_seq_ops = { - .start = rif_seq_start, - .next = rif_seq_next, - .stop = rif_seq_stop, - .show = rif_seq_show, -}; - -static int rif_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &rif_seq_ops); -} - -static const struct file_operations rif_seq_fops = { - .owner = THIS_MODULE, - .open = rif_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -#endif - -static const struct header_ops tr_header_ops = { - .create = tr_header, - .rebuild= tr_rebuild_header, -}; - -static void tr_setup(struct net_device *dev) -{ - /* - * Configure and register - */ - - dev->header_ops = &tr_header_ops; - - dev->hard_header_len = TR_HLEN; - dev->mtu = 2000; - dev->addr_len = TR_ALEN; - dev->tx_queue_len = 100; /* Long queues on tr */ - - memset(dev->broadcast,0xFF, TR_ALEN); - - /* New-style flags. */ - dev->flags = IFF_BROADCAST | IFF_MULTICAST ; -} - -/** - * alloc_trdev - Register token ring device - * @sizeof_priv: Size of additional driver-private structure to be allocated - * for this token ring device - * - * Fill in the fields of the device structure with token ring-generic values. - * - * Constructs a new net device, complete with a private data area of - * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for - * this private data area. - */ -struct net_device *alloc_trdev(int sizeof_priv) -{ - return alloc_netdev(sizeof_priv, "tr%d", tr_setup); -} - -#ifdef CONFIG_SYSCTL -static struct ctl_table tr_table[] = { - { - .procname = "rif_timeout", - .data = &sysctl_tr_rif_timeout, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { }, -}; -#endif - -/* - * Called during bootup. We don't actually have to initialise - * too much for this. 
- */ - -static int __init rif_init(void) -{ - rif_timer.expires = jiffies + sysctl_tr_rif_timeout; - setup_timer(&rif_timer, rif_check_expire, 0); - add_timer(&rif_timer); -#ifdef CONFIG_SYSCTL - register_net_sysctl(&init_net, "net/token-ring", tr_table); -#endif - proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); - return 0; -} - -module_init(rif_init); - -EXPORT_SYMBOL(tr_type_trans); -EXPORT_SYMBOL(alloc_trdev); - -MODULE_LICENSE("GPL"); -- cgit v1.2.2 From f32138319ca6541e65f95f8e17c9cc88ac1baf94 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 15 May 2012 14:11:53 +0000 Subject: net: ipv6: Standardize prefixes for message logging Add #define pr_fmt(fmt) as appropriate. Add "IPv6: " to appropriate files. Convert printk(KERN_ to pr_ (but not KERN_DEBUG). Standardize on "%s: " not "%s(): " when emitting __func__. Use "%s: ", __func__ instead of embedding function name. Coalesce formats, align arguments. ADDRCONF output is now prefixed with "IPv6: " Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 53 ++++++++++++++++++++++----------------------------- net/ipv6/addrlabel.c | 2 +- net/ipv6/af_inet6.c | 14 +++++--------- net/ipv6/ah6.c | 16 +++++++++------- net/ipv6/esp6.c | 10 ++++++---- net/ipv6/icmp.c | 8 ++++---- net/ipv6/ip6_fib.c | 20 +++++++++---------- net/ipv6/ip6_tunnel.c | 21 ++++++++++---------- net/ipv6/ip6mr.c | 2 +- net/ipv6/ipcomp6.c | 11 +++++++---- net/ipv6/mcast.c | 3 +-- net/ipv6/mip6.c | 30 ++++++++++++++--------------- net/ipv6/ndisc.c | 22 +++++++++------------ net/ipv6/route.c | 8 +++++--- net/ipv6/sit.c | 6 ++++-- net/ipv6/tunnel6.c | 10 ++++++---- 16 files changed, 116 insertions(+), 120 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4d1d51a0bd26..707989068555 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -38,6 +38,8 @@ * status etc. */ +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -327,11 +329,11 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) WARN_ON(idev->mc_list != NULL); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL"); + printk(KERN_DEBUG "%s: %s\n", __func__, dev ? 
dev->name : "NIL"); #endif dev_put(dev); if (!idev->dead) { - pr_warning("Freeing alive inet6 device %p\n", idev); + pr_warn("Freeing alive inet6 device %p\n", idev); return; } snmp6_free_dev(idev); @@ -372,7 +374,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) if (snmp6_alloc_dev(ndev) < 0) { ADBG((KERN_WARNING - "%s(): cannot allocate memory for statistics; dev=%s.\n", + "%s: cannot allocate memory for statistics; dev=%s.\n", __func__, dev->name)); neigh_parms_release(&nd_tbl, ndev->nd_parms); dev_put(dev); @@ -382,7 +384,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) if (snmp6_register_dev(ndev) < 0) { ADBG((KERN_WARNING - "%s(): cannot create /proc/net/dev_snmp6/%s\n", + "%s: cannot create /proc/net/dev_snmp6/%s\n", __func__, dev->name)); neigh_parms_release(&nd_tbl, ndev->nd_parms); ndev->dead = 1; @@ -400,9 +402,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { - printk(KERN_INFO - "%s: Disabled Multicast RS\n", - dev->name); + pr_info("%s: Disabled Multicast RS\n", dev->name); ndev->cnf.rtr_solicits = 0; } #endif @@ -551,7 +551,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); if (ifp->state != INET6_IFADDR_STATE_DEAD) { - pr_warning("Freeing alive inet6 address %p\n", ifp); + pr_warn("Freeing alive inet6 address %p\n", ifp); return; } dst_release(&ifp->rt->dst); @@ -841,8 +841,7 @@ retry: in6_dev_hold(idev); if (idev->cnf.use_tempaddr <= 0) { write_unlock(&idev->lock); - printk(KERN_INFO - "ipv6_create_tempaddr(): use_tempaddr is disabled.\n"); + pr_info("%s: use_tempaddr is disabled\n", __func__); in6_dev_put(idev); ret = -1; goto out; @@ -852,8 +851,8 @@ retry: idev->cnf.use_tempaddr = -1; /*XXX*/ spin_unlock_bh(&ifp->lock); write_unlock(&idev->lock); - printk(KERN_WARNING - "ipv6_create_tempaddr(): regeneration time exceeded. 
disabled temporary address support.\n"); + pr_warn("%s: regeneration time exceeded - disabled temporary address support\n", + __func__); in6_dev_put(idev); ret = -1; goto out; @@ -863,8 +862,8 @@ retry: if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) { spin_unlock_bh(&ifp->lock); write_unlock(&idev->lock); - printk(KERN_WARNING - "ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n"); + pr_warn("%s: regeneration of randomized interface id failed\n", + __func__); in6_ifa_put(ifp); in6_dev_put(idev); ret = -1; @@ -914,8 +913,7 @@ retry: if (!ift || IS_ERR(ift)) { in6_ifa_put(ifp); in6_dev_put(idev); - printk(KERN_INFO - "ipv6_create_tempaddr(): retry temporary address regeneration.\n"); + pr_info("%s: retry temporary address regeneration\n", __func__); tmpaddr = &addr; write_lock(&idev->lock); goto retry; @@ -1429,7 +1427,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) /* DAD failed for link-local based on MAC address */ idev->cnf.disable_ipv6 = 1; - printk(KERN_INFO "%s: IPv6 being disabled!\n", + pr_info("%s: IPv6 being disabled!\n", ifp->idev->dev->name); } } @@ -1660,9 +1658,8 @@ static void ipv6_regen_rndid(unsigned long data) idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - idev->cnf.max_desync_factor * HZ; if (time_before(expires, jiffies)) { - printk(KERN_WARNING - "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n", - idev->dev->name); + pr_warn("%s: too short regeneration interval; timer disabled for %s\n", + __func__, idev->dev->name); goto out; } @@ -2507,7 +2504,7 @@ static void addrconf_gre_config(struct net_device *dev) struct inet6_dev *idev; struct in6_addr addr; - pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name); + pr_info("%s(%s)\n", __func__, dev->name); ASSERT_RTNL(); @@ -2599,9 +2596,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (event == NETDEV_UP) { if (!addrconf_qdisc_ok(dev)) { /* device is not ready yet. */ - printk(KERN_INFO - "ADDRCONF(NETDEV_UP): %s: " - "link is not ready\n", + pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", dev->name); break; } @@ -2626,10 +2621,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, idev->if_flags |= IF_READY; } - printk(KERN_INFO - "ADDRCONF(NETDEV_CHANGE): %s: " - "link becomes ready\n", - dev->name); + pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n", + dev->name); run_pending = 1; } @@ -4757,8 +4750,8 @@ int __init addrconf_init(void) err = ipv6_addr_label_init(); if (err < 0) { - printk(KERN_CRIT "IPv6 Addrconf:" - " cannot initialize default policy table: %d.\n", err); + pr_crit("%s: cannot initialize default policy table: %d\n", + __func__, err); goto out; } diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 2d8ddba9ee58..95aea16b8b6f 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -350,7 +350,7 @@ static int __net_init ip6addrlbl_net_init(struct net *net) int err = 0; int i; - ADDRLABEL(KERN_DEBUG "%s()\n", __func__); + ADDRLABEL(KERN_DEBUG "%s\n", __func__); for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { int ret = ip6addrlbl_add(net, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index bf8e14659e2d..138d4986c327 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -18,6 +18,7 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) "IPv6: " fmt #include #include @@ -612,13 +613,11 @@ out: return ret; out_permanent: - printk(KERN_ERR "Attempt to override permanent protocol %d.\n", - protocol); + pr_err("Attempt to override permanent protocol %d\n", protocol); goto out; out_illegal: - printk(KERN_ERR - "Ignoring attempt to register invalid socket type %d.\n", + pr_err("Ignoring attempt to register invalid socket type %d\n", p->type); goto out; } @@ -628,8 +627,7 @@ void inet6_unregister_protosw(struct inet_protosw *p) { if (INET_PROTOSW_PERMANENT & p->flags) { - printk(KERN_ERR - "Attempt to unregister permanent protocol %d.\n", + pr_err("Attempt to unregister permanent protocol %d\n", p->protocol); } else { spin_lock_bh(&inetsw6_lock); @@ -1067,9 +1065,7 @@ static int __init inet6_init(void) INIT_LIST_HEAD(r); if (disable_ipv6_mod) { - printk(KERN_INFO - "IPv6: Loaded, but administratively disabled, " - "reboot required to enable\n"); + pr_info("Loaded, but administratively disabled, reboot required to enable\n"); goto out; } diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index a9f4156f7c3f..9aa3d010ac5d 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -24,6 +24,8 @@ * This file is derived from net/ipv4/ah.c. */ +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -659,9 +661,9 @@ static int ah6_init_state(struct xfrm_state *x) if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_ahash_digestsize(ahash)) { - printk(KERN_INFO "AH: %s digestsize %u != %hu\n", - x->aalg->alg_name, crypto_ahash_digestsize(ahash), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("AH: %s digestsize %u != %hu\n", + x->aalg->alg_name, crypto_ahash_digestsize(ahash), + aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } @@ -727,12 +729,12 @@ static const struct inet6_protocol ah6_protocol = { static int __init ah6_init(void) { if (xfrm_register_type(&ah6_type, AF_INET6) < 0) { - printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) { - printk(KERN_INFO "ipv6 ah init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ah6_type, AF_INET6); return -EAGAIN; } @@ -743,10 +745,10 @@ static int __init ah6_init(void) static void __exit ah6_fini(void) { if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0) - printk(KERN_INFO "ipv6 ah close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0) - printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 1ac7938dd9ec..6697eb0fba55 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -24,6 +24,8 @@ * This file is derived from net/ipv4/esp.c */ +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -651,11 +653,11 @@ static const struct inet6_protocol esp6_protocol = { static int __init esp6_init(void) { if (xfrm_register_type(&esp6_type, AF_INET6) < 0) { - printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) { - printk(KERN_INFO "ipv6 esp init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp6_type, AF_INET6); return -EAGAIN; } @@ -666,9 +668,9 @@ static int __init esp6_init(void) static void __exit esp6_fini(void) { if 
(inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0) - printk(KERN_INFO "ipv6 esp close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0) - printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(esp6_init); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index cc079d8d4681..23c56ce9e86b 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -29,6 +29,8 @@ * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data */ +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -820,9 +822,7 @@ static int __net_init icmpv6_sk_init(struct net *net) err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { - printk(KERN_ERR - "Failed to initialize the ICMP6 control socket " - "(err %d).\n", + pr_err("Failed to initialize the ICMP6 control socket (err %d)\n", err); goto fail; } @@ -881,7 +881,7 @@ int __init icmpv6_init(void) return 0; fail: - printk(KERN_ERR "Failed to register ICMP6 protocol\n"); + pr_err("Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); return err; } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 93717435013e..e9846da77424 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -18,6 +18,9 @@ * routing table. * Ville Nuorvala: Fixed routing subtrees. */ + +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -451,12 +454,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) { if (!allow_create) { if (replace_required) { - pr_warn("IPv6: Can't replace route, " - "no match found\n"); + pr_warn("Can't replace route, no match found\n"); return ERR_PTR(-ENOENT); } - pr_warn("IPv6: NLM_F_CREATE should be set " - "when creating new route\n"); + pr_warn("NLM_F_CREATE should be set when creating new route\n"); } goto insert_above; } @@ -499,11 +500,10 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, * That would keep IPv6 consistent with IPv4 */ if (replace_required) { - pr_warn("IPv6: Can't replace route, no match found\n"); + pr_warn("Can't replace route, no match found\n"); return ERR_PTR(-ENOENT); } - pr_warn("IPv6: NLM_F_CREATE should be set " - "when creating new route\n"); + pr_warn("NLM_F_CREATE should be set when creating new route\n"); } /* * We walked to the bottom of tree. 
@@ -696,7 +696,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, */ if (!replace) { if (!add) - pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n"); + pr_warn("NLM_F_CREATE should be set when creating new route\n"); add: rt->dst.rt6_next = iter; @@ -715,7 +715,7 @@ add: if (!found) { if (add) goto add; - pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n"); + pr_warn("NLM_F_REPLACE set, but no existing node found!\n"); return -ENOENT; } *ins = rt; @@ -768,7 +768,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) replace_required = 1; } if (!allow_create && !replace_required) - pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); + pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 27fec272d39d..7962b3d42673 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -18,6 +18,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -836,15 +838,12 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) ldev = dev_get_by_index_rcu(net, p->link); if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) - printk(KERN_WARNING - "%s xmit: Local address not yet configured!\n", - p->name); + pr_warn("%s xmit: Local address not yet configured!\n", + p->name); else if (!ipv6_addr_is_multicast(&p->raddr) && unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) - printk(KERN_WARNING - "%s xmit: Routing loop! " - "Remote address found on this node!\n", - p->name); + pr_warn("%s xmit: Routing loop! Remote address found on this node!\n", + p->name); else ret = 1; rcu_read_unlock(); @@ -1542,13 +1541,13 @@ static int __init ip6_tunnel_init(void) err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET); if (err < 0) { - printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); + pr_err("%s: can't register ip4ip6\n", __func__); goto out_ip4ip6; } err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6); if (err < 0) { - printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); + pr_err("%s: can't register ip6ip6\n", __func__); goto out_ip6ip6; } @@ -1569,10 +1568,10 @@ out_pernet: static void __exit ip6_tunnel_cleanup(void) { if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) - printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n"); + pr_info("%s: can't deregister ip4ip6\n", __func__); if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) - printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); + pr_info("%s: can't deregister ip6ip6\n", __func__); unregister_pernet_device(&ip6_tnl_net_ops); } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index ba936e18b61b..b15dc08643a4 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1350,7 +1350,7 @@ int __init ip6_mr_init(void) goto reg_notif_fail; #ifdef CONFIG_IPV6_PIMSM_V2 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) { - printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n"); + pr_err("%s: can't add PIM protocol\n", __func__); err = -EAGAIN; goto add_proto_fail; } diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index bba658d9a03c..1addba5b8b39 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -30,6 +30,9 @@ * The decompression of IP datagram MUST be done after the reassembly, * AH/ESP processing. 
*/ + +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -190,11 +193,11 @@ static const struct inet6_protocol ipcomp6_protocol = static int __init ipcomp6_init(void) { if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) { - printk(KERN_INFO "ipcomp6 init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) { - printk(KERN_INFO "ipcomp6 init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ipcomp6_type, AF_INET6); return -EAGAIN; } @@ -204,9 +207,9 @@ static int __init ipcomp6_init(void) static void __exit ipcomp6_fini(void) { if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) - printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0) - printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ipcomp6_init); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 7dfb89f2bae5..2a3a22cf7604 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2627,8 +2627,7 @@ static int __net_init igmp6_net_init(struct net *net) err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { - printk(KERN_ERR - "Failed to initialize the IGMP6 control socket (err %d).\n", + pr_err("Failed to initialize the IGMP6 control socket (err %d)\n", err); goto out; } diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 7e1e0fbfef21..2e02f7c9d76d 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -22,6 +22,8 @@ * Masahide NAKAMURA @USAGI */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -307,13 +309,12 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, static int mip6_destopt_init_state(struct xfrm_state *x) { if (x->id.spi) { - printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, - x->id.spi); + pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi); return -EINVAL; } if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { - printk(KERN_INFO "%s: state's mode is not %u: %u\n", - __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); + pr_info("%s: state's mode is not %u: %u\n", + __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); return -EINVAL; } @@ -443,13 +444,12 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, static int mip6_rthdr_init_state(struct xfrm_state *x) { if (x->id.spi) { - printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, - x->id.spi); + pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi); return -EINVAL; } if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { - printk(KERN_INFO "%s: state's mode is not %u: %u\n", - __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); + pr_info("%s: state's mode is not %u: %u\n", + __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); return -EINVAL; } @@ -481,18 +481,18 @@ static const struct xfrm_type mip6_rthdr_type = static int __init mip6_init(void) { - printk(KERN_INFO "Mobile IPv6\n"); + pr_info("Mobile IPv6\n"); if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) { - printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__); + pr_info("%s: can't add xfrm type(destopt)\n", __func__); goto mip6_destopt_xfrm_fail; } if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) { - printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__); + pr_info("%s: can't add 
xfrm type(rthdr)\n", __func__); goto mip6_rthdr_xfrm_fail; } if (rawv6_mh_filter_register(mip6_mh_filter) < 0) { - printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__); + pr_info("%s: can't add rawv6 mh filter\n", __func__); goto mip6_rawv6_mh_fail; } @@ -510,11 +510,11 @@ static int __init mip6_init(void) static void __exit mip6_fini(void) { if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0) - printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__); + pr_info("%s: can't remove rawv6 mh filter\n", __func__); if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) - printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__); + pr_info("%s: can't remove xfrm type(rthdr)\n", __func__); if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) - printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__); + pr_info("%s: can't remove xfrm type(destopt)\n", __func__); } module_init(mip6_init); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 35615c6358b8..511e5b4bb610 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -266,7 +266,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, case ND_OPT_REDIRECT_HDR: if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { ND_PRINTK2(KERN_WARNING - "%s(): duplicated ND6 option found: type=%d\n", + "%s: duplicated ND6 option found: type=%d\n", __func__, nd_opt->nd_opt_type); } else { @@ -297,7 +297,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, * protocol. */ ND_PRINTK2(KERN_NOTICE - "%s(): ignored unsupported option; type=%d, len=%d\n", + "%s: ignored unsupported option; type=%d, len=%d\n", __func__, nd_opt->nd_opt_type, nd_opt->nd_opt_len); } @@ -459,7 +459,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, 1, &err); if (!skb) { ND_PRINTK0(KERN_ERR - "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n", + "ICMPv6 ND: %s failed to allocate an skb, err=%d.\n", __func__, err); return NULL; } @@ -696,7 +696,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) if ((probes -= neigh->parms->ucast_probes) < 0) { if (!(neigh->nud_state & NUD_VALID)) { - ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n", + ND_PRINTK1(KERN_DEBUG "%s: trying to ucast probe in NUD_INVALID: %pI6\n", __func__, target); } ndisc_send_ns(dev, neigh, target, target, saddr); @@ -1230,7 +1230,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (!neigh) { ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s() got default router without neighbour.\n", + "ICMPv6 RA: %s got default router without neighbour.\n", __func__); dst_release(&rt->dst); return; @@ -1248,7 +1248,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); if (rt == NULL) { ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s() failed to add default route.\n", + "ICMPv6 RA: %s failed to add default route.\n", __func__); return; } @@ -1256,7 +1256,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (neigh == NULL) { ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s() got default router without neighbour.\n", + "ICMPv6 RA: %s got default router without neighbour.\n", __func__); dst_release(&rt->dst); return; @@ -1605,7 +1605,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1, &err); if (buff == NULL) { ND_PRINTK0(KERN_ERR - "ICMPv6 Redirect: %s() failed to allocate an skb, 
err=%d.\n", + "ICMPv6 Redirect: %s failed to allocate an skb, err=%d.\n", __func__, err); goto release; } @@ -1767,11 +1767,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl, static int warned; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); - printk(KERN_WARNING - "process `%s' is using deprecated sysctl (%s) " - "net.ipv6.neigh.%s.%s; " - "Use net.ipv6.neigh.%s.%s_ms " - "instead.\n", + pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n", warncomm, func, dev_name, ctl->procname, dev_name, ctl->procname); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e20e32069024..90119a32b89d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -24,6 +24,8 @@ * Fixed routing subtrees. */ +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -794,7 +796,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, goto retry; } - net_warn_ratelimited("ipv6: Neighbour table overflow\n"); + net_warn_ratelimited("Neighbour table overflow\n"); dst_free(&rt->dst); return NULL; } @@ -1280,7 +1282,7 @@ int ip6_route_add(struct fib6_config *cfg) !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) { table = fib6_get_table(net, cfg->fc_table); if (!table) { - printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n"); + pr_warn("NLM_F_CREATE should be specified when creating new route\n"); table = fib6_new_table(net, cfg->fc_table); } } else { @@ -2102,7 +2104,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, int err; if (!rt) { - net_warn_ratelimited("IPv6: Maximum number of routes reached, consider increasing route/max_size\n"); + net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n"); return ERR_PTR(-ENOMEM); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a36a09701bff..60415711563f 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -17,6 +17,8 @@ * Fred Templin : isatap support */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -1301,7 +1303,7 @@ static int __init sit_init(void) { int err; - printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); + pr_info("IPv6 over IPv4 tunneling driver\n"); err = register_pernet_device(&sit_net_ops); if (err < 0) @@ -1309,7 +1311,7 @@ static int __init sit_init(void) err = xfrm4_tunnel_register(&sit_handler, AF_INET6); if (err < 0) { unregister_pernet_device(&sit_net_ops); - printk(KERN_INFO "sit init: Can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); } return err; } diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 4f3cec12aa85..4b0f50d9a962 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c @@ -19,6 +19,8 @@ * YOSHIFUJI Hideaki */ +#define pr_fmt(fmt) "IPv6: " fmt + #include #include #include @@ -160,11 +162,11 @@ static const struct inet6_protocol tunnel46_protocol = { static int __init tunnel6_init(void) { if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) { - printk(KERN_ERR "tunnel6 init(): can't add protocol\n"); + pr_err("%s: can't add protocol\n", __func__); return -EAGAIN; } if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) { - printk(KERN_ERR "tunnel6 init(): can't add protocol\n"); + pr_err("%s: can't add protocol\n", __func__); inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6); return -EAGAIN; } @@ -174,9 +176,9 @@ static int __init tunnel6_init(void) static void __exit tunnel6_fini(void) { if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP)) - 
printk(KERN_ERR "tunnel6 close: can't remove protocol\n"); + pr_err("%s: can't remove protocol\n", __func__); if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6)) - printk(KERN_ERR "tunnel6 close: can't remove protocol\n"); + pr_err("%s: can't remove protocol\n", __func__); } module_init(tunnel6_init); -- cgit v1.2.2 From 91df42bedccb919902c7cf7eb876c982ae7f1b1d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 15 May 2012 14:11:54 +0000 Subject: net: ipv4 and ipv6: Convert printk(KERN_DEBUG to pr_debug Use the current debugging style and enable dynamic_debug. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/ipv4/ah4.c | 4 ++-- net/ipv4/arp.c | 9 ++++---- net/ipv4/devinet.c | 5 ++--- net/ipv4/inet_timewait_sock.c | 4 ++-- net/ipv4/route.c | 9 ++++---- net/ipv4/tcp_input.c | 48 +++++++++++++++++++++---------------------- net/ipv4/tcp_output.c | 4 +++- net/ipv6/addrconf.c | 17 ++++++++------- net/ipv6/esp6.c | 4 ++-- net/ipv6/ip6_fib.c | 5 +++-- net/ipv6/ip6_tunnel.c | 2 +- net/ipv6/ipcomp6.c | 4 ++-- 12 files changed, 58 insertions(+), 57 deletions(-) (limited to 'net') diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 3a280756dd73..e8f2617ecd47 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -406,8 +406,8 @@ static void ah4_err(struct sk_buff *skb, u32 info) ah->spi, IPPROTO_AH, AF_INET); if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", - ntohl(ah->spi), ntohl(iph->daddr)); + pr_debug("pmtu discovery on SA AH/%08x/%08x\n", + ntohl(ah->spi), ntohl(iph->daddr)); xfrm_state_put(x); } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 373b56bf8f49..3e2bf3dedce5 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -73,6 +73,8 @@ * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -364,8 +366,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) probes -= neigh->parms->ucast_probes; if (probes < 0) { if (!(neigh->nud_state & NUD_VALID)) - printk(KERN_DEBUG - "trying to ucast probe in NUD_INVALID\n"); + pr_debug("trying to ucast probe in NUD_INVALID\n"); dst_ha = neigh->ha; read_lock_bh(&neigh->lock); } else { @@ -452,7 +453,7 @@ static int arp_set_predefined(int addr_hint, unsigned char *haddr, { switch (addr_hint) { case RTN_LOCAL: - printk(KERN_DEBUG "ARP: arp called for own IP address\n"); + pr_debug("arp called for own IP address\n"); memcpy(haddr, dev->dev_addr, dev->addr_len); return 1; case RTN_MULTICAST: @@ -473,7 +474,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb) struct neighbour *n; if (!skb_dst(skb)) { - printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); + pr_debug("arp_find is called with dst==NULL\n"); kfree_skb(skb); return 1; } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 88c9e3f68c78..10e15a144e95 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -217,8 +217,7 @@ void in_dev_finish_destroy(struct in_device *idev) WARN_ON(idev->ifa_list); WARN_ON(idev->mc_list); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n", - idev, dev ? dev->name : "NIL"); + pr_debug("%s: %p=%s\n", __func__, idev, dev ? 
dev->name : "NIL"); #endif dev_put(dev); if (!idev->dead) @@ -1174,7 +1173,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, switch (event) { case NETDEV_REGISTER: - printk(KERN_DEBUG "inetdev_event: bug\n"); + pr_debug("%s: bug\n", __func__); RCU_INIT_POINTER(dev->ip_ptr, NULL); break; case NETDEV_UP: diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 543ef6225458..2784db3155fb 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -89,8 +89,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, #ifdef SOCK_REFCNT_DEBUG if (atomic_read(&tw->tw_refcnt) != 1) { - printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", - tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt)); + pr_debug("%s timewait_sock %p refcnt=%d\n", + tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt)); } #endif while (refcnt) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 42d76441501f..76e5880cdb07 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1374,8 +1374,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) return; } } else if (!rt) - printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", - __builtin_return_address(0)); + pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0)); ip_select_fb_ident(iph); } @@ -1839,9 +1838,9 @@ static void ipv4_link_failure(struct sk_buff *skb) static int ip_rt_bug(struct sk_buff *skb) { - printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n", - &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, - skb->dev ? skb->dev->name : "?"); + pr_debug("%s: %pI4 -> %pI4, %s\n", + __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, + skb->dev ? skb->dev->name : "?"); kfree_skb(skb); WARN_ON(1); return 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 100b242135b1..eb97787be757 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -981,12 +981,12 @@ static void tcp_update_reordering(struct sock *sk, const int metric, NET_INC_STATS_BH(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 - printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", - tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, - tp->reordering, - tp->fackets_out, - tp->sacked_out, - tp->undo_marker ? tp->undo_retrans : 0); + pr_debug("Disorder%d %d %u f%u s%u rr%d\n", + tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, + tp->reordering, + tp->fackets_out, + tp->sacked_out, + tp->undo_marker ? 
tp->undo_retrans : 0); #endif tcp_disable_fack(tp); } @@ -2716,22 +2716,22 @@ static void DBGUNDO(struct sock *sk, const char *msg) struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", - msg, - &inet->inet_daddr, ntohs(inet->inet_dport), - tp->snd_cwnd, tcp_left_out(tp), - tp->snd_ssthresh, tp->prior_ssthresh, - tp->packets_out); + pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", + msg, + &inet->inet_daddr, ntohs(inet->inet_dport), + tp->snd_cwnd, tcp_left_out(tp), + tp->snd_ssthresh, tp->prior_ssthresh, + tp->packets_out); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", - msg, - &np->daddr, ntohs(inet->inet_dport), - tp->snd_cwnd, tcp_left_out(tp), - tp->snd_ssthresh, tp->prior_ssthresh, - tp->packets_out); + pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", + msg, + &np->daddr, ntohs(inet->inet_dport), + tp->snd_cwnd, tcp_left_out(tp), + tp->snd_ssthresh, tp->prior_ssthresh, + tp->packets_out); } #endif } @@ -3511,18 +3511,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (!tp->packets_out && tcp_is_sack(tp)) { icsk = inet_csk(sk); if (tp->lost_out) { - printk(KERN_DEBUG "Leak l=%u %d\n", - tp->lost_out, icsk->icsk_ca_state); + pr_debug("Leak l=%u %d\n", + tp->lost_out, icsk->icsk_ca_state); tp->lost_out = 0; } if (tp->sacked_out) { - printk(KERN_DEBUG "Leak s=%u %d\n", - tp->sacked_out, icsk->icsk_ca_state); + pr_debug("Leak s=%u %d\n", + tp->sacked_out, icsk->icsk_ca_state); tp->sacked_out = 0; } if (tp->retrans_out) { - printk(KERN_DEBUG "Leak r=%u %d\n", - tp->retrans_out, icsk->icsk_ca_state); + pr_debug("Leak r=%u %d\n", + tp->retrans_out, icsk->icsk_ca_state); tp->retrans_out = 0; } } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7979bfd5d0fb..1a630825c45b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -34,6 +34,8 @@ * */ +#define pr_fmt(fmt) "TCP: " fmt + #include #include @@ -2415,7 +2417,7 @@ int tcp_send_synack(struct sock *sk) skb = tcp_write_queue_head(sk); if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { - printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); + pr_debug("%s: wrong queue state\n", __func__); return -EFAULT; } if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 707989068555..8ec009c0b2bc 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -329,7 +329,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) WARN_ON(idev->mc_list != NULL); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "%s: %s\n", __func__, dev ? dev->name : "NIL"); + pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); #endif dev_put(dev); if (!idev->dead) { @@ -542,7 +542,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) WARN_ON(!hlist_unhashed(&ifp->addr_lst)); #ifdef NET_REFCNT_DEBUG - printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); + pr_debug("%s\n", __func__); #endif in6_dev_put(ifp->idev); @@ -2405,7 +2405,7 @@ static void init_loopback(struct net_device *dev) ASSERT_RTNL(); if ((idev = ipv6_find_idev(dev)) == NULL) { - printk(KERN_DEBUG "init loopback: add_dev failed\n"); + pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2474,7 +2474,7 @@ static void addrconf_sit_config(struct net_device *dev) */ if ((idev = ipv6_find_idev(dev)) == NULL) { - printk(KERN_DEBUG "init sit: add_dev failed\n"); + pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2509,7 +2509,7 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); if ((idev = ipv6_find_idev(dev)) == NULL) { - printk(KERN_DEBUG "init gre: add_dev failed\n"); + pr_debug("%s: add_dev failed\n", __func__); return; } @@ -2549,7 +2549,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev) if (!ipv6_inherit_linklocal(idev, link_dev)) return; } - printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n"); + pr_debug("init ip6-ip6: add_linklocal failed\n"); } /* @@ -2565,7 +2565,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) idev = addrconf_add_dev(dev); if (IS_ERR(idev)) { - printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); + pr_debug("init ip6-ip6: add_dev failed\n"); return; } ip6_tnl_add_linklocal(idev); @@ -2893,8 +2893,7 @@ static void addrconf_rs_timer(unsigned long data) * Note: we do not support deprecated "all on-link" * assumption any longer. */ - printk(KERN_DEBUG "%s: no IPv6 routers present\n", - idev->dev->name); + pr_debug("%s: no IPv6 routers present\n", idev->dev->name); } out: diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6697eb0fba55..1e62b7557b00 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -444,8 +444,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, esph->spi, IPPROTO_ESP, AF_INET6); if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", - ntohl(esph->spi), &iph->daddr); + pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n", + ntohl(esph->spi), &iph->daddr); xfrm_state_put(x); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index e9846da77424..0c220a416626 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -41,7 +41,7 @@ #define RT6_DEBUG 2 #if RT6_DEBUG >= 3 -#define RT6_TRACE(x...) printk(KERN_DEBUG x) +#define RT6_TRACE(x...) pr_debug(x) #else #define RT6_TRACE(x...) do { ; } while (0) #endif @@ -1420,7 +1420,8 @@ static int fib6_clean_node(struct fib6_walker_t *w) res = fib6_del(rt, &info); if (res) { #if RT6_DEBUG >= 2 - printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res); + pr_debug("%s: del failed: rt=%p@%p err=%d\n", + __func__, rt, rt->rt6i_node, res); #endif continue; } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 7962b3d42673..e65c56009bb0 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -62,7 +62,7 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_NETDEV("ip6tnl0"); #ifdef IP6_TNL_DEBUG -#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) +#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__) #else #define IP6_TNL_TRACE(x...) 
do {;} while(0) #endif diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 1addba5b8b39..5cb75bfe45b1 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -72,8 +72,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n", - spi, &iph->daddr); + pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n", + spi, &iph->daddr); xfrm_state_put(x); } -- cgit v1.2.2 From 1010f540181b00c7013eeb04d1bf8aedd5a56835 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:20 +0000 Subject: mac802154: allocation of ieee802154 device An interface to allocate and register ieee802154 compatible device. The allocated device has the following representation in memory: +-----------------------+ | struct wpan_phy | +-----------------------+ | struct mac802154_priv | +-----------------------+ | driver's private data | +-----------------------+ Used by device drivers to register new instance in the stack. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/Kconfig | 1 + net/Makefile | 1 + net/mac802154/Kconfig | 16 +++++ net/mac802154/Makefile | 2 + net/mac802154/ieee802154_dev.c | 149 +++++++++++++++++++++++++++++++++++++++++ net/mac802154/mac802154.h | 63 +++++++++++++++++ 6 files changed, 232 insertions(+) create mode 100644 net/mac802154/Kconfig create mode 100644 net/mac802154/Makefile create mode 100644 net/mac802154/ieee802154_dev.c create mode 100644 net/mac802154/mac802154.h (limited to 'net') diff --git a/net/Kconfig b/net/Kconfig index e07272d0bb2d..4c06c7c513a4 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -211,6 +211,7 @@ source "net/econet/Kconfig" source "net/wanrouter/Kconfig" source "net/phonet/Kconfig" source "net/ieee802154/Kconfig" +source "net/mac802154/Kconfig" source "net/sched/Kconfig" source "net/dcb/Kconfig" source "net/dns_resolver/Kconfig" diff --git a/net/Makefile b/net/Makefile index ad432fa4d934..2a97cded8601 100644 --- a/net/Makefile +++ b/net/Makefile @@ -60,6 +60,7 @@ ifneq ($(CONFIG_DCB),) obj-y += dcb/ endif obj-$(CONFIG_IEEE802154) += ieee802154/ +obj-$(CONFIG_MAC802154) += mac802154/ ifeq ($(CONFIG_NET),y) obj-$(CONFIG_SYSCTL) += sysctl_net.o diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig new file mode 100644 index 000000000000..a967ddaa4e2f --- /dev/null +++ b/net/mac802154/Kconfig @@ -0,0 +1,16 @@ +config MAC802154 + tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" + depends on IEEE802154 && EXPERIMENTAL + select CRC_CCITT + ---help--- + This option enables the hardware independent IEEE 802.15.4 + networking stack for SoftMAC devices (the ones implementing + only PHY level of IEEE 802.15.4 standard). + + Note: this implementation is neither certified, nor feature + complete! Compatibility with other implementations hasn't + been tested yet! + + If you plan to use HardMAC IEEE 802.15.4 devices, you can + say N here. Alternatievly you can say M to compile it as + module. 
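The following sketch is illustrative only and is not part of the patch: it shows how a SoftMAC transceiver driver might plug into the allocation and registration API introduced below. The "fakelr" names are invented, the header path <net/mac802154.h> is assumed, and the callback signatures are assumptions inferred from the call sites in this series (ops->start(&hw), ops->xmit(&hw, skb), ops->set_channel(&hw, page, chan), ...) rather than quoted from the real header.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

/* driver private area; lands after struct mac802154_priv in the layout above */
struct fakelr_data {
	int chip_rev;
};

static int fakelr_start(struct ieee802154_dev *dev) { return 0; }
static void fakelr_stop(struct ieee802154_dev *dev) { }

static int fakelr_ed(struct ieee802154_dev *dev, u8 *level)
{
	*level = 0;	/* report "channel idle" for the sketch */
	return 0;
}

static int fakelr_set_channel(struct ieee802154_dev *dev, int page, int chan)
{
	return 0;	/* a real driver programs the radio here */
}

static int fakelr_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
{
	return 0;	/* a real driver hands the frame to the hardware here */
}

/* the five callbacks checked by ieee802154_alloc_device() are all mandatory */
static struct ieee802154_ops fakelr_ops = {
	.start		= fakelr_start,
	.stop		= fakelr_stop,
	.ed		= fakelr_ed,
	.set_channel	= fakelr_set_channel,
	.xmit		= fakelr_xmit,
};

static struct ieee802154_dev *fakelr_dev;

static int __init fakelr_init(void)
{
	int err;

	fakelr_dev = ieee802154_alloc_device(sizeof(struct fakelr_data),
					     &fakelr_ops);
	if (!fakelr_dev)
		return -ENOMEM;

	/* for a real bus device, set fakelr_dev->parent (e.g. &spi->dev) first */
	err = ieee802154_register_device(fakelr_dev);
	if (err) {
		ieee802154_free_device(fakelr_dev);
		return err;
	}
	return 0;
}

static void __exit fakelr_exit(void)
{
	ieee802154_unregister_device(fakelr_dev);
	ieee802154_free_device(fakelr_dev);
}

module_init(fakelr_init);
module_exit(fakelr_exit);
MODULE_LICENSE("GPL");

Only these five operations are required by ieee802154_alloc_device(); optional hooks such as ieee_addr and set_hw_addr_filt are picked up later in the series only when the driver provides them.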
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile new file mode 100644 index 000000000000..cda9393e1a4f --- /dev/null +++ b/net/mac802154/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MAC802154) += mac802154.o +mac802154-objs := ieee802154_dev.o diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c new file mode 100644 index 000000000000..d130271eb528 --- /dev/null +++ b/net/mac802154/ieee802154_dev.c @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * Written by: + * Alexander Smirnov + * + * Based on the code from 'linux-zigbee.sourceforge.net' project. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include + +#include +#include +#include + +#include "mac802154.h" + +struct ieee802154_dev * +ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops) +{ + struct wpan_phy *phy; + struct mac802154_priv *priv; + size_t priv_size; + + if (!ops || !ops->xmit || !ops->ed || !ops->start || + !ops->stop || !ops->set_channel) { + printk(KERN_ERR + "undefined IEEE802.15.4 device operations\n"); + return NULL; + } + + /* Ensure 32-byte alignment of our private data and hw private data. + * We use the wpan_phy priv data for both our mac802154_priv and for + * the driver's private data + * + * in memory it'll be like this: + * + * +-----------------------+ + * | struct wpan_phy | + * +-----------------------+ + * | struct mac802154_priv | + * +-----------------------+ + * | driver's private data | + * +-----------------------+ + * + * Due to ieee802154 layer isn't aware of driver and MAC structures, + * so lets allign them here. 
+ */ + + priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len; + + phy = wpan_phy_alloc(priv_size); + if (!phy) { + printk(KERN_ERR + "failure to allocate master IEEE802.15.4 device\n"); + return NULL; + } + + priv = wpan_phy_priv(phy); + priv->hw.phy = priv->phy = phy; + priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN); + priv->ops = ops; + + INIT_LIST_HEAD(&priv->slaves); + mutex_init(&priv->slaves_mtx); + + return &priv->hw; +} +EXPORT_SYMBOL(ieee802154_alloc_device); + +void ieee802154_free_device(struct ieee802154_dev *hw) +{ + struct mac802154_priv *priv = mac802154_to_priv(hw); + + wpan_phy_free(priv->phy); + + mutex_destroy(&priv->slaves_mtx); +} +EXPORT_SYMBOL(ieee802154_free_device); + +int ieee802154_register_device(struct ieee802154_dev *dev) +{ + struct mac802154_priv *priv = mac802154_to_priv(dev); + int rc = -ENOMEM; + + priv->dev_workqueue = + create_singlethread_workqueue(wpan_phy_name(priv->phy)); + if (!priv->dev_workqueue) + goto out; + + wpan_phy_set_dev(priv->phy, priv->hw.parent); + + rc = wpan_phy_register(priv->phy); + if (rc < 0) + goto out_wq; + + rtnl_lock(); + + mutex_lock(&priv->slaves_mtx); + priv->running = MAC802154_DEVICE_RUN; + mutex_unlock(&priv->slaves_mtx); + + rtnl_unlock(); + + return 0; + +out_wq: + destroy_workqueue(priv->dev_workqueue); +out: + return rc; +} +EXPORT_SYMBOL(ieee802154_register_device); + +void ieee802154_unregister_device(struct ieee802154_dev *dev) +{ + struct mac802154_priv *priv = mac802154_to_priv(dev); + + flush_workqueue(priv->dev_workqueue); + destroy_workqueue(priv->dev_workqueue); + + rtnl_lock(); + + mutex_lock(&priv->slaves_mtx); + priv->running = MAC802154_DEVICE_STOPPED; + mutex_unlock(&priv->slaves_mtx); + + rtnl_unlock(); + + wpan_phy_unregister(priv->phy); +} +EXPORT_SYMBOL(ieee802154_unregister_device); + +MODULE_DESCRIPTION("IEEE 802.15.4 implementation"); +MODULE_LICENSE("GPL v2"); diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h new file mode 100644 index 000000000000..980d0a24040e --- /dev/null +++ b/net/mac802154/mac802154.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Pavel Smolenskiy + * Maxim Gorbachyov + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ +#ifndef MAC802154_H +#define MAC802154_H + +/* mac802154 device private data */ +struct mac802154_priv { + struct ieee802154_dev hw; + struct ieee802154_ops *ops; + + /* ieee802154 phy */ + struct wpan_phy *phy; + + int open_count; + + /* As in mac80211 slaves list is modified: + * 1) under the RTNL + * 2) protected by slaves_mtx; + * 3) in an RCU manner + * + * So atomic readers can use any of this protection methods. + */ + struct list_head slaves; + struct mutex slaves_mtx; + + /* This one is used for scanning and other jobs not to be interfered + * with serial driver. 
+ */ + struct workqueue_struct *dev_workqueue; + + /* SoftMAC device is registered and running. One can add subinterfaces. + * This flag should be modified under slaves_mtx and RTNL, so you can + * read them using any of protection methods. + */ + bool running; +}; + +#define MAC802154_DEVICE_STOPPED 0x00 +#define MAC802154_DEVICE_RUN 0x01 + +#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) + +#endif /* MAC802154_H */ -- cgit v1.2.2 From 1cd829c83eab8b899b85d597c767fcf8b4cf8fd6 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:21 +0000 Subject: mac802154: RX data path Main RX data path implementation between physical and mac layers. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/mac802154/Makefile | 2 +- net/mac802154/rx.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 net/mac802154/rx.c (limited to 'net') diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile index cda9393e1a4f..e00fe474dce4 100644 --- a/net/mac802154/Makefile +++ b/net/mac802154/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MAC802154) += mac802154.o -mac802154-objs := ieee802154_dev.o +mac802154-objs := ieee802154_dev.o rx.o diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c new file mode 100644 index 000000000000..d15738fae551 --- /dev/null +++ b/net/mac802154/rx.c @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Pavel Smolenskiy + * Maxim Gorbachyov + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "mac802154.h" + +/* The IEEE 802.15.4 standard defines 4 MAC packet types: + * - beacon frame + * - MAC command frame + * - acknowledgement frame + * - data frame + * + * and only the data frame should be pushed to the upper layers, other types + * are just internal MAC layer management information. So only data packets + * are going to be sent to the networking queue, all other will be processed + * right here by using the device workqueue. 
+ */ +struct rx_work { + struct sk_buff *skb; + struct work_struct work; + struct ieee802154_dev *dev; + u8 lqi; +}; + +static void +mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi) +{ + struct mac802154_priv *priv = mac802154_to_priv(hw); + + mac_cb(skb)->lqi = lqi; + skb->protocol = htons(ETH_P_IEEE802154); + skb_reset_mac_header(skb); + + BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb)); + + if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { + u16 crc; + + if (skb->len < 2) { + pr_debug("got invalid frame\n"); + goto out; + } + crc = crc_ccitt(0, skb->data, skb->len); + if (crc) { + pr_debug("CRC mismatch\n"); + goto out; + } + skb_trim(skb, skb->len - 2); /* CRC */ + } + +out: + dev_kfree_skb(skb); + return; +} + +static void mac802154_rx_worker(struct work_struct *work) +{ + struct rx_work *rw = container_of(work, struct rx_work, work); + struct sk_buff *skb = rw->skb; + + mac802154_subif_rx(rw->dev, skb, rw->lqi); + kfree(rw); +} + +void +ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi) +{ + struct mac802154_priv *priv = mac802154_to_priv(dev); + struct rx_work *work; + + if (!skb) + return; + + work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, mac802154_rx_worker); + work->skb = skb; + work->dev = dev; + work->lqi = lqi; + + queue_work(priv->dev_workqueue, &work->work); +} +EXPORT_SYMBOL(ieee802154_rx_irqsafe); -- cgit v1.2.2 From 5b641ebeec348761c9ebac9454c672d4d2d3ef91 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:22 +0000 Subject: mac802154: TX data path Main TX data path implementation between upper and physical layers. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/mac802154/Makefile | 2 +- net/mac802154/mac802154.h | 5 ++ net/mac802154/tx.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 net/mac802154/tx.c (limited to 'net') diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile index e00fe474dce4..490beef7176e 100644 --- a/net/mac802154/Makefile +++ b/net/mac802154/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MAC802154) += mac802154.o -mac802154-objs := ieee802154_dev.o rx.o +mac802154-objs := ieee802154_dev.o rx.o tx.o diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h index 980d0a24040e..4f2d97506c6a 100644 --- a/net/mac802154/mac802154.h +++ b/net/mac802154/mac802154.h @@ -60,4 +60,9 @@ struct mac802154_priv { #define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) +#define MAC802154_MAX_XMIT_ATTEMPTS 3 + +netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, + u8 page, u8 chan); + #endif /* MAC802154_H */ diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c new file mode 100644 index 000000000000..8781d8f904d9 --- /dev/null +++ b/net/mac802154/tx.c @@ -0,0 +1,116 @@ +/* + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov + * Sergey Lapin + * Maxim Gorbachyov + * Alexander Smirnov + */ + +#include +#include +#include + +#include +#include + +#include "mac802154.h" + +/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process + * packets through the workqueue. + */ +struct xmit_work { + struct sk_buff *skb; + struct work_struct work; + struct mac802154_priv *priv; + u8 chan; + u8 page; + u8 xmit_attempts; +}; + +static void mac802154_xmit_worker(struct work_struct *work) +{ + struct xmit_work *xw = container_of(work, struct xmit_work, work); + int res; + + mutex_lock(&xw->priv->phy->pib_lock); + if (xw->priv->phy->current_channel != xw->chan || + xw->priv->phy->current_page != xw->page) { + res = xw->priv->ops->set_channel(&xw->priv->hw, + xw->page, + xw->chan); + if (res) { + pr_debug("set_channel failed\n"); + goto out; + } + } + + res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb); + +out: + mutex_unlock(&xw->priv->phy->pib_lock); + + if (res) { + if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) { + queue_work(xw->priv->dev_workqueue, &xw->work); + return; + } else + pr_debug("transmission failed for %d times", + MAC802154_MAX_XMIT_ATTEMPTS); + } + + dev_kfree_skb(xw->skb); + + kfree(xw); +} + +netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, + u8 page, u8 chan) +{ + struct xmit_work *work; + + if (!(priv->phy->channels_supported[page] & (1 << chan))) + WARN_ON(1); + return NETDEV_TX_OK; + + if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) { + u16 crc = crc_ccitt(0, skb->data, skb->len); + u8 *data = skb_put(skb, 2); + data[0] = crc & 0xff; + data[1] = crc >> 8; + } + + if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC); + if (!work) + return NETDEV_TX_BUSY; + + INIT_WORK(&work->work, mac802154_xmit_worker); + work->skb = skb; + work->priv = priv; + work->page = page; + work->chan = chan; + work->xmit_attempts = 0; + + queue_work(priv->dev_workqueue, &work->work); + + return NETDEV_TX_OK; +} -- cgit v1.2.2 From 4d23c9cc075e778584aa74da402f6bf968ad92b7 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:24 +0000 Subject: mac802154: slave interfaces declaration Slaves represent typical network interfaces available from userspace. Each ieee802154 device/transceiver may have several slaves and able to be associated with several networks at the same time. So this patch adds structure for slaves declaration. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/mac802154/mac802154.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'net') diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h index 4f2d97506c6a..d53b86c75d53 100644 --- a/net/mac802154/mac802154.h +++ b/net/mac802154/mac802154.h @@ -58,6 +58,34 @@ struct mac802154_priv { #define MAC802154_DEVICE_STOPPED 0x00 #define MAC802154_DEVICE_RUN 0x01 +/* Slave interface definition. + * + * Slaves represent typical network interfaces available from userspace. + * Each ieee802154 device/transceiver may have several slaves and able + * to be associated with several networks at the same time. 
+ */ +struct mac802154_sub_if_data { + struct list_head list; /* the ieee802154_priv->slaves list */ + + struct mac802154_priv *hw; + struct net_device *dev; + + int type; + + spinlock_t mib_lock; + + __le16 pan_id; + __le16 short_addr; + + u8 chan; + u8 page; + + /* MAC BSN field */ + u8 bsn; + /* MAC DSN field */ + u8 dsn; +}; + #define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) #define MAC802154_MAX_XMIT_ATTEMPTS 3 -- cgit v1.2.2 From 6e2128d42af43906d8bcbed7cf2207244fa4301e Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:25 +0000 Subject: mac802154: basic MAC commands interface support Declare set of MAC-commands for reduced functionality interface. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/mac802154/Makefile | 2 +- net/mac802154/mac802154.h | 2 ++ net/mac802154/mac_cmd.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 net/mac802154/mac_cmd.c (limited to 'net') diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile index 490beef7176e..4d9dd0f3b969 100644 --- a/net/mac802154/Makefile +++ b/net/mac802154/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MAC802154) += mac802154.o -mac802154-objs := ieee802154_dev.o rx.o tx.o +mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h index d53b86c75d53..7ec2821c574b 100644 --- a/net/mac802154/mac802154.h +++ b/net/mac802154/mac802154.h @@ -90,6 +90,8 @@ struct mac802154_sub_if_data { #define MAC802154_MAX_XMIT_ATTEMPTS 3 +extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; + netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, u8 page, u8 chan); diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c new file mode 100644 index 000000000000..7a5d0e052cd7 --- /dev/null +++ b/net/mac802154/mac_cmd.c @@ -0,0 +1,45 @@ +/* + * MAC commands interface + * + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Sergey Lapin + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ + +#include +#include + +#include +#include +#include + +#include "mac802154.h" + +struct wpan_phy *mac802154_get_phy(const struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return to_phy(get_device(&priv->hw->phy->dev)); +} + +struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = { + .get_phy = mac802154_get_phy, +}; -- cgit v1.2.2 From ef2486f5538b886ad4f0d1ac0857b518291b48f7 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:26 +0000 Subject: mac802154: basic mib support Basic support for IEEE 802.15.4 management information base. Current implementation contains a command to set HW address only. 
Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/mac802154/Makefile | 2 +- net/mac802154/mac802154.h | 3 ++ net/mac802154/mib.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 net/mac802154/mib.c (limited to 'net') diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile index 4d9dd0f3b969..6b348b07a765 100644 --- a/net/mac802154/Makefile +++ b/net/mac802154/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MAC802154) += mac802154.o -mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o +mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h index 7ec2821c574b..7d9a0d4a99fd 100644 --- a/net/mac802154/mac802154.h +++ b/net/mac802154/mac802154.h @@ -95,4 +95,7 @@ extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, u8 page, u8 chan); +/* MIB callbacks */ +void mac802154_dev_set_ieee_addr(struct net_device *dev); + #endif /* MAC802154_H */ diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c new file mode 100644 index 000000000000..ab59821ec729 --- /dev/null +++ b/net/mac802154/mib.c @@ -0,0 +1,93 @@ +/* + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Written by: + * Dmitry Eremin-Solenikov + * Sergey Lapin + * Maxim Gorbachyov + * Alexander Smirnov + */ + +#include + +#include +#include + +#include "mac802154.h" + +struct hw_addr_filt_notify_work { + struct work_struct work; + struct net_device *dev; + unsigned long changed; +}; + +struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return priv->hw; +} + +static void hw_addr_notify(struct work_struct *work) +{ + struct hw_addr_filt_notify_work *nw = container_of(work, + struct hw_addr_filt_notify_work, work); + struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev); + int res; + + res = hw->ops->set_hw_addr_filt(&hw->hw, + &hw->hw.hw_filt, + nw->changed); + if (res) + pr_debug("failed changed mask %lx\n", nw->changed); + + kfree(nw); + + return; +} + +static void set_hw_addr_filt(struct net_device *dev, unsigned long changed) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct hw_addr_filt_notify_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, hw_addr_notify); + work->dev = dev; + work->changed = changed; + queue_work(priv->hw->dev_workqueue, &work->work); + + return; +} + +void mac802154_dev_set_ieee_addr(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct mac802154_priv *mac = priv->hw; + + if (mac->ops->set_hw_addr_filt && + memcmp(mac->hw.hw_filt.ieee_addr, + dev->dev_addr, IEEE802154_ADDR_LEN)) { + memcpy(mac->hw.hw_filt.ieee_addr, + dev->dev_addr, IEEE802154_ADDR_LEN); + set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED); + } +} -- cgit v1.2.2 From 90c049b2c6ae26d1a4d526d660a976620eaa554a Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:27 +0000 Subject: ieee802154: interface type to be added This stack implementation distinguishes several types of slave interfaces. Another parameter to 'add_iface_' function is added to clarify the interface type is going to be registered. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/ieee802154/nl-phy.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 3bdc4303c339..eed291626da6 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -179,6 +179,7 @@ static int ieee802154_add_iface(struct sk_buff *skb, const char *devname; int rc = -ENOBUFS; struct net_device *dev; + int type = __IEEE802154_DEV_INVALID; pr_debug("%s\n", __func__); @@ -221,7 +222,13 @@ static int ieee802154_add_iface(struct sk_buff *skb, goto nla_put_failure; } - dev = phy->add_iface(phy, devname); + if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) { + type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]); + if (type >= __IEEE802154_DEV_MAX) + return -EINVAL; + } + + dev = phy->add_iface(phy, devname, type); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto nla_put_failure; -- cgit v1.2.2 From 62610ad21870a8cf842d4a48f07c3a964e1d2622 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:28 +0000 Subject: mac802154: slaves management support This patch adds functionality for registration and removing slaves in the stack. Signed-off-by: Alexander Smirnov Signed-off-by: David S. 
Miller --- net/mac802154/ieee802154_dev.c | 142 +++++++++++++++++++++++++++++++++++++++++ net/mac802154/mac802154.h | 3 + 2 files changed, 145 insertions(+) (limited to 'net') diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c index d130271eb528..9f36760b6c3a 100644 --- a/net/mac802154/ieee802154_dev.c +++ b/net/mac802154/ieee802154_dev.c @@ -24,12 +24,140 @@ #include #include +#include +#include #include #include #include #include "mac802154.h" +int mac802154_slave_open(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct mac802154_priv *ipriv = priv->hw; + int res = 0; + + if (ipriv->open_count++ == 0) { + res = ipriv->ops->start(&ipriv->hw); + WARN_ON(res); + if (res) + goto err; + } + + if (ipriv->ops->ieee_addr) { + res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr); + WARN_ON(res); + if (res) + goto err; + mac802154_dev_set_ieee_addr(dev); + } + + netif_start_queue(dev); + return 0; +err: + priv->hw->open_count--; + + return res; +} + +int mac802154_slave_close(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv = netdev_priv(dev); + struct mac802154_priv *ipriv = priv->hw; + + netif_stop_queue(dev); + + if (!--ipriv->open_count) + ipriv->ops->stop(&ipriv->hw); + + return 0; +} + +static int +mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev) +{ + struct mac802154_sub_if_data *priv; + struct mac802154_priv *ipriv; + int err; + + ipriv = wpan_phy_priv(phy); + + priv = netdev_priv(dev); + priv->dev = dev; + priv->hw = ipriv; + + dev->needed_headroom = ipriv->hw.extra_tx_headroom; + + SET_NETDEV_DEV(dev, &ipriv->phy->dev); + + mutex_lock(&ipriv->slaves_mtx); + if (!ipriv->running) { + mutex_unlock(&ipriv->slaves_mtx); + return -ENODEV; + } + mutex_unlock(&ipriv->slaves_mtx); + + err = register_netdev(dev); + if (err < 0) + return err; + + rtnl_lock(); + mutex_lock(&ipriv->slaves_mtx); + list_add_tail_rcu(&priv->list, &ipriv->slaves); + mutex_unlock(&ipriv->slaves_mtx); + rtnl_unlock(); + + return 0; +} + +static void +mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev) +{ + struct mac802154_sub_if_data *sdata; + ASSERT_RTNL(); + + sdata = netdev_priv(dev); + + BUG_ON(sdata->hw->phy != phy); + + mutex_lock(&sdata->hw->slaves_mtx); + list_del_rcu(&sdata->list); + mutex_unlock(&sdata->hw->slaves_mtx); + + synchronize_rcu(); + unregister_netdevice(sdata->dev); +} + +static struct net_device * +mac802154_add_iface(struct wpan_phy *phy, const char *name, int type) +{ + struct net_device *dev; + int err = -ENOMEM; + + /* No devices is currently added */ + switch (type) { + default: + dev = NULL; + err = -EINVAL; + break; + } + if (!dev) + goto err; + + err = mac802154_netdev_register(phy, dev); + if (err) + goto err_free; + + dev_hold(dev); /* we return an incremented device refcount */ + return dev; + +err_free: + free_netdev(dev); +err: + return ERR_PTR(err); +} + struct ieee802154_dev * ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops) { @@ -87,6 +215,8 @@ void ieee802154_free_device(struct ieee802154_dev *hw) { struct mac802154_priv *priv = mac802154_to_priv(hw); + BUG_ON(!list_empty(&priv->slaves)); + wpan_phy_free(priv->phy); mutex_destroy(&priv->slaves_mtx); @@ -105,6 +235,9 @@ int ieee802154_register_device(struct ieee802154_dev *dev) wpan_phy_set_dev(priv->phy, priv->hw.parent); + priv->phy->add_iface = mac802154_add_iface; + priv->phy->del_iface = mac802154_del_iface; + rc = wpan_phy_register(priv->phy); if (rc < 0) goto out_wq; @@ -129,6 +262,7 
@@ EXPORT_SYMBOL(ieee802154_register_device); void ieee802154_unregister_device(struct ieee802154_dev *dev) { struct mac802154_priv *priv = mac802154_to_priv(dev); + struct mac802154_sub_if_data *sdata, *next; flush_workqueue(priv->dev_workqueue); destroy_workqueue(priv->dev_workqueue); @@ -139,6 +273,14 @@ void ieee802154_unregister_device(struct ieee802154_dev *dev) priv->running = MAC802154_DEVICE_STOPPED; mutex_unlock(&priv->slaves_mtx); + list_for_each_entry_safe(sdata, next, &priv->slaves, list) { + mutex_lock(&sdata->hw->slaves_mtx); + list_del(&sdata->list); + mutex_unlock(&sdata->hw->slaves_mtx); + + unregister_netdevice(sdata->dev); + } + rtnl_unlock(); wpan_phy_unregister(priv->phy); diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h index 7d9a0d4a99fd..622752622c87 100644 --- a/net/mac802154/mac802154.h +++ b/net/mac802154/mac802154.h @@ -92,6 +92,9 @@ struct mac802154_sub_if_data { extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; +int mac802154_slave_open(struct net_device *dev); +int mac802154_slave_close(struct net_device *dev); + netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, u8 page, u8 chan); -- cgit v1.2.2 From 0606069d9ef538687957d41ed6387d665af7a643 Mon Sep 17 00:00:00 2001 From: "alex.bluesman.smirnov@gmail.com" Date: Tue, 15 May 2012 20:50:29 +0000 Subject: mac802154: monitor device support Support for monitor device intended to capture all the network activity. This interface could be used by networks sniffers and is already supported by WireShark. That's a good test point to check that basic MAC support works. Signed-off-by: Alexander Smirnov Signed-off-by: David S. Miller --- net/mac802154/Makefile | 2 +- net/mac802154/ieee802154_dev.c | 5 +- net/mac802154/mac802154.h | 5 ++ net/mac802154/monitor.c | 116 +++++++++++++++++++++++++++++++++++++++++ net/mac802154/rx.c | 1 + 5 files changed, 127 insertions(+), 2 deletions(-) create mode 100644 net/mac802154/monitor.c (limited to 'net') diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile index 6b348b07a765..ec1bd3fc1273 100644 --- a/net/mac802154/Makefile +++ b/net/mac802154/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MAC802154) += mac802154.o -mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o +mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c index 9f36760b6c3a..e3edfb0661b0 100644 --- a/net/mac802154/ieee802154_dev.c +++ b/net/mac802154/ieee802154_dev.c @@ -135,8 +135,11 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type) struct net_device *dev; int err = -ENOMEM; - /* No devices is currently added */ switch (type) { + case IEEE802154_DEV_MONITOR: + dev = alloc_netdev(sizeof(struct mac802154_sub_if_data), + name, mac802154_monitor_setup); + break; default: dev = NULL; err = -EINVAL; diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h index 622752622c87..789d9c948aec 100644 --- a/net/mac802154/mac802154.h +++ b/net/mac802154/mac802154.h @@ -90,11 +90,16 @@ struct mac802154_sub_if_data { #define MAC802154_MAX_XMIT_ATTEMPTS 3 +#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */ + extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; int mac802154_slave_open(struct net_device *dev); int mac802154_slave_close(struct net_device *dev); +void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb); +void mac802154_monitor_setup(struct net_device *dev); + netdev_tx_t 
mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, u8 page, u8 chan); diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c new file mode 100644 index 000000000000..434a26f76a80 --- /dev/null +++ b/net/mac802154/monitor.c @@ -0,0 +1,116 @@ +/* + * Copyright 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov + * Sergey Lapin + * Maxim Gorbachyov + * Alexander Smirnov + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mac802154.h" + +static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mac802154_sub_if_data *priv; + u8 chan, page; + + priv = netdev_priv(dev); + + /* FIXME: locking */ + chan = priv->hw->phy->current_channel; + page = priv->hw->phy->current_page; + + if (chan == MAC802154_CHAN_NONE) /* not initialized */ + return NETDEV_TX_OK; + + if (WARN_ON(page >= WPAN_NUM_PAGES) || + WARN_ON(chan >= WPAN_NUM_CHANNELS)) + return NETDEV_TX_OK; + + skb->skb_iif = dev->ifindex; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return mac802154_tx(priv->hw, skb, page, chan); +} + + +void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *skb2; + struct mac802154_sub_if_data *sdata; + u16 crc = crc_ccitt(0, skb->data, skb->len); + u8 *data; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &priv->slaves, list) { + if (sdata->type != IEEE802154_DEV_MONITOR) + continue; + + skb2 = skb_clone(skb, GFP_ATOMIC); + skb2->dev = sdata->dev; + skb2->pkt_type = PACKET_HOST; + data = skb_put(skb2, 2); + data[0] = crc & 0xff; + data[1] = crc >> 8; + + netif_rx_ni(skb2); + } + rcu_read_unlock(); +} + +static const struct net_device_ops mac802154_monitor_ops = { + .ndo_open = mac802154_slave_open, + .ndo_stop = mac802154_slave_close, + .ndo_start_xmit = mac802154_monitor_xmit, +}; + +void mac802154_monitor_setup(struct net_device *dev) +{ + struct mac802154_sub_if_data *priv; + + dev->addr_len = 0; + dev->hard_header_len = 0; + dev->needed_tailroom = 2; /* room for FCS */ + dev->mtu = IEEE802154_MTU; + dev->tx_queue_len = 10; + dev->type = ARPHRD_IEEE802154_MONITOR; + dev->flags = IFF_NOARP | IFF_BROADCAST; + dev->watchdog_timeo = 0; + + dev->destructor = free_netdev; + dev->netdev_ops = &mac802154_monitor_ops; + dev->ml_priv = &mac802154_mlme_reduced; + + priv = netdev_priv(dev); + priv->type = IEEE802154_DEV_MONITOR; + + priv->chan = MAC802154_CHAN_NONE; /* not initialized */ + priv->page = 0; +} diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index d15738fae551..4a7d76d4f8bc 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -76,6 +76,7 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi) skb_trim(skb, skb->len - 2); /* CRC */ } + mac802154_monitors_rx(priv, skb); out: dev_kfree_skb(skb); return; -- 
cgit v1.2.2 From 865ec5523dadbedefbc5710a68969f686a28d928 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 May 2012 04:39:09 +0000 Subject: fq_codel: should use qdisc backlog as threshold codel_should_drop() logic allows a packet being not dropped if queue size is under max packet size. In fq_codel, we have two possible backlogs : The qdisc global one, and the flow local one. The meaningful one for codel_should_drop() should be the global backlog, not the per flow one, so that thin flows can have a non zero drop/mark probability. Signed-off-by: Eric Dumazet Cc: Dave Taht Cc: Kathleen Nichols Cc: Van Jacobson Signed-off-by: David S. Miller --- net/sched/sch_codel.c | 4 ++-- net/sched/sch_fq_codel.c | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 213ef60bced8..2f9ab17db85a 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) struct codel_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; - skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, - dequeue, &sch->qstats.backlog); + skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); + /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, * or HTB crashes. Defer it for next round. */ diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 337ff204f272..9fc1c62ec80e 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) */ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch) { + struct fq_codel_sched_data *q = qdisc_priv(sch); struct fq_codel_flow *flow; struct sk_buff *skb = NULL; flow = container_of(vars, struct fq_codel_flow, cvars); if (flow->head) { skb = dequeue_head(flow); - sch->qstats.backlog -= qdisc_pkt_len(skb); + q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); sch->q.qlen--; } return skb; @@ -256,7 +257,7 @@ begin: prev_ecn_mark = q->cstats.ecn_mark; skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, - dequeue, &q->backlogs[flow - q->flows]); + dequeue); flow->dropped += q->cstats.drop_count - prev_drop_count; flow->dropped += q->cstats.ecn_mark - prev_ecn_mark; -- cgit v1.2.2 From 1b23a5dfc20469d4a4bb8a552dd224ac693c407c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 May 2012 05:57:07 +0000 Subject: net: sock_flag() cleanup - sock_flag() accepts a const pointer - sock_flag() returns a boolean Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/sock.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 26ed27fb2bfb..9d144ee7e379 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -849,7 +849,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_BROADCAST: - v.val = !!sock_flag(sk, SOCK_BROADCAST); + v.val = sock_flag(sk, SOCK_BROADCAST); break; case SO_SNDBUF: @@ -865,7 +865,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_KEEPALIVE: - v.val = !!sock_flag(sk, SOCK_KEEPOPEN); + v.val = sock_flag(sk, SOCK_KEEPOPEN); break; case SO_TYPE: @@ -887,7 +887,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_OOBINLINE: - v.val = !!sock_flag(sk, SOCK_URGINLINE); + v.val = sock_flag(sk, SOCK_URGINLINE); break; case SO_NO_CHECK: @@ -900,7 +900,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, case SO_LINGER: lv = sizeof(v.ling); - v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER); + v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); v.ling.l_linger = sk->sk_lingertime / HZ; break; @@ -1012,11 +1012,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_RXQ_OVFL: - v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); + v.val = sock_flag(sk, SOCK_RXQ_OVFL); break; case SO_WIFI_STATUS: - v.val = !!sock_flag(sk, SOCK_WIFI_STATUS); + v.val = sock_flag(sk, SOCK_WIFI_STATUS); break; case SO_PEEK_OFF: @@ -1026,7 +1026,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_peek_off; break; case SO_NOFCS: - v.val = !!sock_flag(sk, SOCK_NOFCS); + v.val = sock_flag(sk, SOCK_NOFCS); break; default: return -ENOPROTOOPT; -- cgit v1.2.2 From 1f27e2516c1d95ae19024bec5be68a3f489cc47e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 May 2012 22:11:54 +0000 Subject: netfilter: xt_hashlimit: use _ALL macro to reject unknown flag bits David Miller says: The canonical way to validate if the set bits are in a valid range is to have a "_ALL" macro, and test: if (val & ~XT_HASHLIMIT_ALL) goto err;" make it so. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_hashlimit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 5d5af1d04fa2..26a668a84aa2 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -647,7 +647,7 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par) return -EINVAL; } - if (info->cfg.mode >= XT_HASHLIMIT_MAX) { + if (info->cfg.mode & ~XT_HASHLIMIT_ALL) { pr_info("Unknown mode mask %X, kernel too old?\n", info->cfg.mode); return -EINVAL; -- cgit v1.2.2 From 58618115492711d99fbccb79c5317299e32231fe Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 12 May 2012 01:00:03 +0000 Subject: netfilter: xt_HMARK: potential NULL dereference in get_inner_hdr() There is a typo in the error checking and "&&" was used instead of "||". If skb_header_pointer() returns NULL then it leads to a NULL dereference. 
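(Editorial aside, not part of the patch above: a minimal user-space sketch of the short-circuit behaviour behind this fix. The struct and helper names are made up for illustration; only the "&&" vs "||" logic mirrors get_inner_hdr().)

#include <stdio.h>
#include <stddef.h>

struct hdr { int type; };	/* stand-in for the ICMP header */

static int check_buggy(const struct hdr *h)
{
	/* "&&": when h is NULL the left operand is true, so the right
	 * operand is still evaluated and h is dereferenced -- the crash
	 * described in the changelog. */
	return h == NULL && h->type > 10;
}

static int check_fixed(const struct hdr *h)
{
	/* "||": a NULL h short-circuits the test, so the dereference on
	 * the right-hand side is never reached. */
	return h == NULL || h->type > 10;
}

int main(void)
{
	struct hdr good = { .type = 3 };

	printf("fixed, valid hdr: %d\n", check_fixed(&good));	/* 0: keep going */
	printf("fixed, NULL hdr:  %d\n", check_fixed(NULL));	/* 1: bail out   */
	/* check_buggy(NULL) would dereference NULL here; check_buggy()
	 * also never validates the type when h is non-NULL. */
	return 0;
}

In get_inner_hdr() the "1" case maps to "return 0" (bail out on a bad header), which is why "||" is the intended operator.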
Signed-off-by: Dan Carpenter Acked-by: Hans Schillstrom Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_HMARK.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 32fbd735d02b..5817d03105b2 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c @@ -223,7 +223,7 @@ static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff) /* Not enough header? */ icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih); - if (icmph == NULL && icmph->type > NR_ICMP_TYPES) + if (icmph == NULL || icmph->type > NR_ICMP_TYPES) return 0; /* Error message? */ -- cgit v1.2.2 From c44f5faa8e8c2036da2ba656f79b57a737543aff Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 14 May 2012 02:01:46 +0200 Subject: netfilter: xt_HMARK: modulus is expensive for hash calculation Use: ((u64)(HASH_VAL * HASH_SIZE)) >> 32 as suggested by David S. Miller. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_HMARK.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 5817d03105b2..0a96a43108ed 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c @@ -109,7 +109,7 @@ hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); hash = hash ^ (t->proto & info->proto_mask); - return (hash % info->hmodulus) + info->hoffset; + return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; } static void -- cgit v1.2.2 From 1a4ac9870fb82eed56623d0f69ec59aa5bef85fe Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 14 May 2012 10:55:03 +0200 Subject: netfilter: nf_ct_tcp: extend log message for invalid ignored packets Extend log message if packets are ignored to include the TCP state, ie. replace: [ 3968.070196] nf_ct_tcp: invalid packet ignored IN= OUT= SRC=... by: [ 3968.070196] nf_ct_tcp: invalid packet ignored in state ESTABLISHED IN= OUT= SRC=... This information is useful to know in what state we were while ignoring the packet. 
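(Editorial aside, not part of the patch above: a small user-space illustration of the multiply-shift range reduction adopted in the xt_HMARK "modulus is expensive" patch earlier in this series. Only the reduce() arithmetic mirrors hmark_hash(); everything else is made up for the example.)

#include <stdio.h>
#include <stdint.h>

/* Map a uniform 32-bit hash into [0, n) without a division: since
 * hash / 2^32 lies in [0, 1), (hash * n) / 2^32 lies in [0, n). */
static uint32_t reduce(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	printf("%u\n", reduce(0x80000000u, 32));	/* 16: 2^31 * 32 >> 32 */
	printf("%u\n", reduce(0xffffffffu, 32));	/* 31: top of the range */
	return 0;
}

The result is driven by the high bits of the hash rather than the low bits that "hash % n" would use, and it replaces a division instruction with a multiply and a shift.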
Signed-off-by: Pablo Neira Ayuso Acked-by: Jozsef Kadlecsik --- net/netfilter/nf_conntrack_proto_tcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 4dfbfa840f8a..21ff1a99f534 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -952,7 +952,8 @@ static int tcp_packet(struct nf_conn *ct, spin_unlock_bh(&ct->lock); if (LOG_INVALID(net, IPPROTO_TCP)) nf_log_packet(pf, 0, skb, NULL, NULL, NULL, - "nf_ct_tcp: invalid packet ignored "); + "nf_ct_tcp: invalid packet ignored in " + "state %s ", tcp_conntrack_names[old_state]); return NF_ACCEPT; case TCP_CONNTRACK_MAX: /* Invalid packet */ -- cgit v1.2.2 From 127f559127f5175e4bec3dab725a34845d956591 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Mon, 7 May 2012 02:35:44 +0000 Subject: netfilter: ipset: fix timeout value overflow bug Large timeout parameters could result wrong timeout values due to an overflow at msec to jiffies conversion (reported by Andreas Herz) [ This patch was mangled by Pablo Neira Ayuso since David Laight and Eric Dumazet noticed that we were using hardcoded 1000 instead of MSEC_PER_SEC to calculate the timeout ] Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_set.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 0ec8138aa470..035960ec5cb9 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -44,6 +44,14 @@ const struct ip_set_adt_opt n = { \ .cmdflags = cfs, \ .timeout = t, \ } +#define ADT_MOPT(n, f, d, fs, cfs, t) \ +struct ip_set_adt_opt n = { \ + .family = f, \ + .dim = d, \ + .flags = fs, \ + .cmdflags = cfs, \ + .timeout = t, \ +} /* Revision 0 interface: backward compatible with netfilter/iptables */ @@ -296,11 +304,14 @@ static unsigned int set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v2 *info = par->targinfo; - ADT_OPT(add_opt, par->family, info->add_set.dim, - info->add_set.flags, info->flags, info->timeout); + ADT_MOPT(add_opt, par->family, info->add_set.dim, + info->add_set.flags, info->flags, info->timeout); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); + /* Normalize to fit into jiffies */ + if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC) + add_opt.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) -- cgit v1.2.2 From 1a52099640bd7c1ff1577e9a967fb781ac59dfba Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Wed, 9 May 2012 12:03:35 +0000 Subject: netfilter: xt_CT: remove redundant header include nf_conntrack_l4proto.h is included twice. 
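(Editorial aside, not part of the patch above: the arithmetic behind the UINT_MAX/MSEC_PER_SEC clamp in the ipset timeout patch earlier in this series. The sketch only assumes that the second-resolution timeout is later scaled to milliseconds in 32-bit arithmetic, which is what the "overflow at msec to jiffies conversion" in that changelog refers to.)

#include <stdio.h>
#include <limits.h>

#define MSEC_PER_SEC 1000u

int main(void)
{
	unsigned int timeout = 5000000;			/* seconds, ~57.9 days */
	unsigned int msecs = timeout * MSEC_PER_SEC;	/* wraps modulo 2^32   */

	printf("clamp  : %u s (~49.7 days)\n", UINT_MAX / MSEC_PER_SEC);
	printf("wrapped: %u ms (~%u s, ~8.2 days)\n", msecs, msecs / MSEC_PER_SEC);
	return 0;
}

Anything above UINT_MAX / MSEC_PER_SEC = 4294967 seconds wraps to a much smaller value (here ~705032 s instead of the requested 5000000 s), so clamping in set_target_v2() keeps the converted timeout meaningful.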
Signed-off-by: Eldad Zack Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_CT.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 3746d8b9a478..a51de9b052be 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include -- cgit v1.2.2 From be3eed2e96340d3c7a4d1ea1d63e7bd6095d1e34 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 14 May 2012 00:42:02 +0200 Subject: netfilter: nf_ct_h323: fix usage of MODULE_ALIAS_NFCT_HELPER ctnetlink uses the aliases that are created by MODULE_ALIAS_NFCT_HELPER to auto-load the module based on the helper name. Thus, we have to use RAS, Q.931 and H.245, not H.323. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_h323_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 93c13eb67b3f..46d69d7f1bb4 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -1830,4 +1830,6 @@ MODULE_AUTHOR("Jing Min Zhao "); MODULE_DESCRIPTION("H.323 connection tracking helper"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ip_conntrack_h323"); -MODULE_ALIAS_NFCT_HELPER("h323"); +MODULE_ALIAS_NFCT_HELPER("RAS"); +MODULE_ALIAS_NFCT_HELPER("Q.931"); +MODULE_ALIAS_NFCT_HELPER("H.245"); -- cgit v1.2.2 From a4ca44fa578c7c7fd123b7fba3c2c98d4ba4e53d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 16 May 2012 09:55:56 +0000 Subject: net: l2tp: Standardize logging styles Use more current logging styles. Add pr_fmt to prefix output appropriately. Convert printks to pr_. Convert PRINTK macros to new l2tp_ macros. Neaten some _refcount debugging macros. Use print_hex_dump_bytes instead of hand-coded loops. Coalesce formats and align arguments. Some KERN_DEBUG output is not now emitted unless dynamic_debugging is enabled. Signed-off-by: Joe Perches Signed-off-by: James Chapman Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 216 ++++++++++++++++++++++-------------------------- net/l2tp/l2tp_core.h | 35 ++++++-- net/l2tp/l2tp_debugfs.c | 6 +- net/l2tp/l2tp_eth.c | 15 ++-- net/l2tp/l2tp_ip.c | 15 ++-- net/l2tp/l2tp_ip6.c | 15 ++-- net/l2tp/l2tp_netlink.c | 4 +- net/l2tp/l2tp_ppp.c | 128 ++++++++++++++-------------- 8 files changed, 213 insertions(+), 221 deletions(-) (limited to 'net') diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0d6aedc3a0ce..32b2155e7ab4 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -18,6 +18,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -86,12 +88,6 @@ /* Default trace flags */ #define L2TP_DEFAULT_DEBUG_FLAGS 0 -#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ - do { \ - if ((_mask) & (_type)) \ - printk(_lvl "L2TP: " _fmt, ##args); \ - } while (0) - /* Private data stored for received packets in the skb. 
*/ struct l2tp_skb_cb { @@ -141,14 +137,20 @@ static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) l2tp_tunnel_free(tunnel); } #ifdef L2TP_REFCNT_DEBUG -#define l2tp_tunnel_inc_refcount(_t) do { \ - printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ - l2tp_tunnel_inc_refcount_1(_t); \ - } while (0) -#define l2tp_tunnel_dec_refcount(_t) do { \ - printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ - l2tp_tunnel_dec_refcount_1(_t); \ - } while (0) +#define l2tp_tunnel_inc_refcount(_t) \ +do { \ + pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_t)->name, \ + atomic_read(&_t->ref_count)); \ + l2tp_tunnel_inc_refcount_1(_t); \ +} while (0) +#define l2tp_tunnel_dec_refcount(_t) +do { \ + pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_t)->name, \ + atomic_read(&_t->ref_count)); \ + l2tp_tunnel_dec_refcount_1(_t); \ +} while (0) #else #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) @@ -337,10 +339,10 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { if (L2TP_SKB_CB(skbp)->ns > ns) { __skb_queue_before(&session->reorder_q, skbp, skb); - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", - session->name, ns, L2TP_SKB_CB(skbp)->ns, - skb_queue_len(&session->reorder_q)); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", + session->name, ns, L2TP_SKB_CB(skbp)->ns, + skb_queue_len(&session->reorder_q)); u64_stats_update_begin(&sstats->syncp); sstats->rx_oos_packets++; u64_stats_update_end(&sstats->syncp); @@ -386,8 +388,8 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * else session->nr &= 0xffffff; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated nr to %hu\n", session->name, session->nr); + l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n", + session->name, session->nr); } /* call private receive handler */ @@ -422,12 +424,11 @@ start: sstats->rx_seq_discards++; sstats->rx_errors++; u64_stats_update_end(&sstats->syncp); - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: oos pkt %u len %d discarded (too old), " - "waiting for %u, reorder_q_len=%d\n", - session->name, L2TP_SKB_CB(skb)->ns, - L2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", + session->name, L2TP_SKB_CB(skb)->ns, + L2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); session->reorder_skip = 1; __skb_unlink(skb, &session->reorder_q); kfree_skb(skb); @@ -438,20 +439,19 @@ start: if (L2TP_SKB_CB(skb)->has_seq) { if (session->reorder_skip) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: advancing nr to next pkt: %u -> %u", - session->name, session->nr, - L2TP_SKB_CB(skb)->ns); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: advancing nr to next pkt: %u -> %u", + session->name, session->nr, + L2TP_SKB_CB(skb)->ns); session->reorder_skip = 0; session->nr = L2TP_SKB_CB(skb)->ns; } if (L2TP_SKB_CB(skb)->ns != session->nr) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: holding oos pkt %u 
len %d, " - "waiting for %u, reorder_q_len=%d\n", - session->name, L2TP_SKB_CB(skb)->ns, - L2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n", + session->name, L2TP_SKB_CB(skb)->ns, + L2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); goto out; } } @@ -595,9 +595,10 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: cookie mismatch (%u/%u). Discarding.\n", - tunnel->name, tunnel->tunnel_id, session->session_id); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: cookie mismatch (%u/%u). Discarding.\n", + tunnel->name, tunnel->tunnel_id, + session->session_id); u64_stats_update_begin(&sstats->syncp); sstats->rx_cookie_discards++; u64_stats_update_end(&sstats->syncp); @@ -626,9 +627,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, L2TP_SKB_CB(skb)->ns = ns; L2TP_SKB_CB(skb)->has_seq = 1; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: recv data ns=%u, nr=%u, session nr=%u\n", - session->name, ns, nr, session->nr); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: recv data ns=%u, nr=%u, session nr=%u\n", + session->name, ns, nr, session->nr); } } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { u32 l2h = ntohl(*(__be32 *) ptr); @@ -640,9 +641,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, L2TP_SKB_CB(skb)->ns = ns; L2TP_SKB_CB(skb)->has_seq = 1; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: recv data ns=%u, session nr=%u\n", - session->name, ns, session->nr); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: recv data ns=%u, session nr=%u\n", + session->name, ns, session->nr); } } @@ -655,9 +656,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * configure it so. */ if ((!session->lns_mode) && (!session->send_seq)) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, - "%s: requested to enable seq numbers by LNS\n", - session->name); + l2tp_info(session, L2TP_MSG_SEQ, + "%s: requested to enable seq numbers by LNS\n", + session->name); session->send_seq = -1; l2tp_session_set_header_len(session, tunnel->version); } @@ -666,9 +667,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * If user has configured mandatory sequence numbers, discard. */ if (session->recv_seq) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, - "%s: recv data has no seq numbers when required. " - "Discarding\n", session->name); + l2tp_warn(session, L2TP_MSG_SEQ, + "%s: recv data has no seq numbers when required. Discarding.\n", + session->name); u64_stats_update_begin(&sstats->syncp); sstats->rx_seq_discards++; u64_stats_update_end(&sstats->syncp); @@ -681,15 +682,15 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * LAC is broken. Discard the frame. 
*/ if ((!session->lns_mode) && (session->send_seq)) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, - "%s: requested to disable seq numbers by LNS\n", - session->name); + l2tp_info(session, L2TP_MSG_SEQ, + "%s: requested to disable seq numbers by LNS\n", + session->name); session->send_seq = 0; l2tp_session_set_header_len(session, tunnel->version); } else if (session->send_seq) { - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, - "%s: recv data has no seq numbers when required. " - "Discarding\n", session->name); + l2tp_warn(session, L2TP_MSG_SEQ, + "%s: recv data has no seq numbers when required. Discarding.\n", + session->name); u64_stats_update_begin(&sstats->syncp); sstats->rx_seq_discards++; u64_stats_update_end(&sstats->syncp); @@ -749,12 +750,11 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, u64_stats_update_begin(&sstats->syncp); sstats->rx_seq_discards++; u64_stats_update_end(&sstats->syncp); - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: oos pkt %u len %d discarded, " - "waiting for %u, reorder_q_len=%d\n", - session->name, L2TP_SKB_CB(skb)->ns, - L2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", + session->name, L2TP_SKB_CB(skb)->ns, + L2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); goto discard; } skb_queue_tail(&session->reorder_q, skb); @@ -800,7 +800,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, unsigned char *ptr, *optr; u16 hdrflags; u32 tunnel_id, session_id; - int offset; u16 version; int length; struct l2tp_stats *tstats; @@ -813,8 +812,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, /* Short packet? */ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: recv short packet (len=%d)\n", + tunnel->name, skb->len); goto error; } @@ -824,14 +824,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, if (!pskb_may_pull(skb, length)) goto error; - printk(KERN_DEBUG "%s: recv: ", tunnel->name); - - offset = 0; - do { - printk(" %02X", skb->data[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: recv\n", tunnel->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length); } /* Point to L2TP header */ @@ -843,9 +837,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, /* Check protocol version */ version = hdrflags & L2TP_HDR_VER_MASK; if (version != tunnel->version) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: recv protocol version mismatch: got %d expected %d\n", - tunnel->name, version, tunnel->version); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: recv protocol version mismatch: got %d expected %d\n", + tunnel->name, version, tunnel->version); goto error; } @@ -854,8 +848,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, /* If type is control packet, it is handled by userspace. 
*/ if (hdrflags & L2TP_HDRFLAG_T) { - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: recv control packet, len=%d\n", tunnel->name, length); + l2tp_dbg(tunnel, L2TP_MSG_DATA, + "%s: recv control packet, len=%d\n", + tunnel->name, length); goto error; } @@ -883,9 +878,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); if (!session || !session->recv_skb) { /* Not found? Pass to userspace to deal with */ - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, - "%s: no session found (%u/%u). Passing up.\n", - tunnel->name, tunnel_id, session_id); + l2tp_info(tunnel, L2TP_MSG_DATA, + "%s: no session found (%u/%u). Passing up.\n", + tunnel->name, tunnel_id, session_id); goto error; } @@ -925,8 +920,8 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (tunnel == NULL) goto pass_up; - PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: received %d bytes\n", tunnel->name, skb->len); + l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n", + tunnel->name, skb->len); if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) goto pass_up_put; @@ -968,8 +963,8 @@ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) *bufp++ = 0; session->ns++; session->ns &= 0xffff; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated ns to %u\n", session->name, session->ns); + l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n", + session->name, session->ns); } return bufp - optr; @@ -1005,8 +1000,9 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) l2h = 0x40000000 | session->ns; session->ns++; session->ns &= 0xffffff; - PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated ns to %u\n", session->name, session->ns); + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: updated ns to %u\n", + session->name, session->ns); } *((__be32 *) bufp) = htonl(l2h); @@ -1029,27 +1025,19 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Debug */ if (session->send_seq) - PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: send %Zd bytes, ns=%u\n", session->name, - data_len, session->ns - 1); + l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n", + session->name, data_len, session->ns - 1); else - PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, - "%s: send %Zd bytes\n", session->name, data_len); + l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n", + session->name, data_len); if (session->debug & L2TP_MSG_DATA) { - int i; int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? 
sizeof(struct udphdr) : 0; unsigned char *datap = skb->data + uhlen; - printk(KERN_DEBUG "%s: xmit:", session->name); - for (i = 0; i < (len - uhlen); i++) { - printk(" %02X", *datap++); - if (i == 31) { - printk(" ..."); - break; - } - } - printk("\n"); + pr_debug("%s: xmit\n", session->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, + datap, min_t(size_t, 32, len - uhlen)); } /* Queue the packet to IP for output */ @@ -1248,8 +1236,7 @@ static void l2tp_tunnel_destruct(struct sock *sk) if (tunnel == NULL) goto end; - PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: closing...\n", tunnel->name); + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); /* Close all sessions */ l2tp_tunnel_closeall(tunnel); @@ -1291,8 +1278,8 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) BUG_ON(tunnel == NULL); - PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: closing all sessions...\n", tunnel->name); + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n", + tunnel->name); write_lock_bh(&tunnel->hlist_lock); for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { @@ -1300,8 +1287,8 @@ again: hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { session = hlist_entry(walk, struct l2tp_session, hlist); - PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: closing session\n", session->name); + l2tp_info(session, L2TP_MSG_CONTROL, + "%s: closing session\n", session->name); hlist_del_init(&session->hlist); @@ -1354,8 +1341,7 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) BUG_ON(atomic_read(&tunnel->ref_count) != 0); BUG_ON(tunnel->sock != NULL); - PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, - "%s: free...\n", tunnel->name); + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); /* Remove from tunnel list */ spin_lock_bh(&pn->l2tp_tunnel_list_lock); @@ -1536,7 +1522,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 err = -EBADF; sock = sockfd_lookup(fd, &err); if (!sock) { - printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", + pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n", tunnel_id, fd, err); goto err; } @@ -1552,7 +1538,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 case L2TP_ENCAPTYPE_UDP: err = -EPROTONOSUPPORT; if (sk->sk_protocol != IPPROTO_UDP) { - printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", + pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); goto err; } @@ -1560,7 +1546,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 case L2TP_ENCAPTYPE_IP: err = -EPROTONOSUPPORT; if (sk->sk_protocol != IPPROTO_L2TP) { - printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", + pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); goto err; } @@ -1868,7 +1854,7 @@ static int __init l2tp_init(void) if (rc) goto out; - printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION); + pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION); out: return rc; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 90026341a1e5..a38ec6cdeee1 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -261,17 +261,36 @@ static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session) } #ifdef L2TP_REFCNT_DEBUG -#define l2tp_session_inc_refcount(_s) do { \ - printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d 
%s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ - l2tp_session_inc_refcount_1(_s); \ - } while (0) -#define l2tp_session_dec_refcount(_s) do { \ - printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ - l2tp_session_dec_refcount_1(_s); \ - } while (0) +#define l2tp_session_inc_refcount(_s) \ +do { \ + pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_s)->name, \ + atomic_read(&_s->ref_count)); \ + l2tp_session_inc_refcount_1(_s); \ +} while (0) +#define l2tp_session_dec_refcount(_s) \ +do { \ + pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", \ + __func__, __LINE__, (_s)->name, \ + atomic_read(&_s->ref_count)); \ + l2tp_session_dec_refcount_1(_s); \ +} while (0) #else #define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s) #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s) #endif +#define l2tp_printk(ptr, type, func, fmt, ...) \ +do { \ + if (((ptr)->debug) & (type)) \ + func(fmt, ##__VA_ARGS__); \ +} while (0) + +#define l2tp_warn(ptr, type, fmt, ...) \ + l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__) +#define l2tp_info(ptr, type, fmt, ...) \ + l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__) +#define l2tp_dbg(ptr, type, fmt, ...) \ + l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__) + #endif /* _L2TP_CORE_H_ */ diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index c0d57bad8b79..c3813bc84552 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -325,11 +327,11 @@ static int __init l2tp_debugfs_init(void) if (tunnels == NULL) rc = -EIO; - printk(KERN_INFO "L2TP debugfs support\n"); + pr_info("L2TP debugfs support\n"); out: if (rc) - printk(KERN_WARNING "l2tp debugfs: unable to init\n"); + pr_warn("unable to init\n"); return rc; } diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 63fe5f353f04..443591d629ca 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -115,21 +117,14 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, if (session->debug & L2TP_MSG_DATA) { unsigned int length; - int offset; u8 *ptr = skb->data; length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto error; - printk(KERN_DEBUG "%s: eth recv: ", session->name); - - offset = 0; - do { - printk(" %02X", ptr[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: eth recv\n", session->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) @@ -308,7 +303,7 @@ static int __init l2tp_eth_init(void) if (err) goto out_unreg; - printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); + pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); return 0; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index c89a32fb5d5e..889f5d13d7ba 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -120,7 +122,6 @@ static int l2tp_ip_recv(struct sk_buff *skb) struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int length; - int offset; /* Point to L2TP header */ optr = ptr = skb->data; @@ -155,14 +156,8 @@ static int l2tp_ip_recv(struct sk_buff *skb) if (!pskb_may_pull(skb, length)) goto discard; - printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); - - offset = 0; - do { - printk(" %02X", ptr[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: ip recv\n", tunnel->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); @@ -593,7 +588,7 @@ static int __init l2tp_ip_init(void) { int err; - printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n"); + pr_info("L2TP IP encapsulation support (L2TPv3)\n"); err = proto_register(&l2tp_ip_prot, 1); if (err != 0) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 88f0abe35443..0291d8d85f30 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -9,6 +9,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -133,7 +135,6 @@ static int l2tp_ip6_recv(struct sk_buff *skb) struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int length; - int offset; /* Point to L2TP header */ optr = ptr = skb->data; @@ -168,14 +169,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) if (!pskb_may_pull(skb, length)) goto discard; - printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); - - offset = 0; - do { - printk(" %02X", ptr[offset]); - } while (++offset < length); - - printk("\n"); + pr_debug("%s: ip recv\n", tunnel->name); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, @@ -752,7 +747,7 @@ static int __init l2tp_ip6_init(void) { int err; - printk(KERN_INFO "L2TP IP encapsulation support for IPv6 (L2TPv3)\n"); + pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n"); err = proto_register(&l2tp_ip6_prot, 1); if (err != 0) diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 24edad0fd9ba..8577264378fe 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -14,6 +14,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -902,7 +904,7 @@ static int l2tp_nl_init(void) { int err; - printk(KERN_INFO "L2TP netlink interface\n"); + pr_info("L2TP netlink interface\n"); err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, ARRAY_SIZE(l2tp_nl_ops)); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 9f2c421aa307..8ef6b9416cba 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -57,6 +57,8 @@ * http://openl2tp.sourceforge.net. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -106,12 +108,6 @@ /* Space for UDP, L2TP and PPP headers */ #define PPPOL2TP_HEADER_OVERHEAD 40 -#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ - do { \ - if ((_mask) & (_type)) \ - printk(_lvl "PPPOL2TP: " _fmt, ##args); \ - } while (0) - /* Number of bytes to build transmit L2TP headers. * Unfortunately the size is different depending on whether sequence numbers * are enabled. 
@@ -236,9 +232,9 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: recv %d byte data frame, passing to ppp\n", - session->name, data_len); + l2tp_dbg(session, PPPOL2TP_MSG_DATA, + "%s: recv %d byte data frame, passing to ppp\n", + session->name, data_len); /* We need to forget all info related to the L2TP packet * gathered in the skb as we are going to reuse the same @@ -259,8 +255,8 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: socket not bound\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", + session->name); /* Not bound. Nothing we can do, so discard. */ session->stats.rx_errors++; @@ -270,8 +266,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int return; no_sock: - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: no socket\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); } @@ -827,8 +822,8 @@ out_no_ppp: /* This is how we get the session context from the socket. */ sk->sk_user_data = session; sk->sk_state = PPPOX_CONNECTED; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: created\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", + session->name); end: release_sock(sk); @@ -881,8 +876,8 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: created\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n", + session->name); error = 0; @@ -1058,9 +1053,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, struct l2tp_tunnel *tunnel = session->tunnel; struct pppol2tp_ioc_stats stats; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, - "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", - session->name, cmd, arg); + l2tp_dbg(session, PPPOL2TP_MSG_CONTROL, + "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", + session->name, cmd, arg); sk = ps->sock; sock_hold(sk); @@ -1078,8 +1073,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get mtu=%d\n", session->name, session->mtu); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n", + session->name, session->mtu); err = 0; break; @@ -1094,8 +1089,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, session->mtu = ifr.ifr_mtu; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set mtu=%d\n", session->name, session->mtu); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n", + session->name, session->mtu); err = 0; break; @@ -1108,8 +1103,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (put_user(session->mru, (int __user *) arg)) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get mru=%d\n", session->name, session->mru); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n", + session->name, session->mru); err = 0; break; @@ -1123,8 +1118,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session 
*session, break; session->mru = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set mru=%d\n", session->name, session->mru); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n", + session->name, session->mru); err = 0; break; @@ -1133,8 +1128,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (put_user(ps->flags, (int __user *) arg)) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get flags=%d\n", session->name, ps->flags); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n", + session->name, ps->flags); err = 0; break; @@ -1143,8 +1138,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (get_user(val, (int __user *) arg)) break; ps->flags = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set flags=%d\n", session->name, ps->flags); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n", + session->name, ps->flags); err = 0; break; @@ -1160,8 +1155,8 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get L2TP stats\n", session->name); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", + session->name); err = 0; break; @@ -1188,9 +1183,9 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, struct sock *sk; struct pppol2tp_ioc_stats stats; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, - "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", - tunnel->name, cmd, arg); + l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL, + "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", + tunnel->name, cmd, arg); sk = tunnel->sock; sock_hold(sk); @@ -1224,8 +1219,8 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, err = -EFAULT; break; } - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get L2TP stats\n", tunnel->name); + l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n", + tunnel->name); err = 0; break; @@ -1314,8 +1309,8 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_DEBUG: tunnel->debug = val; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set debug=%x\n", tunnel->name, tunnel->debug); + l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", + tunnel->name, tunnel->debug); break; default: @@ -1342,8 +1337,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; } session->recv_seq = val ? -1 : 0; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set recv_seq=%d\n", session->name, session->recv_seq); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set recv_seq=%d\n", + session->name, session->recv_seq); break; case PPPOL2TP_SO_SENDSEQ: @@ -1358,8 +1354,9 @@ static int pppol2tp_session_setsockopt(struct sock *sk, po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; } - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set send_seq=%d\n", session->name, session->send_seq); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set send_seq=%d\n", + session->name, session->send_seq); break; case PPPOL2TP_SO_LNSMODE: @@ -1368,20 +1365,22 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; } session->lns_mode = val ? 
-1 : 0; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set lns_mode=%d\n", session->name, session->lns_mode); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set lns_mode=%d\n", + session->name, session->lns_mode); break; case PPPOL2TP_SO_DEBUG: session->debug = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set debug=%x\n", session->name, session->debug); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n", + session->name, session->debug); break; case PPPOL2TP_SO_REORDERTO: session->reorder_timeout = msecs_to_jiffies(val); - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: set reorder_timeout=%d\n", + session->name, session->reorder_timeout); break; default: @@ -1460,8 +1459,8 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_DEBUG: *val = tunnel->debug; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get debug=%x\n", tunnel->name, tunnel->debug); + l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n", + tunnel->name, tunnel->debug); break; default: @@ -1483,32 +1482,32 @@ static int pppol2tp_session_getsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_RECVSEQ: *val = session->recv_seq; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get recv_seq=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get recv_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_SENDSEQ: *val = session->send_seq; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get send_seq=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get send_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_LNSMODE: *val = session->lns_mode; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get lns_mode=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get lns_mode=%d\n", session->name, *val); break; case PPPOL2TP_SO_DEBUG: *val = session->debug; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get debug=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n", + session->name, *val); break; case PPPOL2TP_SO_REORDERTO: *val = (int) jiffies_to_msecs(session->reorder_timeout); - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get reorder_timeout=%d\n", session->name, *val); + l2tp_info(session, PPPOL2TP_MSG_CONTROL, + "%s: get reorder_timeout=%d\n", session->name, *val); break; default: @@ -1871,8 +1870,7 @@ static int __init pppol2tp_init(void) goto out_unregister_pppox; #endif - printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", - PPPOL2TP_DRV_VERSION); + pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION); out: return err; -- cgit v1.2.2 From dc6b9b78234fecdc6d2ca5e1629185718202bcf5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 May 2012 22:48:15 +0000 Subject: net: include/net/sock.h cleanup bool/const conversions where possible __inline__ -> inline space cleanups Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/dccp/proto.c | 4 ++-- net/ipv4/tcp.c | 12 ++++++------ net/llc/af_llc.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 7065c0ae1e7b..6c7c78b83940 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, default: dccp_pr_debug("packet_type=%s\n", dccp_packet_name(dh->dccph_type)); - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); } verify_sock_status: if (sock_flag(sk, SOCK_DONE)) { @@ -905,7 +905,7 @@ verify_sock_status: len = skb->len; found_fin_ok: if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); break; } while (1); out: diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index feb2e25091b1..e8a80d0b5b3c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1473,11 +1473,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, break; } if (tcp_hdr(skb)->fin) { - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); ++seq; break; } - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); if (!desc->count) break; tp->copied_seq = seq; @@ -1513,7 +1513,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int target; /* Read at least this many bytes */ long timeo; struct task_struct *user_recv = NULL; - int copied_early = 0; + bool copied_early = false; struct sk_buff *skb; u32 urg_hole = 0; @@ -1801,7 +1801,7 @@ do_prequeue: dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); if ((offset + used) == skb->len) - copied_early = 1; + copied_early = true; } else #endif @@ -1835,7 +1835,7 @@ skip_copy: goto found_fin_ok; if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, copied_early); - copied_early = 0; + copied_early = false; } continue; @@ -1844,7 +1844,7 @@ skip_copy: ++*seq; if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, copied_early); - copied_early = 0; + copied_early = false; } break; } while (len > 0); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index e9440753e16b..fe5453c3e719 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -838,7 +838,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } @@ -861,7 +861,7 @@ copy_uaddr: if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); - sk_eat_skb(sk, skb, 0); + sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } -- cgit v1.2.2 From f342cda7789381236a6c94926a9ca177a9098867 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 16 May 2012 17:50:41 +0000 Subject: pktgen: Use pr_debug Convert printk(KERN_DEBUG to pr_debug which can enable dynamic debugging. Remove embedded prefixes from the conversions as pr_fmt adds them. Align arguments. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/core/pktgen.c | 41 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 70236db0fb4f..30eb768cd677 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -891,8 +891,8 @@ static ssize_t pktgen_if_write(struct file *file, if (copy_from_user(tb, user_buffer, copy)) return -EFAULT; tb[copy] = 0; - printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name, - (unsigned long)count, tb); + pr_debug("%s,%lu buffer -:%s:-\n", + name, (unsigned long)count, tb); } if (!strcmp(name, "min_pkt_size")) { @@ -1261,8 +1261,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_daddr = pkt_dev->daddr_min; } if (debug) - printk(KERN_DEBUG "pktgen: dst_min set to: %s\n", - pkt_dev->dst_min); + pr_debug("dst_min set to: %s\n", pkt_dev->dst_min); i += len; sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); return count; @@ -1284,8 +1283,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_daddr = pkt_dev->daddr_max; } if (debug) - printk(KERN_DEBUG "pktgen: dst_max set to: %s\n", - pkt_dev->dst_max); + pr_debug("dst_max set to: %s\n", pkt_dev->dst_max); i += len; sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); return count; @@ -1307,7 +1305,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; if (debug) - printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf); + pr_debug("dst6 set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6=%s", buf); @@ -1329,7 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; if (debug) - printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf); + pr_debug("dst6_min set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6_min=%s", buf); @@ -1350,7 +1348,7 @@ static ssize_t pktgen_if_write(struct file *file, snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr); if (debug) - printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf); + pr_debug("dst6_max set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6_max=%s", buf); @@ -1373,7 +1371,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; if (debug) - printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf); + pr_debug("src6 set to: %s\n", buf); i += len; sprintf(pg_result, "OK: src6=%s", buf); @@ -1394,8 +1392,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_saddr = pkt_dev->saddr_min; } if (debug) - printk(KERN_DEBUG "pktgen: src_min set to: %s\n", - pkt_dev->src_min); + pr_debug("src_min set to: %s\n", pkt_dev->src_min); i += len; sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); return count; @@ -1415,8 +1412,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->cur_saddr = pkt_dev->saddr_max; } if (debug) - printk(KERN_DEBUG "pktgen: src_max set to: %s\n", - pkt_dev->src_max); + pr_debug("src_max set to: %s\n", pkt_dev->src_max); i += len; sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); return count; @@ -1527,7 +1523,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = 0xffff; if (debug) - printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n"); + pr_debug("VLAN/SVLAN auto turned off\n"); } return count; } @@ -1542,10 +1538,10 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->vlan_id = value; /* turn on VLAN */ if (debug) - printk(KERN_DEBUG "pktgen: VLAN turned on\n"); + pr_debug("VLAN turned on\n"); if (debug && 
pkt_dev->nr_labels) - printk(KERN_DEBUG "pktgen: MPLS auto turned off\n"); + pr_debug("MPLS auto turned off\n"); pkt_dev->nr_labels = 0; /* turn off MPLS */ sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id); @@ -1554,7 +1550,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = 0xffff; if (debug) - printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n"); + pr_debug("VLAN/SVLAN turned off\n"); } return count; } @@ -1599,10 +1595,10 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = value; /* turn on SVLAN */ if (debug) - printk(KERN_DEBUG "pktgen: SVLAN turned on\n"); + pr_debug("SVLAN turned on\n"); if (debug && pkt_dev->nr_labels) - printk(KERN_DEBUG "pktgen: MPLS auto turned off\n"); + pr_debug("MPLS auto turned off\n"); pkt_dev->nr_labels = 0; /* turn off MPLS */ sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id); @@ -1611,7 +1607,7 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->svlan_id = 0xffff; if (debug) - printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n"); + pr_debug("VLAN/SVLAN turned off\n"); } return count; } @@ -1779,8 +1775,7 @@ static ssize_t pktgen_thread_write(struct file *file, i += len; if (debug) - printk(KERN_DEBUG "pktgen: t=%s, count=%lu\n", - name, (unsigned long)count); + pr_debug("t=%s, count=%lu\n", name, (unsigned long)count); if (!t) { pr_err("ERROR: No thread\n"); -- cgit v1.2.2 From 675418d5187785d3d996ca15fd700f5e02901cbc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 16 May 2012 19:28:38 +0000 Subject: net: ipv6: ndisc: Neaten ND_PRINTx macros Why use several macros when one will do? Convert the multiple ND_PRINTKx macros to a single ND_PRINTK macro. Use the new net__ratelimited mechanism too. Add pr_fmt with "ICMPv6: " as prefix. Remove embedded ICMPv6 prefixes from messages. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/ipv6/ndisc.c | 216 +++++++++++++++++++++++-------------------------------- 1 file changed, 91 insertions(+), 125 deletions(-) (limited to 'net') diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index cbb863d66481..c7a27ac906df 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -27,27 +27,7 @@ * YOSHIFUJI Hideaki @USAGI : Verify ND options properly */ -/* Set to 3 to get tracing... */ -#define ND_DEBUG 1 - -#define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0) -#define ND_NOPRINTK(x...) do { ; } while(0) -#define ND_PRINTK0 ND_PRINTK -#define ND_PRINTK1 ND_NOPRINTK -#define ND_PRINTK2 ND_NOPRINTK -#define ND_PRINTK3 ND_NOPRINTK -#if ND_DEBUG >= 1 -#undef ND_PRINTK1 -#define ND_PRINTK1 ND_PRINTK -#endif -#if ND_DEBUG >= 2 -#undef ND_PRINTK2 -#define ND_PRINTK2 ND_PRINTK -#endif -#if ND_DEBUG >= 3 -#undef ND_PRINTK3 -#define ND_PRINTK3 ND_PRINTK -#endif +#define pr_fmt(fmt) "ICMPv6: " fmt #include #include @@ -92,6 +72,15 @@ #include #include +/* Set to 3 to get tracing... */ +#define ND_DEBUG 1 + +#define ND_PRINTK(val, level, fmt, ...) 
\ +do { \ + if (val <= ND_DEBUG) \ + net_##level##_ratelimited(fmt, ##__VA_ARGS__); \ +} while (0) + static u32 ndisc_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd); @@ -265,10 +254,9 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, case ND_OPT_MTU: case ND_OPT_REDIRECT_HDR: if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { - ND_PRINTK2(KERN_WARNING - "%s: duplicated ND6 option found: type=%d\n", - __func__, - nd_opt->nd_opt_type); + ND_PRINTK(2, warn, + "%s: duplicated ND6 option found: type=%d\n", + __func__, nd_opt->nd_opt_type); } else { ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; } @@ -296,10 +284,11 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, * to accommodate future extension to the * protocol. */ - ND_PRINTK2(KERN_NOTICE - "%s: ignored unsupported option; type=%d, len=%d\n", - __func__, - nd_opt->nd_opt_type, nd_opt->nd_opt_len); + ND_PRINTK(2, notice, + "%s: ignored unsupported option; type=%d, len=%d\n", + __func__, + nd_opt->nd_opt_type, + nd_opt->nd_opt_len); } } opt_len -= l; @@ -455,9 +444,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, len + hlen + tlen), 1, &err); if (!skb) { - ND_PRINTK0(KERN_ERR - "ICMPv6 ND: %s failed to allocate an skb, err=%d.\n", - __func__, err); + ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n", + __func__, err); return NULL; } @@ -693,8 +681,9 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) if ((probes -= neigh->parms->ucast_probes) < 0) { if (!(neigh->nud_state & NUD_VALID)) { - ND_PRINTK1(KERN_DEBUG "%s: trying to ucast probe in NUD_INVALID: %pI6\n", - __func__, target); + ND_PRINTK(1, dbg, + "%s: trying to ucast probe in NUD_INVALID: %pI6\n", + __func__, target); } ndisc_send_ns(dev, neigh, target, target, saddr); } else if ((probes -= neigh->parms->app_probes) < 0) { @@ -740,8 +729,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) int is_router = -1; if (ipv6_addr_is_multicast(&msg->target)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: multicast target address"); + ND_PRINTK(2, warn, "NS: multicast target address\n"); return; } @@ -754,22 +742,20 @@ static void ndisc_recv_ns(struct sk_buff *skb) daddr->s6_addr32[1] == htonl(0x00000000) && daddr->s6_addr32[2] == htonl(0x00000001) && daddr->s6_addr [12] == 0xff )) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: bad DAD packet (wrong destination)\n"); + ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n"); return; } if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: invalid ND options\n"); + ND_PRINTK(2, warn, "NS: invalid ND options\n"); return; } if (ndopts.nd_opts_src_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "NS: invalid link-layer address length\n"); return; } @@ -779,8 +765,8 @@ static void ndisc_recv_ns(struct sk_buff *skb) * in the message. 
*/ if (dad) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: bad DAD packet (link-layer address option)\n"); + ND_PRINTK(2, warn, + "NS: bad DAD packet (link-layer address option)\n"); return; } } @@ -898,34 +884,30 @@ static void ndisc_recv_na(struct sk_buff *skb) struct neighbour *neigh; if (skb->len < sizeof(struct nd_msg)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: packet too short\n"); + ND_PRINTK(2, warn, "NA: packet too short\n"); return; } if (ipv6_addr_is_multicast(&msg->target)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: target address is multicast.\n"); + ND_PRINTK(2, warn, "NA: target address is multicast\n"); return; } if (ipv6_addr_is_multicast(daddr) && msg->icmph.icmp6_solicited) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: solicited NA is multicasted.\n"); + ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n"); return; } if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NS: invalid ND option\n"); + ND_PRINTK(2, warn, "NS: invalid ND option\n"); return; } if (ndopts.nd_opts_tgt_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NA: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "NA: invalid link-layer address length\n"); return; } } @@ -946,9 +928,9 @@ static void ndisc_recv_na(struct sk_buff *skb) unsolicited advertisement. */ if (skb->pkt_type != PACKET_LOOPBACK) - ND_PRINTK1(KERN_WARNING - "ICMPv6 NA: someone advertises our address %pI6 on %s!\n", - &ifp->addr, ifp->idev->dev->name); + ND_PRINTK(1, warn, + "NA: someone advertises our address %pI6 on %s!\n", + &ifp->addr, ifp->idev->dev->name); in6_ifa_put(ifp); return; } @@ -1010,8 +992,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) idev = __in6_dev_get(skb->dev); if (!idev) { - if (net_ratelimit()) - ND_PRINTK1("ICMP6 RS: can't find in6 device\n"); + ND_PRINTK(1, err, "RS: can't find in6 device\n"); return; } @@ -1028,8 +1009,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) /* Parse ND options */ if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) { - if (net_ratelimit()) - ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n"); + ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n"); goto out; } @@ -1127,20 +1107,17 @@ static void ndisc_router_discovery(struct sk_buff *skb) optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: source address is not link-local.\n"); + ND_PRINTK(2, warn, "RA: source address is not link-local\n"); return; } if (optlen < 0) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: packet too short\n"); + ND_PRINTK(2, warn, "RA: packet too short\n"); return; } #ifdef CONFIG_IPV6_NDISC_NODETYPE if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: from host or unauthorized router\n"); + ND_PRINTK(2, warn, "RA: from host or unauthorized router\n"); return; } #endif @@ -1151,15 +1128,13 @@ static void ndisc_router_discovery(struct sk_buff *skb) in6_dev = __in6_dev_get(skb->dev); if (in6_dev == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: can't find inet6 device for %s.\n", - skb->dev->name); + ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n", + skb->dev->name); return; } if (!ndisc_parse_options(opt, optlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - "ICMP6 RA: invalid ND options\n"); + ND_PRINTK(2, warn, "RA: invalid ND options\n"); return; } @@ -1212,9 +1187,9 @@ static void ndisc_router_discovery(struct 
sk_buff *skb) if (rt) { neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (!neigh) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s got default router without neighbour.\n", - __func__); + ND_PRINTK(0, err, + "RA: %s got default router without neighbour\n", + __func__); dst_release(&rt->dst); return; } @@ -1225,22 +1200,21 @@ static void ndisc_router_discovery(struct sk_buff *skb) } if (rt == NULL && lifetime) { - ND_PRINTK3(KERN_DEBUG - "ICMPv6 RA: adding default router.\n"); + ND_PRINTK(3, dbg, "RA: adding default router\n"); rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); if (rt == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s failed to add default route.\n", - __func__); + ND_PRINTK(0, err, + "RA: %s failed to add default route\n", + __func__); return; } neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr); if (neigh == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 RA: %s got default router without neighbour.\n", - __func__); + ND_PRINTK(0, err, + "RA: %s got default router without neighbour\n", + __func__); dst_release(&rt->dst); return; } @@ -1308,8 +1282,8 @@ skip_linkparms: lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, skb->dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "RA: invalid link-layer address length\n"); goto out; } } @@ -1373,9 +1347,7 @@ skip_routeinfo: mtu = ntohl(n); if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: invalid mtu: %d\n", - mtu); + ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu); } else if (in6_dev->cnf.mtu6 != mtu) { in6_dev->cnf.mtu6 = mtu; @@ -1396,8 +1368,7 @@ skip_routeinfo: } if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 RA: invalid RA options"); + ND_PRINTK(2, warn, "RA: invalid RA options\n"); } out: if (rt) @@ -1422,15 +1393,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) switch (skb->ndisc_nodetype) { case NDISC_NODETYPE_HOST: case NDISC_NODETYPE_NODEFAULT: - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: from host or unauthorized router\n"); + ND_PRINTK(2, warn, + "Redirect: from host or unauthorized router\n"); return; } #endif if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: source address is not link-local.\n"); + ND_PRINTK(2, warn, + "Redirect: source address is not link-local\n"); return; } @@ -1438,8 +1409,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); if (optlen < 0) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: packet too short\n"); + ND_PRINTK(2, warn, "Redirect: packet too short\n"); return; } @@ -1448,8 +1418,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) dest = target + 1; if (ipv6_addr_is_multicast(dest)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: destination address is multicast.\n"); + ND_PRINTK(2, warn, + "Redirect: destination address is multicast\n"); return; } @@ -1457,8 +1427,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) on_link = 1; } else if (ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: target address is not link-local unicast.\n"); + ND_PRINTK(2, warn, + "Redirect: target address is not link-local unicast\n"); return; } @@ -1474,16 +1444,15 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) */ if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { - ND_PRINTK2(KERN_WARNING - 
"ICMPv6 Redirect: invalid ND options\n"); + ND_PRINTK(2, warn, "Redirect: invalid ND options\n"); return; } if (ndopts.nd_opts_tgt_lladdr) { lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, skb->dev); if (!lladdr) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: invalid link-layer address length\n"); + ND_PRINTK(2, warn, + "Redirect: invalid link-layer address length\n"); return; } } @@ -1518,16 +1487,15 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: no link-local address on %s\n", - dev->name); + ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n", + dev->name); return; } if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: target address is not link-local unicast.\n"); + ND_PRINTK(2, warn, + "Redirect: target address is not link-local unicast\n"); return; } @@ -1546,8 +1514,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: destination is not a neighbour.\n"); + ND_PRINTK(2, warn, + "Redirect: destination is not a neighbour\n"); goto release; } if (!rt->rt6i_peer) @@ -1558,8 +1526,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) if (dev->addr_len) { struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target); if (!neigh) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 Redirect: no neigh for target address\n"); + ND_PRINTK(2, warn, + "Redirect: no neigh for target address\n"); goto release; } @@ -1587,9 +1555,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) len + hlen + tlen), 1, &err); if (buff == NULL) { - ND_PRINTK0(KERN_ERR - "ICMPv6 Redirect: %s failed to allocate an skb, err=%d.\n", - __func__, err); + ND_PRINTK(0, err, + "Redirect: %s failed to allocate an skb, err=%d\n", + __func__, err); goto release; } @@ -1674,16 +1642,14 @@ int ndisc_rcv(struct sk_buff *skb) __skb_push(skb, skb->data - skb_transport_header(skb)); if (ipv6_hdr(skb)->hop_limit != 255) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NDISC: invalid hop-limit: %d\n", - ipv6_hdr(skb)->hop_limit); + ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n", + ipv6_hdr(skb)->hop_limit); return 0; } if (msg->icmph.icmp6_code != 0) { - ND_PRINTK2(KERN_WARNING - "ICMPv6 NDISC: invalid ICMPv6 code: %d\n", - msg->icmph.icmp6_code); + ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n", + msg->icmph.icmp6_code); return 0; } @@ -1804,9 +1770,9 @@ static int __net_init ndisc_net_init(struct net *net) err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { - ND_PRINTK0(KERN_ERR - "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n", - err); + ND_PRINTK(0, err, + "NDISC: Failed to initialize the control socket (err %d)\n", + err); return err; } -- cgit v1.2.2 From e005d193d55ee5f757b13306112d8c23aac27a88 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 16 May 2012 19:58:40 +0000 Subject: net: core: Use pr_ Use the current logging style. This enables use of dynamic debugging as well. Convert printk(KERN_ to pr_. Add pr_fmt. Remove embedded prefixes, use %s, __func__ instead. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- net/core/drop_monitor.c | 10 ++++++---- net/core/neighbour.c | 13 +++++++------ net/core/net_namespace.c | 6 ++++-- net/core/netprio_cgroup.c | 6 ++++-- net/core/skbuff.c | 20 ++++++++++---------- net/core/sock.c | 25 +++++++++++++------------ 6 files changed, 44 insertions(+), 36 deletions(-) (limited to 'net') diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index a7cad741df01..eca00a96bcf3 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -4,6 +4,8 @@ * Copyright (C) 2009 Neil Horman */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -381,10 +383,10 @@ static int __init init_net_drop_monitor(void) struct per_cpu_dm_data *data; int cpu, rc; - printk(KERN_INFO "Initializing network drop monitor service\n"); + pr_info("Initializing network drop monitor service\n"); if (sizeof(void *) > 8) { - printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n"); + pr_err("Unable to store program counters on this arch, Drop monitor failed\n"); return -ENOSPC; } @@ -392,13 +394,13 @@ static int __init init_net_drop_monitor(void) dropmon_ops, ARRAY_SIZE(dropmon_ops)); if (rc) { - printk(KERN_ERR "Could not create drop monitor netlink family\n"); + pr_err("Could not create drop monitor netlink family\n"); return rc; } rc = register_netdevice_notifier(&dropmon_net_notifier); if (rc < 0) { - printk(KERN_CRIT "Failed to register netdevice notifier\n"); + pr_crit("Failed to register netdevice notifier\n"); goto out_unreg; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index fadaa819b854..eb09f8bbbf07 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -15,6 +15,8 @@ * Harald Welte Add neighbour cache statistics like rtstat */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -712,14 +714,13 @@ void neigh_destroy(struct neighbour *neigh) NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); if (!neigh->dead) { - printk(KERN_WARNING - "Destroying alive neighbour %p\n", neigh); + pr_warn("Destroying alive neighbour %p\n", neigh); dump_stack(); return; } if (neigh_del_timer(neigh)) - printk(KERN_WARNING "Impossible event.\n"); + pr_warn("Impossible event\n"); skb_queue_purge(&neigh->arp_queue); neigh->arp_queue_len_bytes = 0; @@ -1554,8 +1555,8 @@ void neigh_table_init(struct neigh_table *tbl) write_unlock(&neigh_tbl_lock); if (unlikely(tmp)) { - printk(KERN_ERR "NEIGH: Registering multiple tables for " - "family %d\n", tbl->family); + pr_err("Registering multiple tables for family %d\n", + tbl->family); dump_stack(); } } @@ -1571,7 +1572,7 @@ int neigh_table_clear(struct neigh_table *tbl) pneigh_queue_purge(&tbl->proxy_queue); neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) - printk(KERN_CRIT "neighbour leakage\n"); + pr_crit("neighbour leakage\n"); write_lock(&neigh_tbl_lock); for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { if (*tp == tbl) { diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 31a5ae51a45c..dddbacb8f28c 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -212,8 +214,8 @@ static void net_free(struct net *net) { #ifdef NETNS_REFCNT_DEBUG if (unlikely(atomic_read(&net->use_count) != 0)) { - printk(KERN_EMERG "network namespace not free! Usage: %d\n", - atomic_read(&net->use_count)); + pr_emerg("network namespace not free! 
Usage: %d\n", + atomic_read(&net->use_count)); return; } #endif diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index ba6900f73900..09eda68b6763 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -9,6 +9,8 @@ * Authors: Neil Horman */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -88,7 +90,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len) old_priomap = rtnl_dereference(dev->priomap); if (!new_priomap) { - printk(KERN_WARNING "Unable to alloc new priomap!\n"); + pr_warn("Unable to alloc new priomap!\n"); return; } @@ -136,7 +138,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) ret = get_prioidx(&cs->prioidx); if (ret != 0) { - printk(KERN_WARNING "No space in priority index array\n"); + pr_warn("No space in priority index array\n"); kfree(cs); return ERR_PTR(ret); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2a1871942317..7a10f0894152 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -36,6 +36,8 @@ * The functions in this file will not compile correctly with gcc 2.4.x */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -118,11 +120,10 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = { */ static void skb_over_panic(struct sk_buff *skb, int sz, void *here) { - printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%#lx end:%#lx dev:%s\n", - here, skb->len, sz, skb->head, skb->data, - (unsigned long)skb->tail, (unsigned long)skb->end, - skb->dev ? skb->dev->name : ""); + pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", + __func__, here, skb->len, sz, skb->head, skb->data, + (unsigned long)skb->tail, (unsigned long)skb->end, + skb->dev ? skb->dev->name : ""); BUG(); } @@ -137,11 +138,10 @@ static void skb_over_panic(struct sk_buff *skb, int sz, void *here) static void skb_under_panic(struct sk_buff *skb, int sz, void *here) { - printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%#lx end:%#lx dev:%s\n", - here, skb->len, sz, skb->head, skb->data, - (unsigned long)skb->tail, (unsigned long)skb->end, - skb->dev ? skb->dev->name : ""); + pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", + __func__, here, skb->len, sz, skb->head, skb->data, + (unsigned long)skb->tail, (unsigned long)skb->end, + skb->dev ? skb->dev->name : ""); BUG(); } diff --git a/net/core/sock.c b/net/core/sock.c index 9d144ee7e379..5efcd6307fa7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -89,6 +89,8 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -297,9 +299,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) *timeo_p = 0; if (warned < 10 && net_ratelimit()) { warned++; - printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) " - "tries to set negative timeout\n", - current->comm, task_pid_nr(current)); + pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", + __func__, current->comm, task_pid_nr(current)); } return 0; } @@ -317,8 +318,8 @@ static void sock_warn_obsolete_bsdism(const char *name) static char warncomm[TASK_COMM_LEN]; if (strcmp(warncomm, current->comm) && warned < 5) { strcpy(warncomm, current->comm); - printk(KERN_WARNING "process `%s' is using obsolete " - "%s SO_BSDCOMPAT\n", warncomm, name); + pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n", + warncomm, name); warned++; } } @@ -1238,8 +1239,8 @@ static void __sk_free(struct sock *sk) sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); if (atomic_read(&sk->sk_omem_alloc)) - printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", - __func__, atomic_read(&sk->sk_omem_alloc)); + pr_debug("%s: optmem leakage (%d bytes) detected\n", + __func__, atomic_read(&sk->sk_omem_alloc)); if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); @@ -2424,7 +2425,7 @@ static void assign_proto_idx(struct proto *prot) prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { - printk(KERN_ERR "PROTO_INUSE_NR exhausted\n"); + pr_err("PROTO_INUSE_NR exhausted\n"); return; } @@ -2454,8 +2455,8 @@ int proto_register(struct proto *prot, int alloc_slab) NULL); if (prot->slab == NULL) { - printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", - prot->name); + pr_crit("%s: Can't create sock SLAB cache!\n", + prot->name); goto out; } @@ -2469,8 +2470,8 @@ int proto_register(struct proto *prot, int alloc_slab) SLAB_HWCACHE_ALIGN, NULL); if (prot->rsk_prot->slab == NULL) { - printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", - prot->name); + pr_crit("%s: Can't create request sock SLAB cache!\n", + prot->name); goto out_free_request_sock_slab_name; } } -- cgit v1.2.2 From a2a385d627e1549da4b43a8b3dfe370589766e1c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 May 2012 23:15:34 +0000 Subject: tcp: bool conversions bool conversions where possible. __inline__ -> inline space cleanups Signed-off-by: Eric Dumazet Signed-off-by: David S. 
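A minimal sketch of the kind of conversion applied throughout the TCP code below; the predicate itself is invented for illustration and is not one of the functions touched by the patch.

    #include <linux/types.h>        /* bool, true, false */

    /* Before:
     *   static __inline__ int window_open(int space)
     *   {
     *           return (space > 0) ? 1 : 0;
     *   }
     * After: the return type documents that this is a predicate, and
     * __inline__ becomes the standard spelling.
     */
    static inline bool window_open(int space)
    {
            return space > 0;
    }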
Miller --- net/ipv4/tcp.c | 20 ++--- net/ipv4/tcp_cong.c | 6 +- net/ipv4/tcp_hybla.c | 10 +-- net/ipv4/tcp_input.c | 214 ++++++++++++++++++++++++----------------------- net/ipv4/tcp_ipv4.c | 26 +++--- net/ipv4/tcp_minisocks.c | 24 +++--- net/ipv4/tcp_output.c | 75 +++++++++-------- net/ipv6/tcp_ipv6.c | 4 +- 8 files changed, 191 insertions(+), 188 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e8a80d0b5b3c..63ddaee7209f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) tp->pushed_seq = tp->write_seq; } -static inline int forced_push(const struct tcp_sock *tp) +static inline bool forced_push(const struct tcp_sock *tp) { return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } @@ -1082,7 +1082,7 @@ new_segment: if (err) goto do_fault; } else { - int merge = 0; + bool merge = false; int i = skb_shinfo(skb)->nr_frags; struct page *page = sk->sk_sndmsg_page; int off; @@ -1096,7 +1096,7 @@ new_segment: off != PAGE_SIZE) { /* We can extend the last page * fragment. */ - merge = 1; + merge = true; } else if (i == MAX_SKB_FRAGS || !sg) { /* Need to add new fragment and cannot * do this because interface is non-SG, @@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) void tcp_cleanup_rbuf(struct sock *sk, int copied) { struct tcp_sock *tp = tcp_sk(sk); - int time_to_ack = 0; + bool time_to_ack = false; struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); @@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && !icsk->icsk_ack.pingpong)) && !atomic_read(&sk->sk_rmem_alloc))) - time_to_ack = 1; + time_to_ack = true; } /* We send an ACK if we can now advertise a non-zero window @@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) * "Lots" means "at least twice" here. 
*/ if (new_window && new_window >= 2 * rcv_window_now) - time_to_ack = 1; + time_to_ack = true; } } if (time_to_ack) @@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close); /* These states need RST on ABORT according to RFC793 */ -static inline int tcp_need_reset(int state) +static inline bool tcp_need_reset(int state) { return (1 << state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | @@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags) } EXPORT_SYMBOL(tcp_disconnect); -static inline int tcp_can_repair_sock(struct sock *sk) +static inline bool tcp_can_repair_sock(const struct sock *sk) { return capable(CAP_NET_ADMIN) && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); @@ -3172,13 +3172,13 @@ out_free: struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) { struct tcp_md5sig_pool __percpu *pool; - int alloc = 0; + bool alloc = false; retry: spin_lock_bh(&tcp_md5sig_pool_lock); pool = tcp_md5sig_pool; if (tcp_md5sig_users++ == 0) { - alloc = 1; + alloc = true; spin_unlock_bh(&tcp_md5sig_pool_lock); } else if (!pool) { tcp_md5sig_users--; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 272a84593c85..04dbd7ae7c62 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) /* RFC2861 Check whether we are limited by application or congestion window * This is the inverse of cwnd check in tcp_tso_should_defer */ -int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) +bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) { const struct tcp_sock *tp = tcp_sk(sk); u32 left; if (in_flight >= tp->snd_cwnd) - return 1; + return true; left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && left * tp->mss_cache < sk->sk_gso_max_size) - return 1; + return true; return left <= tcp_max_tso_deferred_mss(tp); } EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index fe3ecf484b44..57bdd17dff4d 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -15,7 +15,7 @@ /* Tcp Hybla structure. 
*/ struct hybla { - u8 hybla_en; + bool hybla_en; u32 snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */ u32 rho; /* Rho parameter, integer part */ u32 rho2; /* Rho * Rho, integer part */ @@ -24,8 +24,7 @@ struct hybla { u32 minrtt; /* Minimum smoothed round trip time value seen */ }; -/* Hybla reference round trip time (default= 1/40 sec = 25 ms), - expressed in jiffies */ +/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */ static int rtt0 = 25; module_param(rtt0, int, 0644); MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); @@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk) ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8); ca->rho = ca->rho_3ls >> 3; ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1; - ca->rho2 = ca->rho2_7ls >>7; + ca->rho2 = ca->rho2_7ls >> 7; } static void hybla_init(struct sock *sk) @@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk) ca->rho_3ls = 0; ca->rho2_7ls = 0; ca->snd_cwnd_cents = 0; - ca->hybla_en = 1; + ca->hybla_en = true; tp->snd_cwnd = 2; tp->snd_cwnd_clamp = 65535; @@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk) static void hybla_state(struct sock *sk, u8 ca_state) { struct hybla *ca = inet_csk_ca(sk); + ca->hybla_en = (ca_state == TCP_CA_Open); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eb97787be757..b961ef54b17d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -196,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk) * and the session is not interactive. */ -static inline int tcp_in_quickack_mode(const struct sock *sk) +static inline bool tcp_in_quickack_mode(const struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); + return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; } @@ -253,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) tp->ecn_flags &= ~TCP_ECN_OK; } -static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) +static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) - return 1; - return 0; + return true; + return false; } /* Buffer size and advertised window tuning. @@ -1123,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, * the exact amount is rather hard to quantify. However, tp->max_window can * be used as an exaggerated estimate. */ -static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, - u32 start_seq, u32 end_seq) +static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, + u32 start_seq, u32 end_seq) { /* Too far in future, or reversed (interpretation is ambiguous) */ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) - return 0; + return false; /* Nasty start_seq wrap-around check (see comments above) */ if (!before(start_seq, tp->snd_nxt)) - return 0; + return false; /* In outstanding window? ...This is valid exit for D-SACKs too. 
* start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) - return 1; + return true; if (!is_dsack || !tp->undo_marker) - return 0; + return false; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) - return 0; + return false; if (!before(start_seq, tp->undo_marker)) - return 1; + return true; /* Too old */ if (!after(end_seq, tp->undo_marker)) - return 0; + return false; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. @@ -1224,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk) tp->lost_retrans_low = new_low_seq; } -static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, - struct tcp_sack_block_wire *sp, int num_sacks, - u32 prior_snd_una) +static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, + struct tcp_sack_block_wire *sp, int num_sacks, + u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); - int dup_sack = 0; + bool dup_sack = false; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { - dup_sack = 1; + dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { @@ -1243,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { - dup_sack = 1; + dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); @@ -1274,9 +1275,10 @@ struct tcp_sacktag_state { * FIXME: this could be merged to shift decision code */ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, - u32 start_seq, u32 end_seq) + u32 start_seq, u32 end_seq) { - int in_sack, err; + int err; + bool in_sack; unsigned int pkt_len; unsigned int mss; @@ -1322,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, - int dup_sack, int pcount) + bool dup_sack, int pcount) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; @@ -1402,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk, /* Shift newly-SACKed bytes from this skb to the immediately previous * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 
*/ -static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, - struct tcp_sacktag_state *state, - unsigned int pcount, int shifted, int mss, - int dup_sack) +static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, + struct tcp_sacktag_state *state, + unsigned int pcount, int shifted, int mss, + bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); @@ -1455,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); - return 0; + return false; } /* Whole SKB was eaten :-) */ @@ -1478,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); - return 1; + return true; } /* I wish gso_size would have a bit more sane initialization than @@ -1501,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb) static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, - int dup_sack) + bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; @@ -1640,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, - int dup_sack_in) + bool dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; - int dup_sack = dup_sack_in; + bool dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; @@ -1662,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) - dup_sack = 1; + dup_sack = true; } /* skb reference here is a bit tricky to get right, since @@ -1767,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; - int found_dup_sack = 0; + bool found_dup_sack = false; int i, j; int first_sack_index; @@ -1798,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { - int dup_sack = !i && found_dup_sack; + bool dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); @@ -1865,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; - int dup_sack = (found_dup_sack && (i == first_sack_index)); + bool dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) @@ -1967,9 +1969,9 @@ out: } /* Limits sacked_out so that sum with lost_out isn't ever larger than - * packets_out. Returns zero if sacked_out adjustement wasn't necessary. + * packets_out. Returns false if sacked_out adjustement wasn't necessary. 
*/ -static int tcp_limit_reno_sacked(struct tcp_sock *tp) +static bool tcp_limit_reno_sacked(struct tcp_sock *tp) { u32 holes; @@ -1978,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp) if ((tp->sacked_out + holes) > tp->packets_out) { tp->sacked_out = tp->packets_out - holes; - return 1; + return true; } - return 0; + return false; } /* If we receive more dupacks than we expected counting segments @@ -2034,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp) /* F-RTO can only be used if TCP has never retransmitted anything other than * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) */ -int tcp_use_frto(struct sock *sk) +bool tcp_use_frto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; if (!sysctl_tcp_frto) - return 0; + return false; /* MTU probe and F-RTO won't really play nicely along currently */ if (icsk->icsk_mtup.probe_size) - return 0; + return false; if (tcp_is_sackfrto(tp)) - return 1; + return true; /* Avoid expensive walking of rexmit queue if possible */ if (tp->retrans_out > 1) - return 0; + return false; skb = tcp_write_queue_head(sk); if (tcp_skb_is_last(sk, skb)) - return 1; + return true; skb = tcp_write_queue_next(sk, skb); /* Skips head */ tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - return 0; + return false; /* Short-circuit when first non-SACKed skb has been checked */ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) break; } - return 1; + return true; } /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO @@ -2303,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how) * * Do processing similar to RTO timeout. */ -static int tcp_check_sack_reneging(struct sock *sk, int flag) +static bool tcp_check_sack_reneging(struct sock *sk, int flag) { if (flag & FLAG_SACK_RENEGING) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -2314,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag) tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); - return 1; + return true; } - return 0; + return false; } static inline int tcp_fackets_out(const struct tcp_sock *tp) @@ -2472,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk) * Main question: may we further continue forward transmission * with the same cwnd? */ -static int tcp_time_to_recover(struct sock *sk, int flag) +static bool tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; /* Do not perform any recovery during F-RTO algorithm */ if (tp->frto_counter) - return 0; + return false; /* Trick#1: The loss is proven. */ if (tp->lost_out) - return 1; + return true; /* Not-A-Trick#2 : Classic rule... */ if (tcp_dupack_heuristics(tp) > tp->reordering) - return 1; + return true; /* Trick#3 : when we use RFC2988 timer restart, fast * retransmit can be triggered by timeout of queue head. */ if (tcp_is_fack(tp) && tcp_head_timedout(sk)) - return 1; + return true; /* Trick#4: It is still not OK... But will it be useful to delay * recovery more? @@ -2505,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag) /* We have nothing to send. This connection is limited * either by receiver window or by application. 
*/ - return 1; + return true; } /* If a thin stream is detected, retransmit after first @@ -2516,7 +2518,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag) if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && tcp_is_sack(tp) && !tcp_send_head(sk)) - return 1; + return true; /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious * retransmissions due to small network reorderings, we implement @@ -2528,7 +2530,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag) !tcp_may_send_now(sk)) return !tcp_pause_early_retransmit(sk, flag); - return 0; + return false; } /* New heuristics: it is possible only after we switched to restart timer @@ -2767,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp) } /* People celebrate: "We love our President!" */ -static int tcp_try_undo_recovery(struct sock *sk) +static bool tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -2792,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk) * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); - return 1; + return true; } tcp_set_ca_state(sk, TCP_CA_Open); - return 0; + return false; } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ @@ -2825,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk) * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. */ -static int tcp_any_retrans_done(const struct sock *sk) +static bool tcp_any_retrans_done(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (tp->retrans_out) - return 1; + return true; skb = tcp_write_queue_head(sk); if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) - return 1; + return true; - return 0; + return false; } /* Undo during fast recovery after partial ACK. */ @@ -2871,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) } /* Undo during loss recovery after partial ACK. */ -static int tcp_try_undo_loss(struct sock *sk) +static bool tcp_try_undo_loss(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -2893,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk) tp->undo_marker = 0; if (tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); - return 1; + return true; } - return 0; + return false; } static inline void tcp_complete_cwr(struct sock *sk) @@ -3370,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; u32 now = tcp_time_stamp; - int fully_acked = 1; + int fully_acked = true; int flag = 0; u32 pkts_acked = 0; u32 reord = tp->packets_out; @@ -3394,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, if (!acked_pcount) break; - fully_acked = 0; + fully_acked = false; } else { acked_pcount = tcp_skb_pcount(skb); } @@ -3673,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag) * to prove that the RTO is indeed spurious. It transfers the control * from F-RTO to the conventional RTO recovery */ -static int tcp_process_frto(struct sock *sk, int flag) +static bool tcp_process_frto(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); @@ -3689,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag) if (!before(tp->snd_una, tp->frto_highmark)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 
2 : 3), flag); - return 1; + return true; } if (!tcp_is_sackfrto(tp)) { @@ -3698,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag) * data, winupdate */ if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) - return 1; + return true; if (!(flag & FLAG_DATA_ACKED)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), flag); - return 1; + return true; } } else { if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { /* Prevent sending of new data. */ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)); - return 1; + return true; } if ((tp->frto_counter >= 2) && @@ -3720,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag) /* RFC4138 shortcoming (see comment above) */ if (!(flag & FLAG_FORWARD_PROGRESS) && (flag & FLAG_NOT_DUP)) - return 1; + return true; tcp_enter_frto_loss(sk, 3, flag); - return 1; + return true; } } @@ -3735,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag) if (!tcp_may_send_now(sk)) tcp_enter_frto_loss(sk, 2, flag); - return 1; + return true; } else { switch (sysctl_tcp_frto_response) { case 2: @@ -3752,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag) tp->undo_marker = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); } - return 0; + return false; } /* This routine deals with incoming acks, but not outgoing ones. */ @@ -3770,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_sacked = tp->sacked_out; int pkts_acked = 0; int newly_acked_sacked = 0; - int frto_cwnd = 0; + bool frto_cwnd = false; /* If the ack is older than previous acks * then we can probably ignore it. @@ -4025,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o } EXPORT_SYMBOL(tcp_parse_options); -static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) +static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) { const __be32 *ptr = (const __be32 *)(th + 1); @@ -4036,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr tp->rx_opt.rcv_tsval = ntohl(*ptr); ++ptr; tp->rx_opt.rcv_tsecr = ntohl(*ptr); - return 1; + return true; } - return 0; + return false; } /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ -static int tcp_fast_parse_options(const struct sk_buff *skb, - const struct tcphdr *th, - struct tcp_sock *tp, const u8 **hvpp) +static bool tcp_fast_parse_options(const struct sk_buff *skb, + const struct tcphdr *th, + struct tcp_sock *tp, const u8 **hvpp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. 
*/ if (th->doff == (sizeof(*th) / 4)) { tp->rx_opt.saw_tstamp = 0; - return 0; + return false; } else if (tp->rx_opt.tstamp_ok && th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { if (tcp_parse_aligned_timestamp(tp, th)) - return 1; + return true; } tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); - return 1; + return true; } #ifdef CONFIG_TCP_MD5SIG @@ -4301,7 +4303,7 @@ static void tcp_fin(struct sock *sk) } } -static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, +static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) { if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { @@ -4309,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, sp->start_seq = seq; if (after(end_seq, sp->end_seq)) sp->end_seq = end_seq; - return 1; + return true; } - return 0; + return false; } static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) @@ -4507,7 +4509,7 @@ static void tcp_ofo_queue(struct sock *sk) } } -static int tcp_prune_ofo_queue(struct sock *sk); +static bool tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) @@ -5092,10 +5094,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk) * Purge the out-of-order queue. * Return true if queue was pruned. */ -static int tcp_prune_ofo_queue(struct sock *sk) +static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - int res = 0; + bool res = false; if (!skb_queue_empty(&tp->out_of_order_queue)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); @@ -5109,7 +5111,7 @@ static int tcp_prune_ofo_queue(struct sock *sk) if (tp->rx_opt.sack_ok) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); - res = 1; + res = true; } return res; } @@ -5186,7 +5188,7 @@ void tcp_cwnd_application_limited(struct sock *sk) tp->snd_cwnd_stamp = tcp_time_stamp; } -static int tcp_should_expand_sndbuf(const struct sock *sk) +static bool tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); @@ -5194,21 +5196,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk) * not modify it. */ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) - return 0; + return false; /* If we are under global TCP memory pressure, do not expand. */ if (sk_under_memory_pressure(sk)) - return 0; + return false; /* If we are under soft global TCP memory pressure, do not expand. */ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) - return 0; + return false; /* If we filled the congestion window, do not expand. 
*/ if (tp->packets_out >= tp->snd_cwnd) - return 0; + return false; - return 1; + return true; } /* When incoming ACK allowed to free some skb from write_queue, @@ -5434,16 +5436,16 @@ static inline int tcp_checksum_complete_user(struct sock *sk, } #ifdef CONFIG_NET_DMA -static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, +static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int dma_cookie; - int copied_early = 0; + bool copied_early = false; if (tp->ucopy.wakeup) - return 0; + return false; if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = net_dma_find_channel(); @@ -5459,7 +5461,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, goto out; tp->ucopy.dma_cookie = dma_cookie; - copied_early = 1; + copied_early = true; tp->ucopy.len -= chunk; tp->copied_seq += chunk; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2e76ffb66d7c..a43b87dfe800 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -866,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) } /* - * Return 1 if a syncookie should be sent + * Return true if a syncookie should be sent */ -int tcp_syn_flood_action(struct sock *sk, +bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb, const char *proto) { const char *msg = "Dropping request"; - int want_cookie = 0; + bool want_cookie = false; struct listen_sock *lopt; @@ -881,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk, #ifdef CONFIG_SYN_COOKIES if (sysctl_tcp_syncookies) { msg = "Sending cookies"; - want_cookie = 1; + want_cookie = true; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); } else #endif @@ -1196,7 +1196,7 @@ clear_hash_noput: } EXPORT_SYMBOL(tcp_v4_md5_hash_skb); -static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) +static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) { /* * This gets called for each TCP segment that arrives @@ -1219,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) /* We've parsed the options - do we have a hash? */ if (!hash_expected && !hash_location) - return 0; + return false; if (hash_expected && !hash_location) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); - return 1; + return true; } if (!hash_expected && hash_location) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); - return 1; + return true; } /* Okay, so this is hash_expected and hash_location - @@ -1244,9 +1244,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) &iph->daddr, ntohs(th->dest), genhash ? 
" tcp_v4_calc_md5_hash failed" : ""); - return 1; + return true; } - return 0; + return false; } #endif @@ -1280,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) __be32 saddr = ip_hdr(skb)->saddr; __be32 daddr = ip_hdr(skb)->daddr; __u32 isn = TCP_SKB_CB(skb)->when; - int want_cookie = 0; + bool want_cookie = false; /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) @@ -1339,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; - want_cookie = 0; /* not our kind of cookie */ + want_cookie = false; /* not our kind of cookie */ tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { @@ -2073,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos) return rc; } -static inline int empty_bucket(struct tcp_iter_state *st) +static inline bool empty_bucket(struct tcp_iter_state *st) { return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) && hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6f6a91832826..b85d9fe7d663 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row); * state. */ -static int tcp_remember_stamp(struct sock *sk) +static bool tcp_remember_stamp(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk) } if (release_it) inet_putpeer(peer); - return 1; + return true; } - return 0; + return false; } -static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw) +static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw) { struct sock *sk = (struct sock *) tw; struct inet_peer *peer; @@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw) peer->tcp_ts = tcptw->tw_ts_recent; } inet_putpeer(peer); - return 1; + return true; } - return 0; + return false; } -static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) +static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) { if (seq == s_win) - return 1; + return true; if (after(end_seq, s_win) && before(seq, e_win)) - return 1; + return true; return seq == e_win && seq == end_seq; } @@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_options_received tmp_opt; const u8 *hash_location; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); - int paws_reject = 0; + bool paws_reject = false; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { @@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) struct inet_timewait_sock *tw = NULL; const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); - int recycle_ok = 0; + bool recycle_ok = false; if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) recycle_ok = tcp_remember_stamp(sk); @@ -575,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct sock *child; const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); - int paws_reject = 0; + bool paws_reject = false; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(struct tcphdr)>>2)) { diff --git 
a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1a630825c45b..803cbfe82fbc 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -370,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) TCP_SKB_CB(skb)->end_seq = seq; } -static inline int tcp_urg_mode(const struct tcp_sock *tp) +static inline bool tcp_urg_mode(const struct tcp_sock *tp) { return tp->snd_una != tp->snd_up; } @@ -1391,20 +1391,20 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, } /* Minshall's variant of the Nagle send check. */ -static inline int tcp_minshall_check(const struct tcp_sock *tp) +static inline bool tcp_minshall_check(const struct tcp_sock *tp) { return after(tp->snd_sml, tp->snd_una) && !after(tp->snd_sml, tp->snd_nxt); } -/* Return 0, if packet can be sent now without violation Nagle's rules: +/* Return false, if packet can be sent now without violation Nagle's rules: * 1. It is full sized. * 2. Or it contains FIN. (already checked by caller) * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. * 4. Or TCP_CORK is not set, and all sent packets are ACKed. * With Minshall's modification: all sent small packets are ACKed. */ -static inline int tcp_nagle_check(const struct tcp_sock *tp, +static inline bool tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int mss_now, int nonagle) { @@ -1413,11 +1413,11 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp, (!nonagle && tp->packets_out && tcp_minshall_check(tp))); } -/* Return non-zero if the Nagle test allows this packet to be +/* Return true if the Nagle test allows this packet to be * sent now. */ -static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, - unsigned int cur_mss, int nonagle) +static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, + unsigned int cur_mss, int nonagle) { /* Nagle rule does not apply to frames, which sit in the middle of the * write_queue (they have no chances to get new data). @@ -1426,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff * argument based upon the location of SKB in the send queue. */ if (nonagle & TCP_NAGLE_PUSH) - return 1; + return true; /* Don't use the nagle rule for urgent data (or for the final FIN). * Nagle can be ignored during F-RTO too (see RFC4138). */ if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) - return 1; + return true; if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) - return 1; + return true; - return 0; + return false; } /* Does at least the first segment of SKB fit into the send window? */ -static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, - unsigned int cur_mss) +static bool tcp_snd_wnd_test(const struct tcp_sock *tp, + const struct sk_buff *skb, + unsigned int cur_mss) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; @@ -1476,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, } /* Test if sending is allowed right now. */ -int tcp_may_send_now(struct sock *sk) +bool tcp_may_send_now(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); @@ -1546,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, * * This algorithm is from John Heffner. 
*/ -static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) +static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1606,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) /* Ok, it looks like it is advisable to defer. */ tp->tso_deferred = 1 | (jiffies << 1); - return 1; + return true; send_now: tp->tso_deferred = 0; - return 0; + return false; } /* Create a new MTU probe if we are ready. @@ -1752,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk) * snd_up-64k-mss .. snd_up cannot be large. However, taking into * account rare use of URG, this is not a big flaw. * - * Returns 1, if no segments are in flight and we have queued segments, but - * cannot send anything now because of SWS or another problem. + * Returns true, if no segments are in flight and we have queued segments, + * but cannot send anything now because of SWS or another problem. */ -static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, - int push_one, gfp_t gfp) +static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + int push_one, gfp_t gfp) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -1770,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* Do MTU probing. */ result = tcp_mtu_probe(sk); if (!result) { - return 0; + return false; } else if (result > 0) { sent_pkts = 1; } @@ -1829,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (likely(sent_pkts)) { tcp_cwnd_validate(sk); - return 0; + return false; } return !tp->packets_out && tcp_send_head(sk); } @@ -2028,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) } /* Check if coalescing SKBs is legal. */ -static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) +static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) { if (tcp_skb_pcount(skb) > 1) - return 0; + return false; /* TODO: SACK collapsing could be used to remove this condition */ if (skb_shinfo(skb)->nr_frags != 0) - return 0; + return false; if (skb_cloned(skb)) - return 0; + return false; if (skb == tcp_send_head(sk)) - return 0; + return false; /* Some heurestics for collapsing over SACK'd could be invented */ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) - return 0; + return false; - return 1; + return true; } /* Collapse packets in the retransmit queue to make to create @@ -2054,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = to, *tmp; - int first = 1; + bool first = true; if (!sysctl_tcp_retrans_collapse) return; @@ -2068,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, space -= skb->len; if (first) { - first = 0; + first = false; continue; } @@ -2208,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Check if we forward retransmits are possible in the current * window/congestion state. */ -static int tcp_can_forward_retransmit(struct sock *sk) +static bool tcp_can_forward_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); /* Forward retransmissions are possible only during Recovery. 
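All of the hunks in this file follow the same mechanical recipe, worth spelling out once. The names below (frob, frob_allowed) are invented for illustration and do not exist in the tree:

	#include <stdbool.h>

	#define FROB_MAX 16

	struct frob {
		bool busy;
		unsigned int count;
	};

	/* Before: a predicate smuggling 0/1 through an int. */
	static int frob_allowed_old(const struct frob *f)
	{
		if (f->busy)
			return 0;
		if (f->count > FROB_MAX)
			return 0;
		return 1;
	}

	/* After: the return type states that this is a yes/no question,
	 * and the early returns can often collapse into one expression.
	 */
	static bool frob_allowed(const struct frob *f)
	{
		return !f->busy && f->count <= FROB_MAX;
	}

Callers need no changes; the value still behaves as 0/1 in integer contexts, only the declared intent improves.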
*/ if (icsk->icsk_ca_state != TCP_CA_Recovery) - return 0; + return false; /* No forward retransmissions in Reno are possible. */ if (tcp_is_reno(tp)) - return 0; + return false; /* Yeah, we have to make difficult choice between forward transmission * and retransmission... Both ways have their merits... @@ -2230,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk) */ if (tcp_may_send_now(sk)) - return 0; + return false; - return 1; + return true; } /* This gets called after a retransmit timeout, and the initially diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4cf55ae7bf80..554d5999abc4 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1055,7 +1055,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); __u32 isn = TCP_SKB_CB(skb)->when; struct dst_entry *dst = NULL; - int want_cookie = 0; + bool want_cookie = false; if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); @@ -1116,7 +1116,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; - want_cookie = 0; /* not our kind of cookie */ + want_cookie = false; /* not our kind of cookie */ tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { -- cgit v1.2.2 From 1de5a71c3e6eae2fbf15e9a9e13a8fc269bb82bc Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Thu, 17 May 2012 06:00:25 +0000 Subject: ipv6: correct the ipv6 option name - Pad0 to Pad1 The padding destination or hop-by-hop option is called Pad1 and not Pad0. See RFC2460 (4.2) or the IANA ipv6-parameters registry: http://www.iana.org/assignments/ipv6-parameters/ipv6-parameters.xml Signed-off-by: Eldad Zack Signed-off-by: David S. 
Miller --- net/bridge/br_multicast.c | 4 ++-- net/bridge/br_netfilter.c | 2 +- net/ipv6/ah6.c | 4 ++-- net/ipv6/exthdrs.c | 4 ++-- net/ipv6/mip6.c | 2 +- net/sched/act_csum.c | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 5ca4c50ea233..b66581208cb2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -460,8 +460,8 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, hopopt[3] = 2; /* Length of RA Option */ hopopt[4] = 0; /* Type = 0x0000 (MLD) */ hopopt[5] = 0; - hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ - hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ + hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */ + hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */ skb_put(skb, sizeof(*ip6h) + 8); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index dce55d4ee83b..e41456bd3cc6 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -558,7 +558,7 @@ static int check_hbh_len(struct sk_buff *skb) int optlen = nh[off + 1] + 2; switch (nh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 9aa3d010ac5d..5d32e7a93b26 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -127,7 +127,7 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) switch (opt[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; default: @@ -171,7 +171,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des switch (opt[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; default: diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index a93bd231eca1..a3cded6a1997 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -75,7 +75,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) return offset; switch (opttype) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; default: @@ -156,7 +156,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) int i; switch (nh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 2e02f7c9d76d..5b087c31d87b 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -46,7 +46,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen) if (!data) return NULL; if (padlen == 1) { - data[0] = IPV6_TLV_PAD0; + data[0] = IPV6_TLV_PAD1; } else if (padlen > 1) { data[0] = IPV6_TLV_PADN; data[1] = padlen - 2; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 882124ceb70c..2c8ad7c86e43 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -397,7 +397,7 @@ static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, while (len > 1) { switch (xh[off]) { - case IPV6_TLV_PAD0: + case IPV6_TLV_PAD1: optlen = 1; break; case IPV6_TLV_JUMBO: -- cgit v1.2.2 From a1c7fff7e18f59e684e07b0f9a770561cd39f395 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 17 May 2012 07:34:16 +0000 Subject: net: netdev_alloc_skb() use build_skb() netdev_alloc_skb() is used by networks driver in their RX path to allocate an skb to receive an incoming frame. With recent skb->head_frag infrastructure, it makes sense to change netdev_alloc_skb() to use build_skb() and a frag allocator. This permits a zero copy splice(socket->pipe), and better GRO or TCP coalescing. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7a10f0894152..7645df1bada0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -293,6 +293,12 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) } EXPORT_SYMBOL(build_skb); +struct netdev_alloc_cache { + struct page *page; + unsigned int offset; +}; +static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); + /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on @@ -310,8 +316,32 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask) { struct sk_buff *skb; + unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); + if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) { + struct netdev_alloc_cache *nc; + void *data = NULL; + + nc = &get_cpu_var(netdev_alloc_cache); + if (!nc->page) { +refill: nc->page = alloc_page(gfp_mask); + nc->offset = 0; + } + if (likely(nc->page)) { + if (nc->offset + fragsz > PAGE_SIZE) { + put_page(nc->page); + goto refill; + } + data = page_address(nc->page) + nc->offset; + nc->offset += fragsz; + get_page(nc->page); + } + put_cpu_var(netdev_alloc_cache); + skb = data ? build_skb(data, fragsz) : NULL; + } else { + skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); + } if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; -- cgit v1.2.2 From cad456d5abbb6307be7a658d701bc44ca689e906 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Thu, 17 May 2012 10:04:00 +0000 Subject: drop_monitor: convert to modular building When I first wrote drop monitor I wrote it to just build monolithically. There is no reason it can't be built modularly as well, so lets give it that flexibiity. I've tested this by building it as both a module and monolithically, and it seems to work quite well Change notes: v2) * fixed for_each_present_cpu loops to be more correct as per Eric D. * Converted exit path failures to BUG_ON as per Ben H. v3) * Converted del_timer to del_timer_sync to close race noted by Ben H. Signed-off-by: Neil Horman CC: "David S. Miller" CC: Eric Dumazet CC: Ben Hutchings Reviewed-by: Ben Hutchings Signed-off-by: David S. Miller --- net/Kconfig | 2 +- net/core/drop_monitor.c | 46 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/Kconfig b/net/Kconfig index 4c06c7c513a4..da1282359303 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -296,7 +296,7 @@ config NET_TCPPROBE module will be called tcp_probe. 
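The conversion described in the changelog above boils down to two things: the Kconfig symbol becomes tristate (the change immediately below), and the file gains a module exit path mirroring its init path. As a generic sketch of that scaffolding (foo_init/foo_exit are placeholders, not symbols from this patch):

	#include <linux/module.h>

	static int __init foo_init(void)
	{
		/* register notifiers, netlink family, per-CPU state, ... */
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* unwind exactly what foo_init() set up, making sure no
		 * timers or scheduled work can still be running
		 */
	}

	module_init(foo_init);	/* replaces the old late_initcall() */
	module_exit(foo_exit);

	MODULE_LICENSE("GPL v2");

The patch additionally pins the module with try_module_get() while its tracepoints are registered, so that unloading cannot race with a live trace.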
config NET_DROP_MONITOR - boolean "Network packet drop alerting service" + tristate "Network packet drop alerting service" depends on INET && EXPERIMENTAL && TRACEPOINTS ---help--- This feature provides an alerting service to userspace in the diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index eca00a96bcf3..3252e7e0a005 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -263,9 +264,15 @@ static int set_all_monitor_traces(int state) switch (state) { case TRACE_ON: + if (!try_module_get(THIS_MODULE)) { + rc = -ENODEV; + break; + } + rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); break; + case TRACE_OFF: rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); @@ -281,6 +288,9 @@ static int set_all_monitor_traces(int state) kfree_rcu(new_stat, rcu); } } + + module_put(THIS_MODULE); + break; default: rc = 1; @@ -406,7 +416,7 @@ static int __init init_net_drop_monitor(void) rc = 0; - for_each_present_cpu(cpu) { + for_each_possible_cpu(cpu) { data = &per_cpu(dm_cpu_data, cpu); data->cpu = cpu; INIT_WORK(&data->dm_alert_work, send_dm_alert); @@ -425,4 +435,36 @@ out: return rc; } -late_initcall(init_net_drop_monitor); +static void exit_net_drop_monitor(void) +{ + struct per_cpu_dm_data *data; + int cpu; + + BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); + + /* + * Because of the module_get/put we do in the trace state change path + * we are guarnateed not to have any current users when we get here + * all we need to do is make sure that we don't have any running timers + * or pending schedule calls + */ + + for_each_possible_cpu(cpu) { + data = &per_cpu(dm_cpu_data, cpu); + del_timer_sync(&data->send_timer); + cancel_work_sync(&data->dm_alert_work); + /* + * At this point, we should have exclusive access + * to this struct and can free the skb inside it + */ + kfree_skb(data->skb); + } + + BUG_ON(genl_unregister_family(&net_drop_monitor_family)); +} + +module_init(init_net_drop_monitor); +module_exit(exit_net_drop_monitor); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Neil Horman "); -- cgit v1.2.2 From a508da6cc0093171833efb8376b00473f24221b9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 17 May 2012 10:25:49 +0000 Subject: lapb: Neaten debugging Enable dynamic debugging and remove a bunch of #ifdef/#endifs. Add a lapb_dbg(level, fmt, ...) macro and replace the printk(KERN_DEBUG uses. Add pr_fmt and remove embedded prefixes. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/lapb/lapb_iface.c | 22 ++-- net/lapb/lapb_in.c | 320 ++++++++++++++------------------------------------ net/lapb/lapb_out.c | 38 ++---- net/lapb/lapb_subr.c | 28 ++--- net/lapb/lapb_timer.c | 32 ++--- 5 files changed, 134 insertions(+), 306 deletions(-) (limited to 'net') diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index ab3d35f23257..3cdaa046c1bc 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -15,6 +15,8 @@ * 2000-10-29 Henner Eisen lapb_data_indication() return status. 
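The lapb_dbg() calls used throughout the hunks below depend on a helper macro added outside this excerpt (in the lapb headers). A definition consistent with these call sites, keeping the old LAPB_DEBUG level checks but routing output through dynamic debug, would look roughly like:

	/* Assumed shape of the helper -- the real definition is not part of
	 * this diff.  "level < LAPB_DEBUG" mirrors the removed
	 * "#if LAPB_DEBUG > n" guards, and pr_debug() picks up the pr_fmt()
	 * prefix each converted file now defines.
	 */
	#define lapb_dbg(level, fmt, ...)			\
	do {							\
		if (level < LAPB_DEBUG)				\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)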
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -279,9 +281,7 @@ int lapb_connect_request(struct net_device *dev) lapb_establish_data_link(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S0 -> S1\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev); lapb->state = LAPB_STATE_1; rc = LAPB_OK; @@ -305,12 +305,8 @@ int lapb_disconnect_request(struct net_device *dev) goto out_put; case LAPB_STATE_1: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev); + lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); @@ -329,12 +325,8 @@ int lapb_disconnect_request(struct net_device *dev) lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_2; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 DISC(1)\n", lapb->dev); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S2\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev); + lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev); rc = LAPB_OK; out_put: diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c index f4e3c1accab7..5dba899131b3 100644 --- a/net/lapb/lapb_in.c +++ b/net/lapb/lapb_in.c @@ -15,6 +15,8 @@ * 2000-10-29 Henner Eisen lapb_data_indication() return status. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -44,25 +46,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, { switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S0 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -78,18 +71,11 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S0 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -102,22 +88,16 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, 
LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); break; @@ -137,68 +117,45 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb, { switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); } break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; case LAPB_UA: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; @@ -212,14 +169,9 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_DM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); @@ -242,34 +194,22 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, switch (frame->type) { case LAPB_SABM: case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + 
lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); break; case LAPB_UA: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); @@ -278,14 +218,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_DM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) { -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); @@ -297,12 +232,9 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, case LAPB_REJ: case LAPB_RNR: case LAPB_RR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n", - lapb->dev, frame->pf); - printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n", + lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n", lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); @@ -325,22 +257,15 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -355,15 +280,10 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX UA(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -375,23 +295,16 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->va = 0; lapb_requeue_frames(lapb); } else { -#if LAPB_DEBUG > 1 
- printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_start_t1timer(lapb); @@ -401,13 +314,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_DM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); @@ -416,10 +324,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_RNR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n", - lapb->dev, frame->pf, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n", + lapb->dev, frame->pf, frame->nr); lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { @@ -428,9 +334,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -439,10 +343,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_RR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n", - lapb->dev, frame->pf, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n", + lapb->dev, frame->pf, frame->nr); lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { @@ -451,9 +353,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -462,10 +362,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_REJ: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n", - lapb->dev, frame->pf, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n", + lapb->dev, frame->pf, frame->nr); lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { @@ -477,9 +375,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 
-> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -488,17 +384,13 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_I: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n", - lapb->dev, frame->pf, frame->ns, frame->nr); -#endif + lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n", + lapb->dev, frame->pf, frame->ns, frame->nr); if (!lapb_validate_nr(lapb, frame->nr)) { lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -522,7 +414,7 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, * a frame lost on the wire. */ if (cn == NET_RX_DROP) { - printk(KERN_DEBUG "LAPB: rx congestion\n"); + pr_debug("rx congestion\n"); break; } lapb->vr = (lapb->vr + 1) % modulus; @@ -541,11 +433,8 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, if (frame->pf) lapb_enquiry_response(lapb); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG - "lapb: (%p) S3 TX REJ(%d) R%d\n", - lapb->dev, frame->pf, lapb->vr); -#endif + lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n", + lapb->dev, frame->pf, lapb->vr); lapb->condition |= LAPB_REJECT_CONDITION; lapb_send_control(lapb, LAPB_REJ, frame->pf, LAPB_RESPONSE); @@ -555,31 +444,22 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_FRMR: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X " - "%02X %02X %02X %02X\n", lapb->dev, frame->pf, - skb->data[0], skb->data[1], skb->data[2], - skb->data[3], skb->data[4]); -#endif + lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", + lapb->dev, frame->pf, + skb->data[0], skb->data[1], skb->data[2], + skb->data[3], skb->data[4]); lapb_establish_data_link(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); lapb_requeue_frames(lapb); lapb->state = LAPB_STATE_1; break; case LAPB_ILLEGAL: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf); lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_W; lapb_transmit_frmr(lapb); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; @@ -600,25 +480,16 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, { switch (frame->type) { case LAPB_SABM: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S4 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S4 -> S3\n", 
lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -634,18 +505,11 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_SABME: -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n", - lapb->dev, frame->pf); -#endif -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S4 TX UA(%d)\n", + lapb->dev, frame->pf); + lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); @@ -658,10 +522,8 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n", - lapb->dev, frame->pf); -#endif + lapb_dbg(1, "(%p) S4 TX DM(%d)\n", + lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index baab2760f651..ba4d015bd1a6 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c @@ -14,6 +14,8 @@ * LAPB 002 Jonathan Naylor New timer architecture. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -60,10 +62,8 @@ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll *frame |= lapb->vs << 1; } -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n", - lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr); -#endif + lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n", + lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr); lapb_transmit_buffer(lapb, skb, LAPB_COMMAND); } @@ -148,11 +148,9 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type) } } -#if LAPB_DEBUG > 2 - printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[0], skb->data[1], skb->data[2]); -#endif + lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", + lapb->dev, lapb->state, + skb->data[0], skb->data[1], skb->data[2]); if (!lapb_data_transmit(lapb, skb)) kfree_skb(skb); @@ -164,16 +162,10 @@ void lapb_establish_data_link(struct lapb_cb *lapb) lapb->n2count = 0; if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n", - lapb->dev, lapb->state); -#endif + lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state); lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n", - lapb->dev, lapb->state); -#endif + lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state); lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); } @@ -183,10 +175,8 @@ void lapb_establish_data_link(struct lapb_cb *lapb) void lapb_enquiry_response(struct lapb_cb *lapb) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n", - lapb->dev, lapb->state, lapb->vr); -#endif + lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n", + lapb->dev, lapb->state, lapb->vr); lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE); @@ -195,10 +185,8 @@ void lapb_enquiry_response(struct lapb_cb *lapb) void lapb_timeout_response(struct lapb_cb *lapb) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n", - lapb->dev, lapb->state, lapb->vr); -#endif 
+ lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n", + lapb->dev, lapb->state, lapb->vr); lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE); lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c index 066225b4e824..9d0a426eccbb 100644 --- a/net/lapb/lapb_subr.c +++ b/net/lapb/lapb_subr.c @@ -13,6 +13,8 @@ * LAPB 001 Jonathan Naylor Started Coding */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -111,11 +113,9 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, { frame->type = LAPB_ILLEGAL; -#if LAPB_DEBUG > 2 - printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[0], skb->data[1], skb->data[2]); -#endif + lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", + lapb->dev, lapb->state, + skb->data[0], skb->data[1], skb->data[2]); /* We always need to look at 2 bytes, sometimes we need * to look at 3 and those cases are handled below. @@ -284,12 +284,10 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) dptr++; *dptr++ = lapb->frmr_type; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5]); -#endif + lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", + lapb->dev, lapb->state, + skb->data[1], skb->data[2], skb->data[3], + skb->data[4], skb->data[5]); } else { dptr = skb_put(skb, 4); *dptr++ = LAPB_FRMR; @@ -301,11 +299,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) dptr++; *dptr++ = lapb->frmr_type; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X\n", - lapb->dev, lapb->state, skb->data[1], - skb->data[2], skb->data[3]); -#endif + lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", + lapb->dev, lapb->state, skb->data[1], + skb->data[2], skb->data[3]); } lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c index f8cd641dfc82..54563ad8aeb1 100644 --- a/net/lapb/lapb_timer.c +++ b/net/lapb/lapb_timer.c @@ -14,6 +14,8 @@ * LAPB 002 Jonathan Naylor New timer architecture. 
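Each converted file starts by defining pr_fmt(), as lapb_timer.c does in the hunk below. The pr_* helpers expand their format string through pr_fmt(), so with KBUILD_MODNAME being "lapb" here, the prefix the old printk() calls spelled out by hand is now added centrally. A sketch of the effect, assuming the pr_fmt() definition used throughout this series:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* as added per file */

	/* This call ... */
	pr_debug("rx congestion\n");
	/* ... is logged as "lapb: rx congestion" when debugging is enabled
	 * (dynamic debug or -DDEBUG), which is why the literal "lapb: " and
	 * "LAPB: " prefixes and the compile-time #ifdef guards could both be
	 * dropped from the individual call sites.
	 */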
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -105,21 +107,17 @@ static void lapb_t1timer_expiry(unsigned long param) lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); return; } else { lapb->n2count++; if (lapb->mode & LAPB_EXTENDED) { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S1 TX SABME(1)\n", + lapb->dev); lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); } else { -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S1 TX SABM(1)\n", + lapb->dev); lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); } } @@ -133,15 +131,11 @@ static void lapb_t1timer_expiry(unsigned long param) lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); return; } else { lapb->n2count++; -#if LAPB_DEBUG > 1 - printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev); -#endif + lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); } break; @@ -155,9 +149,7 @@ static void lapb_t1timer_expiry(unsigned long param) lapb->state = LAPB_STATE_0; lapb_stop_t2timer(lapb); lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); return; } else { lapb->n2count++; @@ -173,9 +165,7 @@ static void lapb_t1timer_expiry(unsigned long param) lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); -#if LAPB_DEBUG > 0 - printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev); -#endif + lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev); return; } else { lapb->n2count++; -- cgit v1.2.2 From 349f29d841dbae854bd7367be7c250401f974f47 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 17 May 2012 20:59:51 -0700 Subject: econet: remove ancient bug ridden protocol More spring cleaning! The ancient Econet protocol should go. Most of the bug fixes in recent years have been fixing security vulnerabilities. The hardware hasn't been made since the 90s, it is only interesting as an archeological curiosity. For the truly curious, or insomniac, go read up on it. http://en.wikipedia.org/wiki/Econet Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/Kconfig | 1 - net/Makefile | 1 - net/econet/Kconfig | 36 -- net/econet/Makefile | 7 - net/econet/af_econet.c | 1172 ------------------------------------------------ 5 files changed, 1217 deletions(-) delete mode 100644 net/econet/Kconfig delete mode 100644 net/econet/Makefile delete mode 100644 net/econet/af_econet.c (limited to 'net') diff --git a/net/Kconfig b/net/Kconfig index da1282359303..1e47bd03dde3 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -207,7 +207,6 @@ source "net/ipx/Kconfig" source "drivers/net/appletalk/Kconfig" source "net/x25/Kconfig" source "net/lapb/Kconfig" -source "net/econet/Kconfig" source "net/wanrouter/Kconfig" source "net/phonet/Kconfig" source "net/ieee802154/Kconfig" diff --git a/net/Makefile b/net/Makefile index 2a97cded8601..4f4ee083064c 100644 --- a/net/Makefile +++ b/net/Makefile @@ -40,7 +40,6 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_L2TP) += l2tp/ obj-$(CONFIG_DECNET) += decnet/ -obj-$(CONFIG_ECONET) += econet/ obj-$(CONFIG_PHONET) += phonet/ ifneq ($(CONFIG_VLAN_8021Q),) obj-y += 8021q/ diff --git a/net/econet/Kconfig b/net/econet/Kconfig deleted file mode 100644 index 39a2d2975e0e..000000000000 --- a/net/econet/Kconfig +++ /dev/null @@ -1,36 +0,0 @@ -# -# Acorn Econet/AUN protocols -# - -config ECONET - tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)" - depends on EXPERIMENTAL && INET - ---help--- - Econet is a fairly old and slow networking protocol mainly used by - Acorn computers to access file and print servers. It uses native - Econet network cards. AUN is an implementation of the higher level - parts of Econet that runs over ordinary Ethernet connections, on - top of the UDP packet protocol, which in turn runs on top of the - Internet protocol IP. - - If you say Y here, you can choose with the next two options whether - to send Econet/AUN traffic over a UDP Ethernet connection or over - a native Econet network card. - - To compile this driver as a module, choose M here: the module - will be called econet. - -config ECONET_AUNUDP - bool "AUN over UDP" - depends on ECONET - help - Say Y here if you want to send Econet/AUN traffic over a UDP - connection (UDP is a packet based protocol that runs on top of the - Internet protocol IP) using an ordinary Ethernet network card. - -config ECONET_NATIVE - bool "Native Econet" - depends on ECONET - help - Say Y here if you have a native Econet network card installed in - your computer. diff --git a/net/econet/Makefile b/net/econet/Makefile deleted file mode 100644 index 05fae8be2fed..000000000000 --- a/net/econet/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for Econet support code. -# - -obj-$(CONFIG_ECONET) += econet.o - -econet-y := af_econet.o diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c deleted file mode 100644 index fa14ca76b77b..000000000000 --- a/net/econet/af_econet.c +++ /dev/null @@ -1,1172 +0,0 @@ -/* - * An implementation of the Acorn Econet and AUN protocols. - * Philip Blundell - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - */ - -#define pr_fmt(fmt) fmt - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static const struct proto_ops econet_ops; -static struct hlist_head econet_sklist; -static DEFINE_SPINLOCK(econet_lock); -static DEFINE_MUTEX(econet_mutex); - -/* Since there are only 256 possible network numbers (or fewer, depends - how you count) it makes sense to use a simple lookup table. */ -static struct net_device *net2dev_map[256]; - -#define EC_PORT_IP 0xd2 - -#ifdef CONFIG_ECONET_AUNUDP -static DEFINE_SPINLOCK(aun_queue_lock); -static struct socket *udpsock; -#define AUN_PORT 0x8000 - -struct aunhdr { - unsigned char code; /* AUN magic protocol byte */ - unsigned char port; - unsigned char cb; - unsigned char pad; - unsigned long handle; -}; - -static unsigned long aun_seq; - -/* Queue of packets waiting to be transmitted. */ -static struct sk_buff_head aun_queue; -static struct timer_list ab_cleanup_timer; - -#endif /* CONFIG_ECONET_AUNUDP */ - -/* Per-packet information */ -struct ec_cb { - struct sockaddr_ec sec; - unsigned long cookie; /* Supplied by user. */ -#ifdef CONFIG_ECONET_AUNUDP - int done; - unsigned long seq; /* Sequencing */ - unsigned long timeout; /* Timeout */ - unsigned long start; /* jiffies */ -#endif -#ifdef CONFIG_ECONET_NATIVE - void (*sent)(struct sk_buff *, int result); -#endif -}; - -static void econet_remove_socket(struct hlist_head *list, struct sock *sk) -{ - spin_lock_bh(&econet_lock); - sk_del_node_init(sk); - spin_unlock_bh(&econet_lock); -} - -static void econet_insert_socket(struct hlist_head *list, struct sock *sk) -{ - spin_lock_bh(&econet_lock); - sk_add_node(sk, list); - spin_unlock_bh(&econet_lock); -} - -/* - * Pull a packet from our receive queue and hand it to the user. - * If necessary we block. - */ - -static int econet_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) -{ - struct sock *sk = sock->sk; - struct sk_buff *skb; - size_t copied; - int err; - - msg->msg_namelen = sizeof(struct sockaddr_ec); - - mutex_lock(&econet_mutex); - - /* - * Call the generic datagram receiver. This handles all sorts - * of horrible races and re-entrancy so we can forget about it - * in the protocol layers. - * - * Now it will return ENETDOWN, if device have just gone down, - * but then it will block. - */ - - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); - - /* - * An error occurred so return it. Because skb_recv_datagram() - * handles the blocking we don't see and worry about blocking - * retries. - */ - - if (skb == NULL) - goto out; - - /* - * You lose any data beyond the buffer you gave. If it worries a - * user program they can ask the device for its MTU anyway. - */ - - copied = skb->len; - if (copied > len) { - copied = len; - msg->msg_flags |= MSG_TRUNC; - } - - /* We can't use skb_copy_datagram here */ - err = memcpy_toiovec(msg->msg_iov, skb->data, copied); - if (err) - goto out_free; - sk->sk_stamp = skb->tstamp; - - if (msg->msg_name) - memcpy(msg->msg_name, skb->cb, msg->msg_namelen); - - /* - * Free or return the buffer as appropriate. Again this - * hides all the races and re-entrancy issues from us. 
- */ - err = copied; - -out_free: - skb_free_datagram(sk, skb); -out: - mutex_unlock(&econet_mutex); - return err; -} - -/* - * Bind an Econet socket. - */ - -static int econet_bind(struct socket *sock, struct sockaddr *uaddr, - int addr_len) -{ - struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr; - struct sock *sk; - struct econet_sock *eo; - - /* - * Check legality - */ - - if (addr_len < sizeof(struct sockaddr_ec) || - sec->sec_family != AF_ECONET) - return -EINVAL; - - mutex_lock(&econet_mutex); - - sk = sock->sk; - eo = ec_sk(sk); - - eo->cb = sec->cb; - eo->port = sec->port; - eo->station = sec->addr.station; - eo->net = sec->addr.net; - - mutex_unlock(&econet_mutex); - - return 0; -} - -#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) -/* - * Queue a transmit result for the user to be told about. - */ - -static void tx_result(struct sock *sk, unsigned long cookie, int result) -{ - struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); - struct ec_cb *eb; - struct sockaddr_ec *sec; - - if (skb == NULL) { - pr_debug("econet: memory squeeze, transmit result dropped\n"); - return; - } - - eb = (struct ec_cb *)&skb->cb; - sec = (struct sockaddr_ec *)&eb->sec; - memset(sec, 0, sizeof(struct sockaddr_ec)); - sec->cookie = cookie; - sec->type = ECTYPE_TRANSMIT_STATUS | result; - sec->sec_family = AF_ECONET; - - if (sock_queue_rcv_skb(sk, skb) < 0) - kfree_skb(skb); -} -#endif - -#ifdef CONFIG_ECONET_NATIVE -/* - * Called by the Econet hardware driver when a packet transmit - * has completed. Tell the user. - */ - -static void ec_tx_done(struct sk_buff *skb, int result) -{ - struct ec_cb *eb = (struct ec_cb *)&skb->cb; - tx_result(skb->sk, eb->cookie, result); -} -#endif - -/* - * Send a packet. We have to work out which device it's going out on - * and hence whether to use real Econet or the UDP emulation. - */ - -static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len) -{ - struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name; - struct net_device *dev; - struct ec_addr addr; - int err; - unsigned char port, cb; -#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) - struct sock *sk = sock->sk; - struct sk_buff *skb; - struct ec_cb *eb; -#endif -#ifdef CONFIG_ECONET_AUNUDP - struct msghdr udpmsg; - struct iovec iov[2]; - struct aunhdr ah; - struct sockaddr_in udpdest; - __kernel_size_t size; - mm_segment_t oldfs; - char *userbuf; -#endif - - /* - * Check the flags. - */ - - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) - return -EINVAL; - - /* - * Get and verify the address. - */ - - mutex_lock(&econet_mutex); - - if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) { - mutex_unlock(&econet_mutex); - return -EINVAL; - } - addr.station = saddr->addr.station; - addr.net = saddr->addr.net; - port = saddr->port; - cb = saddr->cb; - - /* Look for a device with the right network number. */ - dev = net2dev_map[addr.net]; - - /* If not directly reachable, use some default */ - if (dev == NULL) { - dev = net2dev_map[0]; - /* No interfaces at all? */ - if (dev == NULL) { - mutex_unlock(&econet_mutex); - return -ENETDOWN; - } - } - - if (dev->type == ARPHRD_ECONET) { - /* Real hardware Econet. We're not worthy etc. 
*/ -#ifdef CONFIG_ECONET_NATIVE - unsigned short proto = 0; - int hlen, tlen; - int res; - - if (len + 15 > dev->mtu) { - mutex_unlock(&econet_mutex); - return -EMSGSIZE; - } - - dev_hold(dev); - - hlen = LL_RESERVED_SPACE(dev); - tlen = dev->needed_tailroom; - skb = sock_alloc_send_skb(sk, len + hlen + tlen, - msg->msg_flags & MSG_DONTWAIT, &err); - if (skb == NULL) - goto out_unlock; - - skb_reserve(skb, hlen); - skb_reset_network_header(skb); - - eb = (struct ec_cb *)&skb->cb; - - eb->cookie = saddr->cookie; - eb->sec = *saddr; - eb->sent = ec_tx_done; - - err = -EINVAL; - res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len); - if (res < 0) - goto out_free; - if (res > 0) { - struct ec_framehdr *fh; - /* Poke in our control byte and - port number. Hack, hack. */ - fh = (struct ec_framehdr *)skb->data; - fh->cb = cb; - fh->port = port; - if (sock->type != SOCK_DGRAM) { - skb_reset_tail_pointer(skb); - skb->len = 0; - } - } - - /* Copy the data. Returns -EFAULT on error */ - err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); - skb->protocol = proto; - skb->dev = dev; - skb->priority = sk->sk_priority; - if (err) - goto out_free; - - err = -ENETDOWN; - if (!(dev->flags & IFF_UP)) - goto out_free; - - /* - * Now send it - */ - - dev_queue_xmit(skb); - dev_put(dev); - mutex_unlock(&econet_mutex); - return len; - -out_free: - kfree_skb(skb); -out_unlock: - if (dev) - dev_put(dev); -#else - err = -EPROTOTYPE; -#endif - mutex_unlock(&econet_mutex); - - return err; - } - -#ifdef CONFIG_ECONET_AUNUDP - /* AUN virtual Econet. */ - - if (udpsock == NULL) { - mutex_unlock(&econet_mutex); - return -ENETDOWN; /* No socket - can't send */ - } - - if (len > 32768) { - err = -E2BIG; - goto error; - } - - /* Make up a UDP datagram and hand it off to some higher intellect. */ - - memset(&udpdest, 0, sizeof(udpdest)); - udpdest.sin_family = AF_INET; - udpdest.sin_port = htons(AUN_PORT); - - /* At the moment we use the stupid Acorn scheme of Econet address - y.x maps to IP a.b.c.x. This should be replaced with something - more flexible and more aware of subnet masks. */ - { - struct in_device *idev; - unsigned long network = 0; - - rcu_read_lock(); - idev = __in_dev_get_rcu(dev); - if (idev) { - if (idev->ifa_list) - network = ntohl(idev->ifa_list->ifa_address) & - 0xffffff00; /* !!! 
*/ - } - rcu_read_unlock(); - udpdest.sin_addr.s_addr = htonl(network | addr.station); - } - - memset(&ah, 0, sizeof(ah)); - ah.port = port; - ah.cb = cb & 0x7f; - ah.code = 2; /* magic */ - - /* tack our header on the front of the iovec */ - size = sizeof(struct aunhdr); - iov[0].iov_base = (void *)&ah; - iov[0].iov_len = size; - - userbuf = vmalloc(len); - if (userbuf == NULL) { - err = -ENOMEM; - goto error; - } - - iov[1].iov_base = userbuf; - iov[1].iov_len = len; - err = memcpy_fromiovec(userbuf, msg->msg_iov, len); - if (err) - goto error_free_buf; - - /* Get a skbuff (no data, just holds our cb information) */ - skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err); - if (skb == NULL) - goto error_free_buf; - - eb = (struct ec_cb *)&skb->cb; - - eb->cookie = saddr->cookie; - eb->timeout = 5 * HZ; - eb->start = jiffies; - ah.handle = aun_seq; - eb->seq = (aun_seq++); - eb->sec = *saddr; - - skb_queue_tail(&aun_queue, skb); - - udpmsg.msg_name = (void *)&udpdest; - udpmsg.msg_namelen = sizeof(udpdest); - udpmsg.msg_iov = &iov[0]; - udpmsg.msg_iovlen = 2; - udpmsg.msg_control = NULL; - udpmsg.msg_controllen = 0; - udpmsg.msg_flags = 0; - - oldfs = get_fs(); - set_fs(KERNEL_DS); /* More privs :-) */ - err = sock_sendmsg(udpsock, &udpmsg, size); - set_fs(oldfs); - -error_free_buf: - vfree(userbuf); -error: -#else - err = -EPROTOTYPE; -#endif - mutex_unlock(&econet_mutex); - - return err; -} - -/* - * Look up the address of a socket. - */ - -static int econet_getname(struct socket *sock, struct sockaddr *uaddr, - int *uaddr_len, int peer) -{ - struct sock *sk; - struct econet_sock *eo; - struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr; - - if (peer) - return -EOPNOTSUPP; - - memset(sec, 0, sizeof(*sec)); - mutex_lock(&econet_mutex); - - sk = sock->sk; - eo = ec_sk(sk); - - sec->sec_family = AF_ECONET; - sec->port = eo->port; - sec->addr.station = eo->station; - sec->addr.net = eo->net; - - mutex_unlock(&econet_mutex); - - *uaddr_len = sizeof(*sec); - return 0; -} - -static void econet_destroy_timer(unsigned long data) -{ - struct sock *sk = (struct sock *)data; - - if (!sk_has_allocations(sk)) { - sk_free(sk); - return; - } - - sk->sk_timer.expires = jiffies + 10 * HZ; - add_timer(&sk->sk_timer); - pr_debug("econet: socket destroy delayed\n"); -} - -/* - * Close an econet socket. - */ - -static int econet_release(struct socket *sock) -{ - struct sock *sk; - - mutex_lock(&econet_mutex); - - sk = sock->sk; - if (!sk) - goto out_unlock; - - econet_remove_socket(&econet_sklist, sk); - - /* - * Now the socket is dead. No more input will appear. - */ - - sk->sk_state_change(sk); /* It is useless. Just for sanity. */ - - sock_orphan(sk); - - /* Purge queues */ - - skb_queue_purge(&sk->sk_receive_queue); - - if (sk_has_allocations(sk)) { - sk->sk_timer.data = (unsigned long)sk; - sk->sk_timer.expires = jiffies + HZ; - sk->sk_timer.function = econet_destroy_timer; - add_timer(&sk->sk_timer); - - goto out_unlock; - } - - sk_free(sk); - -out_unlock: - mutex_unlock(&econet_mutex); - return 0; -} - -static struct proto econet_proto = { - .name = "ECONET", - .owner = THIS_MODULE, - .obj_size = sizeof(struct econet_sock), -}; - -/* - * Create an Econet socket - */ - -static int econet_create(struct net *net, struct socket *sock, int protocol, - int kern) -{ - struct sock *sk; - struct econet_sock *eo; - int err; - - if (!net_eq(net, &init_net)) - return -EAFNOSUPPORT; - - /* Econet only provides datagram services. 
*/ - if (sock->type != SOCK_DGRAM) - return -ESOCKTNOSUPPORT; - - sock->state = SS_UNCONNECTED; - - err = -ENOBUFS; - sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto); - if (sk == NULL) - goto out; - - sk->sk_reuse = SK_CAN_REUSE; - sock->ops = &econet_ops; - sock_init_data(sock, sk); - - eo = ec_sk(sk); - sock_reset_flag(sk, SOCK_ZAPPED); - sk->sk_family = PF_ECONET; - eo->num = protocol; - - econet_insert_socket(&econet_sklist, sk); - return 0; -out: - return err; -} - -/* - * Handle Econet specific ioctls - */ - -static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) -{ - struct ifreq ifr; - struct ec_device *edev; - struct net_device *dev; - struct sockaddr_ec *sec; - int err; - - /* - * Fetch the caller's info block into kernel space - */ - - if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) - return -EFAULT; - - dev = dev_get_by_name(&init_net, ifr.ifr_name); - if (dev == NULL) - return -ENODEV; - - sec = (struct sockaddr_ec *)&ifr.ifr_addr; - - mutex_lock(&econet_mutex); - - err = 0; - switch (cmd) { - case SIOCSIFADDR: - if (!capable(CAP_NET_ADMIN)) { - err = -EPERM; - break; - } - - edev = dev->ec_ptr; - if (edev == NULL) { - /* Magic up a new one. */ - edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL); - if (edev == NULL) { - err = -ENOMEM; - break; - } - dev->ec_ptr = edev; - } else - net2dev_map[edev->net] = NULL; - edev->station = sec->addr.station; - edev->net = sec->addr.net; - net2dev_map[sec->addr.net] = dev; - if (!net2dev_map[0]) - net2dev_map[0] = dev; - break; - - case SIOCGIFADDR: - edev = dev->ec_ptr; - if (edev == NULL) { - err = -ENODEV; - break; - } - memset(sec, 0, sizeof(struct sockaddr_ec)); - sec->addr.station = edev->station; - sec->addr.net = edev->net; - sec->sec_family = AF_ECONET; - dev_put(dev); - if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) - err = -EFAULT; - break; - - default: - err = -EINVAL; - break; - } - - mutex_unlock(&econet_mutex); - - dev_put(dev); - - return err; -} - -/* - * Handle generic ioctls - */ - -static int econet_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) -{ - struct sock *sk = sock->sk; - void __user *argp = (void __user *)arg; - - switch (cmd) { - case SIOCGSTAMP: - return sock_get_timestamp(sk, argp); - - case SIOCGSTAMPNS: - return sock_get_timestampns(sk, argp); - - case SIOCSIFADDR: - case SIOCGIFADDR: - return ec_dev_ioctl(sock, cmd, argp); - - } - - return -ENOIOCTLCMD; -} - -static const struct net_proto_family econet_family_ops = { - .family = PF_ECONET, - .create = econet_create, - .owner = THIS_MODULE, -}; - -static const struct proto_ops econet_ops = { - .family = PF_ECONET, - .owner = THIS_MODULE, - .release = econet_release, - .bind = econet_bind, - .connect = sock_no_connect, - .socketpair = sock_no_socketpair, - .accept = sock_no_accept, - .getname = econet_getname, - .poll = datagram_poll, - .ioctl = econet_ioctl, - .listen = sock_no_listen, - .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, - .sendmsg = econet_sendmsg, - .recvmsg = econet_recvmsg, - .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, -}; - -#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE) -/* - * Find the listening socket, if any, for the given data. 
- */ - -static struct sock *ec_listening_socket(unsigned char port, unsigned char - station, unsigned char net) -{ - struct sock *sk; - struct hlist_node *node; - - spin_lock(&econet_lock); - sk_for_each(sk, node, &econet_sklist) { - struct econet_sock *opt = ec_sk(sk); - if ((opt->port == port || opt->port == 0) && - (opt->station == station || opt->station == 0) && - (opt->net == net || opt->net == 0)) { - sock_hold(sk); - goto found; - } - } - sk = NULL; -found: - spin_unlock(&econet_lock); - return sk; -} - -/* - * Queue a received packet for a socket. - */ - -static int ec_queue_packet(struct sock *sk, struct sk_buff *skb, - unsigned char stn, unsigned char net, - unsigned char cb, unsigned char port) -{ - struct ec_cb *eb = (struct ec_cb *)&skb->cb; - struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec; - - memset(sec, 0, sizeof(struct sockaddr_ec)); - sec->sec_family = AF_ECONET; - sec->type = ECTYPE_PACKET_RECEIVED; - sec->port = port; - sec->cb = cb; - sec->addr.net = net; - sec->addr.station = stn; - - return sock_queue_rcv_skb(sk, skb); -} -#endif - -#ifdef CONFIG_ECONET_AUNUDP -/* - * Send an AUN protocol response. - */ - -static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb) -{ - struct sockaddr_in sin = { - .sin_family = AF_INET, - .sin_port = htons(AUN_PORT), - .sin_addr = {.s_addr = addr} - }; - struct aunhdr ah = {.code = code, .cb = cb, .handle = seq}; - struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)}; - struct msghdr udpmsg; - - udpmsg.msg_name = (void *)&sin; - udpmsg.msg_namelen = sizeof(sin); - udpmsg.msg_control = NULL; - udpmsg.msg_controllen = 0; - udpmsg.msg_flags = 0; - - kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah)); -} - - -/* - * Handle incoming AUN packets. Work out if anybody wants them, - * and send positive or negative acknowledgements as appropriate. - */ - -static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) -{ - struct iphdr *ip = ip_hdr(skb); - unsigned char stn = ntohl(ip->saddr) & 0xff; - struct dst_entry *dst = skb_dst(skb); - struct ec_device *edev = NULL; - struct sock *sk = NULL; - struct sk_buff *newskb; - - if (dst) - edev = dst->dev->ec_ptr; - - if (!edev) - goto bad; - - sk = ec_listening_socket(ah->port, stn, edev->net); - if (sk == NULL) - goto bad; /* Nobody wants it */ - - newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15, - GFP_ATOMIC); - if (newskb == NULL) { - pr_debug("AUN: memory squeeze, dropping packet\n"); - /* Send nack and hope sender tries again */ - goto bad; - } - - memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1), - len - sizeof(struct aunhdr)); - - if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) { - /* Socket is bankrupt. */ - kfree_skb(newskb); - goto bad; - } - - aun_send_response(ip->saddr, ah->handle, 3, 0); - sock_put(sk); - return; - -bad: - aun_send_response(ip->saddr, ah->handle, 4, 0); - if (sk) - sock_put(sk); -} - -/* - * Handle incoming AUN transmit acknowledgements. If the sequence - * number matches something in our backlog then kill it and tell - * the user. If the remote took too long to reply then we may have - * dropped the packet already. 
- */ - -static void aun_tx_ack(unsigned long seq, int result) -{ - struct sk_buff *skb; - unsigned long flags; - struct ec_cb *eb; - - spin_lock_irqsave(&aun_queue_lock, flags); - skb_queue_walk(&aun_queue, skb) { - eb = (struct ec_cb *)&skb->cb; - if (eb->seq == seq) - goto foundit; - } - spin_unlock_irqrestore(&aun_queue_lock, flags); - pr_debug("AUN: unknown sequence %ld\n", seq); - return; - -foundit: - tx_result(skb->sk, eb->cookie, result); - skb_unlink(skb, &aun_queue); - spin_unlock_irqrestore(&aun_queue_lock, flags); - kfree_skb(skb); -} - -/* - * Deal with received AUN frames - sort out what type of thing it is - * and hand it to the right function. - */ - -static void aun_data_available(struct sock *sk, int slen) -{ - int err; - struct sk_buff *skb; - unsigned char *data; - struct aunhdr *ah; - size_t len; - - while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) { - if (err == -EAGAIN) { - pr_err("AUN: no data available?!\n"); - return; - } - pr_debug("AUN: recvfrom() error %d\n", -err); - } - - data = skb_transport_header(skb) + sizeof(struct udphdr); - ah = (struct aunhdr *)data; - len = skb->len - sizeof(struct udphdr); - - switch (ah->code) { - case 2: - aun_incoming(skb, ah, len); - break; - case 3: - aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK); - break; - case 4: - aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING); - break; - default: - pr_debug("AUN: unknown packet type: %d\n", data[0]); - } - - skb_free_datagram(sk, skb); -} - -/* - * Called by the timer to manage the AUN transmit queue. If a packet - * was sent to a dead or nonexistent host then we will never get an - * acknowledgement back. After a few seconds we need to spot this and - * drop the packet. - */ - -static void ab_cleanup(unsigned long h) -{ - struct sk_buff *skb, *n; - unsigned long flags; - - spin_lock_irqsave(&aun_queue_lock, flags); - skb_queue_walk_safe(&aun_queue, skb, n) { - struct ec_cb *eb = (struct ec_cb *)&skb->cb; - if ((jiffies - eb->start) > eb->timeout) { - tx_result(skb->sk, eb->cookie, - ECTYPE_TRANSMIT_NOT_PRESENT); - skb_unlink(skb, &aun_queue); - kfree_skb(skb); - } - } - spin_unlock_irqrestore(&aun_queue_lock, flags); - - mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2)); -} - -static int __init aun_udp_initialise(void) -{ - int error; - struct sockaddr_in sin; - - skb_queue_head_init(&aun_queue); - setup_timer(&ab_cleanup_timer, ab_cleanup, 0); - ab_cleanup_timer.expires = jiffies + (HZ * 2); - add_timer(&ab_cleanup_timer); - - memset(&sin, 0, sizeof(sin)); - sin.sin_port = htons(AUN_PORT); - - /* We can count ourselves lucky Acorn machines are too dim to - speak IPv6. :-) */ - error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock); - if (error < 0) { - pr_err("AUN: socket error %d\n", -error); - return error; - } - - udpsock->sk->sk_reuse = SK_CAN_REUSE; - udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it - from interrupts */ - - error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin, - sizeof(sin)); - if (error < 0) { - pr_err("AUN: bind error %d\n", -error); - goto release; - } - - udpsock->sk->sk_data_ready = aun_data_available; - - return 0; - -release: - sock_release(udpsock); - udpsock = NULL; - return error; -} -#endif - -#ifdef CONFIG_ECONET_NATIVE - -/* - * Receive an Econet frame from a device. 
- */ - -static int econet_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) -{ - struct ec_framehdr *hdr; - struct sock *sk = NULL; - struct ec_device *edev = dev->ec_ptr; - - if (!net_eq(dev_net(dev), &init_net)) - goto drop; - - if (skb->pkt_type == PACKET_OTHERHOST) - goto drop; - - if (!edev) - goto drop; - - skb = skb_share_check(skb, GFP_ATOMIC); - if (skb == NULL) - return NET_RX_DROP; - - if (!pskb_may_pull(skb, sizeof(struct ec_framehdr))) - goto drop; - - hdr = (struct ec_framehdr *)skb->data; - - /* First check for encapsulated IP */ - if (hdr->port == EC_PORT_IP) { - skb->protocol = htons(ETH_P_IP); - skb_pull(skb, sizeof(struct ec_framehdr)); - netif_rx(skb); - return NET_RX_SUCCESS; - } - - sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net); - if (!sk) - goto drop; - - if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb, - hdr->port)) - goto drop; - sock_put(sk); - return NET_RX_SUCCESS; - -drop: - if (sk) - sock_put(sk); - kfree_skb(skb); - return NET_RX_DROP; -} - -static struct packet_type econet_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_ECONET), - .func = econet_rcv, -}; - -static void econet_hw_initialise(void) -{ - dev_add_pack(&econet_packet_type); -} - -#endif - -static int econet_notifier(struct notifier_block *this, unsigned long msg, - void *data) -{ - struct net_device *dev = data; - struct ec_device *edev; - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - switch (msg) { - case NETDEV_UNREGISTER: - /* A device has gone down - kill any data we hold for it. */ - edev = dev->ec_ptr; - if (edev) { - if (net2dev_map[0] == dev) - net2dev_map[0] = NULL; - net2dev_map[edev->net] = NULL; - kfree(edev); - dev->ec_ptr = NULL; - } - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block econet_netdev_notifier = { - .notifier_call = econet_notifier, -}; - -static void __exit econet_proto_exit(void) -{ -#ifdef CONFIG_ECONET_AUNUDP - del_timer(&ab_cleanup_timer); - if (udpsock) - sock_release(udpsock); -#endif - unregister_netdevice_notifier(&econet_netdev_notifier); -#ifdef CONFIG_ECONET_NATIVE - dev_remove_pack(&econet_packet_type); -#endif - sock_unregister(econet_family_ops.family); - proto_unregister(&econet_proto); -} - -static int __init econet_proto_init(void) -{ - int err = proto_register(&econet_proto, 0); - - if (err != 0) - goto out; - sock_register(&econet_family_ops); -#ifdef CONFIG_ECONET_AUNUDP - aun_udp_initialise(); -#endif -#ifdef CONFIG_ECONET_NATIVE - econet_hw_initialise(); -#endif - register_netdevice_notifier(&econet_netdev_notifier); -out: - return err; -} - -module_init(econet_proto_init); -module_exit(econet_proto_exit); - -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NETPROTO(PF_ECONET); -- cgit v1.2.2 From cbc264cacd08e51fd4a64b5d5b1ba48f523990d1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 05:57:13 +0200 Subject: ip_frag: struct inet_frags match() method returns a bool - match() method returns a boolean - return (A && B && C && D) -> return A && B && C && D - fix indentation Signed-off-by: Eric Dumazet --- net/ipv4/ip_fragment.c | 10 +++++----- net/ipv6/reassembly.c | 9 +++++---- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 9f9bd139335f..695b27f44161 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -148,17 +148,17 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q) return ipqhashfn(ipq->id, 
ipq->saddr, ipq->daddr, ipq->protocol); } -static int ip4_frag_match(struct inet_frag_queue *q, void *a) +static bool ip4_frag_match(struct inet_frag_queue *q, void *a) { struct ipq *qp; struct ip4_create_arg *arg = a; qp = container_of(q, struct ipq, q); return qp->id == arg->iph->id && - qp->saddr == arg->iph->saddr && - qp->daddr == arg->iph->daddr && - qp->protocol == arg->iph->protocol && - qp->user == arg->user; + qp->saddr == arg->iph->saddr && + qp->daddr == arg->iph->daddr && + qp->protocol == arg->iph->protocol && + qp->user == arg->user; } /* Memory Tracking Functions. */ diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index f1b86fdc06ad..5d32dfa4d75a 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -134,15 +134,16 @@ static unsigned int ip6_hashfn(struct inet_frag_queue *q) return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd); } -int ip6_frag_match(struct inet_frag_queue *q, void *a) +bool ip6_frag_match(struct inet_frag_queue *q, void *a) { struct frag_queue *fq; struct ip6_create_arg *arg = a; fq = container_of(q, struct frag_queue, q); - return (fq->id == arg->id && fq->user == arg->user && - ipv6_addr_equal(&fq->saddr, arg->src) && - ipv6_addr_equal(&fq->daddr, arg->dst)); + return fq->id == arg->id && + fq->user == arg->user && + ipv6_addr_equal(&fq->saddr, arg->src) && + ipv6_addr_equal(&fq->daddr, arg->dst); } EXPORT_SYMBOL(ip6_frag_match); -- cgit v1.2.2 From 92113bfde2f0982daa5a372d67b62f3d55bbc88a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 08:14:11 +0200 Subject: ipv6: bool conversions phase1 ipv6_opt_accepted() returns a bool, and can use const pointers ipv6_addr_equal(), ipv6_addr_any(), ipv6_addr_loopback(), ipv6_addr_orchid() return a bool. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv6/af_inet6.c | 10 +++++----- net/ipv6/ip6_tunnel.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 138d4986c327..e22e6d88bac6 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -678,10 +678,10 @@ int inet6_sk_rebuild_header(struct sock *sk) } EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header); -int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) +bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb) { - struct ipv6_pinfo *np = inet6_sk(sk); - struct inet6_skb_parm *opt = IP6CB(skb); + const struct ipv6_pinfo *np = inet6_sk(sk); + const struct inet6_skb_parm *opt = IP6CB(skb); if (np->rxopt.all) { if ((opt->hop && (np->rxopt.bits.hopopts || @@ -693,9 +693,9 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) np->rxopt.bits.osrcrt)) || ((opt->dst1 || opt->dst0) && (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts))) - return 1; + return true; } - return 0; + return false; } EXPORT_SYMBOL_GPL(ipv6_opt_accepted); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index e65c56009bb0..c9015fad8d65 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -818,7 +818,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) * 0 else **/ -static inline int +static inline bool ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) { return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); -- cgit v1.2.2 From 6f532612cc2410a5079ea0f83e7a5011adfbf70d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 05:12:12 +0000 Subject: net: introduce netdev_alloc_frag() Fix two issues introduced in commit a1c7fff7e18f5 ( net: netdev_alloc_skb() use build_skb() ) - Must be IRQ safe (non NAPI drivers can use it) - Must not leak the frag if build_skb() fails to allocate sk_buff This patch introduces netdev_alloc_frag() for drivers willing to use build_skb() instead of __netdev_alloc_skb() variants. Factorize code so that : __dev_alloc_skb() is a wrapper around __netdev_alloc_skb(), and dev_alloc_skb() a wrapper around netdev_alloc_skb() Use __GFP_COLD flag. Almost all network drivers now benefit from skb->head_frag infrastructure. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 82 +++++++++++++++++++++++++++---------------------------- 1 file changed, 41 insertions(+), 41 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7645df1bada0..7ceb673d622f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -299,6 +299,40 @@ struct netdev_alloc_cache { }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); +/** + * netdev_alloc_frag - allocate a page fragment + * @fragsz: fragment size + * + * Allocates a frag from a page for receive buffer. + * Uses GFP_ATOMIC allocations. 
+ */ +void *netdev_alloc_frag(unsigned int fragsz) +{ + struct netdev_alloc_cache *nc; + void *data = NULL; + unsigned long flags; + + local_irq_save(flags); + nc = &__get_cpu_var(netdev_alloc_cache); + if (unlikely(!nc->page)) { +refill: + nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD); + nc->offset = 0; + } + if (likely(nc->page)) { + if (nc->offset + fragsz > PAGE_SIZE) { + put_page(nc->page); + goto refill; + } + data = page_address(nc->page) + nc->offset; + nc->offset += fragsz; + get_page(nc->page); + } + local_irq_restore(flags); + return data; +} +EXPORT_SYMBOL(netdev_alloc_frag); + /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on @@ -313,32 +347,20 @@ static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); * %NULL is returned if there is no free memory. */ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, - unsigned int length, gfp_t gfp_mask) + unsigned int length, gfp_t gfp_mask) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) { - struct netdev_alloc_cache *nc; - void *data = NULL; + void *data = netdev_alloc_frag(fragsz); - nc = &get_cpu_var(netdev_alloc_cache); - if (!nc->page) { -refill: nc->page = alloc_page(gfp_mask); - nc->offset = 0; - } - if (likely(nc->page)) { - if (nc->offset + fragsz > PAGE_SIZE) { - put_page(nc->page); - goto refill; - } - data = page_address(nc->page) + nc->offset; - nc->offset += fragsz; - get_page(nc->page); + if (likely(data)) { + skb = build_skb(data, fragsz); + if (unlikely(!skb)) + put_page(virt_to_head_page(data)); } - put_cpu_var(netdev_alloc_cache); - skb = data ? build_skb(data, fragsz) : NULL; } else { skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE); } @@ -360,28 +382,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, } EXPORT_SYMBOL(skb_add_rx_frag); -/** - * dev_alloc_skb - allocate an skbuff for receiving - * @length: length to allocate - * - * Allocate a new &sk_buff and assign it a usage count of one. The - * buffer has unspecified headroom built in. Users should allocate - * the headroom they think they need without accounting for the - * built in space. The built in space is used for optimisations. - * - * %NULL is returned if there is no free memory. Although this function - * allocates memory it can be called from an interrupt. - */ -struct sk_buff *dev_alloc_skb(unsigned int length) -{ - /* - * There is more code here than it seems: - * __dev_alloc_skb is an inline - */ - return __dev_alloc_skb(length, GFP_ATOMIC); -} -EXPORT_SYMBOL(dev_alloc_skb); - static void skb_drop_list(struct sk_buff **listp) { struct sk_buff *list = *listp; -- cgit v1.2.2 From d7f7c0ac11bd6792b755ec76ac3befdc0767262a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 05:37:56 +0000 Subject: ipv6: remove csummode in ip6_append_data() csummode variable is always CHECKSUM_NONE in ip6_append_data() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv6/ip6_output.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index be2264e7dd2d..a254e4b5d968 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1198,7 +1198,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int copy; int err; int offset = 0; - int csummode = CHECKSUM_NONE; __u8 tx_flags = 0; if (flags&MSG_PROBE) @@ -1411,7 +1410,7 @@ alloc_new_skb: /* * Fill in the control structures */ - skb->ip_summed = csummode; + skb->ip_summed = CHECKSUM_NONE; skb->csum = 0; /* reserve for fragmentation and ipsec header */ skb_reserve(skb, hh_len + sizeof(struct frag_hdr) + @@ -1454,7 +1453,6 @@ alloc_new_skb: transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; - csummode = CHECKSUM_NONE; /* * Put the packet on the pending queue -- cgit v1.2.2 From 72e843bb09d4533208aa5573861a983c46914019 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 11:02:15 +0000 Subject: ipv6: ip6_fragment() should check CHECKSUM_PARTIAL Quoting Tore Anderson from : If the allfrag feature has been set on a host route (due to an ICMPv6 Packet Too Big received indicating a MTU of less than 1280), TCP SYN/ACK packets to that destination appears to get an incorrect TCP checksum. This in turn means they are thrown away as invalid. In the case of an IPv4 client behind a link with a MTU of less than 1260, accessing an IPv6 server through a stateless translator, this means that the client can only download a single large file from the server, because once it is in the server's routing cache with the allfrag feature set, new TCP connections can no longer be established. It appears ip6_fragment() doesn't handle CHECKSUM_PARTIAL properly. As network drivers are not prepared to fetch correct transport header, a safe fix is to call skb_checksum_help() before fragmenting packet. Reported-by: Tore Anderson Signed-off-by: Eric Dumazet Tested-by: Tore Anderson Signed-off-by: David S. Miller --- net/ipv6/ip6_output.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a254e4b5d968..3dc633f39372 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -788,6 +788,10 @@ slow_path_clean: } slow_path: + if ((skb->ip_summed == CHECKSUM_PARTIAL) && + skb_checksum_help(skb)) + goto fail; + left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ -- cgit v1.2.2 From 32e9072b92a1c556a303d8d0e0d64feb667e601d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 19 May 2012 00:51:04 -0400 Subject: ipx: Remove spurious NULL checking in ipx_ioctl(). We already unconditionally dereference 'sk' via lock_sock(sk) earlier in this function, and our caller (sock_do_ioctl()) makes takes similar liberties. Reported-by: Dan Carpenter Signed-off-by: David S. 
Miller --- net/ipx/af_ipx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 824d4a3338ae..dfd6faaf0ea7 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1901,9 +1901,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) (const unsigned short __user *)argp); break; case SIOCGSTAMP: - rc = -EINVAL; - if (sk) - rc = sock_get_timestamp(sk, argp); + rc = sock_get_timestamp(sk, argp); break; case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: -- cgit v1.2.2 From a50feda546ac03415707a9bbcac8d6b20714db21 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 18:57:34 +0000 Subject: ipv6: bool/const conversions phase2 Mostly bool conversions, some inline removals and const additions. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv6/addrlabel.c | 24 +++++++++--------- net/ipv6/ah6.c | 6 ++--- net/ipv6/anycast.c | 12 ++++----- net/ipv6/datagram.c | 4 +-- net/ipv6/exthdrs.c | 54 +++++++++++++++++++-------------------- net/ipv6/exthdrs_core.c | 2 +- net/ipv6/icmp.c | 14 +++++----- net/ipv6/ip6_flowlabel.c | 24 +++++++++--------- net/ipv6/ip6_input.c | 9 ++++--- net/ipv6/mcast.c | 66 ++++++++++++++++++++++++------------------------ net/ipv6/ndisc.c | 4 +-- net/ipv6/raw.c | 10 ++++---- net/ipv6/route.c | 8 +++--- 13 files changed, 119 insertions(+), 118 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 95aea16b8b6f..eb6a63632d3c 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -129,7 +129,7 @@ static void ip6addrlbl_free_rcu(struct rcu_head *h) ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu)); } -static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p) +static bool ip6addrlbl_hold(struct ip6addrlbl_entry *p) { return atomic_inc_not_zero(&p->refcnt); } @@ -141,20 +141,20 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p) } /* Find label */ -static int __ip6addrlbl_match(struct net *net, - struct ip6addrlbl_entry *p, - const struct in6_addr *addr, - int addrtype, int ifindex) +static bool __ip6addrlbl_match(struct net *net, + const struct ip6addrlbl_entry *p, + const struct in6_addr *addr, + int addrtype, int ifindex) { if (!net_eq(ip6addrlbl_net(p), net)) - return 0; + return false; if (p->ifindex && p->ifindex != ifindex) - return 0; + return false; if (p->addrtype && p->addrtype != addrtype) - return 0; + return false; if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen)) - return 0; - return 1; + return false; + return true; } static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, @@ -456,8 +456,8 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } -static inline void ip6addrlbl_putmsg(struct nlmsghdr *nlh, - int prefixlen, int ifindex, u32 lseq) +static void ip6addrlbl_putmsg(struct nlmsghdr *nlh, + int prefixlen, int ifindex, u32 lseq) { struct ifaddrlblmsg *ifal = nlmsg_data(nlh); ifal->ifal_family = AF_INET6; diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 5d32e7a93b26..f1a4a2c28ed3 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -113,7 +113,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, __alignof__(struct scatterlist)); } -static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) +static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) { u8 *opt = (u8 *)opthdr; int len = ipv6_optlen(opthdr); @@ -145,10 +145,10 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr 
*opthdr) len -= optlen; } if (len == 0) - return 1; + return true; bad: - return 0; + return false; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index db00d27ffb16..cdf02be5f191 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -342,7 +342,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr) * check if the interface has this anycast address * called with rcu_read_lock() */ -static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr) +static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr) { struct inet6_dev *idev; struct ifacaddr6 *aca; @@ -356,16 +356,16 @@ static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *add read_unlock_bh(&idev->lock); return aca != NULL; } - return 0; + return false; } /* * check if given interface (or any, if dev==0) has this anycast address */ -int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, - const struct in6_addr *addr) +bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, + const struct in6_addr *addr) { - int found = 0; + bool found = false; rcu_read_lock(); if (dev) @@ -373,7 +373,7 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, else for_each_netdev_rcu(net, dev) if (ipv6_chk_acast_dev(dev, addr)) { - found = 1; + found = true; break; } rcu_read_unlock(); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index b8b61ac88bc2..be2b67d631e5 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -34,9 +34,9 @@ #include #include -static inline int ipv6_mapped_addr_any(const struct in6_addr *a) +static bool ipv6_mapped_addr_any(const struct in6_addr *a) { - return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0)); + return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); } int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index a3cded6a1997..50ec95f9aeeb 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -96,14 +96,14 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv); /* * Parsing tlv encoded headers. * - * Parsing function "func" returns 1, if parsing succeed - * and 0, if it failed. + * Parsing function "func" returns true, if parsing succeed + * and false, if it failed. * It MUST NOT touch skb->h. 
*/ struct tlvtype_proc { int type; - int (*func)(struct sk_buff *skb, int offset); + bool (*func)(struct sk_buff *skb, int offset); }; /********************* @@ -112,11 +112,11 @@ struct tlvtype_proc { /* An unknown option is detected, decide what to do */ -static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) +static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) { switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ - return 1; + return true; case 1: /* drop packet */ break; @@ -129,18 +129,18 @@ static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) break; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); - return 0; + return false; } kfree_skb(skb); - return 0; + return false; } /* Parse tlv encoded option header (hop-by-hop or destination) */ -static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) +static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) { - struct tlvtype_proc *curr; + const struct tlvtype_proc *curr; const unsigned char *nh = skb_network_header(skb); int off = skb_network_header_len(skb); int len = (skb_transport_header(skb)[1] + 1) << 3; @@ -186,14 +186,14 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) /* type specific length/alignment checks will be performed in the func(). */ - if (curr->func(skb, off) == 0) - return 0; + if (curr->func(skb, off) == false) + return false; break; } } if (curr->type < 0) { if (ip6_tlvopt_unknown(skb, off) == 0) - return 0; + return false; } break; } @@ -201,10 +201,10 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) len -= optlen; } if (len == 0) - return 1; + return true; bad: kfree_skb(skb); - return 0; + return false; } /***************************** @@ -212,7 +212,7 @@ bad: *****************************/ #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) -static int ipv6_dest_hao(struct sk_buff *skb, int optoff) +static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) { struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); @@ -266,15 +266,15 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff) if (skb->tstamp.tv64 == 0) __net_timestamp(skb); - return 1; + return true; discard: kfree_skb(skb); - return 0; + return false; } #endif -static struct tlvtype_proc tlvprocdestopt_lst[] = { +static const struct tlvtype_proc tlvprocdestopt_lst[] = { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) { .type = IPV6_TLV_HAO, @@ -579,23 +579,23 @@ static inline struct net *ipv6_skb_net(struct sk_buff *skb) /* Router Alert as of RFC 2711 */ -static int ipv6_hop_ra(struct sk_buff *skb, int optoff) +static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] == 2) { IP6CB(skb)->ra = optoff; - return 1; + return true; } LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", nh[optoff + 1]); kfree_skb(skb); - return 0; + return false; } /* Jumbo payload */ -static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) +static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); struct net *net = ipv6_skb_net(skb); @@ -614,13 +614,13 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); - return 0; + return false; } if 
(ipv6_hdr(skb)->payload_len) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); - return 0; + return false; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { @@ -632,14 +632,14 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; - return 1; + return true; drop: kfree_skb(skb); - return 0; + return false; } -static struct tlvtype_proc tlvprochopopt_lst[] = { +static const struct tlvtype_proc tlvprochopopt_lst[] = { { .type = IPV6_TLV_ROUTERALERT, .func = ipv6_hop_ra, diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 7b1a884634d5..f73d59a14131 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -9,7 +9,7 @@ * find out if nexthdr is a well-known extension header or a protocol */ -int ipv6_ext_hdr(u8 nexthdr) +bool ipv6_ext_hdr(u8 nexthdr) { /* * find out if nexthdr is an extension header or a protocol diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 23c56ce9e86b..091a2971c7b7 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -131,7 +131,7 @@ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) * --ANK (980726) */ -static int is_ineligible(struct sk_buff *skb) +static bool is_ineligible(const struct sk_buff *skb) { int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; @@ -139,11 +139,11 @@ static int is_ineligible(struct sk_buff *skb) __be16 frag_off; if (len < 0) - return 1; + return true; ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off); if (ptr < 0) - return 0; + return false; if (nexthdr == IPPROTO_ICMPV6) { u8 _type, *tp; tp = skb_header_pointer(skb, @@ -151,9 +151,9 @@ static int is_ineligible(struct sk_buff *skb) sizeof(_type), &_type); if (tp == NULL || !(*tp & ICMPV6_INFOMSG_MASK)) - return 1; + return true; } - return 0; + return false; } /* @@ -208,14 +208,14 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, * highest-order two bits set to 10 */ -static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset) +static bool opt_unrec(struct sk_buff *skb, __u32 offset) { u8 _optval, *op; offset += skb_network_offset(skb); op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); if (op == NULL) - return 1; + return true; return (*op & 0xC0) == 0x80; } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index cb43df690210..9772fbd8a3f5 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -433,32 +433,32 @@ static int mem_check(struct sock *sk) return 0; } -static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2) +static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2) { if (h1 == h2) - return 0; + return false; if (h1 == NULL || h2 == NULL) - return 1; + return true; if (h1->hdrlen != h2->hdrlen) - return 1; + return true; return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1)); } -static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2) +static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2) { if (o1 == o2) - return 0; + return false; if (o1 == NULL || o2 == NULL) - return 1; + return true; if (o1->opt_nflen != o2->opt_nflen) - return 1; + return true; if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt)) - return 1; + return true; if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt)) - return 1; + return true; if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt)) - return 1; - return 0; + return true; + 
return false; } static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl, diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 1ca5d45a12e8..21a15dfe4a9e 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -170,7 +170,8 @@ static int ip6_input_finish(struct sk_buff *skb) { const struct inet6_protocol *ipprot; unsigned int nhoff; - int nexthdr, raw; + int nexthdr; + bool raw; u8 hash; struct inet6_dev *idev; struct net *net = dev_net(skb_dst(skb)->dev); @@ -251,7 +252,7 @@ int ip6_input(struct sk_buff *skb) int ip6_mc_input(struct sk_buff *skb) { const struct ipv6hdr *hdr; - int deliver; + bool deliver; IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, @@ -287,7 +288,7 @@ int ip6_mc_input(struct sk_buff *skb) * is for MLD (0x0000). */ if ((ptr[2] | ptr[3]) == 0) { - deliver = 0; + deliver = false; if (!ipv6_ext_hdr(nexthdr)) { /* BUG */ @@ -312,7 +313,7 @@ int ip6_mc_input(struct sk_buff *skb) case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: - deliver = 1; + deliver = true; break; } goto out; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 2a3a22cf7604..6d0f5dc8e3a6 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -606,13 +606,13 @@ done: return err; } -int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, - const struct in6_addr *src_addr) +bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, + const struct in6_addr *src_addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc; struct ip6_sf_socklist *psl; - int rv = 1; + bool rv = true; rcu_read_lock(); for_each_pmc_rcu(np, mc) { @@ -621,7 +621,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, } if (!mc) { rcu_read_unlock(); - return 1; + return true; } read_lock(&mc->sflock); psl = mc->sflist; @@ -635,9 +635,9 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, break; } if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) - rv = 0; + rv = false; if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) - rv = 0; + rv = false; } read_unlock(&mc->sflock); rcu_read_unlock(); @@ -931,15 +931,15 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) /* * identify MLD packets for MLD filter exceptions */ -int ipv6_is_mld(struct sk_buff *skb, int nexthdr) +bool ipv6_is_mld(struct sk_buff *skb, int nexthdr) { struct icmp6hdr *pic; if (nexthdr != IPPROTO_ICMPV6) - return 0; + return false; if (!pskb_may_pull(skb, sizeof(struct icmp6hdr))) - return 0; + return false; pic = icmp6_hdr(skb); @@ -948,22 +948,22 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr) case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: - return 1; + return true; default: break; } - return 0; + return false; } /* * check if the interface/address pair is valid */ -int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, - const struct in6_addr *src_addr) +bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, + const struct in6_addr *src_addr) { struct inet6_dev *idev; struct ifmcaddr6 *mc; - int rv = 0; + bool rv = false; rcu_read_lock(); idev = __in6_dev_get(dev); @@ -990,7 +990,7 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0; spin_unlock_bh(&mc->mca_lock); } else - rv = 1; /* don't filter unspecified source */ + rv = true; /* don't filter unspecified source */ } read_unlock_bh(&idev->lock); } @@ 
-1046,8 +1046,8 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime) } /* mark EXCLUDE-mode sources */ -static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, - const struct in6_addr *srcs) +static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, + const struct in6_addr *srcs) { struct ip6_sf_list *psf; int i, scount; @@ -1070,12 +1070,12 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, } pmc->mca_flags &= ~MAF_GSQUERY; if (scount == nsrcs) /* all sources excluded */ - return 0; - return 1; + return false; + return true; } -static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, - const struct in6_addr *srcs) +static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, + const struct in6_addr *srcs) { struct ip6_sf_list *psf; int i, scount; @@ -1099,10 +1099,10 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, } if (!scount) { pmc->mca_flags &= ~MAF_GSQUERY; - return 0; + return false; } pmc->mca_flags |= MAF_GSQUERY; - return 1; + return true; } /* called with rcu_read_lock() */ @@ -1276,17 +1276,17 @@ int igmp6_event_report(struct sk_buff *skb) return 0; } -static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, - int gdeleted, int sdeleted) +static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, + int gdeleted, int sdeleted) { switch (type) { case MLD2_MODE_IS_INCLUDE: case MLD2_MODE_IS_EXCLUDE: if (gdeleted || sdeleted) - return 0; + return false; if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) { if (pmc->mca_sfmode == MCAST_INCLUDE) - return 1; + return true; /* don't include if this source is excluded * in all filters */ @@ -1295,29 +1295,29 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, return pmc->mca_sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; } - return 0; + return false; case MLD2_CHANGE_TO_INCLUDE: if (gdeleted || sdeleted) - return 0; + return false; return psf->sf_count[MCAST_INCLUDE] != 0; case MLD2_CHANGE_TO_EXCLUDE: if (gdeleted || sdeleted) - return 0; + return false; if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 || psf->sf_count[MCAST_INCLUDE]) - return 0; + return false; return pmc->mca_sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; case MLD2_ALLOW_NEW_SOURCES: if (gdeleted || !psf->sf_crcount) - return 0; + return false; return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted; case MLD2_BLOCK_OLD_SOURCES: if (pmc->mca_sfmode == MCAST_INCLUDE) return gdeleted || (psf->sf_crcount && sdeleted); return psf->sf_crcount && !gdeleted && !sdeleted; } - return 0; + return false; } static int diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index c7a27ac906df..54f62d3b8dd6 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -348,7 +348,7 @@ static int ndisc_constructor(struct neighbour *neigh) struct net_device *dev = neigh->dev; struct inet6_dev *in6_dev; struct neigh_parms *parms; - int is_multicast = ipv6_addr_is_multicast(addr); + bool is_multicast = ipv6_addr_is_multicast(addr); in6_dev = in6_dev_get(dev); if (in6_dev == NULL) { @@ -725,7 +725,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) struct inet6_dev *idev = NULL; struct neighbour *neigh; int dad = ipv6_addr_any(saddr); - int inc; + bool inc; int is_router = -1; if (ipv6_addr_is_multicast(&msg->target)) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5bddea778840..93d69836fded 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, const struct in6_addr 
*rmt_addr, int dif) { struct hlist_node *node; - int is_multicast = ipv6_addr_is_multicast(loc_addr); + bool is_multicast = ipv6_addr_is_multicast(loc_addr); sk_for_each_from(sk, node) if (inet_sk(sk)->inet_num == num) { @@ -153,12 +153,12 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister); * * Caller owns SKB so we must make clones. */ -static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) +static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) { const struct in6_addr *saddr; const struct in6_addr *daddr; struct sock *sk; - int delivered = 0; + bool delivered = false; __u8 hash; struct net *net; @@ -179,7 +179,7 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) while (sk) { int filtered; - delivered = 1; + delivered = true; switch (nexthdr) { case IPPROTO_ICMPV6: filtered = icmpv6_filter(sk, skb); @@ -225,7 +225,7 @@ out: return delivered; } -int raw6_local_deliver(struct sk_buff *skb, int nexthdr) +bool raw6_local_deliver(struct sk_buff *skb, int nexthdr) { struct sock *raw_sk; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 90119a32b89d..999a982ad3fd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -333,22 +333,22 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, } } -static __inline__ int rt6_check_expired(const struct rt6_info *rt) +static bool rt6_check_expired(const struct rt6_info *rt) { struct rt6_info *ort = NULL; if (rt->rt6i_flags & RTF_EXPIRES) { if (time_after(jiffies, rt->dst.expires)) - return 1; + return true; } else if (rt->dst.from) { ort = (struct rt6_info *) rt->dst.from; return (ort->rt6i_flags & RTF_EXPIRES) && time_after(jiffies, ort->dst.expires); } - return 0; + return false; } -static inline int rt6_need_strict(const struct in6_addr *daddr) +static bool rt6_need_strict(const struct in6_addr *daddr) { return ipv6_addr_type(daddr) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); -- cgit v1.2.2 From 4adb9c4ac88d874105ac31161c9c55004a972f87 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 20:49:06 +0000 Subject: net: napi_frags_skb() is static No need to export napi_frags_skb() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index 33684b6e95e2..cd0981977f5c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3602,7 +3602,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, } EXPORT_SYMBOL(napi_frags_finish); -struct sk_buff *napi_frags_skb(struct napi_struct *napi) +static struct sk_buff *napi_frags_skb(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; struct ethhdr *eth; @@ -3637,7 +3637,6 @@ struct sk_buff *napi_frags_skb(struct napi_struct *napi) out: return skb; } -EXPORT_SYMBOL(napi_frags_skb); gro_result_t napi_gro_frags(struct napi_struct *napi) { -- cgit v1.2.2 From a34a101e1e6365638b02ea83a38d7a4cb228dc04 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 May 2012 21:51:44 +0000 Subject: ipv6: disable GSO on sockets hitting dst_allfrag If the allfrag feature has been set on a host route (due to an ICMPv6 Packet Too Big received indicating a MTU of less than 1280), we hit a very slow behavior in TCP stack, because all big packets are dropped and only a retransmit timer is able to push one MSS frame every 200 ms. One way to handle this is to disable GSO on the socket the first time a super packet is dropped. 
Adding a specific dst_allfrag() in the fast path is probably overkill since the dst_allfrag() case almost never happen. Result on netperf TCP_STREAM, one flow : Before : 60 kbit/sec After : 1.6 Gbit/sec Reported-by: Tore Anderson Signed-off-by: Eric Dumazet Tested-by: Tore Anderson Signed-off-by: David S. Miller --- net/ipv6/ip6_output.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3dc633f39372..d99fdc699625 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -643,7 +643,10 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. */ - if (!skb->local_df && skb->len > mtu) { + if (unlikely(!skb->local_df && skb->len > mtu)) { + if (skb->sk && dst_allfrag(skb_dst(skb))) + sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); + skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), -- cgit v1.2.2 From 9a52e97e24d6da744e6c3332b8dd478a4974983e Mon Sep 17 00:00:00 2001 From: Jeffrin Jose Date: Sat, 19 May 2012 01:45:21 +0000 Subject: net:ipv6:fixed a trailing white space issue. Fixed a trailing white space issue found by checkpatch.pl tool in net/ipv6/udp.c Signed-off-by: Jeffrin Jose Signed-off-by: David S. Miller --- net/ipv6/udp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c1d91a713e8e..847253c0e544 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -103,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); - unsigned int hash2_partial = + unsigned int hash2_partial = udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0); /* precompute partial secondary hash */ -- cgit v1.2.2 From 3dde25988292864a582b4a9389b1ae835aa3fe80 Mon Sep 17 00:00:00 2001 From: Jeffrin Jose Date: Sat, 19 May 2012 01:59:04 +0000 Subject: net:ipv6:fixed space issues relating to operators. Fixed space issues relating to operators found by checkpatch.pl tool in net/ipv6/udp.c Signed-off-by: Jeffrin Jose Signed-off-by: David S. Miller --- net/ipv6/udp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 847253c0e544..f05099fc5901 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -349,7 +349,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, bool slow; if (addr_len) - *addr_len=sizeof(struct sockaddr_in6); + *addr_len = sizeof(struct sockaddr_in6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); @@ -1379,7 +1379,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, * do checksum of UDP packets sent as multiple IP fragments. */ offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len- offset, 0); + csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; *(__sum16 *)(skb->data + offset) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; -- cgit v1.2.2 From bad43ca8325f493dcaa0896c2f036276af059c7e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 19 May 2012 03:02:02 +0000 Subject: net: introduce skb_try_coalesce() Move tcp_try_coalesce() protocol independent part to skb_try_coalesce(). 
skb_try_coalesce() can be used in IPv4 defrag and IPv6 reassembly, to build optimized skbs (less sk_buff, and possibly less 'headers') skb_try_coalesce() is zero copy, unless the copy can fit in destination header (its a rare case) kfree_skb_partial() is also moved to net/core/skbuff.c and exported, because IPv6 will need it in patch (ipv6: use skb coalescing in reassembly). Signed-off-by: Eric Dumazet Cc: Alexander Duyck Signed-off-by: David S. Miller --- net/core/skbuff.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/tcp_input.c | 67 ++-------------------------------------- 2 files changed, 89 insertions(+), 64 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7ceb673d622f..016694d62484 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3346,3 +3346,89 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb) skb->dev->name); } EXPORT_SYMBOL(__skb_warn_lro_forwarding); + +void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) +{ + if (head_stolen) + kmem_cache_free(skbuff_head_cache, skb); + else + __kfree_skb(skb); +} +EXPORT_SYMBOL(kfree_skb_partial); + +/** + * skb_try_coalesce - try to merge skb to prior one + * @to: prior buffer + * @from: buffer to add + * @fragstolen: pointer to boolean + * + */ +bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, + bool *fragstolen, int *delta_truesize) +{ + int i, delta, len = from->len; + + *fragstolen = false; + + if (skb_cloned(to)) + return false; + + if (len <= skb_tailroom(to)) { + BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); + *delta_truesize = 0; + return true; + } + + if (skb_has_frag_list(to) || skb_has_frag_list(from)) + return false; + + if (skb_headlen(from) != 0) { + struct page *page; + unsigned int offset; + + if (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) + return false; + + if (skb_head_is_locked(from)) + return false; + + delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); + + page = virt_to_head_page(from->head); + offset = from->data - (unsigned char *)page_address(page); + + skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, + page, offset, skb_headlen(from)); + *fragstolen = true; + } else { + if (skb_shinfo(to)->nr_frags + + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) + return false; + + delta = from->truesize - + SKB_TRUESIZE(skb_end_pointer(from) - from->head); + } + + WARN_ON_ONCE(delta < len); + + memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, + skb_shinfo(from)->frags, + skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); + skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; + + if (!skb_cloned(from)) + skb_shinfo(from)->nr_frags = 0; + + /* if the skb is cloned this does nothing since we set nr_frags to 0 */ + for (i = 0; i < skb_shinfo(from)->nr_frags; i++) + skb_frag_ref(from, i); + + to->truesize += delta; + to->len += len; + to->data_len += len; + + *delta_truesize = delta; + return true; +} +EXPORT_SYMBOL(skb_try_coalesce); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b961ef54b17d..cfa2aa128342 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4549,84 +4549,23 @@ static bool tcp_try_coalesce(struct sock *sk, struct sk_buff *from, bool *fragstolen) { - int i, delta, len = from->len; + int delta; *fragstolen = false; - if (tcp_hdr(from)->fin || skb_cloned(to)) + if (tcp_hdr(from)->fin) return false; - - if (len <= skb_tailroom(to)) { - BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); - goto merge; - } - - if 
(skb_has_frag_list(to) || skb_has_frag_list(from)) + if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; - if (skb_headlen(from) != 0) { - struct page *page; - unsigned int offset; - - if (skb_shinfo(to)->nr_frags + - skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) - return false; - - if (skb_head_is_locked(from)) - return false; - - delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); - - page = virt_to_head_page(from->head); - offset = from->data - (unsigned char *)page_address(page); - - skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, - page, offset, skb_headlen(from)); - *fragstolen = true; - } else { - if (skb_shinfo(to)->nr_frags + - skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) - return false; - - delta = from->truesize - - SKB_TRUESIZE(skb_end_pointer(from) - from->head); - } - - WARN_ON_ONCE(delta < len); - - memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, - skb_shinfo(from)->frags, - skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); - skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; - - if (!skb_cloned(from)) - skb_shinfo(from)->nr_frags = 0; - - /* if the skb is cloned this does nothing since we set nr_frags to 0 */ - for (i = 0; i < skb_shinfo(from)->nr_frags; i++) - skb_frag_ref(from, i); - - to->truesize += delta; atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); - to->len += len; - to->data_len += len; - -merge: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; return true; } -static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) -{ - if (head_stolen) - kmem_cache_free(skbuff_head_cache, skb); - else - __kfree_skb(skb); -} - static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); -- cgit v1.2.2 From 3cc4949269e01f39443d0fcfffb5bc6b47878d45 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 19 May 2012 03:02:20 +0000 Subject: ipv4: use skb coalescing in defragmentation ip_frag_reasm() can use skb_try_coalesce() to build optimized skb, reducing memory used by them (truesize), and reducing number of cache line misses and overhead for the consumer. Signed-off-by: Eric Dumazet Cc: Alexander Duyck Signed-off-by: David S. 
Miller --- net/ipv4/ip_fragment.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 695b27f44161..9dbd3dd6022d 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -545,6 +545,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, int len; int ihlen; int err; + int sum_truesize; u8 ecn; ipq_kill(qp); @@ -611,19 +612,32 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, atomic_add(clone->truesize, &qp->q.net->mem); } - skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { - head->data_len += fp->len; - head->len += fp->len; + sum_truesize = head->truesize; + for (fp = head->next; fp;) { + bool headstolen; + int delta; + struct sk_buff *next = fp->next; + + sum_truesize += fp->truesize; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + + if (skb_try_coalesce(head, fp, &headstolen, &delta)) { + kfree_skb_partial(fp, headstolen); + } else { + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = fp; + head->data_len += fp->len; + head->len += fp->len; + head->truesize += fp->truesize; + } + fp = next; } - atomic_sub(head->truesize, &qp->q.net->mem); + atomic_sub(sum_truesize, &qp->q.net->mem); head->next = NULL; head->dev = dev; -- cgit v1.2.2 From ec16439e173aaf56f62bd8e175e976fbd452497b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 19 May 2012 03:02:35 +0000 Subject: ipv6: use skb coalescing in reassembly ip6_frag_reasm() can use skb_try_coalesce() to build optimized skb, reducing memory used by them (truesize), and reducing number of cache line misses and overhead for the consumer. Signed-off-by: Eric Dumazet Cc: Alexander Duyck Signed-off-by: David S. 
Miller --- net/ipv6/reassembly.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 5d32dfa4d75a..4ff9af628e72 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -415,6 +415,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; + int sum_truesize; fq_kill(fq); @@ -484,20 +485,33 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); - skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { - head->data_len += fp->len; - head->len += fp->len; + sum_truesize = head->truesize; + for (fp = head->next; fp;) { + bool headstolen; + int delta; + struct sk_buff *next = fp->next; + + sum_truesize += fp->truesize; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + + if (skb_try_coalesce(head, fp, &headstolen, &delta)) { + kfree_skb_partial(fp, headstolen); + } else { + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = fp; + head->data_len += fp->len; + head->len += fp->len; + head->truesize += fp->truesize; + } + fp = next; } - atomic_sub(head->truesize, &fq->q.net->mem); + atomic_sub(sum_truesize, &fq->q.net->mem); head->next = NULL; head->dev = dev; -- cgit v1.2.2 From b37f4d7b011955c84cdbb8c370927d93701fb174 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sat, 19 May 2012 14:04:39 +0000 Subject: net/ipv4/ipconfig: neaten __setup placement The __setup macro should follow the corresponding setup handler. Signed-off-by: Eldad Zack Signed-off-by: David S. Miller --- net/ipv4/ipconfig.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 430015010e57..67e8a6b086ea 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1621,11 +1621,13 @@ static int __init ip_auto_config_setup(char *addrs) return 1; } +__setup("ip=", ip_auto_config_setup); static int __init nfsaddrs_config_setup(char *addrs) { return ip_auto_config_setup(addrs); } +__setup("nfsaddrs=", nfsaddrs_config_setup); static int __init vendor_class_identifier_setup(char *addrs) { @@ -1636,7 +1638,4 @@ static int __init vendor_class_identifier_setup(char *addrs) vendor_class_identifier); return 1; } - -__setup("ip=", ip_auto_config_setup); -__setup("nfsaddrs=", nfsaddrs_config_setup); __setup("dhcpclass=", vendor_class_identifier_setup); -- cgit v1.2.2 From 413c27d8697751f72d2d6cf289140a8e060a8032 Mon Sep 17 00:00:00 2001 From: Eldad Zack Date: Sat, 19 May 2012 14:13:18 +0000 Subject: net/ipv4: replace simple_strtoul with kstrtoul Replace simple_strtoul with kstrtoul in three similar occurrences, all setup handlers: * route.c: set_rhash_entries * tcp.c: set_thash_entries * udp.c: set_uhash_entries Also check if the conversion failed. Signed-off-by: Eldad Zack Signed-off-by: David S. 
From 413c27d8697751f72d2d6cf289140a8e060a8032 Mon Sep 17 00:00:00 2001
From: Eldad Zack
Date: Sat, 19 May 2012 14:13:18 +0000
Subject: net/ipv4: replace simple_strtoul with kstrtoul

Replace simple_strtoul with kstrtoul in three similar occurrences,
all setup handlers:

 * route.c: set_rhash_entries
 * tcp.c: set_thash_entries
 * udp.c: set_uhash_entries

Also check if the conversion failed.

Signed-off-by: Eldad Zack
Signed-off-by: David S. Miller
---
 net/ipv4/route.c | 8 +++++++-
 net/ipv4/tcp.c   | 8 +++++++-
 net/ipv4/udp.c   | 8 +++++++-
 3 files changed, 21 insertions(+), 3 deletions(-)

(limited to 'net')

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 76e5880cdb07..ffcb3b016843 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3408,9 +3408,15 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
 static __initdata unsigned long rhash_entries;
 static int __init set_rhash_entries(char *str)
 {
+        ssize_t ret;
+
         if (!str)
                 return 0;
-        rhash_entries = simple_strtoul(str, &str, 0);
+
+        ret = kstrtoul(str, 0, &rhash_entries);
+        if (ret)
+                return 0;
+
         return 1;
 }
 __setup("rhash_entries=", set_rhash_entries);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 63ddaee7209f..e13546ca9923 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3462,9 +3462,15 @@ extern struct tcp_congestion_ops tcp_reno;
 static __initdata unsigned long thash_entries;
 static int __init set_thash_entries(char *str)
 {
+        ssize_t ret;
+
         if (!str)
                 return 0;
-        thash_entries = simple_strtoul(str, &str, 0);
+
+        ret = kstrtoul(str, 0, &thash_entries);
+        if (ret)
+                return 0;
+
         return 1;
 }
 __setup("thash_entries=", set_thash_entries);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 279fd0846302..609397ee78fb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2173,9 +2173,15 @@ void udp4_proc_exit(void)
 static __initdata unsigned long uhash_entries;
 static int __init set_uhash_entries(char *str)
 {
+        ssize_t ret;
+
         if (!str)
                 return 0;
-        uhash_entries = simple_strtoul(str, &str, 0);
+
+        ret = kstrtoul(str, 0, &uhash_entries);
+        if (ret)
+                return 0;
+
         if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
                 uhash_entries = UDP_HTABLE_SIZE_MIN;
         return 1;
-- cgit v1.2.2
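The practical difference between the two helpers is worth spelling out: simple_strtoul() silently parses a numeric prefix and ignores trailing junk, while kstrtoul() returns 0 on success or a negative errno (-EINVAL for malformed input, -ERANGE for overflow), so the handler can refuse the value outright. The sketch below mirrors the pattern the patch applies to the three hash-table parameters; "myhash_entries=" and the variable behind it are invented for illustration.

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long myhash_entries __initdata;

/* Illustrative sketch of a kstrtoul-based boot parameter handler. */
static int __init set_myhash_entries(char *str)
{
        int ret;

        if (!str)
                return 0;

        ret = kstrtoul(str, 0, &myhash_entries);
        if (ret)
                return 0;       /* malformed or out-of-range value: reject */

        return 1;               /* parameter consumed */
}
__setup("myhash_entries=", set_myhash_entries);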
From 9b905fe68433378032b851c4d81a59187689fa52 Mon Sep 17 00:00:00 2001
From: Eldad Zack
Date: Sun, 20 May 2012 01:59:33 +0000
Subject: ipv6/exthdrs: strict Pad1 and PadN check

The following tightens the padding check from commit
c1412fce7eccae62b4de22494f6ab3ff8a90c0c6:

 * Take into account combinations of consecutive Pad1 and PadN.
 * Catch the corner case of when only padding is present in the
   header, when the extension header length is 0 (i.e., 8 bytes).
   In this case, the header would have exactly 6 bytes of padding:

   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   :  Next Header  : Hdr Ext Len=0 :                                :
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                                +
   :                   Padding (Pad1 or PadN)                       :
   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Signed-off-by: Eldad Zack
Signed-off-by: David S. Miller
---
 net/ipv6/exthdrs.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 50ec95f9aeeb..6447dc49429f 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -144,6 +144,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
         const unsigned char *nh = skb_network_header(skb);
         int off = skb_network_header_len(skb);
         int len = (skb_transport_header(skb)[1] + 1) << 3;
+        int padlen = 0;
 
         if (skb_transport_offset(skb) + len > skb_headlen(skb))
                 goto bad;
@@ -158,6 +159,9 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
                 switch (nh[off]) {
                 case IPV6_TLV_PAD1:
                         optlen = 1;
+                        padlen++;
+                        if (padlen > 7)
+                                goto bad;
                         break;
 
                 case IPV6_TLV_PADN:
@@ -166,7 +170,8 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
                          * of 8. 7 is therefore the highest valid value.
                          * See also RFC 4942, Section 2.1.9.5.
                          */
-                        if (optlen > 7)
+                        padlen += optlen;
+                        if (padlen > 7)
                                 goto bad;
                         /* RFC 4942 recommends receiving hosts to
                          * actively check PadN payload to contain
@@ -195,11 +200,19 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
                                 if (ip6_tlvopt_unknown(skb, off) == 0)
                                         return false;
                         }
+                        padlen = 0;
                         break;
                 }
                 off += optlen;
                 len -= optlen;
         }
+        /* This case will not be caught by above check since its padding
+         * length is smaller than 7:
+         * 1 byte NH + 1 byte Length + 6 bytes Padding
+         */
+        if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
+                goto bad;
+
         if (len == 0)
                 return true;
 bad:
-- cgit v1.2.2
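The rule this patch implements can be restated outside the kernel: across an options area, padding (Pad1 = one byte, PadN = 2 + N bytes) may never run for more than 7 consecutive bytes, and a minimal header whose option area is nothing but padding is bogus even though that run is only 6 bytes long. The function below is a self-contained sketch of that rule; padding_is_sane() and its calling convention are invented for illustration and are not the kernel's ip6_parse_tlv().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TLV_PAD1 0
#define TLV_PADN 1

/* opts/len cover only the TLV area, i.e. the header minus its 2 fixed bytes. */
static bool padding_is_sane(const uint8_t *opts, size_t len)
{
        size_t off = 0, padrun = 0;

        while (off < len) {
                size_t optlen;

                if (opts[off] == TLV_PAD1) {
                        optlen = 1;
                        padrun += 1;
                } else if (opts[off] == TLV_PADN) {
                        if (off + 1 >= len)
                                return false;
                        optlen = 2 + opts[off + 1];
                        padrun += optlen;
                } else {
                        if (off + 1 >= len)
                                return false;
                        optlen = 2 + opts[off + 1];
                        padrun = 0;     /* a real option breaks the run */
                }

                /* Alignment to 8 bytes never needs more than 7 padding bytes. */
                if (padrun > 7)
                        return false;

                off += optlen;
        }

        /* Minimal (8-byte) header whose 6 option bytes are all padding. */
        if (len == 6 && padrun == 6)
                return false;

        return off == len;
}

Feeding it the 6 option bytes of an all-padding header (for example a single PadN option with 4 bytes of payload) returns false, which corresponds to the new goto bad path added at the end of the parser loop above.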