From fd27817ce941c7134adefd2852f8a5f274315ff4 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:25:51 -0500 Subject: [PATCH] Fixed some endian issues with 802.11 header usage in ieee80211_rx.c Fixed some endian issues with 802.11 header usage in ieee80211_rx.c Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_rx.c | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index f7dcd854139e..74c4ed77cfd9 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -787,11 +787,11 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); - network->capability = beacon->capability; + network->capability = le16_to_cpu(beacon->capability); network->last_scanned = jiffies; - network->time_stamp[0] = beacon->time_stamp[0]; - network->time_stamp[1] = beacon->time_stamp[1]; - network->beacon_interval = beacon->beacon_interval; + network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); + network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); + network->beacon_interval = le16_to_cpu(beacon->beacon_interval); /* Where to pull this? beacon->listen_interval; */ network->listen_interval = 0x0A; network->rates_len = network->rates_ex_len = 0; @@ -1070,8 +1070,9 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(info_element->data, info_element->len), MAC_ARG(beacon->header.addr3), - WLAN_FC_GET_STYPE(beacon->header. - frame_ctl) == + WLAN_FC_GET_STYPE(le16_to_cpu + (beacon->header. + frame_ctl)) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); return; @@ -1122,8 +1123,9 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(network.ssid, network.ssid_len), MAC_ARG(network.bssid), - WLAN_FC_GET_STYPE(beacon->header. - frame_ctl) == + WLAN_FC_GET_STYPE(le16_to_cpu + (beacon->header. + frame_ctl)) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); #endif @@ -1134,8 +1136,9 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(target->ssid, target->ssid_len), MAC_ARG(target->bssid), - WLAN_FC_GET_STYPE(beacon->header. - frame_ctl) == + WLAN_FC_GET_STYPE(le16_to_cpu + (beacon->header. + frame_ctl)) == IEEE80211_STYPE_PROBE_RESP ? 
"PROBE RESPONSE" : "BEACON"); update_network(target, &network); @@ -1148,20 +1151,23 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, struct ieee80211_hdr *header, struct ieee80211_rx_stats *stats) { - switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { + switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) { case IEEE80211_STYPE_ASSOC_RESP: IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); break; case IEEE80211_STYPE_REASSOC_RESP: IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); break; case IEEE80211_STYPE_PROBE_RESP: IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); IEEE80211_DEBUG_SCAN("Probe response\n"); ieee80211_process_probe_response(ieee, (struct @@ -1171,7 +1177,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, case IEEE80211_STYPE_BEACON: IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); IEEE80211_DEBUG_SCAN("Beacon\n"); ieee80211_process_probe_response(ieee, (struct @@ -1181,10 +1188,12 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, default: IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); IEEE80211_WARNING("%s: Unknown management packet: %d\n", ieee->dev->name, - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); break; } } -- cgit v1.2.2 From b1b508e1b13529b3cc5b59c352f49b9b58a302b6 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:27:19 -0500 Subject: [PATCH] ieee80211 quality scaling algorithm extension handler Incorporated Bill Moss' quality scaling algorithm into default wireless extension handler. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_wx.c | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 94882f39b072..04f0897b0653 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -140,18 +140,38 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, start = iwe_stream_add_point(start, stop, &iwe, custom); /* Add quality statistics */ - /* TODO: Fix these values... 
*/ iwe.cmd = IWEVQUAL; - iwe.u.qual.qual = network->stats.signal; - iwe.u.qual.level = network->stats.rssi; - iwe.u.qual.noise = network->stats.noise; - iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK; - if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) - iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; - if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) + iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | + IW_QUAL_NOISE_UPDATED; + + if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) { + iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | + IW_QUAL_LEVEL_INVALID; + iwe.u.qual.qual = 0; + iwe.u.qual.level = 0; + } else { + iwe.u.qual.level = network->stats.rssi; + iwe.u.qual.qual = + (100 * + (ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi) - + (ieee->perfect_rssi - network->stats.rssi) * + (15 * (ieee->perfect_rssi - ieee->worst_rssi) + + 62 * (ieee->perfect_rssi - network->stats.rssi))) / + ((ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi)); + if (iwe.u.qual.qual > 100) + iwe.u.qual.qual = 100; + else if (iwe.u.qual.qual < 1) + iwe.u.qual.qual = 0; + } + + if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) { iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID; - if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) - iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID; + iwe.u.qual.noise = 0; + } else { + iwe.u.qual.noise = network->stats.noise; + } start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); -- cgit v1.2.2 From 74079fdce472a2b16d502fe39e06b135ef06c69b Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:35:21 -0500 Subject: [PATCH] ieee80211 Added wireless spy support Added wireless spy support to Rx code path. Signed-off-by: James Ketrenos NOTE: Looks like scripts/Lindent generated output different than the Lindented version already in-kernel, hence all the whitespace deltas... 
*sigh* Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt.c | 24 ++++++++++---------- net/ieee80211/ieee80211_crypt_ccmp.c | 26 +++++++++++----------- net/ieee80211/ieee80211_crypt_tkip.c | 26 +++++++++++----------- net/ieee80211/ieee80211_crypt_wep.c | 26 +++++++++++----------- net/ieee80211/ieee80211_rx.c | 43 ++++++++++++++++++++++-------------- 5 files changed, 78 insertions(+), 67 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index 61a9d92e455b..dc835f68edd3 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -191,18 +191,18 @@ static void ieee80211_crypt_null_deinit(void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_null = { - .name = "NULL", - .init = ieee80211_crypt_null_init, - .deinit = ieee80211_crypt_null_deinit, - .encrypt_mpdu = NULL, - .decrypt_mpdu = NULL, - .encrypt_msdu = NULL, - .decrypt_msdu = NULL, - .set_key = NULL, - .get_key = NULL, - .extra_prefix_len = 0, - .extra_postfix_len = 0, - .owner = THIS_MODULE, + .name = "NULL", + .init = ieee80211_crypt_null_init, + .deinit = ieee80211_crypt_null_deinit, + .encrypt_mpdu = NULL, + .decrypt_mpdu = NULL, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = NULL, + .get_key = NULL, + .extra_prefix_len = 0, + .extra_postfix_len = 0, + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_init(void) diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 8fc13f45971e..7b6290885e7d 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -426,19 +426,19 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { - .name = "CCMP", - .init = ieee80211_ccmp_init, - .deinit = ieee80211_ccmp_deinit, - .encrypt_mpdu = ieee80211_ccmp_encrypt, - .decrypt_mpdu = ieee80211_ccmp_decrypt, - .encrypt_msdu = NULL, - .decrypt_msdu = NULL, - .set_key = ieee80211_ccmp_set_key, - .get_key = ieee80211_ccmp_get_key, - .print_stats = ieee80211_ccmp_print_stats, - .extra_prefix_len = CCMP_HDR_LEN, - .extra_postfix_len = CCMP_MIC_LEN, - .owner = THIS_MODULE, + .name = "CCMP", + .init = ieee80211_ccmp_init, + .deinit = ieee80211_ccmp_deinit, + .encrypt_mpdu = ieee80211_ccmp_encrypt, + .decrypt_mpdu = ieee80211_ccmp_decrypt, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = ieee80211_ccmp_set_key, + .get_key = ieee80211_ccmp_get_key, + .print_stats = ieee80211_ccmp_print_stats, + .extra_prefix_len = CCMP_HDR_LEN, + .extra_postfix_len = CCMP_MIC_LEN, + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_ccmp_init(void) diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index d4f9164be1a1..dca380e57454 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -654,19 +654,19 @@ static char *ieee80211_tkip_print_stats(char *p, void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { - .name = "TKIP", - .init = ieee80211_tkip_init, - .deinit = ieee80211_tkip_deinit, - .encrypt_mpdu = ieee80211_tkip_encrypt, - .decrypt_mpdu = ieee80211_tkip_decrypt, - .encrypt_msdu = ieee80211_michael_mic_add, - .decrypt_msdu = ieee80211_michael_mic_verify, - .set_key = ieee80211_tkip_set_key, - .get_key = ieee80211_tkip_get_key, - .print_stats = ieee80211_tkip_print_stats, - .extra_prefix_len = 4 + 4, /* IV + ExtIV */ - .extra_postfix_len = 8 + 4, /* MIC + ICV */ - .owner = THIS_MODULE, 
+ .name = "TKIP", + .init = ieee80211_tkip_init, + .deinit = ieee80211_tkip_deinit, + .encrypt_mpdu = ieee80211_tkip_encrypt, + .decrypt_mpdu = ieee80211_tkip_decrypt, + .encrypt_msdu = ieee80211_michael_mic_add, + .decrypt_msdu = ieee80211_michael_mic_verify, + .set_key = ieee80211_tkip_set_key, + .get_key = ieee80211_tkip_get_key, + .print_stats = ieee80211_tkip_print_stats, + .extra_prefix_len = 4 + 4, /* IV + ExtIV */ + .extra_postfix_len = 8 + 4, /* MIC + ICV */ + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_tkip_init(void) diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index b4d2514a0902..ebe16155e792 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -229,19 +229,19 @@ static char *prism2_wep_print_stats(char *p, void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_wep = { - .name = "WEP", - .init = prism2_wep_init, - .deinit = prism2_wep_deinit, - .encrypt_mpdu = prism2_wep_encrypt, - .decrypt_mpdu = prism2_wep_decrypt, - .encrypt_msdu = NULL, - .decrypt_msdu = NULL, - .set_key = prism2_wep_set_key, - .get_key = prism2_wep_get_key, - .print_stats = prism2_wep_print_stats, - .extra_prefix_len = 4, /* IV */ - .extra_postfix_len = 4, /* ICV */ - .owner = THIS_MODULE, + .name = "WEP", + .init = prism2_wep_init, + .deinit = prism2_wep_deinit, + .encrypt_mpdu = prism2_wep_encrypt, + .decrypt_mpdu = prism2_wep_decrypt, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = prism2_wep_set_key, + .get_key = prism2_wep_get_key, + .print_stats = prism2_wep_print_stats, + .extra_prefix_len = 4, /* IV */ + .extra_postfix_len = 4, /* ICV */ + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_wep_init(void) diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 74c4ed77cfd9..6bbaa44a0d03 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -378,33 +378,47 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); -#ifdef NOT_YET -#if WIRELESS_EXT > 15 /* Put this code here so that we avoid duplicating it in all * Rx paths. 
- Jean II */ #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ /* If spy monitoring on */ - if (iface->spy_data.spy_number > 0) { + if (ieee->spy_data.spy_number > 0) { struct iw_quality wstats; - wstats.level = rx_stats->signal; - wstats.noise = rx_stats->noise; - wstats.updated = 6; /* No qual value */ + + wstats.updated = 0; + if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { + wstats.level = rx_stats->rssi; + wstats.updated |= IW_QUAL_LEVEL_UPDATED; + } else + wstats.updated |= IW_QUAL_LEVEL_INVALID; + + if (rx_stats->mask & IEEE80211_STATMASK_NOISE) { + wstats.noise = rx_stats->noise; + wstats.updated |= IW_QUAL_NOISE_UPDATED; + } else + wstats.updated |= IW_QUAL_NOISE_INVALID; + + if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) { + wstats.qual = rx_stats->signal; + wstats.updated |= IW_QUAL_QUAL_UPDATED; + } else + wstats.updated |= IW_QUAL_QUAL_INVALID; + /* Update spy records */ - wireless_spy_update(dev, hdr->addr2, &wstats); + wireless_spy_update(ieee->dev, hdr->addr2, &wstats); } #endif /* IW_WIRELESS_SPY */ -#endif /* WIRELESS_EXT > 15 */ + +#ifdef NOT_YET hostap_update_rx_stats(local->ap, hdr, rx_stats); #endif -#if WIRELESS_EXT > 15 if (ieee->iw_mode == IW_MODE_MONITOR) { ieee80211_monitor_rx(ieee, skb, rx_stats); stats->rx_packets++; stats->rx_bytes += skb->len; return 1; } -#endif if (ieee->host_decrypt) { int idx = 0; @@ -771,8 +785,7 @@ static inline int ieee80211_is_ofdm_rate(u8 rate) return 0; } -static inline int ieee80211_network_init(struct ieee80211_device *ieee, - struct ieee80211_probe_response +static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_network *network, struct ieee80211_rx_stats *stats) @@ -1028,11 +1041,9 @@ static inline void update_network(struct ieee80211_network *dst, } static inline void ieee80211_process_probe_response(struct ieee80211_device - *ieee, - struct + *ieee, struct ieee80211_probe_response - *beacon, - struct ieee80211_rx_stats + *beacon, struct ieee80211_rx_stats *stats) { struct ieee80211_network network; -- cgit v1.2.2 From 68e4e036b841d5fb23ae1ac51b1e40baf9d582bf Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:37:22 -0500 Subject: [PATCH] Changed 802.11 headers to use ieee80211_info_element[0] Changed 802.11 headers to use ieee80211_info_element as zero sized array so that sizeof calculations do not account for IE sizes. 
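The effect of the zero-sized-array change can be seen in isolation with the following standalone sketch. The struct layouts below are simplified stand-ins for the definitions in include/net/ieee80211.h, which are outside this diff: the point is only that sizeof() now covers the fixed fields of the frame, so "stats->len - sizeof(*beacon)" is exactly the byte count left over for the variable information elements.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel structures; field names are
 * illustrative, not the authoritative header definitions. */
struct info_element {
        uint8_t id;
        uint8_t len;
        uint8_t data[0];        /* zero-sized: adds nothing to sizeof */
} __attribute__((packed));

struct beacon_fixed {
        uint32_t time_stamp[2];
        uint16_t beacon_interval;
        uint16_t capability;
        struct info_element info_element[0];    /* variable-length IEs start here */
} __attribute__((packed));

int main(void)
{
        size_t frame_len = 64;  /* pretend length reported by the driver */

        /* sizeof() counts only the fixed-length part of the frame,
         * so the subtraction yields the bytes available for IEs. */
        printf("fixed part = %zu bytes, IE bytes left = %zu\n",
               sizeof(struct beacon_fixed),
               frame_len - sizeof(struct beacon_fixed));
        return 0;
}

The [0] form is the GCC zero-length-array extension the kernel used at the time; a C99 flexible array member ([]) behaves the same way for sizeof.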
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_rx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 6bbaa44a0d03..cae55e5ccb2e 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -822,7 +822,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->wpa_ie_len = 0; network->rsn_ie_len = 0; - info_element = &beacon->info_element; + info_element = beacon->info_element; left = stats->len - ((void *)info_element - (void *)beacon); while (left >= sizeof(struct ieee80211_info_element_hdr)) { if (sizeof(struct ieee80211_info_element_hdr) + @@ -1050,7 +1050,7 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; #ifdef CONFIG_IEEE80211_DEBUG - struct ieee80211_info_element *info_element = &beacon->info_element; + struct ieee80211_info_element *info_element = beacon->info_element; #endif unsigned long flags; -- cgit v1.2.2 From 7b1fa54020985fa4b154f9f5d2f04ba174c7f1a5 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:38:13 -0500 Subject: [PATCH] ieee80211 Removed ieee80211_info_element_hdr Removed ieee80211_info_element_hdr structure as ieee80211_info_element provides the same use. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_rx.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index cae55e5ccb2e..e0337c8fcde8 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -823,10 +823,10 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->rsn_ie_len = 0; info_element = beacon->info_element; - left = stats->len - ((void *)info_element - (void *)beacon); - while (left >= sizeof(struct ieee80211_info_element_hdr)) { - if (sizeof(struct ieee80211_info_element_hdr) + - info_element->len > left) { + left = stats->len - sizeof(*beacon); + while (left >= sizeof(struct ieee80211_info_element)) { + if (sizeof(struct ieee80211_info_element) + info_element->len > + left) { IEEE80211_DEBUG_SCAN ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n", info_element->len + @@ -967,7 +967,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i break; } - left -= sizeof(struct ieee80211_info_element_hdr) + + left -= sizeof(struct ieee80211_info_element) + info_element->len; info_element = (struct ieee80211_info_element *) &info_element->data[info_element->len]; -- cgit v1.2.2 From 18294d8727b825eb2f3f98d6b6ae4a999dff854a Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:40:29 -0500 Subject: [PATCH] ieee80211 Cleanup memcpy parameters. Cleanup memcpy parameters. 
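For context, both the before and after forms copy the right bytes; the plain C sketch below (independent of the kernel tree) shows why. For an array object, the array expression and its address evaluate to the same location but have different types (u8 * versus pointer-to-array), and since memcpy() takes void * either one compiles. The cleanup simply passes the decayed pointer, which is the type-clean, idiomatic form.

#include <assert.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
        unsigned char src[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char dest[ETH_ALEN];

        /* Same address, different type: u8 * vs u8 (*)[ETH_ALEN]. */
        assert((void *)dest == (void *)&dest);

        memcpy(dest, src, ETH_ALEN);    /* preferred: array decays to u8 * */
        memcpy(&dest, src, ETH_ALEN);   /* also copies correctly, but obscures the type */
        return 0;
}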
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_tx.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index c9aaff3fea1e..1a09448016ed 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -285,8 +285,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) } /* Save source and destination addresses */ - memcpy(&dest, skb->data, ETH_ALEN); - memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(dest, skb->data, ETH_ALEN); + memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); /* Advance the SKB to the start of the payload */ skb_pull(skb, sizeof(struct ethhdr)); @@ -304,15 +304,15 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) fc |= IEEE80211_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ - memcpy(&header.addr1, ieee->bssid, ETH_ALEN); - memcpy(&header.addr2, &src, ETH_ALEN); - memcpy(&header.addr3, &dest, ETH_ALEN); + memcpy(header.addr1, ieee->bssid, ETH_ALEN); + memcpy(header.addr2, src, ETH_ALEN); + memcpy(header.addr3, dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ - memcpy(&header.addr1, dest, ETH_ALEN); - memcpy(&header.addr2, src, ETH_ALEN); - memcpy(&header.addr3, ieee->bssid, ETH_ALEN); + memcpy(header.addr1, dest, ETH_ALEN); + memcpy(header.addr2, src, ETH_ALEN); + memcpy(header.addr3, ieee->bssid, ETH_ALEN); } header.frame_ctl = cpu_to_le16(fc); hdr_len = IEEE80211_3ADDR_LEN; -- cgit v1.2.2 From 262d8e467710a1c870717bc432caaf74cde3ce20 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Tue, 13 Sep 2005 17:42:53 -0500 Subject: [PATCH] ieee80211 Switched to sscanf in store_debug_level Switched to sscanf as per friendly comment in store_debug_level. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_module.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 03a47343ddc7..4b43ae1235f9 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -195,34 +195,20 @@ static int show_debug_level(char *page, char **start, off_t offset, static int store_debug_level(struct file *file, const char __user * buffer, unsigned long count, void *data) { - char buf[] = "0x00000000"; - char *p = (char *)buf; + char buf[] = "0x00000000\n"; + unsigned long len = min((unsigned long)sizeof(buf) - 1, count); unsigned long val; - if (count > sizeof(buf) - 1) - count = sizeof(buf) - 1; - - if (copy_from_user(buf, buffer, count)) + if (copy_from_user(buf, buffer, len)) return count; - buf[count] = 0; - /* - * what a FPOS... What, sscanf(buf, "%i", &val) would be too - * scary? 
- */ - if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { - p++; - if (p[0] == 'x' || p[0] == 'X') - p++; - val = simple_strtoul(p, &p, 16); - } else - val = simple_strtoul(p, &p, 10); - if (p == buf) + buf[len] = 0; + if (sscanf(buf, "%li", &val) != 1) printk(KERN_INFO DRV_NAME ": %s is not in hex or decimal form.\n", buf); else ieee80211_debug_level = val; - return strlen(buf); + return strnlen(buf, len); } static int __init ieee80211_init(void) -- cgit v1.2.2 From 20d64713ae71c0b0aa06084acbef2244021baaca Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:53:43 -0500 Subject: [PATCH] ieee80211: Fixed a kernel oops on module unload tree 367069f24fc38b4aa910e86ff40094d2078d8aa7 parent a33a1982012e9070736e3717231714dc9892303b author James Ketrenos 1124430800 -0500 committer James Ketrenos 1127310571 -0500 Fixed a kernel oops on module unload by adding spin lock protection to ieee80211's crypt handlers (thanks to Zhu Yi) Modified scan result logic to report WPA and RSN IEs if set (vs.being based on wpa_enabled) Added ieee80211_device as the first parameter to the crypt init() method. TKIP modified to use that structure for determining whether to countermeasures are active. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt.c | 9 +++++---- net/ieee80211/ieee80211_crypt_ccmp.c | 2 +- net/ieee80211/ieee80211_crypt_tkip.c | 31 ++++++++++++++++++++++++++++--- net/ieee80211/ieee80211_crypt_wep.c | 2 +- net/ieee80211/ieee80211_rx.c | 11 ----------- net/ieee80211/ieee80211_tx.c | 14 -------------- net/ieee80211/ieee80211_wx.c | 6 +++--- 7 files changed, 38 insertions(+), 37 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index dc835f68edd3..0c366299db0f 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -41,7 +41,9 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) { struct list_head *ptr, *n; struct ieee80211_crypt_data *entry; + unsigned long flags; + spin_lock_irqsave(&ieee->lock, flags); for (ptr = ieee->crypt_deinit_list.next, n = ptr->next; ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) { entry = list_entry(ptr, struct ieee80211_crypt_data, list); @@ -57,14 +59,13 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) } kfree(entry); } + spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_crypt_deinit_handler(unsigned long data) { struct ieee80211_device *ieee = (struct ieee80211_device *)data; - unsigned long flags; - spin_lock_irqsave(&ieee->lock, flags); ieee80211_crypt_deinit_entries(ieee, 0); if (!list_empty(&ieee->crypt_deinit_list)) { printk(KERN_DEBUG "%s: entries remaining in delayed crypt " @@ -72,7 +73,6 @@ void ieee80211_crypt_deinit_handler(unsigned long data) ieee->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&ieee->crypt_deinit_timer); } - spin_unlock_irqrestore(&ieee->lock, flags); } @@ -182,7 +182,8 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name) return NULL; } -static void *ieee80211_crypt_null_init(int keyidx) +static void *ieee80211_crypt_null_init(struct ieee80211_device *ieee, + int keyidx) { return (void *)1; } diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 7b6290885e7d..1e6644b133dc 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -74,7 +74,7 @@ static void ieee80211_ccmp_aes_encrypt(struct 
crypto_tfm *tfm, crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN); } -static void *ieee80211_ccmp_init(int key_idx) +static void *ieee80211_ccmp_init(struct ieee80211_device *ieee, int key_idx) { struct ieee80211_ccmp_data *priv; diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index dca380e57454..0c495f07e718 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -59,9 +59,11 @@ struct ieee80211_tkip_data { /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; + + struct ieee80211_device *ieee; }; -static void *ieee80211_tkip_init(int key_idx) +static void *ieee80211_tkip_init(struct ieee80211_device *ieee, int key_idx) { struct ieee80211_tkip_data *priv; @@ -69,6 +71,9 @@ static void *ieee80211_tkip_init(int key_idx) if (priv == NULL) goto fail; memset(priv, 0, sizeof(*priv)); + + priv->ieee = ieee; + priv->key_idx = key_idx; priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); @@ -264,11 +269,21 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) u32 crc; struct scatterlist sg; + hdr = (struct ieee80211_hdr *)skb->data; + + if (tkey->ieee->tkip_countermeasures) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " + "TX packet to " MAC_FMT "\n", + tkey->ieee->dev->name, MAC_ARG(hdr->addr1)); + } + return -1; + } + if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; - hdr = (struct ieee80211_hdr *)skb->data; if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); @@ -325,10 +340,20 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) struct scatterlist sg; int plen; + hdr = (struct ieee80211_hdr *)skb->data; + + if (tkey->ieee->tkip_countermeasures) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " + "received packet from " MAC_FMT "\n", + tkey->ieee->dev->name, MAC_ARG(hdr->addr2)); + } + return -1; + } + if (skb->len < hdr_len + 8 + 4) return -1; - hdr = (struct ieee80211_hdr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index ebe16155e792..63e783fa5173 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -37,7 +37,7 @@ struct prism2_wep_data { struct crypto_tfm *tfm; }; -static void *prism2_wep_init(int keyidx) +static void *prism2_wep_init(struct ieee80211_device *ieee, int keyidx) { struct prism2_wep_data *priv; diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index e0337c8fcde8..9a125d45289a 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -280,17 +280,6 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, hdr = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); -#ifdef CONFIG_IEEE80211_CRYPT_TKIP - if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "received packet from " MAC_FMT "\n", - ieee->dev->name, MAC_ARG(hdr->addr2)); - } - return -1; - } -#endif - atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 1a09448016ed..435ef5a73d75 100644 
--- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -157,20 +157,6 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx]; int res; -#ifdef CONFIG_IEEE80211_CRYPT_TKIP - struct ieee80211_hdr *header; - - if (ieee->tkip_countermeasures && - crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { - header = (struct ieee80211_hdr *)frag->data; - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "TX packet to " MAC_FMT "\n", - ieee->dev->name, MAC_ARG(header->addr1)); - } - return -1; - } -#endif /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 04f0897b0653..fc4e1377aba7 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -182,7 +182,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, if (iwe.u.data.length) start = iwe_stream_add_point(start, stop, &iwe, custom); - if (ieee->wpa_enabled && network->wpa_ie_len) { + if (network->wpa_ie_len) { char buf[MAX_WPA_IE_LEN * 2 + 30]; u8 *p = buf; @@ -197,7 +197,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, start = iwe_stream_add_point(start, stop, &iwe, buf); } - if (ieee->wpa_enabled && network->rsn_ie_len) { + if (network->rsn_ie_len) { char buf[MAX_WPA_IE_LEN * 2 + 30]; u8 *p = buf; @@ -351,7 +351,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, } if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) - new_crypt->priv = new_crypt->ops->init(key); + new_crypt->priv = new_crypt->ops->init(ieee, key); if (!new_crypt->ops || !new_crypt->priv) { kfree(new_crypt); -- cgit v1.2.2 From f1bf6638af9e9bbbb6fb0b769054fb7db1ae652f Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:53:54 -0500 Subject: [PATCH] ieee80211: Hardware crypto and fragmentation offload support tree 5322d496af90d03ffbec27292dc1a6268a746ede parent 6c9364386ccb786e4a84427ab3ad712f0b7b8904 author James Ketrenos 1124432367 -0500 committer James Ketrenos 1127311810 -0500 Hardware crypto and fragmentation offload support added (Zhu Yi) Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_tx.c | 27 ++++++++++++++++++------ net/ieee80211/ieee80211_wx.c | 50 +++++++++++++++++++++++++------------------- 2 files changed, 50 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 435ef5a73d75..785e76f7e4e9 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -231,7 +231,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; unsigned long flags; struct net_device_stats *stats = &ieee->stats; - int ether_type, encrypt; + int ether_type, encrypt, host_encrypt; int bytes, fc, hdr_len; struct sk_buff *skb_frag; struct ieee80211_hdr header = { /* Ensure zero initialized */ @@ -262,7 +262,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && - ieee->host_encrypt && crypt && crypt->ops; + ieee->sec.encrypt; + host_encrypt = ieee->host_encrypt && encrypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { @@ -280,7 +281,7 @@ int ieee80211_xmit(struct 
sk_buff *skb, struct net_device *dev) /* Determine total amount of storage required for TXB packets */ bytes = skb->len + SNAP_SIZE + sizeof(u16); - if (encrypt) + if (host_encrypt) fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_PROTECTED; else @@ -320,7 +321,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) bytes_per_frag -= IEEE80211_FCS_LEN; /* Each fragment may need to have room for encryptiong pre/postfix */ - if (encrypt) + if (host_encrypt) bytes_per_frag -= crypt->ops->extra_prefix_len + crypt->ops->extra_postfix_len; @@ -348,7 +349,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nr_frags; i++) { skb_frag = txb->fragments[i]; - if (encrypt) + if (host_encrypt) skb_reserve(skb_frag, crypt->ops->extra_prefix_len); frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len); @@ -380,8 +381,22 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) /* Encryption routine will move the header forward in order * to insert the IV between the header and the payload */ - if (encrypt) + if (host_encrypt) ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); + + /* ipw2200/2915 Hardware encryption doesn't support TKIP MIC */ + if (!ieee->host_encrypt && encrypt && + (ieee->sec.level == SEC_LEVEL_2) && + crypt && crypt->ops && crypt->ops->encrypt_msdu) { + int res = 0; + res = crypt->ops->encrypt_msdu(skb_frag, hdr_len, + crypt->priv); + if (res < 0) { + IEEE80211_ERROR("TKIP MIC encryption failed\n"); + goto failed; + } + } + if (ieee->config & (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) skb_put(skb_frag, 4); diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index fc4e1377aba7..f88c8116a23c 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -278,6 +278,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, }; int i, key, key_provided, len; struct ieee80211_crypt_data **crypt; + int host_crypto = ieee->host_encrypt || ieee->host_decrypt; IEEE80211_DEBUG_WX("SET_ENCODE\n"); @@ -318,6 +319,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, if (i == WEP_KEYS) { sec.enabled = 0; + sec.encrypt = 0; sec.level = SEC_LEVEL_0; sec.flags |= SEC_ENABLED | SEC_LEVEL; } @@ -326,6 +328,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, } sec.enabled = 1; + sec.encrypt = 1; sec.flags |= SEC_ENABLED; if (*crypt != NULL && (*crypt)->ops != NULL && @@ -335,7 +338,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, ieee80211_crypt_delayed_deinit(ieee, crypt); } - if (*crypt == NULL) { + if (*crypt == NULL && host_crypto) { struct ieee80211_crypt_data *new_crypt; /* take WEP into use */ @@ -375,31 +378,34 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, key, escape_essid(sec.keys[key], len), erq->length, len); sec.key_sizes[key] = len; - (*crypt)->ops->set_key(sec.keys[key], len, NULL, - (*crypt)->priv); + if (*crypt) + (*crypt)->ops->set_key(sec.keys[key], len, NULL, + (*crypt)->priv); sec.flags |= (1 << key); /* This ensures a key will be activated if no key is * explicitely set */ if (key == sec.active_key) sec.flags |= SEC_ACTIVE_KEY; + } else { - len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, - NULL, (*crypt)->priv); - if (len == 0) { - /* Set a default key of all 0 */ - IEEE80211_DEBUG_WX("Setting key %d to all zero.\n", - key); - memset(sec.keys[key], 0, 13); - (*crypt)->ops->set_key(sec.keys[key], 13, NULL, - (*crypt)->priv); - sec.key_sizes[key] = 13; - sec.flags |= (1 << key); + if 
(host_crypto) { + len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, + NULL, (*crypt)->priv); + if (len == 0) { + /* Set a default key of all 0 */ + IEEE80211_DEBUG_WX("Setting key %d to all " + "zero.\n", key); + memset(sec.keys[key], 0, 13); + (*crypt)->ops->set_key(sec.keys[key], 13, NULL, + (*crypt)->priv); + sec.key_sizes[key] = 13; + sec.flags |= (1 << key); + } } - /* No key data - just set the default TX key index */ if (key_provided) { - IEEE80211_DEBUG_WX - ("Setting key %d to default Tx key.\n", key); + IEEE80211_DEBUG_WX("Setting key %d to default Tx " + "key.\n", key); ieee->tx_keyidx = key; sec.active_key = key; sec.flags |= SEC_ACTIVE_KEY; @@ -442,6 +448,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, struct iw_point *erq = &(wrqu->encoding); int len, key; struct ieee80211_crypt_data *crypt; + struct ieee80211_security *sec = &ieee->sec; IEEE80211_DEBUG_WX("GET_ENCODE\n"); @@ -456,13 +463,13 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, crypt = ieee->crypt[key]; erq->flags = key + 1; - if (crypt == NULL || crypt->ops == NULL) { + if (!sec->enabled) { erq->length = 0; erq->flags |= IW_ENCODE_DISABLED; return 0; } - if (strcmp(crypt->ops->name, "WEP") != 0) { + if (sec->level != SEC_LEVEL_1) { /* only WEP is supported with wireless extensions, so just * report that encryption is used */ erq->length = 0; @@ -470,9 +477,10 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, return 0; } - len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv); - erq->length = (len >= 0 ? len : 0); + len = sec->key_sizes[key]; + memcpy(keybuf, sec->keys[key], len); + erq->length = (len >= 0 ? len : 0); erq->flags |= IW_ENCODE_ENABLED; if (ieee->open_wep) -- cgit v1.2.2 From 42e349fd10d471d9ae6955a7f12523291c0193e6 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:07 -0500 Subject: [PATCH] ieee80211: Fix time calculation, switching to use jiffies_to_msecs tree b9cdd7058b787807655ea6f125e2adbf8d26c863 parent 85d9b2bddfcf3ed2eb4d061947c25c6a832891ab author Zhu Yi 1124435212 -0500 committer James Ketrenos 1127312152 -0500 Fix time calculation, switching to use jiffies_to_msecs. 
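A userspace sketch of the unit problem being fixed follows; the HZ value and the helper are illustrative mocks, not the kernel's implementation. Dividing a jiffy delta by HZ/100 gives a value in roughly 10 ms units rather than milliseconds, and divides by zero for HZ below 100, whereas jiffies_to_msecs() is correct for any HZ.

#include <stdio.h>

#define HZ 250                          /* one plausible 2.6-era tick rate; 100 and 1000 also occur */

/* Mock of the kernel helper, valid only when HZ divides 1000 evenly. */
static unsigned int mock_jiffies_to_msecs(unsigned long j)
{
        return j * (1000 / HZ);
}

int main(void)
{
        unsigned long elapsed = 5 * HZ; /* five seconds worth of ticks */

        /* The open-coded form prints a number that is not milliseconds. */
        printf("open-coded: %lu \"ms\" (wrong units)\n", elapsed / (HZ / 100));
        printf("converted : %u ms\n", mock_jiffies_to_msecs(elapsed));
        return 0;
}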
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_wx.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index f88c8116a23c..df64bf3716b7 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -217,8 +218,8 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, iwe.cmd = IWEVCUSTOM; p = custom; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), - " Last beacon: %lums ago", - (jiffies - network->last_scanned) / (HZ / 100)); + " Last beacon: %dms ago", + jiffies_to_msecs(jiffies - network->last_scanned)); iwe.u.data.length = p - custom; if (iwe.u.data.length) start = iwe_stream_add_point(start, stop, &iwe, custom); @@ -248,13 +249,13 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, ev = ipw2100_translate_scan(ieee, ev, stop, network); else IEEE80211_DEBUG_SCAN("Not showing network '%s (" - MAC_FMT ")' due to age (%lums).\n", + MAC_FMT ")' due to age (%dms).\n", escape_essid(network->ssid, network->ssid_len), MAC_ARG(network->bssid), - (jiffies - - network->last_scanned) / (HZ / - 100)); + jiffies_to_msecs(jiffies - + network-> + last_scanned)); } spin_unlock_irqrestore(&ieee->lock, flags); -- cgit v1.2.2 From 0ad0c3c64484b1458b51167bd3e614d8d9d070f8 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:15 -0500 Subject: [PATCH] ieee80211: Fix kernel Oops when module unload tree b69e983266840983183a00f5ac02c66d5270ca47 parent cdd6372949b76694622ed74fe36e1dd17a92eb71 author Zhu Yi 1124435425 -0500 committer James Ketrenos 1127312421 -0500 Fix kernel Oops when module unload. Export a new function ieee80211_crypt_quiescing from ieee80211. Device drivers call it to make the host crypto stack enter the quiescence state, which means "process existing requests, but don't accept new ones". This is usually called during a driver's host crypto data structure free (module unload) path. 
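The pattern being added is a quiescence flag that is checked and set under the same lock protecting the deferred-free list, so that once teardown begins nothing new can be queued behind it. A userspace analogue, with illustrative names rather than the kernel API, might look like this:

#include <pthread.h>
#include <stdio.h>

struct deferred_list {
        pthread_mutex_t lock;
        int quiesced;           /* once set, enqueue() refuses new work */
        int pending;            /* stand-in for the list of queued entries */
};

static int enqueue(struct deferred_list *dl)
{
        int queued = 0;

        pthread_mutex_lock(&dl->lock);
        if (!dl->quiesced) {    /* mirrors the !ieee->crypt_quiesced check */
                dl->pending++;
                queued = 1;
        }
        pthread_mutex_unlock(&dl->lock);
        return queued;
}

static void quiesce(struct deferred_list *dl)
{
        pthread_mutex_lock(&dl->lock);
        dl->quiesced = 1;       /* process existing requests, accept no new ones */
        pthread_mutex_unlock(&dl->lock);
}

int main(void)
{
        struct deferred_list dl = { .lock = PTHREAD_MUTEX_INITIALIZER };

        enqueue(&dl);           /* accepted */
        quiesce(&dl);           /* unload path starts */
        if (!enqueue(&dl))      /* refused: nothing can race the final free */
                printf("pending=%d, late request rejected\n", dl.pending);
        return 0;
}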
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt.c | 33 +++++++++++++++++++++++++++------ net/ieee80211/ieee80211_module.c | 2 ++ 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index 0c366299db0f..60d3166facce 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -44,6 +44,10 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) unsigned long flags; spin_lock_irqsave(&ieee->lock, flags); + + if (list_empty(&ieee->crypt_deinit_list)) + goto unlock; + for (ptr = ieee->crypt_deinit_list.next, n = ptr->next; ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) { entry = list_entry(ptr, struct ieee80211_crypt_data, list); @@ -59,21 +63,35 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) } kfree(entry); } + unlock: + spin_unlock_irqrestore(&ieee->lock, flags); +} + +/* After this, crypt_deinit_list won't accept new members */ +void ieee80211_crypt_quiescing(struct ieee80211_device *ieee) +{ + unsigned long flags; + + spin_lock_irqsave(&ieee->lock, flags); + ieee->crypt_quiesced = 1; spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_crypt_deinit_handler(unsigned long data) { struct ieee80211_device *ieee = (struct ieee80211_device *)data; + unsigned long flags; ieee80211_crypt_deinit_entries(ieee, 0); - if (!list_empty(&ieee->crypt_deinit_list)) { + + spin_lock_irqsave(&ieee->lock, flags); + if (!list_empty(&ieee->crypt_deinit_list) && !ieee->crypt_quiesced) { printk(KERN_DEBUG "%s: entries remaining in delayed crypt " "deletion list\n", ieee->dev->name); ieee->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&ieee->crypt_deinit_timer); } - + spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, @@ -93,10 +111,12 @@ void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, * locking. 
*/ spin_lock_irqsave(&ieee->lock, flags); - list_add(&tmp->list, &ieee->crypt_deinit_list); - if (!timer_pending(&ieee->crypt_deinit_timer)) { - ieee->crypt_deinit_timer.expires = jiffies + HZ; - add_timer(&ieee->crypt_deinit_timer); + if (!ieee->crypt_quiesced) { + list_add(&tmp->list, &ieee->crypt_deinit_list); + if (!timer_pending(&ieee->crypt_deinit_timer)) { + ieee->crypt_deinit_timer.expires = jiffies + HZ; + add_timer(&ieee->crypt_deinit_timer); + } } spin_unlock_irqrestore(&ieee->lock, flags); } @@ -250,6 +270,7 @@ static void __exit ieee80211_crypto_deinit(void) EXPORT_SYMBOL(ieee80211_crypt_deinit_entries); EXPORT_SYMBOL(ieee80211_crypt_deinit_handler); EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit); +EXPORT_SYMBOL(ieee80211_crypt_quiescing); EXPORT_SYMBOL(ieee80211_register_crypto_ops); EXPORT_SYMBOL(ieee80211_unregister_crypto_ops); diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 4b43ae1235f9..82a4fd713b28 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -138,6 +138,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv) init_timer(&ieee->crypt_deinit_timer); ieee->crypt_deinit_timer.data = (unsigned long)ieee; ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler; + ieee->crypt_quiesced = 0; spin_lock_init(&ieee->lock); @@ -161,6 +162,7 @@ void free_ieee80211(struct net_device *dev) int i; + ieee80211_crypt_quiescing(ieee); del_timer_sync(&ieee->crypt_deinit_timer); ieee80211_crypt_deinit_entries(ieee, 1); -- cgit v1.2.2 From 259bf1fd8a7cc0f6b47fd8a9ac5e2dfc33182a3d Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:22 -0500 Subject: [PATCH] ieee80211: Allow drivers to fix an issue when using wpa_supplicant with WEP tree 898fedef6ca1b5b58b8bdf7e6d8894a78bbde4cd parent 8720fff53090ae428d2159332b6f4b2749dea10f author Zhu Yi 1124435746 -0500 committer James Ketrenos 1127312509 -0500 Allow drivers to fix an issue when using wpa_supplicant with WEP. The problem is introduced by the hwcrypto patch. We changed indicator of the encryption request from the upper layer (i.e. wpa_supplicant): In the original host based crypto the driver could use: crypt && crypt->ops. In the new hardware based crypto, the driver should use the flags specified in ieee->sec.encrypt. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_wx.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index df64bf3716b7..49afea7989d0 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -322,7 +322,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, sec.enabled = 0; sec.encrypt = 0; sec.level = SEC_LEVEL_0; - sec.flags |= SEC_ENABLED | SEC_LEVEL; + sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT; } goto done; @@ -330,7 +330,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, sec.enabled = 1; sec.encrypt = 1; - sec.flags |= SEC_ENABLED; + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; if (*crypt != NULL && (*crypt)->ops != NULL && strcmp((*crypt)->ops->name, "WEP") != 0) { @@ -412,8 +412,6 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, sec.flags |= SEC_ACTIVE_KEY; } } - - done: ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); sec.auth_mode = ieee->open_wep ? 
WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; sec.flags |= SEC_AUTH_MODE; @@ -425,6 +423,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ + done: if (ieee->set_security) ieee->set_security(dev, &sec); -- cgit v1.2.2 From e0d369d1d969fc9e4fd08a20f6dad04d369aceea Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:30 -0500 Subject: [PATCH] ieee82011: Added WE-18 support to default wireless extension handler tree 1536f39c18756698d033da72c49300a561be1289 parent 07172d7c9f10ee3d05d6f6489ba6d6ee2628da06 author Liu Hong 1124436225 -0500 committer James Ketrenos 1127312664 -0500 Added WE-18 support to default wireless extension handler in ieee80211 subsystem. Updated patch since last send to account for ieee80211_device parameter being added to the crypto init method. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_wx.c | 238 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 230 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 49afea7989d0..db66217699d5 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -422,6 +422,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, * TODO: When WPA is added this is one place that needs to change */ sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ + sec.encode_alg[key] = SEC_ALG_WEP; done: if (ieee->set_security) @@ -469,14 +470,6 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, return 0; } - if (sec->level != SEC_LEVEL_1) { - /* only WEP is supported with wireless extensions, so just - * report that encryption is used */ - erq->length = 0; - erq->flags |= IW_ENCODE_ENABLED; - return 0; - } - len = sec->key_sizes[key]; memcpy(keybuf, sec->keys[key], len); @@ -491,6 +484,235 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, return 0; } +#if WIRELESS_EXT > 17 +int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct net_device *dev = ieee->dev; + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int i, idx, ret = 0; + const char *alg, *module; + struct ieee80211_crypto_ops *ops; + struct ieee80211_crypt_data **crypt; + + struct ieee80211_security sec = { + .flags = 0, + }; + + idx = encoding->flags & IW_ENCODE_INDEX; + if (idx) { + if (idx < 1 || idx > WEP_KEYS) + return -EINVAL; + idx--; + } else + idx = ieee->tx_keyidx; + + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) + crypt = &ieee->crypt[idx]; + else { + if (idx != 0) + return -EINVAL; + if (ieee->iw_mode == IW_MODE_INFRA) + crypt = &ieee->crypt[idx]; + else + return -EINVAL; + } + + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; + if ((encoding->flags & IW_ENCODE_DISABLED) || + ext->alg == IW_ENCODE_ALG_NONE) { + if (*crypt) + ieee80211_crypt_delayed_deinit(ieee, crypt); + + for (i = 0; i < WEP_KEYS; i++) + if (ieee->crypt[i] != NULL) + break; + + if (i == WEP_KEYS) { + sec.enabled = 0; + sec.encrypt = 0; + sec.level = SEC_LEVEL_0; + sec.flags |= SEC_LEVEL; + } + goto done; + } + + sec.enabled = 1; + sec.encrypt = 1; + + if (!(ieee->host_encrypt || ieee->host_decrypt)) + goto skip_host_crypt; + + switch (ext->alg) { + case IW_ENCODE_ALG_WEP: + alg = "WEP"; + module = "ieee80211_crypt_wep"; + break; + case IW_ENCODE_ALG_TKIP: + alg = "TKIP"; + module = 
"ieee80211_crypt_tkip"; + break; + case IW_ENCODE_ALG_CCMP: + alg = "CCMP"; + module = "ieee80211_crypt_ccmp"; + break; + default: + IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", + dev->name, ext->alg); + ret = -EINVAL; + goto done; + } + + ops = ieee80211_get_crypto_ops(alg); + if (ops == NULL) { + request_module(module); + ops = ieee80211_get_crypto_ops(alg); + } + if (ops == NULL) { + IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", + dev->name, ext->alg); + ret = -EINVAL; + goto done; + } + + if (*crypt == NULL || (*crypt)->ops != ops) { + struct ieee80211_crypt_data *new_crypt; + + ieee80211_crypt_delayed_deinit(ieee, crypt); + + new_crypt = (struct ieee80211_crypt_data *) + kmalloc(sizeof(*new_crypt), GFP_KERNEL); + if (new_crypt == NULL) { + ret = -ENOMEM; + goto done; + } + memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); + new_crypt->ops = ops; + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + new_crypt->priv = new_crypt->ops->init(ieee, idx); + if (new_crypt->priv == NULL) { + kfree(new_crypt); + ret = -EINVAL; + goto done; + } + *crypt = new_crypt; + } + + if (ext->key_len > 0 && (*crypt)->ops->set_key && + (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq, + (*crypt)->priv) < 0) { + IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name); + ret = -EINVAL; + goto done; + } + + skip_host_crypt: + if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + ieee->tx_keyidx = idx; + sec.active_key = idx; + sec.flags |= SEC_ACTIVE_KEY; + } + + if (ext->alg != IW_ENCODE_ALG_NONE) { + memcpy(sec.keys[idx], ext->key, ext->key_len); + sec.key_sizes[idx] = ext->key_len; + sec.flags |= (1 << idx); + if (ext->alg == IW_ENCODE_ALG_WEP) { + sec.encode_alg[idx] = SEC_ALG_WEP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } else if (ext->alg == IW_ENCODE_ALG_TKIP) { + sec.encode_alg[idx] = SEC_ALG_TKIP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_2; + } else if (ext->alg == IW_ENCODE_ALG_CCMP) { + sec.encode_alg[idx] = SEC_ALG_CCMP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_3; + } + } + done: + if (ieee->set_security) + ieee->set_security(ieee->dev, &sec); + + /* + * Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. 
+ */ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name); + return -EINVAL; + } + + return ret; +} + +int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + struct ieee80211_security *sec = &ieee->sec; + int idx, max_key_len; + + max_key_len = encoding->length - sizeof(*ext); + if (max_key_len < 0) + return -EINVAL; + + idx = encoding->flags & IW_ENCODE_INDEX; + if (idx) { + if (idx < 1 || idx > WEP_KEYS) + return -EINVAL; + idx--; + } else + idx = ieee->tx_keyidx; + + if (!ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) + if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA) + return -EINVAL; + + encoding->flags = idx + 1; + memset(ext, 0, sizeof(*ext)); + + if (!sec->enabled) { + ext->alg = IW_ENCODE_ALG_NONE; + ext->key_len = 0; + encoding->flags |= IW_ENCODE_DISABLED; + } else { + if (sec->encode_alg[idx] == SEC_ALG_WEP) + ext->alg = IW_ENCODE_ALG_WEP; + else if (sec->encode_alg[idx] == SEC_ALG_TKIP) + ext->alg = IW_ENCODE_ALG_TKIP; + else if (sec->encode_alg[idx] == SEC_ALG_CCMP) + ext->alg = IW_ENCODE_ALG_CCMP; + else + return -EINVAL; + + ext->key_len = sec->key_sizes[idx]; + memcpy(ext->key, sec->keys[idx], ext->key_len); + encoding->flags |= IW_ENCODE_ENABLED; + if (ext->key_len && + (ext->alg == IW_ENCODE_ALG_TKIP || + ext->alg == IW_ENCODE_ALG_CCMP)) + ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID; + + } + + return 0; +} + +EXPORT_SYMBOL(ieee80211_wx_set_encodeext); +EXPORT_SYMBOL(ieee80211_wx_get_encodeext); +#endif + EXPORT_SYMBOL(ieee80211_wx_get_scan); EXPORT_SYMBOL(ieee80211_wx_set_encode); EXPORT_SYMBOL(ieee80211_wx_get_encode); -- cgit v1.2.2 From ee34af37c095482b9dba254b9cd7cb5e65e9a25e Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:36 -0500 Subject: [PATCH] ieee80211: Renamed ieee80211_hdr to ieee80211_hdr_3addr tree e9c18b2c8e5ad446a4d213243c2dcf9fd1652a7b parent 4e97ad6ae7084a4f741e94e76c41c68bc7c5a76a author James Ketrenos 1124444315 -0500 committer James Ketrenos 1127312922 -0500 Renamed ieee80211_hdr to ieee80211_hdr_3addr and modified ieee80211_hdr to just contain the frame_ctrl and duration_id. Changed uses of ieee80211_hdr to ieee80211_hdr_4addr or ieee80211_hdr_3addr based on what was expected for that portion of code. NOTE: This requires changes to ipw2100, ipw2200, hostap, and atmel drivers. 
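The resulting header layering, roughly as described in the commit message (the authoritative definitions live in include/net/ieee80211.h, which is not shown in this diff), can be sketched as follows; le16 stands in for the kernel's __le16.

#include <stdio.h>
#include <stdint.h>

typedef uint16_t le16;          /* stand-in for __le16 */

/* Base type: only the fields every 802.11 frame carries. */
struct ieee80211_hdr {
        le16 frame_ctl;
        le16 duration_id;
} __attribute__((packed));

struct ieee80211_hdr_3addr {
        le16 frame_ctl;
        le16 duration_id;
        uint8_t addr1[6], addr2[6], addr3[6];
        le16 seq_ctl;
} __attribute__((packed));

struct ieee80211_hdr_4addr {
        le16 frame_ctl;
        le16 duration_id;
        uint8_t addr1[6], addr2[6], addr3[6];
        le16 seq_ctl;
        uint8_t addr4[6];       /* only present on WDS (to-DS + from-DS) frames */
} __attribute__((packed));

int main(void)
{
        printf("base=%zu 3addr=%zu 4addr=%zu bytes\n",
               sizeof(struct ieee80211_hdr),
               sizeof(struct ieee80211_hdr_3addr),
               sizeof(struct ieee80211_hdr_4addr));    /* expect 4, 24, 30 */
        return 0;
}

Code that only needs frame_ctl or duration_id can keep using the 4-byte base type, while the Rx/Tx paths cast to the 3- or 4-address variant that matches the frame actually being parsed.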
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt_ccmp.c | 10 +++++----- net/ieee80211/ieee80211_crypt_tkip.c | 27 ++++++++++++++------------- net/ieee80211/ieee80211_rx.c | 32 ++++++++++++++++---------------- net/ieee80211/ieee80211_tx.c | 7 ++++--- 4 files changed, 39 insertions(+), 37 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 1e6644b133dc..d3b5cdee69ef 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -119,7 +119,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len) } static void ccmp_init_blocks(struct crypto_tfm *tfm, - struct ieee80211_hdr *hdr, + struct ieee80211_hdr_4addr *hdr, u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) { u8 *pos, qc = 0; @@ -196,7 +196,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) struct ieee80211_ccmp_data *key = priv; int data_len, i, blocks, last, len; u8 *pos, *mic; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u8 *b0 = key->tx_b0; u8 *b = key->tx_b; u8 *e = key->tx_e; @@ -229,7 +229,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; @@ -258,7 +258,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; u8 keyidx, *pos; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; @@ -272,7 +272,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -1; } - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 0c495f07e718..f091aacd4297 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -265,11 +265,11 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) struct ieee80211_tkip_data *tkey = priv; int len; u8 rc4key[16], *pos, *icv; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u32 crc; struct scatterlist sg; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; if (tkey->ieee->tkip_countermeasures) { if (net_ratelimit()) { @@ -334,13 +334,13 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 keyidx, *pos; u32 iv32; u16 iv16; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u8 icv[4]; u32 crc; struct scatterlist sg; int plen; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; if (tkey->ieee->tkip_countermeasures) { if (net_ratelimit()) { @@ -466,9 +466,9 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) { - struct ieee80211_hdr *hdr11; + struct ieee80211_hdr_4addr *hdr11; - hdr11 = (struct ieee80211_hdr *)skb->data; + hdr11 = (struct ieee80211_hdr_4addr *)skb->data; switch (le16_to_cpu(hdr11->frame_ctl) & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case 
IEEE80211_FCTL_TODS: @@ -517,7 +517,8 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, #if WIRELESS_EXT >= 18 static void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr *hdr, int keyidx) + struct ieee80211_hdr_4addr *hdr, + int keyidx) { union iwreq_data wrqu; struct iw_michaelmicfailure ev; @@ -537,7 +538,8 @@ static void ieee80211_michael_mic_failure(struct net_device *dev, } #elif WIRELESS_EXT >= 15 static void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr *hdr, int keyidx) + struct ieee80211_hdr_4addr *hdr, + int keyidx) { union iwreq_data wrqu; char buf[128]; @@ -551,9 +553,8 @@ static void ieee80211_michael_mic_failure(struct net_device *dev, wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); } #else /* WIRELESS_EXT >= 15 */ -static inline void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr *hdr, - int keyidx) +static inline void ieee80211_michael_mic_failure(struct net_device *dev, struct ieee80211_hdr_4addr + *hdr, int keyidx) { } #endif /* WIRELESS_EXT >= 15 */ @@ -572,8 +573,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) return -1; if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { - struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr_4addr *hdr; + hdr = (struct ieee80211_hdr_4addr *)skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " "MSDU from " MAC_FMT " keyidx=%d\n", skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2), diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 9a125d45289a..71d14c7d915c 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -87,7 +87,7 @@ static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct /* Called only as a tasklet (software IRQ) */ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 sc; @@ -101,7 +101,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + - sizeof(struct ieee80211_hdr) + + sizeof(struct ieee80211_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ ); @@ -138,7 +138,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, /* Called only as a tasklet (software IRQ) */ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { u16 sc; unsigned int seq; @@ -176,7 +176,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, ieee->dev->name); return 0; /* - hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *) + hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *) skb->data);*/ } @@ -232,13 +232,13 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee, { struct net_device *dev = ieee->dev; u16 fc, ethertype; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; u8 *pos; if (skb->len < 24) return 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_3addr *)skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ @@ -271,13 +271,13 @@ static inline int ieee80211_rx_frame_decrypt(struct 
ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_3addr *)skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); @@ -303,13 +303,13 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb, int keyidx, struct ieee80211_crypt_data *crypt) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_3addr *)skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); @@ -332,7 +332,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct net_device *dev = ieee->dev; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device_stats *stats; @@ -352,7 +352,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; stats = &ieee->stats; if (skb->len < 10) { @@ -552,7 +552,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) goto rx_dropped; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ // PR: FIXME: hostap has additional conditions in the "if" below: @@ -606,7 +606,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; ieee80211_frag_cache_invalidate(ieee, hdr); } @@ -616,7 +616,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) goto rx_dropped; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { if ( /*ieee->ieee802_1x && */ ieee80211_is_eapol_frame(ieee, skb)) { @@ -1148,7 +1148,7 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device } void ieee80211_rx_mgt(struct ieee80211_device *ieee, - struct ieee80211_hdr *header, + struct ieee80211_hdr_4addr *header, struct ieee80211_rx_stats *stats) { switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) { diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 785e76f7e4e9..29770cfefc3d 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -227,14 +227,14 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_device *ieee = netdev_priv(dev); struct ieee80211_txb *txb = NULL; - struct ieee80211_hdr *frag_hdr; + struct ieee80211_hdr_3addr *frag_hdr; int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; unsigned long flags; struct net_device_stats *stats = &ieee->stats; int ether_type, encrypt, host_encrypt; int bytes, fc, hdr_len; struct sk_buff 
*skb_frag; - struct ieee80211_hdr header = { /* Ensure zero initialized */ + struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ .duration_id = 0, .seq_ctl = 0 }; @@ -352,7 +352,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) if (host_encrypt) skb_reserve(skb_frag, crypt->ops->extra_prefix_len); - frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len); + frag_hdr = + (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); memcpy(frag_hdr, &header, hdr_len); /* If this is not the last fragment, then add the MOREFRAGS -- cgit v1.2.2 From 3cdd00c5827621cd0b1bb0665aa62ef9a724297d Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:43 -0500 Subject: [PATCH] ieee80211: adds support for the creation of RTS packets tree b45c9c1017fd23216bfbe71e441aed9aa297fc84 parent 04aacdd71e904656a304d923bdcf57ad3bd2b254 author Ivo van Doorn 1124445405 -0500 committer James Ketrenos 1127313029 -0500 This patch adds support for the creation of RTS packets when the config flag CFG_IEEE80211_RTS has been set. Signed-Off-By: Ivo van Doorn Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_module.c | 1 + net/ieee80211/ieee80211_tx.c | 41 +++++++++++++++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 82a4fd713b28..67d6bdd2e3f2 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -126,6 +126,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv) /* Default fragmentation threshold is maximum payload size */ ieee->fts = DEFAULT_FTS; + ieee->rts = DEFAULT_FTS; ieee->scan_age = DEFAULT_MAX_SCAN_AGE; ieee->open_wep = 1; diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 29770cfefc3d..cdee41cefb26 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -222,13 +222,15 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, return txb; } -/* SKBs are added to the ieee->tx_queue. */ +/* Incoming skb is converted to a txb which consist of + * a block of 802.11 fragment packets (stored as skbs) */ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_device *ieee = netdev_priv(dev); struct ieee80211_txb *txb = NULL; struct ieee80211_hdr_3addr *frag_hdr; - int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; + int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, + rts_required; unsigned long flags; struct net_device_stats *stats = &ieee->stats; int ether_type, encrypt, host_encrypt; @@ -334,6 +336,13 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) else bytes_last_frag = bytes_per_frag; + rts_required = (frag_size > ieee->rts + && ieee->config & CFG_IEEE80211_RTS); + if (rts_required) + nr_frags++; + else + bytes_last_frag = bytes_per_frag; + /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, * postfix, header, FCS, etc.) */ @@ -346,7 +355,33 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) txb->encrypted = encrypt; txb->payload_size = bytes; - for (i = 0; i < nr_frags; i++) { + if (rts_required) { + skb_frag = txb->fragments[0]; + frag_hdr = + (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); + + /* + * Set header frame_ctl to the RTS. 
+ */ + header.frame_ctl = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + memcpy(frag_hdr, &header, hdr_len); + + /* + * Restore header frame_ctl to the original data setting. + */ + header.frame_ctl = cpu_to_le16(fc); + + if (ieee->config & + (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) + skb_put(skb_frag, 4); + + txb->rts_included = 1; + i = 1; + } else + i = 0; + + for (; i < nr_frags; i++) { skb_frag = txb->fragments[i]; if (host_encrypt) -- cgit v1.2.2 From 3f552bbf8614d2d26f488ca0d3e188bdec484bf4 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:47 -0500 Subject: [PATCH] ieee82011: Added ieee80211_tx_frame to convert generic 802.11 data frames, and callbacks tree 40adc78b623ae70d56074934ec6334eb4f0ae6a5 parent db43d847bcebaa3df6414e26d0008eb21690e8cf author James Ketrenos 1124445938 -0500 committer James Ketrenos 1127313102 -0500 Added ieee80211_tx_frame to convert generic 802.11 data frames into txbs for transmission. Added several purpose specific callbacks (handle_assoc, handle_auth, etc.) which the driver can register with for being notified on reception of variouf frame elements. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_rx.c | 58 ++++++++++++++++++++++++++++----------- net/ieee80211/ieee80211_tx.c | 64 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 71d14c7d915c..d1ae28280d7e 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -1029,12 +1029,18 @@ static inline void update_network(struct ieee80211_network *dst, /* dst->last_associate is not overwritten */ } +static inline int is_beacon(int fc) +{ + return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); +} + static inline void ieee80211_process_probe_response(struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_rx_stats *stats) { + struct net_device *dev = ieee->dev; struct ieee80211_network network; struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; @@ -1070,11 +1076,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(info_element->data, info_element->len), MAC_ARG(beacon->header.addr3), - WLAN_FC_GET_STYPE(le16_to_cpu - (beacon->header. - frame_ctl)) == - IEEE80211_STYPE_PROBE_RESP ? - "PROBE RESPONSE" : "BEACON"); + is_beacon(le16_to_cpu + (beacon->header. + frame_ctl)) ? + "BEACON" : "PROBE RESPONSE"); return; } @@ -1123,11 +1128,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(network.ssid, network.ssid_len), MAC_ARG(network.bssid), - WLAN_FC_GET_STYPE(le16_to_cpu - (beacon->header. - frame_ctl)) == - IEEE80211_STYPE_PROBE_RESP ? - "PROBE RESPONSE" : "BEACON"); + is_beacon(le16_to_cpu + (beacon->header. + frame_ctl)) ? + "BEACON" : "PROBE RESPONSE"); #endif memcpy(target, &network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); @@ -1136,15 +1140,22 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(target->ssid, target->ssid_len), MAC_ARG(target->bssid), - WLAN_FC_GET_STYPE(le16_to_cpu - (beacon->header. - frame_ctl)) == - IEEE80211_STYPE_PROBE_RESP ? - "PROBE RESPONSE" : "BEACON"); + is_beacon(le16_to_cpu + (beacon->header. + frame_ctl)) ? 
+ "BEACON" : "PROBE RESPONSE"); update_network(target, &network); } spin_unlock_irqrestore(&ieee->lock, flags); + + if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) { + if (ieee->handle_beacon != NULL) + ieee->handle_beacon(dev, beacon, &network); + } else { + if (ieee->handle_probe_response != NULL) + ieee->handle_probe_response(dev, beacon, &network); + } } void ieee80211_rx_mgt(struct ieee80211_device *ieee, @@ -1185,6 +1196,23 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, ieee80211_probe_response *) header, stats); break; + case IEEE80211_STYPE_AUTH: + + IEEE80211_DEBUG_MGMT("recieved auth (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + if (ieee->handle_auth != NULL) + ieee->handle_auth(ieee->dev, + (struct ieee80211_auth *)header); + break; + + case IEEE80211_STYPE_DISASSOC: + if (ieee->handle_disassoc != NULL) + ieee->handle_disassoc(ieee->dev, + (struct ieee80211_disassoc *) + header); + break; default: IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index cdee41cefb26..f505aa127e21 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -459,7 +459,71 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); stats->tx_errors++; return 1; +} + +/* Incoming 802.11 strucure is converted to a TXB + * a block of 802.11 fragment packets (stored as skbs) */ +int ieee80211_tx_frame(struct ieee80211_device *ieee, + struct ieee80211_hdr *frame, int len) +{ + struct ieee80211_txb *txb = NULL; + unsigned long flags; + struct net_device_stats *stats = &ieee->stats; + struct sk_buff *skb_frag; + + spin_lock_irqsave(&ieee->lock, flags); + + /* If there is no driver handler to take the TXB, dont' bother + * creating it... */ + if (!ieee->hard_start_xmit) { + printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); + goto success; + } + if (unlikely(len < 24)) { + printk(KERN_WARNING "%s: skb too small (%d).\n", + ieee->dev->name, len); + goto success; + } + + /* When we allocate the TXB we allocate enough space for the reserve + * and full fragment bytes (bytes_per_frag doesn't include prefix, + * postfix, header, FCS, etc.) */ + txb = ieee80211_alloc_txb(1, len, GFP_ATOMIC); + if (unlikely(!txb)) { + printk(KERN_WARNING "%s: Could not allocate TXB\n", + ieee->dev->name); + goto failed; + } + txb->encrypted = 0; + txb->payload_size = len; + + skb_frag = txb->fragments[0]; + + memcpy(skb_put(skb_frag, len), frame, len); + + if (ieee->config & + (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) + skb_put(skb_frag, 4); + + success: + spin_unlock_irqrestore(&ieee->lock, flags); + + if (txb) { + if ((*ieee->hard_start_xmit) (txb, ieee->dev) == 0) { + stats->tx_packets++; + stats->tx_bytes += txb->payload_size; + return 0; + } + ieee80211_txb_free(txb); + } + return 0; + + failed: + spin_unlock_irqrestore(&ieee->lock, flags); + stats->tx_errors++; + return 1; } +EXPORT_SYMBOL(ieee80211_tx_frame); EXPORT_SYMBOL(ieee80211_txb_free); -- cgit v1.2.2 From 1264fc0498e1e20f97b1ab690e523e7a7fc50eab Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:54:53 -0500 Subject: [PATCH] ieee80211: Fix TKIP, repeated fragmentation problem, and payload_size reporting tree 8428e9f510e6ad6c77baec89cb57374842abf733 parent d78bfd3ddae9c422dd350159110f9c4d7cfc50de author Liu Hong 1124446520 -0500 committer James Ketrenos 1127313183 -0500 Fix TKIP, repeated fragmentation problem, and payload_size reporting 1. 
TKIP encryption Originally, TKIP encryption issues msdu + mpdu encryption on every fragment. Change the behavior to msdu encryption on the whole packet, then mpdu encryption on every fragment. 2. Avoid repeated fragmentation when !host_encrypt. We only need do fragmentation when using host encryption. Otherwise we only need pass the whole packet to driver, letting driver do the fragmentation. 3. change the txb->payload_size to correct value FW will use this value to determine whether to do fragmentation. If we pass the wrong value, fw may cut on the wrong bound which will make decryption fail when we do host encryption. NOTE: This requires changing drivers (hostap) that have extra_prefix_len used within them (structure member name change). Signed-off-by: Hong Liu Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt.c | 4 +- net/ieee80211/ieee80211_crypt_ccmp.c | 4 +- net/ieee80211/ieee80211_crypt_tkip.c | 5 +- net/ieee80211/ieee80211_crypt_wep.c | 4 +- net/ieee80211/ieee80211_module.c | 7 +- net/ieee80211/ieee80211_tx.c | 148 ++++++++++++++++++++--------------- 6 files changed, 98 insertions(+), 74 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index 60d3166facce..e26bcc918032 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -221,8 +221,8 @@ static struct ieee80211_crypto_ops ieee80211_crypt_null = { .decrypt_msdu = NULL, .set_key = NULL, .get_key = NULL, - .extra_prefix_len = 0, - .extra_postfix_len = 0, + .extra_mpdu_prefix_len = 0, + .extra_mpdu_postfix_len = 0, .owner = THIS_MODULE, }; diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index d3b5cdee69ef..a3dc5712b98b 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -436,8 +436,8 @@ static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { .set_key = ieee80211_ccmp_set_key, .get_key = ieee80211_ccmp_get_key, .print_stats = ieee80211_ccmp_print_stats, - .extra_prefix_len = CCMP_HDR_LEN, - .extra_postfix_len = CCMP_MIC_LEN, + .extra_mpdu_prefix_len = CCMP_HDR_LEN, + .extra_mpdu_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index f091aacd4297..f973d6cb8248 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -690,8 +690,9 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { .set_key = ieee80211_tkip_set_key, .get_key = ieee80211_tkip_get_key, .print_stats = ieee80211_tkip_print_stats, - .extra_prefix_len = 4 + 4, /* IV + ExtIV */ - .extra_postfix_len = 8 + 4, /* MIC + ICV */ + .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ + .extra_mpdu_postfix_len = 4, /* ICV */ + .extra_msdu_postfix_len = 8, /* MIC */ .owner = THIS_MODULE, }; diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 63e783fa5173..2aaeac1e02d7 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -239,8 +239,8 @@ static struct ieee80211_crypto_ops ieee80211_crypt_wep = { .set_key = prism2_wep_set_key, .get_key = prism2_wep_get_key, .print_stats = prism2_wep_print_stats, - .extra_prefix_len = 4, /* IV */ - .extra_postfix_len = 4, /* ICV */ + .extra_mpdu_prefix_len = 4, /* IV */ + .extra_mpdu_postfix_len = 4, /* ICV */ .owner = THIS_MODULE, }; diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 
67d6bdd2e3f2..dddc61647390 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -133,6 +133,12 @@ struct net_device *alloc_ieee80211(int sizeof_priv) /* Default to enabling full open WEP with host based encrypt/decrypt */ ieee->host_encrypt = 1; ieee->host_decrypt = 1; + /* Host fragementation in Open mode. Default is enabled. + * Note: host fragmentation is always enabled if host encryption + * is enabled. For cards can do hardware encryption, they must do + * hardware fragmentation as well. So we don't need a variable + * like host_enc_frag. */ + ieee->host_open_frag = 1; ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ INIT_LIST_HEAD(&ieee->crypt_deinit_list); @@ -147,7 +153,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv) ieee->tkip_countermeasures = 0; ieee->drop_unencrypted = 0; ieee->privacy_invoked = 0; - ieee->ieee802_1x = 1; return dev; diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index f505aa127e21..23a1f88de7cb 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes. static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; -static inline int ieee80211_put_snap(u8 * data, u16 h_proto) +static inline int ieee80211_copy_snap(u8 * data, u16 h_proto) { struct ieee80211_snap_hdr *snap; u8 *oui; @@ -159,15 +159,9 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ - - // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption. - /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so - * call both MSDU and MPDU encryption functions from here. 
*/ atomic_inc(&crypt->refcnt); res = 0; - if (crypt->ops->encrypt_msdu) - res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv); - if (res == 0 && crypt->ops->encrypt_mpdu) + if (crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); @@ -222,7 +216,7 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, return txb; } -/* Incoming skb is converted to a txb which consist of +/* Incoming skb is converted to a txb which consists of * a block of 802.11 fragment packets (stored as skbs) */ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -233,7 +227,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) rts_required; unsigned long flags; struct net_device_stats *stats = &ieee->stats; - int ether_type, encrypt, host_encrypt; + int ether_type, encrypt, host_encrypt, host_encrypt_msdu; int bytes, fc, hdr_len; struct sk_buff *skb_frag; struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ @@ -241,8 +235,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) .seq_ctl = 0 }; u8 dest[ETH_ALEN], src[ETH_ALEN]; - struct ieee80211_crypt_data *crypt; + int snapped = 0; spin_lock_irqsave(&ieee->lock, flags); @@ -266,6 +260,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && ieee->sec.encrypt; host_encrypt = ieee->host_encrypt && encrypt; + host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { @@ -291,14 +286,12 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) if (ieee->iw_mode == IW_MODE_INFRA) { fc |= IEEE80211_FCTL_TODS; - /* To DS: Addr1 = BSSID, Addr2 = SA, - Addr3 = DA */ + /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(header.addr1, ieee->bssid, ETH_ALEN); memcpy(header.addr2, src, ETH_ALEN); memcpy(header.addr3, dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { - /* not From/To DS: Addr1 = DA, Addr2 = SA, - Addr3 = BSSID */ + /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ memcpy(header.addr1, dest, ETH_ALEN); memcpy(header.addr2, src, ETH_ALEN); memcpy(header.addr3, ieee->bssid, ETH_ALEN); @@ -306,42 +299,75 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) header.frame_ctl = cpu_to_le16(fc); hdr_len = IEEE80211_3ADDR_LEN; - /* Determine fragmentation size based on destination (multicast - * and broadcast are not fragmented) */ - if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest)) - frag_size = MAX_FRAG_THRESHOLD; - else - frag_size = ieee->fts; + /* Encrypt msdu first on the whole data packet. 
*/ + if ((host_encrypt || host_encrypt_msdu) && + crypt && crypt->ops && crypt->ops->encrypt_msdu) { + int res = 0; + int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + + crypt->ops->extra_msdu_postfix_len; + struct sk_buff *skb_new = dev_alloc_skb(len); + if (unlikely(!skb_new)) + goto failed; + skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len); + memcpy(skb_put(skb_new, hdr_len), &header, hdr_len); + snapped = 1; + ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), + ether_type); + memcpy(skb_put(skb_new, skb->len), skb->data, skb->len); + res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); + if (res < 0) { + IEEE80211_ERROR("msdu encryption failed\n"); + dev_kfree_skb_any(skb_new); + goto failed; + } + dev_kfree_skb_any(skb); + skb = skb_new; + bytes += crypt->ops->extra_msdu_prefix_len + + crypt->ops->extra_msdu_postfix_len; + skb_pull(skb, hdr_len); + } - /* Determine amount of payload per fragment. Regardless of if - * this stack is providing the full 802.11 header, one will - * eventually be affixed to this fragment -- so we must account for - * it when determining the amount of payload space. */ - bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; - if (ieee->config & - (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) - bytes_per_frag -= IEEE80211_FCS_LEN; + if (host_encrypt || ieee->host_open_frag) { + /* Determine fragmentation size based on destination (multicast + * and broadcast are not fragmented) */ + if (is_multicast_ether_addr(dest)) + frag_size = MAX_FRAG_THRESHOLD; + else + frag_size = ieee->fts; + + /* Determine amount of payload per fragment. Regardless of if + * this stack is providing the full 802.11 header, one will + * eventually be affixed to this fragment -- so we must account + * for it when determining the amount of payload space. 
*/ + bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; + if (ieee->config & + (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) + bytes_per_frag -= IEEE80211_FCS_LEN; - /* Each fragment may need to have room for encryptiong pre/postfix */ - if (host_encrypt) - bytes_per_frag -= crypt->ops->extra_prefix_len + - crypt->ops->extra_postfix_len; - - /* Number of fragments is the total bytes_per_frag / - * payload_per_fragment */ - nr_frags = bytes / bytes_per_frag; - bytes_last_frag = bytes % bytes_per_frag; - if (bytes_last_frag) - nr_frags++; - else - bytes_last_frag = bytes_per_frag; + /* Each fragment may need to have room for encryptiong + * pre/postfix */ + if (host_encrypt) + bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + + crypt->ops->extra_mpdu_postfix_len; + + /* Number of fragments is the total + * bytes_per_frag / payload_per_fragment */ + nr_frags = bytes / bytes_per_frag; + bytes_last_frag = bytes % bytes_per_frag; + if (bytes_last_frag) + nr_frags++; + else + bytes_last_frag = bytes_per_frag; + } else { + nr_frags = 1; + bytes_per_frag = bytes_last_frag = bytes; + frag_size = bytes + IEEE80211_3ADDR_LEN; + } rts_required = (frag_size > ieee->rts && ieee->config & CFG_IEEE80211_RTS); if (rts_required) nr_frags++; - else - bytes_last_frag = bytes_per_frag; /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, @@ -353,7 +379,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) goto failed; } txb->encrypted = encrypt; - txb->payload_size = bytes; + if (host_encrypt) + txb->payload_size = frag_size * (nr_frags - 1) + + bytes_last_frag; + else + txb->payload_size = bytes; if (rts_required) { skb_frag = txb->fragments[0]; @@ -385,7 +415,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag = txb->fragments[i]; if (host_encrypt) - skb_reserve(skb_frag, crypt->ops->extra_prefix_len); + skb_reserve(skb_frag, + crypt->ops->extra_mpdu_prefix_len); frag_hdr = (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); @@ -402,11 +433,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) bytes = bytes_last_frag; } - /* Put a SNAP header on the first fragment */ - if (i == 0) { - ieee80211_put_snap(skb_put - (skb_frag, SNAP_SIZE + sizeof(u16)), - ether_type); + if (i == 0 && !snapped) { + ieee80211_copy_snap(skb_put + (skb_frag, SNAP_SIZE + sizeof(u16)), + ether_type); bytes -= SNAP_SIZE + sizeof(u16); } @@ -420,19 +450,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) if (host_encrypt) ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); - /* ipw2200/2915 Hardware encryption doesn't support TKIP MIC */ - if (!ieee->host_encrypt && encrypt && - (ieee->sec.level == SEC_LEVEL_2) && - crypt && crypt->ops && crypt->ops->encrypt_msdu) { - int res = 0; - res = crypt->ops->encrypt_msdu(skb_frag, hdr_len, - crypt->priv); - if (res < 0) { - IEEE80211_ERROR("TKIP MIC encryption failed\n"); - goto failed; - } - } - if (ieee->config & (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) skb_put(skb_frag, 4); @@ -444,7 +461,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); if (txb) { - if ((*ieee->hard_start_xmit) (txb, dev) == 0) { + int ret = (*ieee->hard_start_xmit) (txb, dev); + if (ret == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; return 0; -- cgit v1.2.2 From 2c0aa2a5c2dfa1293ae3a07517d0b7de149358a1 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 
11:56:27 -0500 Subject: [PATCH] ieee80211: Return NETDEV_TX_BUSY when QoS buffer full tree ba6509c7cd1dd4244a2f285f2da5d632e7ffbb25 parent 7b5f9f2ddcabdaea214527a895e6e8445cafdd80 author James Ketrenos 1124447000 -0500 committer James Ketrenos 1127313383 -0500 Per the conversations with folks at OLS, the QoS layer in 802.11 drivers can now result in NETDEV_TX_BUSY being returned when the queue a packet is targetted for is full. To implement this, ieee80211_xmit will now call the driver's is_queue_full to determine if the current priority queue is full. If so, NETDEV_TX_BUSY is returned to the kernel and no processing is done on the frame. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_tx.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 23a1f88de7cb..e9efdd42ba37 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -236,8 +236,12 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) }; u8 dest[ETH_ALEN], src[ETH_ALEN]; struct ieee80211_crypt_data *crypt; + int priority = skb->priority; int snapped = 0; + if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority)) + return NETDEV_TX_BUSY; + spin_lock_irqsave(&ieee->lock, flags); /* If there is no driver handler to take the TXB, dont' bother @@ -467,6 +471,14 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) stats->tx_bytes += txb->payload_size; return 0; } + + if (ret == NETDEV_TX_BUSY) { + printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; " + "driver should report queue full via " + "ieee_device->is_queue_full.\n", + ieee->dev->name); + } + ieee80211_txb_free(txb); } -- cgit v1.2.2 From 9e8571affd1c54b9638b4ff9844e47aae07310f6 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:56:33 -0500 Subject: [PATCH] ieee80211: Add QoS (WME) support to the ieee80211 subsystem tree a3ad796273e98036eb0e9fc063225070fa24508a parent 1b9c0aeb377abf8e4a43a86cff42382f74ca0259 author Mohamed Abbas 1124447069 -0500 committer James Ketrenos 1127313435 -0500 Add QoS (WME) support to the ieee80211 subsystem. NOTE: This requires drivers that use the ieee80211 hard_start_xmit (ipw2100 and ipw2200) to add the priority parameter to their callback. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_rx.c | 334 ++++++++++++++++++++++++++++++++++++++++--- net/ieee80211/ieee80211_tx.c | 5 +- 2 files changed, 318 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index d1ae28280d7e..2c4613527dfd 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -534,6 +534,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. 
*/ + + stype &= ~IEEE80211_STYPE_QOS_DATA; + if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && @@ -758,6 +761,264 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 +static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; + +/* +* Make ther structure we read from the beacon packet has +* the right values +*/ +static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element + *info_element, int sub_type) +{ + + if (info_element->qui_subtype != sub_type) + return -1; + if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) + return -1; + if (info_element->qui_type != QOS_OUI_TYPE) + return -1; + if (info_element->version != QOS_VERSION_1) + return -1; + + return 0; +} + +/* + * Parse a QoS parameter element + */ +static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info + *element_param, struct ieee80211_info_element + *info_element) +{ + int ret = 0; + u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2; + + if ((info_element == NULL) || (element_param == NULL)) + return -1; + + if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { + memcpy(element_param->info_element.qui, info_element->data, + info_element->len); + element_param->info_element.elementID = info_element->id; + element_param->info_element.length = info_element->len; + } else + ret = -1; + if (ret == 0) + ret = ieee80211_verify_qos_info(&element_param->info_element, + QOS_OUI_PARAM_SUB_TYPE); + return ret; +} + +/* + * Parse a QoS information element + */ +static int ieee80211_read_qos_info_element(struct + ieee80211_qos_information_element + *element_info, struct ieee80211_info_element + *info_element) +{ + int ret = 0; + u16 size = sizeof(struct ieee80211_qos_information_element) - 2; + + if (element_info == NULL) + return -1; + if (info_element == NULL) + return -1; + + if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) { + memcpy(element_info->qui, info_element->data, + info_element->len); + element_info->elementID = info_element->id; + element_info->length = info_element->len; + } else + ret = -1; + + if (ret == 0) + ret = ieee80211_verify_qos_info(element_info, + QOS_OUI_INFO_SUB_TYPE); + return ret; +} + +/* + * Write QoS parameters from the ac parameters. + */ +static int ieee80211_qos_convert_ac_to_parameters(struct + ieee80211_qos_parameter_info + *param_elm, struct + ieee80211_qos_parameters + *qos_param) +{ + int rc = 0; + int i; + struct ieee80211_qos_ac_parameter *ac_params; + u32 txop; + u8 cw_min; + u8 cw_max; + + for (i = 0; i < QOS_QUEUE_NUM; i++) { + ac_params = &(param_elm->ac_params_record[i]); + + qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F; + qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2; + + cw_min = ac_params->ecw_min_max & 0x0F; + qos_param->cw_min[i] = (u16) ((1 << cw_min) - 1); + + cw_max = (ac_params->ecw_min_max & 0xF0) >> 4; + qos_param->cw_max[i] = (u16) ((1 << cw_max) - 1); + + qos_param->flag[i] = + (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00; + + txop = le16_to_cpu(ac_params->tx_op_limit) * 32; + qos_param->tx_op_limit[i] = (u16) txop; + } + return rc; +} + +/* + * we have a generic data element which it may contain QoS information or + * parameters element. 
check the information element length to decide + * which type to read + */ +static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element + *info_element, + struct ieee80211_network *network) +{ + int rc = 0; + struct ieee80211_qos_parameters *qos_param = NULL; + struct ieee80211_qos_information_element qos_info_element; + + rc = ieee80211_read_qos_info_element(&qos_info_element, info_element); + + if (rc == 0) { + network->qos_data.param_count = qos_info_element.ac_info & 0x0F; + network->flags |= NETWORK_HAS_QOS_INFORMATION; + } else { + struct ieee80211_qos_parameter_info param_element; + + rc = ieee80211_read_qos_param_element(¶m_element, + info_element); + if (rc == 0) { + qos_param = &(network->qos_data.parameters); + ieee80211_qos_convert_ac_to_parameters(¶m_element, + qos_param); + network->flags |= NETWORK_HAS_QOS_PARAMETERS; + network->qos_data.param_count = + param_element.info_element.ac_info & 0x0F; + } + } + + if (rc == 0) { + IEEE80211_DEBUG_QOS("QoS is supported\n"); + network->qos_data.supported = 1; + } + return rc; +} + +static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response + *frame, struct ieee80211_rx_stats *stats) +{ + struct ieee80211_network network_resp; + struct ieee80211_network *network = &network_resp; + struct ieee80211_info_element *info_element; + struct net_device *dev = ieee->dev; + u16 left; + + network->flags = 0; + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; + + //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); + network->atim_window = le16_to_cpu(frame->aid); + network->listen_interval = le16_to_cpu(frame->status); + + info_element = frame->info_element; + left = stats->len - sizeof(*frame); + + while (left >= sizeof(struct ieee80211_info_element)) { + if (sizeof(struct ieee80211_info_element) + + info_element->len > left) { + IEEE80211_DEBUG_QOS("ASSOC RESP: parse failed: " + "info_element->len + 2 > left : " + "info_element->len+2=%zd left=%d, id=%d.\n", + info_element->len + + sizeof(struct + ieee80211_info_element), + left, info_element->id); + return 1; + } + + switch (info_element->id) { + case MFIE_TYPE_SSID: + if (ieee80211_is_empty_essid(info_element->data, + info_element->len)) { + network->flags |= NETWORK_EMPTY_ESSID; + break; + } + + network->ssid_len = min(info_element->len, + (u8) IW_ESSID_MAX_SIZE); + memcpy(network->ssid, info_element->data, + network->ssid_len); + if (network->ssid_len < IW_ESSID_MAX_SIZE) + memset(network->ssid + network->ssid_len, 0, + IW_ESSID_MAX_SIZE - network->ssid_len); + + IEEE80211_DEBUG_QOS("MFIE_TYPE_SSID: '%s' len=%d.\n", + network->ssid, network->ssid_len); + break; + + case MFIE_TYPE_TIM: + IEEE80211_DEBUG_QOS("MFIE_TYPE_TIM: ignored\n"); + break; + + case MFIE_TYPE_IBSS_SET: + IEEE80211_DEBUG_QOS("MFIE_TYPE_IBSS_SET: ignored\n"); + break; + + case MFIE_TYPE_CHALLENGE: + IEEE80211_DEBUG_QOS("MFIE_TYPE_CHALLENGE: ignored\n"); + break; + + case MFIE_TYPE_GENERIC: + IEEE80211_DEBUG_QOS("MFIE_TYPE_GENERIC: %d bytes\n", + info_element->len); + ieee80211_parse_qos_info_param_IE(info_element, + network); + break; + + case MFIE_TYPE_RSN: + IEEE80211_DEBUG_QOS("MFIE_TYPE_RSN: %d bytes\n", + info_element->len); + break; + + case MFIE_TYPE_QOS_PARAMETER: + printk("QoS Error need to parse QOS_PARAMETER IE\n"); + break; + + default: + IEEE80211_DEBUG_QOS("unsupported IE %d\n", + info_element->id); + break; + } + + left -= sizeof(struct 
ieee80211_info_element) + + info_element->len; + info_element = (struct ieee80211_info_element *) + &info_element->data[info_element->len]; + } + + if (ieee->handle_assoc_response != NULL) + ieee->handle_assoc_response(dev, frame, network); + + return 0; +} + +/***************************************************/ + static inline int ieee80211_is_ofdm_rate(u8 rate) { switch (rate & ~IEEE80211_BASIC_RATE_MASK) { @@ -786,6 +1047,9 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i struct ieee80211_info_element *info_element; u16 left; u8 i; + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); @@ -813,13 +1077,11 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i info_element = beacon->info_element; left = stats->len - sizeof(*beacon); - while (left >= sizeof(struct ieee80211_info_element)) { - if (sizeof(struct ieee80211_info_element) + info_element->len > - left) { + while (left >= sizeof(*info_element)) { + if (sizeof(*info_element) + info_element->len > left) { IEEE80211_DEBUG_SCAN ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n", - info_element->len + - sizeof(struct ieee80211_info_element), left); + info_element->len + sizeof(*info_element), left); return 1; } @@ -847,15 +1109,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif - network->rates_len = - min(info_element->len, MAX_RATES_LENGTH); + network->rates_len = min(info_element->len, + MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG - p += snprintf(p, - sizeof(rates_str) - (p - - rates_str), - "%02X ", network->rates[i]); + p += snprintf(p, sizeof(rates_str) - + (p - rates_str), "%02X ", + network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { @@ -875,15 +1136,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif - network->rates_ex_len = - min(info_element->len, MAX_RATES_EX_LENGTH); + network->rates_ex_len = min(info_element->len, + MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG - p += snprintf(p, - sizeof(rates_str) - (p - - rates_str), - "%02X ", network->rates[i]); + p += snprintf(p, sizeof(rates_str) - + (p - rates_str), "%02X ", + network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { @@ -929,6 +1189,10 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i case MFIE_TYPE_GENERIC: IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); + if (!ieee80211_parse_qos_info_param_IE(info_element, + network)) + break; + if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && @@ -950,14 +1214,18 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->rsn_ie_len); break; + case MFIE_TYPE_QOS_PARAMETER: + printk(KERN_ERR + "QoS Error need to parse QOS_PARAMETER IE\n"); + break; + default: IEEE80211_DEBUG_SCAN("unsupported IE %d\n", info_element->id); break; } - left -= sizeof(struct ieee80211_info_element) + - info_element->len; + left -= sizeof(*info_element) + 
info_element->len; info_element = (struct ieee80211_info_element *) &info_element->data[info_element->len]; } @@ -1004,6 +1272,9 @@ static inline int is_same_network(struct ieee80211_network *src, static inline void update_network(struct ieee80211_network *dst, struct ieee80211_network *src) { + int qos_active; + u8 old_param; + memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); dst->capability = src->capability; memcpy(dst->rates, src->rates, src->rates_len); @@ -1026,6 +1297,28 @@ static inline void update_network(struct ieee80211_network *dst, dst->rsn_ie_len = src->rsn_ie_len; dst->last_scanned = jiffies; + qos_active = src->qos_data.active; + old_param = dst->qos_data.old_param_count; + if (dst->flags & NETWORK_HAS_QOS_MASK) + memcpy(&dst->qos_data, &src->qos_data, + sizeof(struct ieee80211_qos_data)); + else { + dst->qos_data.supported = src->qos_data.supported; + dst->qos_data.param_count = src->qos_data.param_count; + } + + if (dst->qos_data.supported == 1) { + if (dst->ssid_len) + IEEE80211_DEBUG_QOS + ("QoS the network %s is QoS supported\n", + dst->ssid); + else + IEEE80211_DEBUG_QOS + ("QoS the network is QoS supported\n"); + } + dst->qos_data.active = qos_active; + dst->qos_data.old_param_count = old_param; + /* dst->last_associate is not overwritten */ } @@ -1167,6 +1460,9 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n", WLAN_FC_GET_STYPE(le16_to_cpu (header->frame_ctl))); + ieee80211_handle_assoc_resp(ieee, + (struct ieee80211_assoc_response *) + header, stats); break; case IEEE80211_STYPE_REASSOC_RESP: diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index e9efdd42ba37..aba72f9880a1 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -465,7 +465,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); if (txb) { - int ret = (*ieee->hard_start_xmit) (txb, dev); + int ret = (*ieee->hard_start_xmit) (txb, dev, priority); if (ret == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; @@ -500,6 +500,7 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee, unsigned long flags; struct net_device_stats *stats = &ieee->stats; struct sk_buff *skb_frag; + int priority = -1; spin_lock_irqsave(&ieee->lock, flags); @@ -540,7 +541,7 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee, spin_unlock_irqrestore(&ieee->lock, flags); if (txb) { - if ((*ieee->hard_start_xmit) (txb, ieee->dev) == 0) { + if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; return 0; -- cgit v1.2.2 From 02cda6ae01814f58422c45259fb48136fbd7bcc1 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:56:38 -0500 Subject: [PATCH] ieee80211: Added ieee80211_geo to provide helper functions tree 385b391fc0d7c124cd0547fdb6183e9a0c333391 parent 97d7a47f76e72bedde7f402785559ed4c7a8e8e8 author James Ketrenos 1124447590 -0500 committer James Ketrenos 1127313735 -0500 Added ieee80211_geo to provide helper functions to drivers for implementing supported channel maps. 
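A hypothetical driver-side sketch of how these helpers are meant to be used follows. The channel table, the "US" code, and the init function name are invented for illustration, and the MHz frequency units are an assumption; only the ieee80211_* calls and the ieee->freq_band / geo fields come from the patch below.

#include <linux/kernel.h>
#include <linux/string.h>
#include <net/ieee80211.h>

/* Illustrative 2.4 GHz map; frequencies assumed to be stored in MHz. */
static const struct ieee80211_channel example_bg[] = {
	{ .channel = 1,  .freq = 2412 },
	{ .channel = 6,  .freq = 2437 },
	{ .channel = 11, .freq = 2462 },
};

static void example_driver_init_geo(struct ieee80211_device *ieee)
{
	struct ieee80211_geo geo;

	memset(&geo, 0, sizeof(geo));
	memcpy(geo.name, "US ", 3);		/* NUL-terminated by the helper */
	geo.bg_channels = ARRAY_SIZE(example_bg);
	geo.a_channels = 0;
	memcpy(geo.bg, example_bg, sizeof(example_bg));

	ieee->freq_band = IEEE80211_24GHZ_BAND;	/* the helpers consult this */
	ieee80211_set_geo(ieee, &geo);		/* copies the map into ieee->geo */

	/* Later lookups are answered from the stored map. */
	if (ieee80211_is_valid_channel(ieee, 6))
		printk(KERN_DEBUG "channel 6 valid, index %d\n",
		       ieee80211_channel_to_index(ieee, 6));
}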
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/Makefile | 3 +- net/ieee80211/ieee80211_geo.c | 141 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+), 1 deletion(-) create mode 100644 net/ieee80211/ieee80211_geo.c (limited to 'net') diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile index a6ccac5baea8..f988417121da 100644 --- a/net/ieee80211/Makefile +++ b/net/ieee80211/Makefile @@ -7,5 +7,6 @@ ieee80211-objs := \ ieee80211_module.o \ ieee80211_tx.o \ ieee80211_rx.o \ - ieee80211_wx.o + ieee80211_wx.o \ + ieee80211_geo.o diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c new file mode 100644 index 000000000000..c4b54ef8f6d5 --- /dev/null +++ b/net/ieee80211/ieee80211_geo.c @@ -0,0 +1,141 @@ +/****************************************************************************** + + Copyright(c) 2005 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + James P. Ketrenos + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); + + if (ieee->freq_band & IEEE80211_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + /* NOTE: If G mode is currently supported but + * this is a B only channel, we don't see it + * as valid. 
*/ + if ((ieee->geo.bg[i].channel == channel) && + (!(ieee->mode & IEEE_G) || + !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) + return IEEE80211_24GHZ_BAND; + + if (ieee->freq_band & IEEE80211_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].channel == channel) + return IEEE80211_52GHZ_BAND; + + return 0; +} + +int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); + + if (ieee->freq_band & IEEE80211_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + if (ieee->geo.bg[i].channel == channel) + return i; + + if (ieee->freq_band & IEEE80211_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].channel == channel) + return i; + + return -1; +} + +u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); + + freq /= 100000; + + if (ieee->freq_band & IEEE80211_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + if (ieee->geo.bg[i].freq == freq) + return ieee->geo.bg[i].channel; + + if (ieee->freq_band & IEEE80211_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].freq == freq) + return ieee->geo.a[i].channel; + + return 0; +} + +int ieee80211_set_geo(struct ieee80211_device *ieee, + const struct ieee80211_geo *geo) +{ + memcpy(ieee->geo.name, geo->name, 3); + ieee->geo.name[3] = '\0'; + ieee->geo.bg_channels = geo->bg_channels; + ieee->geo.a_channels = geo->a_channels; + memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * + sizeof(struct ieee80211_channel)); + memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * + sizeof(struct ieee80211_channel)); + return 0; +} + +const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee) +{ + return &ieee->geo; +} + +EXPORT_SYMBOL(ieee80211_is_valid_channel); +EXPORT_SYMBOL(ieee80211_freq_to_channel); +EXPORT_SYMBOL(ieee80211_channel_to_index); +EXPORT_SYMBOL(ieee80211_set_geo); +EXPORT_SYMBOL(ieee80211_get_geo); -- cgit v1.2.2 From 42c94e43be27f8b9be9b5be491bae8af05e54dbd Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:58:29 -0500 Subject: [PATCH] ieee80211: Type-o, capbility definition for QoS, and ERP parsing tree 3ac0dd07b9972dfd68fee47ec2152d3d378de000 parent 9ada1d971d9829c34a14d98840080b7e69fdff6b author Mohamed Abbad 1126054379 -0500 committer James Ketrenos 1127314340 -0500 Type-o, capbility definition for QoS, and ERP parsing Added WLAN_CAPABILITY_QOS Fixed type-o WLAN_CAPABILITY_OSSS_OFDM -> WLAN_CAPABILITY_DSSS_OFDM Added ERP IE parsing to ieee80211_rx Added handle_probe_request callback. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_rx.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 2c4613527dfd..8bcdbabae3a1 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -1065,6 +1065,8 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->ssid_len = 0; network->flags = 0; network->atim_window = 0; + network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
+ 0x3 : 0x0; if (stats->freq == IEEE80211_52GHZ_BAND) { /* for A band (No DS info) */ @@ -1178,8 +1180,16 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n"); break; + case MFIE_TYPE_ERP_INFO: + network->erp_value = info_element->data[0]; + IEEE80211_DEBUG_SCAN("MFIE_TYPE_ERP_SET: %d\n", + network->erp_value); + break; + case MFIE_TYPE_IBSS_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n"); + network->atim_window = info_element->data[0]; + IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: %d\n", + network->atim_window); break; case MFIE_TYPE_CHALLENGE: @@ -1290,6 +1300,7 @@ static inline void update_network(struct ieee80211_network *dst, dst->beacon_interval = src->beacon_interval; dst->listen_interval = src->listen_interval; dst->atim_window = src->atim_window; + dst->erp_value = src->erp_value; memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); dst->wpa_ie_len = src->wpa_ie_len; @@ -1471,6 +1482,18 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, (header->frame_ctl))); break; + case IEEE80211_STYPE_PROBE_REQ: + IEEE80211_DEBUG_MGMT("recieved auth (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + if (ieee->handle_probe_request != NULL) + ieee->handle_probe_request(ieee->dev, + (struct + ieee80211_probe_request *) + header, stats); + break; + case IEEE80211_STYPE_PROBE_RESP: IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", WLAN_FC_GET_STYPE(le16_to_cpu -- cgit v1.2.2 From ccd0fda3a6d9186d067893114f65b8df758d5a1f Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:58:32 -0500 Subject: [PATCH] ieee80211: Mixed PTK/GTK CCMP/TKIP support tree 5c7559a1216ae1121487f6aed94a6017490729b3 parent c1ff4c22e5622c8987bf96c09158c4924cde98c2 author Hong Liu 1125482767 +0800 committer James Ketrenos 1127314427 -0500 Mixed PTK/GTK CCMP/TKIP support. Signed-off-by: Hong Liu Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_module.c | 2 ++ net/ieee80211/ieee80211_rx.c | 5 +++-- net/ieee80211/ieee80211_wx.c | 13 ++++++++++--- 3 files changed, 15 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index dddc61647390..941f1a13fafa 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -133,6 +133,8 @@ struct net_device *alloc_ieee80211(int sizeof_priv) /* Default to enabling full open WEP with host based encrypt/decrypt */ ieee->host_encrypt = 1; ieee->host_decrypt = 1; + ieee->host_mc_decrypt = 1; + /* Host fragementation in Open mode. Default is enabled. * Note: host fragmentation is always enabled if host encryption * is enabled. For cards can do hardware encryption, they must do diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 8bcdbabae3a1..65315bcd6e0a 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -409,7 +409,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, return 1; } - if (ieee->host_decrypt) { + if (is_multicast_ether_addr(hdr->addr1) ? ieee->host_mc_decrypt : + ieee->host_decrypt) { int idx = 0; if (skb->len >= hdrlen + 3) idx = skb->data[hdrlen + 3] >> 6; @@ -1066,7 +1067,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->flags = 0; network->atim_window = 0; network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
- 0x3 : 0x0; + 0x3 : 0x0; if (stats->freq == IEEE80211_52GHZ_BAND) { /* for A band (No DS info) */ diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index db66217699d5..d710f47c4bd5 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -493,6 +493,7 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int i, idx, ret = 0; + int group_key = 0; const char *alg, *module; struct ieee80211_crypto_ops *ops; struct ieee80211_crypt_data **crypt; @@ -509,9 +510,10 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, } else idx = ieee->tx_keyidx; - if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { crypt = &ieee->crypt[idx]; - else { + group_key = 1; + } else { if (idx != 0) return -EINVAL; if (ieee->iw_mode == IW_MODE_INFRA) @@ -542,7 +544,9 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, sec.enabled = 1; sec.encrypt = 1; - if (!(ieee->host_encrypt || ieee->host_decrypt)) + if (group_key ? !ieee->host_mc_decrypt : + !(ieee->host_encrypt || ieee->host_decrypt || + ieee->host_encrypt_msdu)) goto skip_host_crypt; switch (ext->alg) { @@ -632,6 +636,9 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_3; } + /* Don't set sec level for group keys. */ + if (group_key) + sec.flags &= ~SEC_LEVEL; } done: if (ieee->set_security) -- cgit v1.2.2 From 7dc888fefc053996354ca40602159e0ce5669f86 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:58:38 -0500 Subject: [PATCH] ieee80211: Keep auth mode unchanged after iwconfig key off/on cycle tree 2e6f6e7dc4f4eeb8e3dc265020016dd53e40578a parent ba2075794a089430b3dd7c90ff46ce1b67e9c7cc author Zhu Yi 1125551043 +0800 committer James Ketrenos 1127314475 -0500 [Bug 768] Keep auth mode unchanged after iwconfig key off/on cycle. Signed-off-by: Zhu Yi Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_wx.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index d710f47c4bd5..65aa96da730d 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -412,11 +412,15 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, sec.flags |= SEC_ACTIVE_KEY; } } - ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); - sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; - sec.flags |= SEC_AUTH_MODE; - IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ? - "OPEN" : "SHARED KEY"); + if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) { + ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); + sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : + WLAN_AUTH_SHARED_KEY; + sec.flags |= SEC_AUTH_MODE; + IEEE80211_DEBUG_WX("Auth: %s\n", + sec.auth_mode == WLAN_AUTH_OPEN ? + "OPEN" : "SHARED KEY"); + } /* For now we just support WEP, so only set that security level... 
* TODO: When WPA is added this is one place that needs to change */ -- cgit v1.2.2 From ebeaddcc02fd47d1dbb7f25318d046461d90e4af Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:58:43 -0500 Subject: [PATCH] ieee80211: Updated copyright dates tree 0d3e41e574fcb41b9da7f0b7e1d27ec350726654 parent dbe2885fe2f454d538eaaabefc741ded1026f476 author James Ketrenos 1126720499 -0500 committer James Ketrenos 1127314531 -0500 Updated copyright dates. NOTE: This is a split out of just the copyright updates from patch 24/29 in the prior series. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_module.c | 2 +- net/ieee80211/ieee80211_rx.c | 2 +- net/ieee80211/ieee80211_tx.c | 2 +- net/ieee80211/ieee80211_wx.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 941f1a13fafa..0ae91c9917cf 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(c) 2004 Intel Corporation. All rights reserved. + Copyright(c) 2004-2005 Intel Corporation. All rights reserved. Portions of this file are based on the WEP enablement code provided by the Host AP project hostap-drivers v0.1.3 diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 65315bcd6e0a..256d5524445c 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -5,7 +5,7 @@ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * * Copyright (c) 2002-2003, Jouni Malinen - * Copyright (c) 2004, Intel Corporation + * Copyright (c) 2004-2005, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index aba72f9880a1..24ade5f68e02 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 65aa96da730d..e28648e70cf1 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright(c) 2004 Intel Corporation. All rights reserved. + Copyright(c) 2004-2005 Intel Corporation. All rights reserved. Portions of this file are based on the WEP enablement code provided by the Host AP project hostap-drivers v0.1.3 -- cgit v1.2.2 From 9a01c16bd49071b2e7904d222cae71d5f8bf6bb5 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 14:33:32 -0500 Subject: [PATCH] ieee80211: Remove WIRELESS_EXT ifdefs Remove old WIRELESS_EXT version compatibility. In-tree doesn't need to maintain backward compatibility.
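The pattern being dropped is a preprocessor version ladder keyed on WIRELESS_EXT. A minimal, self-contained sketch of that shape follows; the names (report_event) and the fixed WIRELESS_EXT value are hypothetical, not the in-tree code, which appears in the ieee80211_michael_mic_failure() hunks below.

#include <stdio.h>

#define WIRELESS_EXT 18		/* in-tree code can assume the current version */

#if WIRELESS_EXT >= 18
static void report_event(void)
{
	printf("send a structured wireless event\n");
}
#elif WIRELESS_EXT >= 15
static void report_event(void)
{
	printf("fall back to a custom string event\n");
}
#else
static inline void report_event(void)
{
	/* too old: report nothing */
}
#endif

int main(void)
{
	report_event();	/* with WIRELESS_EXT pinned, only one branch is ever built */
	return 0;
}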
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt_tkip.c | 23 ----------------------- net/ieee80211/ieee80211_wx.c | 2 -- 2 files changed, 25 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index f973d6cb8248..21022f195bab 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -515,7 +515,6 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, return 0; } -#if WIRELESS_EXT >= 18 static void ieee80211_michael_mic_failure(struct net_device *dev, struct ieee80211_hdr_4addr *hdr, int keyidx) @@ -536,28 +535,6 @@ static void ieee80211_michael_mic_failure(struct net_device *dev, wrqu.data.length = sizeof(ev); wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); } -#elif WIRELESS_EXT >= 15 -static void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr_4addr *hdr, - int keyidx) -{ - union iwreq_data wrqu; - char buf[128]; - - /* TODO: needed parameters: count, keyid, key type, TSC */ - sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=" - MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni", - MAC_ARG(hdr->addr2)); - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = strlen(buf); - wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); -} -#else /* WIRELESS_EXT >= 15 */ -static inline void ieee80211_michael_mic_failure(struct net_device *dev, struct ieee80211_hdr_4addr - *hdr, int keyidx) -{ -} -#endif /* WIRELESS_EXT >= 15 */ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, int hdr_len, void *priv) diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index e28648e70cf1..3dd2bbae0c24 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -488,7 +488,6 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, return 0; } -#if WIRELESS_EXT > 17 int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) @@ -722,7 +721,6 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, EXPORT_SYMBOL(ieee80211_wx_set_encodeext); EXPORT_SYMBOL(ieee80211_wx_get_encodeext); -#endif EXPORT_SYMBOL(ieee80211_wx_get_scan); EXPORT_SYMBOL(ieee80211_wx_set_encode); -- cgit v1.2.2 From 31696160c7415b5a7efa650c7f1ca5c9623f5d8f Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:58:46 -0500 Subject: [PATCH] ieee80211: Added subsystem version string and reporting via MODULE_VERSION tree c1b50ac5d2d1f9b727c39c6bd86a7872f25a1127 parent 1bb997a3ac7dd1941e02426d2f70bd28993a82b7 author James Ketrenos 1126720779 -0500 committer James Ketrenos 1127314674 -0500 Added subsystem version string and reporting via MODULE_VERSION and pritnk during load. NOTE: This is the version support split out from patch 24/29 of the prior series. 
Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_module.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 0ae91c9917cf..319312564167 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -53,13 +53,16 @@ #include -MODULE_DESCRIPTION("802.11 data/management/control stack"); -MODULE_AUTHOR - ("Copyright (C) 2004 Intel Corporation "); +#define DRV_DESCRIPTION "802.11 data/management/control stack" +#define DRV_NAME "ieee80211" +#define DRV_VERSION IEEE80211_VERSION +#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation " + +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); -#define DRV_NAME "ieee80211" - static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee) { if (ieee->networks) @@ -220,9 +223,11 @@ static int store_debug_level(struct file *file, const char __user * buffer, return strnlen(buf, len); } +#endif /* CONFIG_IEEE80211_DEBUG */ static int __init ieee80211_init(void) { +#ifdef CONFIG_IEEE80211_DEBUG struct proc_dir_entry *e; ieee80211_debug_level = debug; @@ -242,26 +247,33 @@ static int __init ieee80211_init(void) e->read_proc = show_debug_level; e->write_proc = store_debug_level; e->data = NULL; +#endif /* CONFIG_IEEE80211_DEBUG */ + + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); return 0; } static void __exit ieee80211_exit(void) { +#ifdef CONFIG_IEEE80211_DEBUG if (ieee80211_proc) { remove_proc_entry("debug_level", ieee80211_proc); remove_proc_entry(DRV_NAME, proc_net); ieee80211_proc = NULL; } +#endif /* CONFIG_IEEE80211_DEBUG */ } +#ifdef CONFIG_IEEE80211_DEBUG #include module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "debug output mask"); +#endif /* CONFIG_IEEE80211_DEBUG */ module_exit(ieee80211_exit); module_init(ieee80211_init); -#endif const char *escape_essid(const char *essid, u8 essid_len) { -- cgit v1.2.2 From 31b59eaee8f8ec29d8cb6ac0c8eed086689d8030 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Wed, 21 Sep 2005 11:58:49 -0500 Subject: [PATCH] ieee80211: Added handle_deauth() callback, enhanced tkip/ccmp support of varying hw/sw offload tree de81b55e78e85997642c651ea677078d0554a14f parent c8030da8c159f8b82712172a6748a42523aea83a author James Ketrenos 1127104380 -0500 committer James Ketrenos 1127315225 -0500 Added handle_deauth() callback. Enhanced crypt_{tkip,ccmp} to support varying splits of HW/SW offload. Changed channel freq to u32 from u16. 
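The HW/SW split works by exposing IV/header construction separately from full MPDU encryption, so a driver whose hardware runs the cipher can still have the stack build the per-frame header. A rough standalone sketch of that call pattern, with hypothetical names and flags rather than the real ieee80211 structures:

#include <stdio.h>

struct crypto_ops {
	int (*build_iv)(char *frame);       /* header/IV only                 */
	int (*encrypt_mpdu)(char *frame);   /* header/IV + cipher + MIC/ICV   */
};

static int ccmp_build_hdr(char *frame)
{
	printf("IV built for %s\n", frame);
	return 0;
}

static int ccmp_encrypt(char *frame)
{
	ccmp_build_hdr(frame);
	printf("payload encrypted for %s\n", frame);
	return 0;
}

static const struct crypto_ops ccmp_ops = {
	.build_iv     = ccmp_build_hdr,
	.encrypt_mpdu = ccmp_encrypt,
};

/* host_encrypt: software does everything; host_build_iv: hardware runs the
 * cipher and software only prepends the IV, mirroring the branches added to
 * ieee80211_xmit() in this series. */
static void xmit(const struct crypto_ops *ops, char *frame,
		 int host_encrypt, int host_build_iv)
{
	if (host_encrypt)
		ops->encrypt_mpdu(frame);
	else if (host_build_iv && ops->build_iv)
		ops->build_iv(frame);
	/* otherwise the hardware handles the whole MPDU */
}

int main(void)
{
	char frame[] = "frame0";

	xmit(&ccmp_ops, frame, 0, 1);	/* HW cipher, SW-built IV */
	return 0;
}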
Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt_ccmp.c | 41 ++++++++++++++++-------- net/ieee80211/ieee80211_crypt_tkip.c | 60 +++++++++++++++++++++++++----------- net/ieee80211/ieee80211_rx.c | 6 ++++ net/ieee80211/ieee80211_tx.c | 18 +++++++++-- 4 files changed, 93 insertions(+), 32 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index a3dc5712b98b..081d8575dbb1 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -191,26 +191,18 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, ieee80211_ccmp_aes_encrypt(tfm, b0, s0); } -static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; - int data_len, i, blocks, last, len; - u8 *pos, *mic; - struct ieee80211_hdr_4addr *hdr; - u8 *b0 = key->tx_b0; - u8 *b = key->tx_b; - u8 *e = key->tx_e; - u8 *s0 = key->tx_s0; + int i; + u8 *pos; - if (skb_headroom(skb) < CCMP_HDR_LEN || - skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) + if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) return -1; - data_len = skb->len - hdr_len; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; - mic = skb_put(skb, CCMP_MIC_LEN); i = CCMP_PN_LEN - 1; while (i >= 0) { @@ -229,6 +221,30 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; + return CCMP_HDR_LEN; +} + +static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct ieee80211_ccmp_data *key = priv; + int data_len, i, blocks, last, len; + u8 *pos, *mic; + struct ieee80211_hdr_4addr *hdr; + u8 *b0 = key->tx_b0; + u8 *b = key->tx_b; + u8 *e = key->tx_e; + u8 *s0 = key->tx_s0; + + if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) + return -1; + + data_len = skb->len - hdr_len; + len = ieee80211_ccmp_hdr(skb, hdr_len, priv); + if (len < 0) + return -1; + + pos = skb->data + hdr_len + CCMP_HDR_LEN; + mic = skb_put(skb, CCMP_MIC_LEN); hdr = (struct ieee80211_hdr_4addr *)skb->data; ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); @@ -429,6 +445,7 @@ static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { .name = "CCMP", .init = ieee80211_ccmp_init, .deinit = ieee80211_ccmp_deinit, + .build_iv = ieee80211_ccmp_hdr, .encrypt_mpdu = ieee80211_ccmp_encrypt, .decrypt_mpdu = ieee80211_ccmp_decrypt, .encrypt_msdu = NULL, diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 21022f195bab..e0733050ae71 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -260,35 +260,27 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, #endif } -static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; int len; - u8 rc4key[16], *pos, *icv; + u8 *rc4key, *pos, *icv; struct ieee80211_hdr_4addr *hdr; u32 crc; - struct scatterlist sg; hdr = (struct ieee80211_hdr_4addr *)skb->data; - if (tkey->ieee->tkip_countermeasures) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "TX packet to " MAC_FMT "\n", - tkey->ieee->dev->name, MAC_ARG(hdr->addr1)); - } - return -1; - } - - if (skb_headroom(skb) < 8 || 
skb_tailroom(skb) < 4 || - skb->len < hdr_len) - return -1; + if (skb_headroom(skb) < 8 || skb->len < hdr_len) + return NULL; if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); tkey->tx_phase1_done = 1; } + rc4key = kmalloc(16, GFP_ATOMIC); + if (!rc4key) + return NULL; tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); len = skb->len - hdr_len; @@ -297,9 +289,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += hdr_len; icv = skb_put(skb, 4); - *pos++ = rc4key[0]; - *pos++ = rc4key[1]; - *pos++ = rc4key[2]; + *pos++ = *rc4key; + *pos++ = *(rc4key + 1); + *pos++ = *(rc4key + 2); *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; *pos++ = tkey->tx_iv32 & 0xff; *pos++ = (tkey->tx_iv32 >> 8) & 0xff; @@ -312,6 +304,38 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; + return rc4key; +} + +static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct ieee80211_tkip_data *tkey = priv; + int len; + const u8 *rc4key; + u8 *pos; + struct scatterlist sg; + + if (tkey->ieee->tkip_countermeasures) { + if (net_ratelimit()) { + struct ieee80211_hdr_4addr *hdr = + (struct ieee80211_hdr_4addr *)skb->data; + printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " + "TX packet to " MAC_FMT "\n", + tkey->ieee->dev->name, MAC_ARG(hdr->addr1)); + } + return -1; + } + + if (skb_tailroom(skb) < 4 || skb->len < hdr_len) + return -1; + + len = skb->len - hdr_len; + pos = skb->data + hdr_len; + + rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv); + if (!rc4key) + return -1; + crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 256d5524445c..fcf05bf677b8 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -1534,6 +1534,12 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, header); break; + case IEEE80211_STYPE_DEAUTH: + printk("DEAUTH from AP\n"); + if (ieee->handle_deauth != NULL) + ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *) + header); + break; default: IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", WLAN_FC_GET_STYPE(le16_to_cpu diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 24ade5f68e02..8d87897d7eb7 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -227,7 +227,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) rts_required; unsigned long flags; struct net_device_stats *stats = &ieee->stats; - int ether_type, encrypt, host_encrypt, host_encrypt_msdu; + int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; int bytes, fc, hdr_len; struct sk_buff *skb_frag; struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ @@ -263,8 +263,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && ieee->sec.encrypt; + host_encrypt = ieee->host_encrypt && encrypt; host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt; + host_build_iv = ieee->host_build_iv && encrypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { @@ -310,8 +312,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + crypt->ops->extra_msdu_postfix_len; struct 
sk_buff *skb_new = dev_alloc_skb(len); + if (unlikely(!skb_new)) goto failed; + skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len); memcpy(skb_put(skb_new, hdr_len), &header, hdr_len); snapped = 1; @@ -418,7 +422,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) for (; i < nr_frags; i++) { skb_frag = txb->fragments[i]; - if (host_encrypt) + if (host_encrypt || host_build_iv) skb_reserve(skb_frag, crypt->ops->extra_mpdu_prefix_len); @@ -453,6 +457,16 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) * to insert the IV between the header and the payload */ if (host_encrypt) ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); + else if (host_build_iv) { + struct ieee80211_crypt_data *crypt; + + crypt = ieee->crypt[ieee->tx_keyidx]; + atomic_inc(&crypt->refcnt); + if (crypt->ops->build_iv) + crypt->ops->build_iv(skb_frag, hdr_len, + crypt->priv); + atomic_dec(&crypt->refcnt); + } if (ieee->config & (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) -- cgit v1.2.2 From 6eb6edf04acd09e3cea09456913e8da59323b89e Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Thu, 22 Sep 2005 10:34:15 +0000 Subject: [PATCH] ieee80211: in-tree driver updates to sync with latest ieee80211 series Changed crypto method from requiring a struct ieee80211_device reference to the init handler. Instead we now have a get/set flags method for each crypto component. Setting of TKIP countermeasures can now be done via set_flags(IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_crypt.c | 3 +-- net/ieee80211/ieee80211_crypt_ccmp.c | 2 +- net/ieee80211/ieee80211_crypt_tkip.c | 34 ++++++++++++++++++++++++---------- net/ieee80211/ieee80211_crypt_wep.c | 2 +- net/ieee80211/ieee80211_module.c | 1 - net/ieee80211/ieee80211_wx.c | 4 ++-- 6 files changed, 29 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index e26bcc918032..f3b6aa3be638 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -202,8 +202,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name) return NULL; } -static void *ieee80211_crypt_null_init(struct ieee80211_device *ieee, - int keyidx) +static void *ieee80211_crypt_null_init(int keyidx) { return (void *)1; } diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 081d8575dbb1..05a853c13012 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -74,7 +74,7 @@ static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN); } -static void *ieee80211_ccmp_init(struct ieee80211_device *ieee, int key_idx) +static void *ieee80211_ccmp_init(int key_idx) { struct ieee80211_ccmp_data *priv; diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index e0733050ae71..2e34f29b7956 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -60,10 +60,24 @@ struct ieee80211_tkip_data { /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; - struct ieee80211_device *ieee; + unsigned long flags; }; -static void *ieee80211_tkip_init(struct ieee80211_device *ieee, int key_idx) +static unsigned long ieee80211_tkip_set_flags(unsigned long flags, void *priv) +{ + struct ieee80211_tkip_data *_priv = priv; + unsigned long old_flags = _priv->flags; + 
_priv->flags = flags; + return old_flags; +} + +static unsigned long ieee80211_tkip_get_flags(void *priv) +{ + struct ieee80211_tkip_data *_priv = priv; + return _priv->flags; +} + +static void *ieee80211_tkip_init(int key_idx) { struct ieee80211_tkip_data *priv; @@ -72,8 +86,6 @@ static void *ieee80211_tkip_init(struct ieee80211_device *ieee, int key_idx) goto fail; memset(priv, 0, sizeof(*priv)); - priv->ieee = ieee; - priv->key_idx = key_idx; priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); @@ -315,13 +327,13 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 *pos; struct scatterlist sg; - if (tkey->ieee->tkip_countermeasures) { + if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { if (net_ratelimit()) { struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *)skb->data; - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " + printk(KERN_DEBUG "TKIP countermeasures: dropped " "TX packet to " MAC_FMT "\n", - tkey->ieee->dev->name, MAC_ARG(hdr->addr1)); + MAC_ARG(hdr->addr1)); } return -1; } @@ -366,11 +378,11 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) hdr = (struct ieee80211_hdr_4addr *)skb->data; - if (tkey->ieee->tkip_countermeasures) { + if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { if (net_ratelimit()) { - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " + printk(KERN_DEBUG "TKIP countermeasures: dropped " "received packet from " MAC_FMT "\n", - tkey->ieee->dev->name, MAC_ARG(hdr->addr2)); + MAC_ARG(hdr->addr2)); } return -1; } @@ -694,6 +706,8 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ .extra_mpdu_postfix_len = 4, /* ICV */ .extra_msdu_postfix_len = 8, /* MIC */ + .get_flags = ieee80211_tkip_get_flags, + .set_flags = ieee80211_tkip_set_flags, .owner = THIS_MODULE, }; diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 2aaeac1e02d7..7c08ed2f2628 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -37,7 +37,7 @@ struct prism2_wep_data { struct crypto_tfm *tfm; }; -static void *prism2_wep_init(struct ieee80211_device *ieee, int keyidx) +static void *prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 319312564167..5714692e82b4 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -155,7 +155,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv) spin_lock_init(&ieee->lock); ieee->wpa_enabled = 0; - ieee->tkip_countermeasures = 0; ieee->drop_unencrypted = 0; ieee->privacy_invoked = 0; diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 3dd2bbae0c24..ee7a70a13250 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -355,7 +355,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, } if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) - new_crypt->priv = new_crypt->ops->init(ieee, key); + new_crypt->priv = new_crypt->ops->init(key); if (!new_crypt->ops || !new_crypt->priv) { kfree(new_crypt); @@ -598,7 +598,7 @@ int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) - new_crypt->priv = new_crypt->ops->init(ieee, idx); + new_crypt->priv = new_crypt->ops->init(idx); if (new_crypt->priv == 
NULL) { kfree(new_crypt); ret = -EINVAL; -- cgit v1.2.2 From 23475d66bd8600e0c5353f86c1b74f68df27bdb5 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:08 -0400 Subject: [PATCH] RPC: Report connection errors properly when mounting with "soft" Fix up xprt_connect_status: the soft timeout logic was clobbering tk_status, so TCP connect errors were not properly reported on soft mounts. Test-plan: Destructive testing (unplugging the network temporarily). Connectathon with UDP and TCP. Version: Thu, 11 Aug 2005 16:01:28 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3c654e06b084..b28ea0cc0cb7 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -592,24 +592,33 @@ xprt_connect_status(struct rpc_task *task) return; } - /* if soft mounted, just cause this RPC to fail */ - if (RPC_IS_SOFT(task)) - task->tk_status = -EIO; - switch (task->tk_status) { case -ECONNREFUSED: case -ECONNRESET: + dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n", + task->tk_pid, task->tk_client->cl_server); + break; case -ENOTCONN: - return; + dprintk("RPC: %4d xprt_connect_status: connection broken\n", + task->tk_pid); + break; case -ETIMEDOUT: - dprintk("RPC: %4d xprt_connect_status: timed out\n", + dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n", task->tk_pid); break; default: - printk(KERN_ERR "RPC: error %d connecting to server %s\n", - -task->tk_status, task->tk_client->cl_server); + dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n", + task->tk_pid, -task->tk_status, task->tk_client->cl_server); + xprt_release_write(xprt, task); + task->tk_status = -EIO; + return; + } + + /* if soft mounted, just cause this RPC to fail */ + if (RPC_IS_SOFT(task)) { + xprt_release_write(xprt, task); + task->tk_status = -EIO; } - xprt_release_write(xprt, task); } /* -- cgit v1.2.2 From da35187801732397a7e05fb9e77f3700cc35f5db Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:11 -0400 Subject: [PATCH] RPC: proper soft timeout behavior for rpcbind Implement a best practice: for soft mounts, an rpcbind timeout should cause an RPC request to fail. This also provides an FSM hook for retrying an rpcbind with a different rpcbind protocol version. We'll use this later to try multiple rpcbind protocol versions when binding. To enable this, expose the RPC error code returned during a portmap request to the FSM so it can make some decision about how to report, retry, or fail the request. Test-plan: Hundreds of passes with connectathon NFSv3 locking suite, on the client and server. 
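The policy this adds is small enough to state as a standalone decision function. The sketch below is only illustrative (hypothetical names, plain userspace C, not the kernel FSM): a timed-out rpcbind query fails immediately on a soft mount and is retried on a hard mount, while "program/version unavailable" style errors always fail the request.

#include <errno.h>
#include <stdio.h>

enum bind_action { BIND_FAIL, BIND_RETRY, BIND_PROCEED };

/* Hypothetical helper mirroring the shape of the new bind-status decision. */
static enum bind_action classify_bind_status(int status, int soft_mount)
{
	if (status >= 0)
		return BIND_PROCEED;            /* port obtained, go connect */

	switch (status) {
	case -ETIMEDOUT:
		return soft_mount ? BIND_FAIL : BIND_RETRY;
	case -EACCES:                           /* program/version unavailable */
	case -EPFNOSUPPORT:                     /* rpcbind service unavailable */
	case -EPROTONOSUPPORT:                  /* rpcbind version unsupported */
	default:
		return BIND_FAIL;
	}
}

int main(void)
{
	printf("soft mount + timeout -> %d (0 = fail)\n",
	       classify_bind_status(-ETIMEDOUT, 1));
	printf("hard mount + timeout -> %d (1 = retry)\n",
	       classify_bind_status(-ETIMEDOUT, 0));
	return 0;
}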
Version: Thu, 11 Aug 2005 16:01:53 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 97 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 77 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index f17e6153b688..2d3cf0a52d82 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -53,6 +53,7 @@ static void call_allocate(struct rpc_task *task); static void call_encode(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); +static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); static void call_status(struct rpc_task *task); static void call_refresh(struct rpc_task *task); @@ -734,43 +735,94 @@ static void call_bind(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; - struct rpc_xprt *xprt = clnt->cl_xprt; - - dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid, - xprt, (xprt_connected(xprt) ? "is" : "is not")); - task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect; + dprintk("RPC: %4d call_bind (status %d)\n", + task->tk_pid, task->tk_status); + task->tk_action = call_connect; if (!clnt->cl_port) { - task->tk_action = call_connect; + task->tk_action = call_bind_status; task->tk_timeout = RPC_CONNECT_TIMEOUT; rpc_getport(task, clnt); } } /* - * 4a. Connect to the RPC server (TCP case) + * 4a. Sort out bind result + */ +static void +call_bind_status(struct rpc_task *task) +{ + int status = -EACCES; + + if (task->tk_status >= 0) { + dprintk("RPC: %4d call_bind_status (status %d)\n", + task->tk_pid, task->tk_status); + task->tk_status = 0; + task->tk_action = call_connect; + return; + } + + switch (task->tk_status) { + case -EACCES: + dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", + task->tk_pid); + break; + case -ETIMEDOUT: + dprintk("RPC: %4d rpcbind request timed out\n", + task->tk_pid); + if (RPC_IS_SOFT(task)) { + status = -EIO; + break; + } + goto retry_bind; + case -EPFNOSUPPORT: + dprintk("RPC: %4d remote rpcbind service unavailable\n", + task->tk_pid); + break; + case -EPROTONOSUPPORT: + dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", + task->tk_pid); + break; + default: + dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", + task->tk_pid, -task->tk_status); + status = -EIO; + break; + } + + rpc_exit(task, status); + return; + +retry_bind: + task->tk_status = 0; + task->tk_action = call_bind; + return; +} + +/* + * 4b. Connect to the RPC server */ static void call_connect(struct rpc_task *task) { - struct rpc_clnt *clnt = task->tk_client; + struct rpc_xprt *xprt = task->tk_xprt; - dprintk("RPC: %4d call_connect status %d\n", - task->tk_pid, task->tk_status); + dprintk("RPC: %4d call_connect xprt %p %s connected\n", + task->tk_pid, xprt, + (xprt_connected(xprt) ? "is" : "is not")); - if (xprt_connected(clnt->cl_xprt)) { - task->tk_action = call_transmit; - return; + task->tk_action = call_transmit; + if (!xprt_connected(xprt)) { + task->tk_action = call_connect_status; + if (task->tk_status < 0) + return; + xprt_connect(task); } - task->tk_action = call_connect_status; - if (task->tk_status < 0) - return; - xprt_connect(task); } /* - * 4b. Sort out connect result + * 4c. 
Sort out connect result */ static void call_connect_status(struct rpc_task *task) @@ -778,6 +830,9 @@ call_connect_status(struct rpc_task *task) struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; + dprintk("RPC: %5u call_connect_status (status %d)\n", + task->tk_pid, task->tk_status); + task->tk_status = 0; if (status >= 0) { clnt->cl_stats->netreconn++; @@ -785,17 +840,19 @@ call_connect_status(struct rpc_task *task) return; } - /* Something failed: we may have to rebind */ + /* Something failed: remote service port may have changed */ if (clnt->cl_autobind) clnt->cl_port = 0; + switch (status) { case -ENOTCONN: case -ETIMEDOUT: case -EAGAIN: - task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect; + task->tk_action = call_bind; break; default: rpc_exit(task, -EIO); + break; } } -- cgit v1.2.2 From eab5c084b858fd95a873fc2b97de9a9ad937b4ed Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:14 -0400 Subject: [PATCH] NFS: use a constant value for TCP retransmit timeouts Implement a best practice: don't use exponential backoff when computing retransmit timeout values on TCP connections, but simply retransmit at regular intervals. This also fixes a bug introduced when xprt_reset_majortimeo() was added. Test-plan: Enable RPC debugging and watch timeout behavior on a NFS/TCP mount. Version: Thu, 11 Aug 2005 16:02:19 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index b28ea0cc0cb7..0e4ffdaa0129 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1453,7 +1453,7 @@ xprt_default_timeout(struct rpc_timeout *to, int proto) if (proto == IPPROTO_UDP) xprt_set_timeout(to, 5, 5 * HZ); else - xprt_set_timeout(to, 5, 60 * HZ); + xprt_set_timeout(to, 2, 60 * HZ); } /* @@ -1464,7 +1464,7 @@ xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) { to->to_initval = to->to_increment = incr; - to->to_maxval = incr * retr; + to->to_maxval = to->to_initval + (incr * retr); to->to_retries = retr; to->to_exponential = 0; } -- cgit v1.2.2 From 602f83273c89fdd25f24757564d8001cf723e740 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:17 -0400 Subject: [PATCH] RPC: portmapper doesn't need a reserved port The in-kernel portmapper does not require a reserved port for making bind queries. Test-plan: Tens of runs of the Connectathon locking suite with TCP and UDP against several other NFS server implementations using NFSv3, not NFSv4 (which doesn't require rpcbind). 
Version: Thu, 11 Aug 2005 16:02:43 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/pmap_clnt.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index 4e81f2766923..d8e3f220002b 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c @@ -208,6 +208,7 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; xprt->addr.sin_port = htons(RPC_PMAP_PORT); + xprt->resvport = 0; /* printk("pmap: create clnt\n"); */ clnt = rpc_new_client(xprt, hostname, -- cgit v1.2.2 From 094bb20b9fcab3a1652a77741caba6b78097d622 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:20 -0400 Subject: [PATCH] RPC: extract socket logic common to both client and server Clean-up: Move some code that is common to both RPC client- and server-side socket transports into its own source file, net/sunrpc/socklib.c. Test-plan: Compile kernel with CONFIG_NFS enabled. Millions of fsx operations over UDP, client and server. Connectathon over UDP. Version: Thu, 11 Aug 2005 16:03:09 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/Makefile | 2 +- net/sunrpc/socklib.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++++++ net/sunrpc/svcsock.c | 3 - net/sunrpc/xdr.c | 75 ---------------------- net/sunrpc/xprt.c | 64 ------------------- 5 files changed, 176 insertions(+), 143 deletions(-) create mode 100644 net/sunrpc/socklib.c (limited to 'net') diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 46a2ce00a29b..f0a955627177 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_SUNRPC) += sunrpc.o obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ -sunrpc-y := clnt.o xprt.o sched.o \ +sunrpc-y := clnt.o xprt.o socklib.o sched.o \ auth.o auth_null.o auth_unix.o \ svc.o svcsock.o svcauth.o svcauth_unix.o \ pmap_clnt.o timer.o xdr.o \ diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c new file mode 100644 index 000000000000..8f97e90f36c8 --- /dev/null +++ b/net/sunrpc/socklib.c @@ -0,0 +1,175 @@ +/* + * linux/net/sunrpc/socklib.c + * + * Common socket helper routines for RPC client and server + * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + +#include +#include +#include +#include + + +/** + * skb_read_bits - copy some data bits from skb to internal buffer + * @desc: sk_buff copy helper + * @to: copy destination + * @len: number of bytes to copy + * + * Possibly called several times to iterate over an sk_buff and copy + * data out of it. + */ +static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) +{ + if (len > desc->count) + len = desc->count; + if (skb_copy_bits(desc->skb, desc->offset, to, len)) + return 0; + desc->count -= len; + desc->offset += len; + return len; +} + +/** + * skb_read_and_csum_bits - copy and checksum from skb to buffer + * @desc: sk_buff copy helper + * @to: copy destination + * @len: number of bytes to copy + * + * Same as skb_read_bits, but calculate a checksum at the same time. 
+ */ +static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) +{ + unsigned int csum2, pos; + + if (len > desc->count) + len = desc->count; + pos = desc->offset; + csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); + desc->csum = csum_block_add(desc->csum, csum2, pos); + desc->count -= len; + desc->offset += len; + return len; +} + +/** + * xdr_partial_copy_from_skb - copy data out of an skb + * @xdr: target XDR buffer + * @base: starting offset + * @desc: sk_buff copy helper + * @copy_actor: virtual method for copying data + * + */ +ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor) +{ + struct page **ppage = xdr->pages; + unsigned int len, pglen = xdr->page_len; + ssize_t copied = 0; + int ret; + + len = xdr->head[0].iov_len; + if (base < len) { + len -= base; + ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); + copied += ret; + if (ret != len || !desc->count) + goto out; + base = 0; + } else + base -= len; + + if (unlikely(pglen == 0)) + goto copy_tail; + if (unlikely(base >= pglen)) { + base -= pglen; + goto copy_tail; + } + if (base || xdr->page_base) { + pglen -= base; + base += xdr->page_base; + ppage += base >> PAGE_CACHE_SHIFT; + base &= ~PAGE_CACHE_MASK; + } + do { + char *kaddr; + + /* ACL likes to be lazy in allocating pages - ACLs + * are small by default but can get huge. */ + if (unlikely(*ppage == NULL)) { + *ppage = alloc_page(GFP_ATOMIC); + if (unlikely(*ppage == NULL)) { + if (copied == 0) + copied = -ENOMEM; + goto out; + } + } + + len = PAGE_CACHE_SIZE; + kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); + if (base) { + len -= base; + if (pglen < len) + len = pglen; + ret = copy_actor(desc, kaddr + base, len); + base = 0; + } else { + if (pglen < len) + len = pglen; + ret = copy_actor(desc, kaddr, len); + } + flush_dcache_page(*ppage); + kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); + copied += ret; + if (ret != len || !desc->count) + goto out; + ppage++; + } while ((pglen -= len) != 0); +copy_tail: + len = xdr->tail[0].iov_len; + if (base < len) + copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); +out: + return copied; +} + +/** + * csum_partial_copy_to_xdr - checksum and copy data + * @xdr: target XDR buffer + * @skb: source skb + * + * We have set things up such that we perform the checksum of the UDP + * packet in parallel with the copies into the RPC client iovec. 
-DaveM + */ +int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) +{ + skb_reader_t desc; + + desc.skb = skb; + desc.offset = sizeof(struct udphdr); + desc.count = skb->len - desc.offset; + + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + goto no_checksum; + + desc.csum = csum_partial(skb->data, desc.offset, skb->csum); + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) + return -1; + if (desc.offset != skb->len) { + unsigned int csum2; + csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); + desc.csum = csum_block_add(desc.csum, csum2, desc.offset); + } + if (desc.count) + return -1; + if ((unsigned short)csum_fold(desc.csum)) + return -1; + return 0; +no_checksum: + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) + return -1; + if (desc.count) + return -1; + return 0; +} diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 30ec3efc48a6..130f2b5d93dd 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -548,9 +548,6 @@ svc_write_space(struct sock *sk) /* * Receive a datagram from a UDP socket. */ -extern int -csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb); - static int svc_udp_recvfrom(struct svc_rqst *rqstp) { diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index fde16f40a581..9cc12aeed22c 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -176,81 +176,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, xdr->buflen += len; } -ssize_t -xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, - skb_reader_t *desc, - skb_read_actor_t copy_actor) -{ - struct page **ppage = xdr->pages; - unsigned int len, pglen = xdr->page_len; - ssize_t copied = 0; - int ret; - - len = xdr->head[0].iov_len; - if (base < len) { - len -= base; - ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); - copied += ret; - if (ret != len || !desc->count) - goto out; - base = 0; - } else - base -= len; - - if (pglen == 0) - goto copy_tail; - if (base >= pglen) { - base -= pglen; - goto copy_tail; - } - if (base || xdr->page_base) { - pglen -= base; - base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; - } - do { - char *kaddr; - - /* ACL likes to be lazy in allocating pages - ACLs - * are small by default but can get huge. 
*/ - if (unlikely(*ppage == NULL)) { - *ppage = alloc_page(GFP_ATOMIC); - if (unlikely(*ppage == NULL)) { - if (copied == 0) - copied = -ENOMEM; - goto out; - } - } - - len = PAGE_CACHE_SIZE; - kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); - if (base) { - len -= base; - if (pglen < len) - len = pglen; - ret = copy_actor(desc, kaddr + base, len); - base = 0; - } else { - if (pglen < len) - len = pglen; - ret = copy_actor(desc, kaddr, len); - } - flush_dcache_page(*ppage); - kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); - copied += ret; - if (ret != len || !desc->count) - goto out; - ppage++; - } while ((pglen -= len) != 0); -copy_tail: - len = xdr->tail[0].iov_len; - if (base < len) - copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); -out: - return copied; -} - int xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 0e4ffdaa0129..67444f494fea 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -691,70 +691,6 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) return; } -static size_t -skb_read_bits(skb_reader_t *desc, void *to, size_t len) -{ - if (len > desc->count) - len = desc->count; - if (skb_copy_bits(desc->skb, desc->offset, to, len)) - return 0; - desc->count -= len; - desc->offset += len; - return len; -} - -static size_t -skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) -{ - unsigned int csum2, pos; - - if (len > desc->count) - len = desc->count; - pos = desc->offset; - csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); - desc->csum = csum_block_add(desc->csum, csum2, pos); - desc->count -= len; - desc->offset += len; - return len; -} - -/* - * We have set things up such that we perform the checksum of the UDP - * packet in parallel with the copies into the RPC client iovec. -DaveM - */ -int -csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) -{ - skb_reader_t desc; - - desc.skb = skb; - desc.offset = sizeof(struct udphdr); - desc.count = skb->len - desc.offset; - - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - goto no_checksum; - - desc.csum = csum_partial(skb->data, desc.offset, skb->csum); - if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) - return -1; - if (desc.offset != skb->len) { - unsigned int csum2; - csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); - desc.csum = csum_block_add(desc.csum, csum2, desc.offset); - } - if (desc.count) - return -1; - if ((unsigned short)csum_fold(desc.csum)) - return -1; - return 0; -no_checksum: - if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) - return -1; - if (desc.count) - return -1; - return 0; -} - /* * Input handler for RPC replies. Called from a bottom half and hence * atomic. -- cgit v1.2.2 From a246b0105bbd9a70a698f69baae2042996f2a0e9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:23 -0400 Subject: [PATCH] RPC: introduce client-side transport switch Move the bulk of client-side socket-specific code into a separate source file, net/sunrpc/xprtsock.c. Test-plan: Millions of fsx operations. Performance characterization such as "sio" or "iozone". Destructive testing (unplugging the network temporarily, server reboots). Connectathon with v2, v3, and v4. 
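In practice a "transport switch" is an ops table: generic client code calls through function pointers, and the socket code registers itself behind them. A minimal standalone sketch of that pattern, using hypothetical names rather than the sunrpc structures:

#include <stdio.h>

struct transport;

struct transport_ops {
	void (*connect)(struct transport *xprt);
	void (*close)(struct transport *xprt);
};

struct transport {
	const struct transport_ops *ops;
	const char *name;
};

static void sock_connect(struct transport *xprt)
{
	printf("%s: connecting socket transport\n", xprt->name);
}

static void sock_close(struct transport *xprt)
{
	printf("%s: closing socket transport\n", xprt->name);
}

static const struct transport_ops socket_ops = {
	.connect = sock_connect,
	.close   = sock_close,
};

int main(void)
{
	struct transport xprt = { .ops = &socket_ops, .name = "tcp" };

	/* Generic code only ever goes through the ops pointers, so the
	 * socket specifics can live in their own source file. */
	xprt.ops->connect(&xprt);
	xprt.ops->close(&xprt);
	return 0;
}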
Version: Thu, 11 Aug 2005 16:03:38 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/Makefile | 2 +- net/sunrpc/clnt.c | 3 +- net/sunrpc/sysctl.c | 3 + net/sunrpc/xdr.c | 102 +---- net/sunrpc/xprt.c | 916 +++----------------------------------------- net/sunrpc/xprtsock.c | 1021 +++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 1070 insertions(+), 977 deletions(-) create mode 100644 net/sunrpc/xprtsock.c (limited to 'net') diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index f0a955627177..cdcab9ca4c60 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_SUNRPC) += sunrpc.o obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ -sunrpc-y := clnt.o xprt.o socklib.o sched.o \ +sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ auth.o auth_null.o auth_unix.o \ svc.o svcsock.o svcauth.o svcauth_unix.o \ pmap_clnt.o timer.o xdr.o \ diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 2d3cf0a52d82..ab50c3c9e6a8 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -525,8 +525,7 @@ rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize xprt->rcvsize = 0; if (rcvsize) xprt->rcvsize = rcvsize + RPC_SLACK_SPACE; - if (xprt_connected(xprt)) - xprt_sock_setbufsize(xprt); + xprt->ops->set_buffer_size(xprt); } /* diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index 1b9616a12e24..ef483262f17f 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c @@ -119,6 +119,9 @@ done: return 0; } +unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; +unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; + static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 9cc12aeed22c..32df43372ee9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -6,15 +6,12 @@ * Copyright (C) 1995, 1996 Olaf Kirch */ +#include #include -#include #include #include #include #include -#include -#include -#include #include #include @@ -177,103 +174,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, } -int -xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, - struct xdr_buf *xdr, unsigned int base, int msgflags) -{ - struct page **ppage = xdr->pages; - unsigned int len, pglen = xdr->page_len; - int err, ret = 0; - ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); - - len = xdr->head[0].iov_len; - if (base < len || (addr != NULL && base == 0)) { - struct kvec iov = { - .iov_base = xdr->head[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_name = addr, - .msg_namelen = addrlen, - .msg_flags = msgflags, - }; - if (xdr->len > len) - msg.msg_flags |= MSG_MORE; - - if (iov.iov_len != 0) - err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); - else - err = kernel_sendmsg(sock, &msg, NULL, 0, 0); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - if (err != iov.iov_len) - goto out; - base = 0; - } else - base -= len; - - if (pglen == 0) - goto copy_tail; - if (base >= pglen) { - base -= pglen; - goto copy_tail; - } - if (base || xdr->page_base) { - pglen -= base; - base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; - } - - sendpage = sock->ops->sendpage ? 
: sock_no_sendpage; - do { - int flags = msgflags; - - len = PAGE_CACHE_SIZE; - if (base) - len -= base; - if (pglen < len) - len = pglen; - - if (pglen != len || xdr->tail[0].iov_len != 0) - flags |= MSG_MORE; - - /* Hmm... We might be dealing with highmem pages */ - if (PageHighMem(*ppage)) - sendpage = sock_no_sendpage; - err = sendpage(sock, *ppage, base, len, flags); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - if (err != len) - goto out; - base = 0; - ppage++; - } while ((pglen -= len) != 0); -copy_tail: - len = xdr->tail[0].iov_len; - if (base < len) { - struct kvec iov = { - .iov_base = xdr->tail[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_flags = msgflags, - }; - err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - } -out: - return ret; -} - - /* * Helper routines for doing 'memmove' like operations on a struct xdr_buf * diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 67444f494fea..4342acf4d1cd 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -32,37 +32,16 @@ * tasks that rely on callbacks. * * Copyright (C) 1995-1997, Olaf Kirch - * - * TCP callback races fixes (C) 1998 Red Hat Software - * TCP send fixes (C) 1998 Red Hat Software - * TCP NFS related read + write fixes - * (C) 1999 Dave Airlie, University of Limerick, Ireland - * - * Rewrite of larges part of the code in order to stabilize TCP stuff. - * Fix behaviour when socket buffer is full. - * (C) 1999 Trond Myklebust */ +#include + #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include +#include /* * Local variables @@ -74,64 +53,17 @@ #endif #define XPRT_MAX_BACKOFF (8) -#define XPRT_IDLE_TIMEOUT (5*60*HZ) -#define XPRT_MAX_RESVPORT (800) /* * Local functions */ static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); static inline void do_xprt_reserve(struct rpc_task *); -static void xprt_disconnect(struct rpc_xprt *); static void xprt_connect_status(struct rpc_task *task); -static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap, - struct rpc_timeout *to); -static struct socket *xprt_create_socket(struct rpc_xprt *, int, int); -static void xprt_bind_socket(struct rpc_xprt *, struct socket *); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); static int xprt_clear_backlog(struct rpc_xprt *xprt); -#ifdef RPC_DEBUG_DATA -/* - * Print the buffer contents (first 128 bytes only--just enough for - * diropres return). - */ -static void -xprt_pktdump(char *msg, u32 *packet, unsigned int count) -{ - u8 *buf = (u8 *) packet; - int j; - - dprintk("RPC: %s\n", msg); - for (j = 0; j < count && j < 128; j += 4) { - if (!(j & 31)) { - if (j) - dprintk("\n"); - dprintk("0x%04x ", j); - } - dprintk("%02x%02x%02x%02x ", - buf[j], buf[j+1], buf[j+2], buf[j+3]); - } - dprintk("\n"); -} -#else -static inline void -xprt_pktdump(char *msg, u32 *packet, unsigned int count) -{ - /* NOP */ -} -#endif - -/* - * Look up RPC transport given an INET socket - */ -static inline struct rpc_xprt * -xprt_from_sock(struct sock *sk) -{ - return (struct rpc_xprt *) sk->sk_user_data; -} - /* * Serialize write access to sockets, in order to prevent different * requests from interfering with each other. 
@@ -234,62 +166,6 @@ xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) spin_unlock_bh(&xprt->sock_lock); } -/* - * Write data to socket. - */ -static inline int -xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) -{ - struct socket *sock = xprt->sock; - struct xdr_buf *xdr = &req->rq_snd_buf; - struct sockaddr *addr = NULL; - int addrlen = 0; - unsigned int skip; - int result; - - if (!sock) - return -ENOTCONN; - - xprt_pktdump("packet data:", - req->rq_svec->iov_base, - req->rq_svec->iov_len); - - /* For UDP, we need to provide an address */ - if (!xprt->stream) { - addr = (struct sockaddr *) &xprt->addr; - addrlen = sizeof(xprt->addr); - } - /* Dont repeat bytes */ - skip = req->rq_bytes_sent; - - clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); - result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); - - dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result); - - if (result >= 0) - return result; - - switch (result) { - case -ECONNREFUSED: - /* When the server has died, an ICMP port unreachable message - * prompts ECONNREFUSED. - */ - case -EAGAIN: - break; - case -ECONNRESET: - case -ENOTCONN: - case -EPIPE: - /* connection broken */ - if (xprt->stream) - result = -ENOTCONN; - break; - default: - printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result); - } - return result; -} - /* * Van Jacobson congestion avoidance. Check if the congestion window * overflowed. Put the task to sleep if this is the case. @@ -405,48 +281,20 @@ int xprt_adjust_timeout(struct rpc_rqst *req) return status; } -/* - * Close down a transport socket - */ -static void -xprt_close(struct rpc_xprt *xprt) -{ - struct socket *sock = xprt->sock; - struct sock *sk = xprt->inet; - - if (!sk) - return; - - write_lock_bh(&sk->sk_callback_lock); - xprt->inet = NULL; - xprt->sock = NULL; - - sk->sk_user_data = NULL; - sk->sk_data_ready = xprt->old_data_ready; - sk->sk_state_change = xprt->old_state_change; - sk->sk_write_space = xprt->old_write_space; - write_unlock_bh(&sk->sk_callback_lock); - - sk->sk_no_check = 0; - - sock_release(sock); -} - static void xprt_socket_autoclose(void *args) { struct rpc_xprt *xprt = (struct rpc_xprt *)args; xprt_disconnect(xprt); - xprt_close(xprt); + xprt->ops->close(xprt); xprt_release_write(xprt, NULL); } /* * Mark a transport as disconnected */ -static void -xprt_disconnect(struct rpc_xprt *xprt) +void xprt_disconnect(struct rpc_xprt *xprt) { dprintk("RPC: disconnected transport %p\n", xprt); spin_lock_bh(&xprt->sock_lock); @@ -479,57 +327,6 @@ out_abort: spin_unlock(&xprt->sock_lock); } -static void xprt_socket_connect(void *args) -{ - struct rpc_xprt *xprt = (struct rpc_xprt *)args; - struct socket *sock = xprt->sock; - int status = -EIO; - - if (xprt->shutdown || xprt->addr.sin_port == 0) - goto out; - - /* - * Start by resetting any existing state - */ - xprt_close(xprt); - sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport); - if (sock == NULL) { - /* couldn't create socket or bind to reserved port; - * this is likely a permanent error, so cause an abort */ - goto out; - } - xprt_bind_socket(xprt, sock); - xprt_sock_setbufsize(xprt); - - status = 0; - if (!xprt->stream) - goto out; - - /* - * Tell the socket layer to start connecting... 
- */ - status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, - sizeof(xprt->addr), O_NONBLOCK); - dprintk("RPC: %p connect status %d connected %d sock state %d\n", - xprt, -status, xprt_connected(xprt), sock->sk->sk_state); - if (status < 0) { - switch (status) { - case -EINPROGRESS: - case -EALREADY: - goto out_clear; - } - } -out: - if (status < 0) - rpc_wake_up_status(&xprt->pending, status); - else - rpc_wake_up(&xprt->pending); -out_clear: - smp_mb__before_clear_bit(); - clear_bit(XPRT_CONNECTING, &xprt->sockstate); - smp_mb__after_clear_bit(); -} - /* * Attempt to connect a TCP socket. * @@ -552,30 +349,16 @@ void xprt_connect(struct rpc_task *task) if (!xprt_lock_write(xprt, task)) return; if (xprt_connected(xprt)) - goto out_write; + xprt_release_write(xprt, task); + else { + if (task->tk_rqstp) + task->tk_rqstp->rq_bytes_sent = 0; - if (task->tk_rqstp) - task->tk_rqstp->rq_bytes_sent = 0; - - task->tk_timeout = RPC_CONNECT_TIMEOUT; - rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); - if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { - /* Note: if we are here due to a dropped connection - * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ - * seconds - */ - if (xprt->sock != NULL) - schedule_delayed_work(&xprt->sock_connect, - RPC_REESTABLISH_TIMEOUT); - else { - schedule_work(&xprt->sock_connect); - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); - } + task->tk_timeout = RPC_CONNECT_TIMEOUT; + rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); + xprt->ops->connect(task); } return; - out_write: - xprt_release_write(xprt, task); } /* @@ -624,8 +407,7 @@ xprt_connect_status(struct rpc_task *task) /* * Look up the RPC request corresponding to a reply, and then lock it. */ -static inline struct rpc_rqst * -xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) +struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) { struct list_head *pos; struct rpc_rqst *req = NULL; @@ -644,8 +426,7 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) * Complete reply received. * The TCP code relies on us to remove the request from xprt->pending. */ -static void -xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) +void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) { struct rpc_task *task = req->rq_task; struct rpc_clnt *clnt = task->tk_client; @@ -691,409 +472,6 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) return; } -/* - * Input handler for RPC replies. Called from a bottom half and hence - * atomic. - */ -static void -udp_data_ready(struct sock *sk, int len) -{ - struct rpc_task *task; - struct rpc_xprt *xprt; - struct rpc_rqst *rovr; - struct sk_buff *skb; - int err, repsize, copied; - u32 _xid, *xp; - - read_lock(&sk->sk_callback_lock); - dprintk("RPC: udp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) { - printk("RPC: udp_data_ready request not found!\n"); - goto out; - } - - dprintk("RPC: udp_data_ready client %p\n", xprt); - - if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) - goto out; - - if (xprt->shutdown) - goto dropit; - - repsize = skb->len - sizeof(struct udphdr); - if (repsize < 4) { - printk("RPC: impossible RPC reply size %d!\n", repsize); - goto dropit; - } - - /* Copy the XID from the skb... 
*/ - xp = skb_header_pointer(skb, sizeof(struct udphdr), - sizeof(_xid), &_xid); - if (xp == NULL) - goto dropit; - - /* Look up and lock the request corresponding to the given XID */ - spin_lock(&xprt->sock_lock); - rovr = xprt_lookup_rqst(xprt, *xp); - if (!rovr) - goto out_unlock; - task = rovr->rq_task; - - dprintk("RPC: %4d received reply\n", task->tk_pid); - - if ((copied = rovr->rq_private_buf.buflen) > repsize) - copied = repsize; - - /* Suck it into the iovec, verify checksum if not done by hw. */ - if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) - goto out_unlock; - - /* Something worked... */ - dst_confirm(skb->dst); - - xprt_complete_rqst(xprt, rovr, copied); - - out_unlock: - spin_unlock(&xprt->sock_lock); - dropit: - skb_free_datagram(sk, skb); - out: - read_unlock(&sk->sk_callback_lock); -} - -/* - * Copy from an skb into memory and shrink the skb. - */ -static inline size_t -tcp_copy_data(skb_reader_t *desc, void *p, size_t len) -{ - if (len > desc->count) - len = desc->count; - if (skb_copy_bits(desc->skb, desc->offset, p, len)) { - dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", - len, desc->count); - return 0; - } - desc->offset += len; - desc->count -= len; - dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", - len, desc->count); - return len; -} - -/* - * TCP read fragment marker - */ -static inline void -tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len, used; - char *p; - - p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; - len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; - used = tcp_copy_data(desc, p, len); - xprt->tcp_offset += used; - if (used != len) - return; - xprt->tcp_reclen = ntohl(xprt->tcp_recm); - if (xprt->tcp_reclen & 0x80000000) - xprt->tcp_flags |= XPRT_LAST_FRAG; - else - xprt->tcp_flags &= ~XPRT_LAST_FRAG; - xprt->tcp_reclen &= 0x7fffffff; - xprt->tcp_flags &= ~XPRT_COPY_RECM; - xprt->tcp_offset = 0; - /* Sanity check of the record length */ - if (xprt->tcp_reclen < 4) { - printk(KERN_ERR "RPC: Invalid TCP record fragment length\n"); - xprt_disconnect(xprt); - } - dprintk("RPC: reading TCP record fragment of length %d\n", - xprt->tcp_reclen); -} - -static void -tcp_check_recm(struct rpc_xprt *xprt) -{ - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); - if (xprt->tcp_offset == xprt->tcp_reclen) { - xprt->tcp_flags |= XPRT_COPY_RECM; - xprt->tcp_offset = 0; - if (xprt->tcp_flags & XPRT_LAST_FRAG) { - xprt->tcp_flags &= ~XPRT_COPY_DATA; - xprt->tcp_flags |= XPRT_COPY_XID; - xprt->tcp_copied = 0; - } - } -} - -/* - * TCP read xid - */ -static inline void -tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len, used; - char *p; - - len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; - dprintk("RPC: reading XID (%Zu bytes)\n", len); - p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; - used = tcp_copy_data(desc, p, len); - xprt->tcp_offset += used; - if (used != len) - return; - xprt->tcp_flags &= ~XPRT_COPY_XID; - xprt->tcp_flags |= XPRT_COPY_DATA; - xprt->tcp_copied = 4; - dprintk("RPC: reading reply for XID %08x\n", - ntohl(xprt->tcp_xid)); - tcp_check_recm(xprt); -} - -/* - * TCP read and complete request - */ -static inline void -tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - struct rpc_rqst *req; - struct xdr_buf *rcvbuf; - size_t len; - ssize_t r; - - /* Find and lock the request corresponding to this xid */ - 
spin_lock(&xprt->sock_lock); - req = xprt_lookup_rqst(xprt, xprt->tcp_xid); - if (!req) { - xprt->tcp_flags &= ~XPRT_COPY_DATA; - dprintk("RPC: XID %08x request not found!\n", - ntohl(xprt->tcp_xid)); - spin_unlock(&xprt->sock_lock); - return; - } - - rcvbuf = &req->rq_private_buf; - len = desc->count; - if (len > xprt->tcp_reclen - xprt->tcp_offset) { - skb_reader_t my_desc; - - len = xprt->tcp_reclen - xprt->tcp_offset; - memcpy(&my_desc, desc, sizeof(my_desc)); - my_desc.count = len; - r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - &my_desc, tcp_copy_data); - desc->count -= r; - desc->offset += r; - } else - r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - desc, tcp_copy_data); - - if (r > 0) { - xprt->tcp_copied += r; - xprt->tcp_offset += r; - } - if (r != len) { - /* Error when copying to the receive buffer, - * usually because we weren't able to allocate - * additional buffer pages. All we can do now - * is turn off XPRT_COPY_DATA, so the request - * will not receive any additional updates, - * and time out. - * Any remaining data from this record will - * be discarded. - */ - xprt->tcp_flags &= ~XPRT_COPY_DATA; - dprintk("RPC: XID %08x truncated request\n", - ntohl(xprt->tcp_xid)); - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); - goto out; - } - - dprintk("RPC: XID %08x read %Zd bytes\n", - ntohl(xprt->tcp_xid), r); - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); - - if (xprt->tcp_copied == req->rq_private_buf.buflen) - xprt->tcp_flags &= ~XPRT_COPY_DATA; - else if (xprt->tcp_offset == xprt->tcp_reclen) { - if (xprt->tcp_flags & XPRT_LAST_FRAG) - xprt->tcp_flags &= ~XPRT_COPY_DATA; - } - -out: - if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { - dprintk("RPC: %4d received reply complete\n", - req->rq_task->tk_pid); - xprt_complete_rqst(xprt, req, xprt->tcp_copied); - } - spin_unlock(&xprt->sock_lock); - tcp_check_recm(xprt); -} - -/* - * TCP discard extra bytes from a short read - */ -static inline void -tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len; - - len = xprt->tcp_reclen - xprt->tcp_offset; - if (len > desc->count) - len = desc->count; - desc->count -= len; - desc->offset += len; - xprt->tcp_offset += len; - dprintk("RPC: discarded %Zu bytes\n", len); - tcp_check_recm(xprt); -} - -/* - * TCP record receive routine - * We first have to grab the record marker, then the XID, then the data. - */ -static int -tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, - unsigned int offset, size_t len) -{ - struct rpc_xprt *xprt = rd_desc->arg.data; - skb_reader_t desc = { - .skb = skb, - .offset = offset, - .count = len, - .csum = 0 - }; - - dprintk("RPC: tcp_data_recv\n"); - do { - /* Read in a new fragment marker if necessary */ - /* Can we ever really expect to get completely empty fragments? 
*/ - if (xprt->tcp_flags & XPRT_COPY_RECM) { - tcp_read_fraghdr(xprt, &desc); - continue; - } - /* Read in the xid if necessary */ - if (xprt->tcp_flags & XPRT_COPY_XID) { - tcp_read_xid(xprt, &desc); - continue; - } - /* Read in the request data */ - if (xprt->tcp_flags & XPRT_COPY_DATA) { - tcp_read_request(xprt, &desc); - continue; - } - /* Skip over any trailing bytes on short reads */ - tcp_read_discard(xprt, &desc); - } while (desc.count); - dprintk("RPC: tcp_data_recv done\n"); - return len - desc.count; -} - -static void tcp_data_ready(struct sock *sk, int bytes) -{ - struct rpc_xprt *xprt; - read_descriptor_t rd_desc; - - read_lock(&sk->sk_callback_lock); - dprintk("RPC: tcp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) { - printk("RPC: tcp_data_ready socket info not found!\n"); - goto out; - } - if (xprt->shutdown) - goto out; - - /* We use rd_desc to pass struct xprt to tcp_data_recv */ - rd_desc.arg.data = xprt; - rd_desc.count = 65536; - tcp_read_sock(sk, &rd_desc, tcp_data_recv); -out: - read_unlock(&sk->sk_callback_lock); -} - -static void -tcp_state_change(struct sock *sk) -{ - struct rpc_xprt *xprt; - - read_lock(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk))) - goto out; - dprintk("RPC: tcp_state_change client %p...\n", xprt); - dprintk("RPC: state %x conn %d dead %d zapped %d\n", - sk->sk_state, xprt_connected(xprt), - sock_flag(sk, SOCK_DEAD), - sock_flag(sk, SOCK_ZAPPED)); - - switch (sk->sk_state) { - case TCP_ESTABLISHED: - spin_lock_bh(&xprt->sock_lock); - if (!xprt_test_and_set_connected(xprt)) { - /* Reset TCP record info */ - xprt->tcp_offset = 0; - xprt->tcp_reclen = 0; - xprt->tcp_copied = 0; - xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; - rpc_wake_up(&xprt->pending); - } - spin_unlock_bh(&xprt->sock_lock); - break; - case TCP_SYN_SENT: - case TCP_SYN_RECV: - break; - default: - xprt_disconnect(xprt); - break; - } - out: - read_unlock(&sk->sk_callback_lock); -} - -/* - * Called when more output buffer space is available for this socket. - * We try not to wake our writers until they can make "significant" - * progress, otherwise we'll waste resources thrashing sock_sendmsg - * with a bunch of small requests. - */ -static void -xprt_write_space(struct sock *sk) -{ - struct rpc_xprt *xprt; - struct socket *sock; - - read_lock(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) - goto out; - if (xprt->shutdown) - goto out; - - /* Wait until we have enough socket memory */ - if (xprt->stream) { - /* from net/core/stream.c:sk_stream_write_space */ - if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)) - goto out; - } else { - /* from net/core/sock.c:sock_def_write_space */ - if (!sock_writeable(sk)) - goto out; - } - - if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) - goto out; - - spin_lock_bh(&xprt->sock_lock); - if (xprt->snd_task) - rpc_wake_up_task(xprt->snd_task); - spin_unlock_bh(&xprt->sock_lock); -out: - read_unlock(&sk->sk_callback_lock); -} - /* * RPC receive timeout handler. */ @@ -1161,19 +539,10 @@ xprt_transmit(struct rpc_task *task) struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - int status, retry = 0; - + int status; dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); - /* set up everything as needed. 
*/ - /* Write the record marker */ - if (xprt->stream) { - u32 *marker = req->rq_svec[0].iov_base; - - *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); - } - smp_rmb(); if (!req->rq_received) { if (list_empty(&req->rq_list)) { @@ -1191,41 +560,9 @@ xprt_transmit(struct rpc_task *task) } else if (!req->rq_bytes_sent) return; - /* Continue transmitting the packet/record. We must be careful - * to cope with writespace callbacks arriving _after_ we have - * called xprt_sendmsg(). - */ - while (1) { - req->rq_xtime = jiffies; - status = xprt_sendmsg(xprt, req); - - if (status < 0) - break; - - if (xprt->stream) { - req->rq_bytes_sent += status; - - /* If we've sent the entire packet, immediately - * reset the count of bytes sent. */ - if (req->rq_bytes_sent >= req->rq_slen) { - req->rq_bytes_sent = 0; - goto out_receive; - } - } else { - if (status >= req->rq_slen) - goto out_receive; - status = -EAGAIN; - break; - } - - dprintk("RPC: %4d xmit incomplete (%d left of %d)\n", - task->tk_pid, req->rq_slen - req->rq_bytes_sent, - req->rq_slen); - - status = -EAGAIN; - if (retry++ > 50) - break; - } + status = xprt->ops->send_request(task); + if (!status) + goto out_receive; /* Note: at this point, task->tk_sleeping has not yet been set, * hence there is no danger of the waking up task being put on @@ -1234,26 +571,10 @@ xprt_transmit(struct rpc_task *task) task->tk_status = status; switch (status) { - case -EAGAIN: - if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { - /* Protect against races with xprt_write_space */ - spin_lock_bh(&xprt->sock_lock); - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) { - task->tk_timeout = req->rq_timeout; - rpc_sleep_on(&xprt->pending, task, NULL, NULL); - } - spin_unlock_bh(&xprt->sock_lock); - return; - } - /* Keep holding the socket if it is blocked */ - rpc_delay(task, HZ>>4); - return; case -ECONNREFUSED: task->tk_timeout = RPC_REESTABLISH_TIMEOUT; rpc_sleep_on(&xprt->sending, task, NULL, NULL); + case -EAGAIN: case -ENOTCONN: return; default: @@ -1367,7 +688,8 @@ xprt_release(struct rpc_task *task) list_del(&req->rq_list); xprt->last_used = jiffies; if (list_empty(&xprt->recv) && !xprt->shutdown) - mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT); + mod_timer(&xprt->timer, + xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT); spin_unlock_bh(&xprt->sock_lock); task->tk_rqstp = NULL; memset(req, 0, sizeof(*req)); /* mark unused */ @@ -1380,18 +702,6 @@ xprt_release(struct rpc_task *task) spin_unlock(&xprt->xprt_lock); } -/* - * Set default timeout parameters - */ -static void -xprt_default_timeout(struct rpc_timeout *to, int proto) -{ - if (proto == IPPROTO_UDP) - xprt_set_timeout(to, 5, 5 * HZ); - else - xprt_set_timeout(to, 2, 60 * HZ); -} - /* * Set constant timeout */ @@ -1405,68 +715,51 @@ xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) to->to_exponential = 0; } -unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; -unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; - /* * Initialize an RPC client */ static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) { + int result; struct rpc_xprt *xprt; - unsigned int entries; - size_t slot_table_size; struct rpc_rqst *req; - dprintk("RPC: setting up %s transport...\n", - proto == IPPROTO_UDP? "UDP" : "TCP"); - - entries = (proto == IPPROTO_TCP)? 
- xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries; - if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) return ERR_PTR(-ENOMEM); memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ - xprt->max_reqs = entries; - slot_table_size = entries * sizeof(xprt->slot[0]); - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); - if (xprt->slot == NULL) { - kfree(xprt); - return ERR_PTR(-ENOMEM); - } - memset(xprt->slot, 0, slot_table_size); xprt->addr = *ap; - xprt->prot = proto; - xprt->stream = (proto == IPPROTO_TCP)? 1 : 0; - if (xprt->stream) { - xprt->cwnd = RPC_MAXCWND(xprt); - xprt->nocong = 1; - xprt->max_payload = (1U << 31) - 1; - } else { - xprt->cwnd = RPC_INITCWND; - xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); + + switch (proto) { + case IPPROTO_UDP: + result = xs_setup_udp(xprt, to); + break; + case IPPROTO_TCP: + result = xs_setup_tcp(xprt, to); + break; + default: + printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", + proto); + result = -EIO; + break; + } + if (result) { + kfree(xprt); + return ERR_PTR(result); } + spin_lock_init(&xprt->sock_lock); spin_lock_init(&xprt->xprt_lock); init_waitqueue_head(&xprt->cong_wait); INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); - INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt); init_timer(&xprt->timer); xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; xprt->last_used = jiffies; - xprt->port = XPRT_MAX_RESVPORT; - - /* Set timeout parameters */ - if (to) { - xprt->timeout = *to; - } else - xprt_default_timeout(&xprt->timeout, xprt->prot); rpc_init_wait_queue(&xprt->pending, "xprt_pending"); rpc_init_wait_queue(&xprt->sending, "xprt_sending"); @@ -1474,134 +767,17 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); /* initialize free list */ - for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--) + for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) list_add(&req->rq_list, &xprt->free); xprt_init_xid(xprt); - /* Check whether we want to use a reserved port */ - xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; - dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs); return xprt; } -/* - * Bind to a reserved port - */ -static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) -{ - struct sockaddr_in myaddr = { - .sin_family = AF_INET, - }; - int err, port; - - /* Were we already bound to a given port? 
Try to reuse it */ - port = xprt->port; - do { - myaddr.sin_port = htons(port); - err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, - sizeof(myaddr)); - if (err == 0) { - xprt->port = port; - return 0; - } - if (--port == 0) - port = XPRT_MAX_RESVPORT; - } while (err == -EADDRINUSE && port != xprt->port); - - printk("RPC: Can't bind to reserved port (%d).\n", -err); - return err; -} - -static void -xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) -{ - struct sock *sk = sock->sk; - - if (xprt->inet) - return; - - write_lock_bh(&sk->sk_callback_lock); - sk->sk_user_data = xprt; - xprt->old_data_ready = sk->sk_data_ready; - xprt->old_state_change = sk->sk_state_change; - xprt->old_write_space = sk->sk_write_space; - if (xprt->prot == IPPROTO_UDP) { - sk->sk_data_ready = udp_data_ready; - sk->sk_no_check = UDP_CSUM_NORCV; - xprt_set_connected(xprt); - } else { - tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ - sk->sk_data_ready = tcp_data_ready; - sk->sk_state_change = tcp_state_change; - xprt_clear_connected(xprt); - } - sk->sk_write_space = xprt_write_space; - - /* Reset to new socket */ - xprt->sock = sock; - xprt->inet = sk; - write_unlock_bh(&sk->sk_callback_lock); - - return; -} - -/* - * Set socket buffer length - */ -void -xprt_sock_setbufsize(struct rpc_xprt *xprt) -{ - struct sock *sk = xprt->inet; - - if (xprt->stream) - return; - if (xprt->rcvsize) { - sk->sk_userlocks |= SOCK_RCVBUF_LOCK; - sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; - } - if (xprt->sndsize) { - sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; - sk->sk_write_space(sk); - } -} - -/* - * Datastream sockets are created here, but xprt_connect will create - * and connect stream sockets. - */ -static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport) -{ - struct socket *sock; - int type, err; - - dprintk("RPC: xprt_create_socket(%s %d)\n", - (proto == IPPROTO_UDP)? "udp" : "tcp", proto); - - type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; - - if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { - printk("RPC: can't create socket (%d).\n", -err); - return NULL; - } - - /* If the caller has the capability, bind to a reserved port */ - if (resvport && xprt_bindresvport(xprt, sock) < 0) { - printk("RPC: can't bind to reserved port.\n"); - goto failed; - } - - return sock; - -failed: - sock_release(sock); - return NULL; -} - /* * Create an RPC client transport given the protocol and peer address. */ @@ -1631,10 +807,6 @@ xprt_shutdown(struct rpc_xprt *xprt) rpc_wake_up(&xprt->backlog); wake_up(&xprt->cong_wait); del_timer_sync(&xprt->timer); - - /* synchronously wait for connect worker to finish */ - cancel_delayed_work(&xprt->sock_connect); - flush_scheduled_work(); } /* @@ -1655,9 +827,7 @@ xprt_destroy(struct rpc_xprt *xprt) { dprintk("RPC: destroying transport %p\n", xprt); xprt_shutdown(xprt); - xprt_disconnect(xprt); - xprt_close(xprt); - kfree(xprt->slot); + xprt->ops->destroy(xprt); kfree(xprt); return 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c new file mode 100644 index 000000000000..fa1180ac4823 --- /dev/null +++ b/net/sunrpc/xprtsock.c @@ -0,0 +1,1021 @@ +/* + * linux/net/sunrpc/xprtsock.c + * + * Client-side transport implementation for sockets. 
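 *
 * The declaration of the rpc_xprt_ops vector that this file fills in
 * is not part of this excerpt (it presumably lives in
 * include/linux/sunrpc/xprt.h).  A minimal sketch, inferred from the
 * xprt_socket_ops initializer below and the xprt->ops-> call sites in
 * net/sunrpc/xprt.c, would look roughly like:
 *
 *	struct rpc_xprt_ops {
 *		void	(*set_buffer_size)(struct rpc_xprt *xprt);
 *		void	(*connect)(struct rpc_task *task);
 *		int	(*send_request)(struct rpc_task *task);
 *		void	(*close)(struct rpc_xprt *xprt);
 *		void	(*destroy)(struct rpc_xprt *xprt);
 *	};
 *
 * Generic code then dispatches through it, e.g.
 * xprt->ops->send_request(task) in xprt_transmit() and
 * xprt->ops->close(xprt) in xprt_socket_autoclose().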
+ * + * TCP callback races fixes (C) 1998 Red Hat Software + * TCP send fixes (C) 1998 Red Hat Software + * TCP NFS related read + write fixes + * (C) 1999 Dave Airlie, University of Limerick, Ireland + * + * Rewrite of larges part of the code in order to stabilize TCP stuff. + * Fix behaviour when socket buffer is full. + * (C) 1999 Trond Myklebust + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# undef RPC_DEBUG_DATA +# define RPCDBG_FACILITY RPCDBG_XPRT +#endif + +#define XPRT_MAX_RESVPORT (800) + +#ifdef RPC_DEBUG_DATA +/* + * Print the buffer contents (first 128 bytes only--just enough for + * diropres return). + */ +static void +xprt_pktdump(char *msg, u32 *packet, unsigned int count) +{ + u8 *buf = (u8 *) packet; + int j; + + dprintk("RPC: %s\n", msg); + for (j = 0; j < count && j < 128; j += 4) { + if (!(j & 31)) { + if (j) + dprintk("\n"); + dprintk("0x%04x ", j); + } + dprintk("%02x%02x%02x%02x ", + buf[j], buf[j+1], buf[j+2], buf[j+3]); + } + dprintk("\n"); +} +#else +static inline void +xprt_pktdump(char *msg, u32 *packet, unsigned int count) +{ + /* NOP */ +} +#endif + +/* + * Look up RPC transport given an INET socket + */ +static inline struct rpc_xprt * +xprt_from_sock(struct sock *sk) +{ + return (struct rpc_xprt *) sk->sk_user_data; +} + +static int +xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, + struct xdr_buf *xdr, unsigned int base, int msgflags) +{ + struct page **ppage = xdr->pages; + unsigned int len, pglen = xdr->page_len; + int err, ret = 0; + ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); + + len = xdr->head[0].iov_len; + if (base < len || (addr != NULL && base == 0)) { + struct kvec iov = { + .iov_base = xdr->head[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_name = addr, + .msg_namelen = addrlen, + .msg_flags = msgflags, + }; + if (xdr->len > len) + msg.msg_flags |= MSG_MORE; + + if (iov.iov_len != 0) + err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + else + err = kernel_sendmsg(sock, &msg, NULL, 0, 0); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + if (err != iov.iov_len) + goto out; + base = 0; + } else + base -= len; + + if (pglen == 0) + goto copy_tail; + if (base >= pglen) { + base -= pglen; + goto copy_tail; + } + if (base || xdr->page_base) { + pglen -= base; + base += xdr->page_base; + ppage += base >> PAGE_CACHE_SHIFT; + base &= ~PAGE_CACHE_MASK; + } + + sendpage = sock->ops->sendpage ? : sock_no_sendpage; + do { + int flags = msgflags; + + len = PAGE_CACHE_SIZE; + if (base) + len -= base; + if (pglen < len) + len = pglen; + + if (pglen != len || xdr->tail[0].iov_len != 0) + flags |= MSG_MORE; + + /* Hmm... 
We might be dealing with highmem pages */ + if (PageHighMem(*ppage)) + sendpage = sock_no_sendpage; + err = sendpage(sock, *ppage, base, len, flags); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + if (err != len) + goto out; + base = 0; + ppage++; + } while ((pglen -= len) != 0); +copy_tail: + len = xdr->tail[0].iov_len; + if (base < len) { + struct kvec iov = { + .iov_base = xdr->tail[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_flags = msgflags, + }; + err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + } +out: + return ret; +} + +/* + * Write data to socket. + */ +static inline int +xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) +{ + struct socket *sock = xprt->sock; + struct xdr_buf *xdr = &req->rq_snd_buf; + struct sockaddr *addr = NULL; + int addrlen = 0; + unsigned int skip; + int result; + + if (!sock) + return -ENOTCONN; + + xprt_pktdump("packet data:", + req->rq_svec->iov_base, + req->rq_svec->iov_len); + + /* For UDP, we need to provide an address */ + if (!xprt->stream) { + addr = (struct sockaddr *) &xprt->addr; + addrlen = sizeof(xprt->addr); + } + /* Dont repeat bytes */ + skip = req->rq_bytes_sent; + + clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); + result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); + + dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result); + + if (result >= 0) + return result; + + switch (result) { + case -ECONNREFUSED: + /* When the server has died, an ICMP port unreachable message + * prompts ECONNREFUSED. + */ + case -EAGAIN: + break; + case -ECONNRESET: + case -ENOTCONN: + case -EPIPE: + /* connection broken */ + if (xprt->stream) + result = -ENOTCONN; + break; + default: + printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result); + } + return result; +} + +static int +xprt_send_request(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + int status, retry = 0; + + /* set up everything as needed. */ + /* Write the record marker */ + if (xprt->stream) { + u32 *marker = req->rq_svec[0].iov_base; + + *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); + } + + /* Continue transmitting the packet/record. We must be careful + * to cope with writespace callbacks arriving _after_ we have + * called xprt_sendmsg(). + */ + while (1) { + req->rq_xtime = jiffies; + status = xprt_sendmsg(xprt, req); + + if (status < 0) + break; + + if (xprt->stream) { + req->rq_bytes_sent += status; + + /* If we've sent the entire packet, immediately + * reset the count of bytes sent. 
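 *
 * Side note on the stream framing, sketched from this file: the
 * record marker written at the top of this function packs the
 * last-fragment flag and the fragment length into one big-endian
 * word, and tcp_read_fraghdr() below unpacks it on receive, roughly:
 *
 *	reclen = ntohl(marker);
 *	last_frag = reclen & 0x80000000;	(top bit: last fragment)
 *	reclen &= 0x7fffffff;			(low 31 bits: length)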
*/ + if (req->rq_bytes_sent >= req->rq_slen) { + req->rq_bytes_sent = 0; + return 0; + } + } else { + if (status >= req->rq_slen) + return 0; + status = -EAGAIN; + break; + } + + dprintk("RPC: %4d xmit incomplete (%d left of %d)\n", + task->tk_pid, req->rq_slen - req->rq_bytes_sent, + req->rq_slen); + + status = -EAGAIN; + if (retry++ > 50) + break; + } + + if (status == -EAGAIN) { + if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { + /* Protect against races with xprt_write_space */ + spin_lock_bh(&xprt->sock_lock); + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) { + task->tk_timeout = req->rq_timeout; + rpc_sleep_on(&xprt->pending, task, NULL, NULL); + } + spin_unlock_bh(&xprt->sock_lock); + return status; + } + /* Keep holding the socket if it is blocked */ + rpc_delay(task, HZ>>4); + } + return status; +} + +/* + * Close down a transport socket + */ +static void +xprt_close(struct rpc_xprt *xprt) +{ + struct socket *sock = xprt->sock; + struct sock *sk = xprt->inet; + + if (!sk) + return; + + write_lock_bh(&sk->sk_callback_lock); + xprt->inet = NULL; + xprt->sock = NULL; + + sk->sk_user_data = NULL; + sk->sk_data_ready = xprt->old_data_ready; + sk->sk_state_change = xprt->old_state_change; + sk->sk_write_space = xprt->old_write_space; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_no_check = 0; + + sock_release(sock); +} + +static void xprt_socket_destroy(struct rpc_xprt *xprt) +{ + cancel_delayed_work(&xprt->sock_connect); + flush_scheduled_work(); + + xprt_disconnect(xprt); + xprt_close(xprt); + kfree(xprt->slot); +} + +/* + * Input handler for RPC replies. Called from a bottom half and hence + * atomic. + */ +static void +udp_data_ready(struct sock *sk, int len) +{ + struct rpc_task *task; + struct rpc_xprt *xprt; + struct rpc_rqst *rovr; + struct sk_buff *skb; + int err, repsize, copied; + u32 _xid, *xp; + + read_lock(&sk->sk_callback_lock); + dprintk("RPC: udp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) { + printk("RPC: udp_data_ready request not found!\n"); + goto out; + } + + dprintk("RPC: udp_data_ready client %p\n", xprt); + + if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) + goto out; + + if (xprt->shutdown) + goto dropit; + + repsize = skb->len - sizeof(struct udphdr); + if (repsize < 4) { + printk("RPC: impossible RPC reply size %d!\n", repsize); + goto dropit; + } + + /* Copy the XID from the skb... */ + xp = skb_header_pointer(skb, sizeof(struct udphdr), + sizeof(_xid), &_xid); + if (xp == NULL) + goto dropit; + + /* Look up and lock the request corresponding to the given XID */ + spin_lock(&xprt->sock_lock); + rovr = xprt_lookup_rqst(xprt, *xp); + if (!rovr) + goto out_unlock; + task = rovr->rq_task; + + dprintk("RPC: %4d received reply\n", task->tk_pid); + + if ((copied = rovr->rq_private_buf.buflen) > repsize) + copied = repsize; + + /* Suck it into the iovec, verify checksum if not done by hw. */ + if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) + goto out_unlock; + + /* Something worked... */ + dst_confirm(skb->dst); + + xprt_complete_rqst(xprt, rovr, copied); + + out_unlock: + spin_unlock(&xprt->sock_lock); + dropit: + skb_free_datagram(sk, skb); + out: + read_unlock(&sk->sk_callback_lock); +} + +/* + * Copy from an skb into memory and shrink the skb. 
+ */ +static inline size_t +tcp_copy_data(skb_reader_t *desc, void *p, size_t len) +{ + if (len > desc->count) + len = desc->count; + if (skb_copy_bits(desc->skb, desc->offset, p, len)) { + dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", + len, desc->count); + return 0; + } + desc->offset += len; + desc->count -= len; + dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", + len, desc->count); + return len; +} + +/* + * TCP read fragment marker + */ +static inline void +tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len, used; + char *p; + + p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; + len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; + used = tcp_copy_data(desc, p, len); + xprt->tcp_offset += used; + if (used != len) + return; + xprt->tcp_reclen = ntohl(xprt->tcp_recm); + if (xprt->tcp_reclen & 0x80000000) + xprt->tcp_flags |= XPRT_LAST_FRAG; + else + xprt->tcp_flags &= ~XPRT_LAST_FRAG; + xprt->tcp_reclen &= 0x7fffffff; + xprt->tcp_flags &= ~XPRT_COPY_RECM; + xprt->tcp_offset = 0; + /* Sanity check of the record length */ + if (xprt->tcp_reclen < 4) { + printk(KERN_ERR "RPC: Invalid TCP record fragment length\n"); + xprt_disconnect(xprt); + } + dprintk("RPC: reading TCP record fragment of length %d\n", + xprt->tcp_reclen); +} + +static void +tcp_check_recm(struct rpc_xprt *xprt) +{ + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); + if (xprt->tcp_offset == xprt->tcp_reclen) { + xprt->tcp_flags |= XPRT_COPY_RECM; + xprt->tcp_offset = 0; + if (xprt->tcp_flags & XPRT_LAST_FRAG) { + xprt->tcp_flags &= ~XPRT_COPY_DATA; + xprt->tcp_flags |= XPRT_COPY_XID; + xprt->tcp_copied = 0; + } + } +} + +/* + * TCP read xid + */ +static inline void +tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len, used; + char *p; + + len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; + dprintk("RPC: reading XID (%Zu bytes)\n", len); + p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; + used = tcp_copy_data(desc, p, len); + xprt->tcp_offset += used; + if (used != len) + return; + xprt->tcp_flags &= ~XPRT_COPY_XID; + xprt->tcp_flags |= XPRT_COPY_DATA; + xprt->tcp_copied = 4; + dprintk("RPC: reading reply for XID %08x\n", + ntohl(xprt->tcp_xid)); + tcp_check_recm(xprt); +} + +/* + * TCP read and complete request + */ +static inline void +tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + struct rpc_rqst *req; + struct xdr_buf *rcvbuf; + size_t len; + ssize_t r; + + /* Find and lock the request corresponding to this xid */ + spin_lock(&xprt->sock_lock); + req = xprt_lookup_rqst(xprt, xprt->tcp_xid); + if (!req) { + xprt->tcp_flags &= ~XPRT_COPY_DATA; + dprintk("RPC: XID %08x request not found!\n", + ntohl(xprt->tcp_xid)); + spin_unlock(&xprt->sock_lock); + return; + } + + rcvbuf = &req->rq_private_buf; + len = desc->count; + if (len > xprt->tcp_reclen - xprt->tcp_offset) { + skb_reader_t my_desc; + + len = xprt->tcp_reclen - xprt->tcp_offset; + memcpy(&my_desc, desc, sizeof(my_desc)); + my_desc.count = len; + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, + &my_desc, tcp_copy_data); + desc->count -= r; + desc->offset += r; + } else + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, + desc, tcp_copy_data); + + if (r > 0) { + xprt->tcp_copied += r; + xprt->tcp_offset += r; + } + if (r != len) { + /* Error when copying to the receive buffer, + * usually because we weren't able to allocate + 
* additional buffer pages. All we can do now + * is turn off XPRT_COPY_DATA, so the request + * will not receive any additional updates, + * and time out. + * Any remaining data from this record will + * be discarded. + */ + xprt->tcp_flags &= ~XPRT_COPY_DATA; + dprintk("RPC: XID %08x truncated request\n", + ntohl(xprt->tcp_xid)); + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); + goto out; + } + + dprintk("RPC: XID %08x read %Zd bytes\n", + ntohl(xprt->tcp_xid), r); + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); + + if (xprt->tcp_copied == req->rq_private_buf.buflen) + xprt->tcp_flags &= ~XPRT_COPY_DATA; + else if (xprt->tcp_offset == xprt->tcp_reclen) { + if (xprt->tcp_flags & XPRT_LAST_FRAG) + xprt->tcp_flags &= ~XPRT_COPY_DATA; + } + +out: + if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { + dprintk("RPC: %4d received reply complete\n", + req->rq_task->tk_pid); + xprt_complete_rqst(xprt, req, xprt->tcp_copied); + } + spin_unlock(&xprt->sock_lock); + tcp_check_recm(xprt); +} + +/* + * TCP discard extra bytes from a short read + */ +static inline void +tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len; + + len = xprt->tcp_reclen - xprt->tcp_offset; + if (len > desc->count) + len = desc->count; + desc->count -= len; + desc->offset += len; + xprt->tcp_offset += len; + dprintk("RPC: discarded %Zu bytes\n", len); + tcp_check_recm(xprt); +} + +/* + * TCP record receive routine + * We first have to grab the record marker, then the XID, then the data. + */ +static int +tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, + unsigned int offset, size_t len) +{ + struct rpc_xprt *xprt = rd_desc->arg.data; + skb_reader_t desc = { + .skb = skb, + .offset = offset, + .count = len, + .csum = 0 + }; + + dprintk("RPC: tcp_data_recv\n"); + do { + /* Read in a new fragment marker if necessary */ + /* Can we ever really expect to get completely empty fragments? 
*/ + if (xprt->tcp_flags & XPRT_COPY_RECM) { + tcp_read_fraghdr(xprt, &desc); + continue; + } + /* Read in the xid if necessary */ + if (xprt->tcp_flags & XPRT_COPY_XID) { + tcp_read_xid(xprt, &desc); + continue; + } + /* Read in the request data */ + if (xprt->tcp_flags & XPRT_COPY_DATA) { + tcp_read_request(xprt, &desc); + continue; + } + /* Skip over any trailing bytes on short reads */ + tcp_read_discard(xprt, &desc); + } while (desc.count); + dprintk("RPC: tcp_data_recv done\n"); + return len - desc.count; +} + +static void tcp_data_ready(struct sock *sk, int bytes) +{ + struct rpc_xprt *xprt; + read_descriptor_t rd_desc; + + read_lock(&sk->sk_callback_lock); + dprintk("RPC: tcp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) { + printk("RPC: tcp_data_ready socket info not found!\n"); + goto out; + } + if (xprt->shutdown) + goto out; + + /* We use rd_desc to pass struct xprt to tcp_data_recv */ + rd_desc.arg.data = xprt; + rd_desc.count = 65536; + tcp_read_sock(sk, &rd_desc, tcp_data_recv); +out: + read_unlock(&sk->sk_callback_lock); +} + +static void +tcp_state_change(struct sock *sk) +{ + struct rpc_xprt *xprt; + + read_lock(&sk->sk_callback_lock); + if (!(xprt = xprt_from_sock(sk))) + goto out; + dprintk("RPC: tcp_state_change client %p...\n", xprt); + dprintk("RPC: state %x conn %d dead %d zapped %d\n", + sk->sk_state, xprt_connected(xprt), + sock_flag(sk, SOCK_DEAD), + sock_flag(sk, SOCK_ZAPPED)); + + switch (sk->sk_state) { + case TCP_ESTABLISHED: + spin_lock_bh(&xprt->sock_lock); + if (!xprt_test_and_set_connected(xprt)) { + /* Reset TCP record info */ + xprt->tcp_offset = 0; + xprt->tcp_reclen = 0; + xprt->tcp_copied = 0; + xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; + rpc_wake_up(&xprt->pending); + } + spin_unlock_bh(&xprt->sock_lock); + break; + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + default: + xprt_disconnect(xprt); + break; + } + out: + read_unlock(&sk->sk_callback_lock); +} + +/* + * Called when more output buffer space is available for this socket. + * We try not to wake our writers until they can make "significant" + * progress, otherwise we'll waste resources thrashing sock_sendmsg + * with a bunch of small requests. 
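 *
 * Sketch of the flow-control handshake with xprt_send_request(), as
 * implemented in this file: when a send returns -EAGAIN and the
 * socket has SOCK_ASYNC_NOSPACE set, xprt_send_request() puts the
 * task to sleep on xprt->pending for as long as SOCK_NOSPACE is
 * still set; once the socket layer frees enough space, this callback
 * clears SOCK_NOSPACE and wakes xprt->snd_task so the transmit path
 * can resume.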
+ */ +static void +xprt_write_space(struct sock *sk) +{ + struct rpc_xprt *xprt; + struct socket *sock; + + read_lock(&sk->sk_callback_lock); + if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) + goto out; + if (xprt->shutdown) + goto out; + + /* Wait until we have enough socket memory */ + if (xprt->stream) { + /* from net/core/stream.c:sk_stream_write_space */ + if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)) + goto out; + } else { + /* from net/core/sock.c:sock_def_write_space */ + if (!sock_writeable(sk)) + goto out; + } + + if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) + goto out; + + spin_lock_bh(&xprt->sock_lock); + if (xprt->snd_task) + rpc_wake_up_task(xprt->snd_task); + spin_unlock_bh(&xprt->sock_lock); +out: + read_unlock(&sk->sk_callback_lock); +} + +/* + * Set socket buffer length + */ +static void +xprt_sock_setbufsize(struct rpc_xprt *xprt) +{ + struct sock *sk = xprt->inet; + + if (xprt->stream) + return; + if (xprt->rcvsize) { + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; + } + if (xprt->sndsize) { + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; + sk->sk_write_space(sk); + } +} + +/* + * Bind to a reserved port + */ +static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) +{ + struct sockaddr_in myaddr = { + .sin_family = AF_INET, + }; + int err, port; + + /* Were we already bound to a given port? Try to reuse it */ + port = xprt->port; + do { + myaddr.sin_port = htons(port); + err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, + sizeof(myaddr)); + if (err == 0) { + xprt->port = port; + return 0; + } + if (--port == 0) + port = XPRT_MAX_RESVPORT; + } while (err == -EADDRINUSE && port != xprt->port); + + printk("RPC: Can't bind to reserved port (%d).\n", -err); + return err; +} + +static void +xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (xprt->inet) + return; + + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = xprt; + xprt->old_data_ready = sk->sk_data_ready; + xprt->old_state_change = sk->sk_state_change; + xprt->old_write_space = sk->sk_write_space; + if (xprt->prot == IPPROTO_UDP) { + sk->sk_data_ready = udp_data_ready; + sk->sk_no_check = UDP_CSUM_NORCV; + xprt_set_connected(xprt); + } else { + tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ + sk->sk_data_ready = tcp_data_ready; + sk->sk_state_change = tcp_state_change; + xprt_clear_connected(xprt); + } + sk->sk_write_space = xprt_write_space; + + /* Reset to new socket */ + xprt->sock = sock; + xprt->inet = sk; + write_unlock_bh(&sk->sk_callback_lock); + + return; +} + +/* + * Datastream sockets are created here, but xprt_connect will create + * and connect stream sockets. + */ +static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport) +{ + struct socket *sock; + int type, err; + + dprintk("RPC: xprt_create_socket(%s %d)\n", + (proto == IPPROTO_UDP)? "udp" : "tcp", proto); + + type = (proto == IPPROTO_UDP)? 
SOCK_DGRAM : SOCK_STREAM; + + if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { + printk("RPC: can't create socket (%d).\n", -err); + return NULL; + } + + /* If the caller has the capability, bind to a reserved port */ + if (resvport && xprt_bindresvport(xprt, sock) < 0) { + printk("RPC: can't bind to reserved port.\n"); + goto failed; + } + + return sock; + +failed: + sock_release(sock); + return NULL; +} + +static void xprt_socket_connect(void *args) +{ + struct rpc_xprt *xprt = (struct rpc_xprt *)args; + struct socket *sock = xprt->sock; + int status = -EIO; + + if (xprt->shutdown || xprt->addr.sin_port == 0) + goto out; + + /* + * Start by resetting any existing state + */ + xprt_close(xprt); + sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport); + if (sock == NULL) { + /* couldn't create socket or bind to reserved port; + * this is likely a permanent error, so cause an abort */ + goto out; + } + xprt_bind_socket(xprt, sock); + xprt_sock_setbufsize(xprt); + + status = 0; + if (!xprt->stream) + goto out; + + /* + * Tell the socket layer to start connecting... + */ + status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, + sizeof(xprt->addr), O_NONBLOCK); + dprintk("RPC: %p connect status %d connected %d sock state %d\n", + xprt, -status, xprt_connected(xprt), sock->sk->sk_state); + if (status < 0) { + switch (status) { + case -EINPROGRESS: + case -EALREADY: + goto out_clear; + } + } +out: + if (status < 0) + rpc_wake_up_status(&xprt->pending, status); + else + rpc_wake_up(&xprt->pending); +out_clear: + smp_mb__before_clear_bit(); + clear_bit(XPRT_CONNECTING, &xprt->sockstate); + smp_mb__after_clear_bit(); +} + +static void +xprt_connect_sock(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + + if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { + /* Note: if we are here due to a dropped connection + * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ + * seconds + */ + if (xprt->sock != NULL) + schedule_delayed_work(&xprt->sock_connect, + RPC_REESTABLISH_TIMEOUT); + else { + schedule_work(&xprt->sock_connect); + /* flush_scheduled_work can sleep... */ + if (!RPC_IS_ASYNC(task)) + flush_scheduled_work(); + } + } +} + +/* + * Set default timeout parameters + */ +static void +xprt_default_timeout(struct rpc_timeout *to, int proto) +{ + if (proto == IPPROTO_UDP) + xprt_set_timeout(to, 5, 5 * HZ); + else + xprt_set_timeout(to, 2, 60 * HZ); +} + +static struct rpc_xprt_ops xprt_socket_ops = { + .set_buffer_size = xprt_sock_setbufsize, + .connect = xprt_connect_sock, + .send_request = xprt_send_request, + .close = xprt_close, + .destroy = xprt_socket_destroy, +}; + +extern unsigned int xprt_udp_slot_table_entries; +extern unsigned int xprt_tcp_slot_table_entries; + +int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) +{ + size_t slot_table_size; + + dprintk("RPC: setting up udp-ipv4 transport...\n"); + + xprt->max_reqs = xprt_udp_slot_table_entries; + slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); + xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + if (xprt->slot == NULL) + return -ENOMEM; + memset(xprt->slot, 0, slot_table_size); + + xprt->prot = IPPROTO_UDP; + xprt->port = XPRT_MAX_RESVPORT; + xprt->stream = 0; + xprt->nocong = 0; + xprt->cwnd = RPC_INITCWND; + xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; + /* XXX: header size can vary due to auth type, IPv6, etc. 
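 *
 * For comparison, xs_setup_tcp() below sets
 *
 *	xprt->max_payload = (1U << 31) - 1;
 *
 * which matches the 31-bit fragment length field carried in the TCP
 * record marker, while the UDP value set just below stays within the
 * 64KB datagram limit.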
*/ + xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); + + INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); + + xprt->ops = &xprt_socket_ops; + + if (to) + xprt->timeout = *to; + else + xprt_default_timeout(to, xprt->prot); + + return 0; +} + +int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) +{ + size_t slot_table_size; + + dprintk("RPC: setting up tcp-ipv4 transport...\n"); + + xprt->max_reqs = xprt_tcp_slot_table_entries; + slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); + xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + if (xprt->slot == NULL) + return -ENOMEM; + memset(xprt->slot, 0, slot_table_size); + + xprt->prot = IPPROTO_TCP; + xprt->port = XPRT_MAX_RESVPORT; + xprt->stream = 1; + xprt->nocong = 1; + xprt->cwnd = RPC_MAXCWND(xprt); + xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; + xprt->max_payload = (1U << 31) - 1; + + INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); + + xprt->ops = &xprt_socket_ops; + + if (to) + xprt->timeout = *to; + else + xprt_default_timeout(to, xprt->prot); + + return 0; +} -- cgit v1.2.2 From 9903cd1c27a1f30e8efea75e125be3b2002f7cb9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:26 -0400 Subject: [PATCH] RPC: transport switch function naming Introduce block header comments and a function naming convention to the socket transport implementation. Provide a debug setting for transports that is separate from RPCDBG_XPRT. Eliminate xprt_default_timeout(). Provide block comments for exposed interfaces in xprt.c, and eliminate the useless obvious comments. Convert printk's to dprintk's. Test-plan: Compile kernel with CONFIG_NFS enabled. Version: Thu, 11 Aug 2005 16:04:04 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 147 ++++++++-------- net/sunrpc/xprtsock.c | 464 ++++++++++++++++++++++++++------------------------ 2 files changed, 310 insertions(+), 301 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 4342acf4d1cd..589195e630ef 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -227,9 +227,6 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) xprt->cwnd = cwnd; } -/* - * Reset the major timeout value - */ static void xprt_reset_majortimeo(struct rpc_rqst *req) { struct rpc_timeout *to = &req->rq_xprt->timeout; @@ -244,8 +241,10 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req) req->rq_majortimeo += jiffies; } -/* - * Adjust timeout values etc for next retransmit +/** + * xprt_adjust_timeout - adjust timeout values for next retransmit + * @req: RPC request containing parameters to use for the adjustment + * */ int xprt_adjust_timeout(struct rpc_rqst *req) { @@ -291,8 +290,10 @@ xprt_socket_autoclose(void *args) xprt_release_write(xprt, NULL); } -/* - * Mark a transport as disconnected +/** + * xprt_disconnect - mark a transport as disconnected + * @xprt: transport to flag for disconnect + * */ void xprt_disconnect(struct rpc_xprt *xprt) { @@ -303,9 +304,6 @@ void xprt_disconnect(struct rpc_xprt *xprt) spin_unlock_bh(&xprt->sock_lock); } -/* - * Used to allow disconnection when we've been idle - */ static void xprt_init_autodisconnect(unsigned long data) { @@ -327,8 +325,9 @@ out_abort: spin_unlock(&xprt->sock_lock); } -/* - * Attempt to connect a TCP socket. 
+/** + * xprt_connect - schedule a transport connect operation + * @task: RPC task that is requesting the connect * */ void xprt_connect(struct rpc_task *task) @@ -361,11 +360,7 @@ void xprt_connect(struct rpc_task *task) return; } -/* - * We arrive here when awoken from waiting on connection establishment. - */ -static void -xprt_connect_status(struct rpc_task *task) +static void xprt_connect_status(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -404,8 +399,11 @@ xprt_connect_status(struct rpc_task *task) } } -/* - * Look up the RPC request corresponding to a reply, and then lock it. +/** + * xprt_lookup_rqst - find an RPC request corresponding to an XID + * @xprt: transport on which the original request was transmitted + * @xid: RPC XID of incoming reply + * */ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) { @@ -422,9 +420,12 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) return req; } -/* - * Complete reply received. - * The TCP code relies on us to remove the request from xprt->pending. +/** + * xprt_complete_rqst - called when reply processing is complete + * @xprt: controlling transport + * @req: RPC request that just completed + * @copied: actual number of bytes received from the transport + * */ void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) { @@ -498,12 +499,12 @@ out: spin_unlock(&xprt->sock_lock); } -/* - * Place the actual RPC call. - * We have to copy the iovec because sendmsg fiddles with its contents. +/** + * xprt_prepare_transmit - reserve the transport before sending a request + * @task: RPC task about to send a request + * */ -int -xprt_prepare_transmit(struct rpc_task *task) +int xprt_prepare_transmit(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; @@ -533,8 +534,13 @@ out_unlock: return err; } -void -xprt_transmit(struct rpc_task *task) +/** + * xprt_transmit - send an RPC request on a transport + * @task: controlling RPC task + * + * We have to copy the iovec because sendmsg fiddles with its contents. + */ +void xprt_transmit(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; @@ -604,11 +610,7 @@ xprt_transmit(struct rpc_task *task) spin_unlock_bh(&xprt->sock_lock); } -/* - * Reserve an RPC call slot. - */ -static inline void -do_xprt_reserve(struct rpc_task *task) +static inline void do_xprt_reserve(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -628,8 +630,14 @@ do_xprt_reserve(struct rpc_task *task) rpc_sleep_on(&xprt->backlog, task, NULL, NULL); } -void -xprt_reserve(struct rpc_task *task) +/** + * xprt_reserve - allocate an RPC request slot + * @task: RPC task requesting a slot allocation + * + * If no more slots are available, place the task on the transport's + * backlog queue. 
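 *
 * Slot lifecycle, as a sketch pieced together from this file:
 * do_xprt_reserve() hands the task a struct rpc_rqst from the
 * transport's free list, or sleeps it on xprt->backlog when none is
 * available; xprt_release() later marks the slot unused (and
 * presumably returns it to xprt->free), after which
 * xprt_clear_backlog() wakes the next waiter with
 * rpc_wake_up_next(&xprt->backlog).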
+ */ +void xprt_reserve(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -641,9 +649,6 @@ xprt_reserve(struct rpc_task *task) } } -/* - * Allocate a 'unique' XID - */ static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) { return xprt->xid++; @@ -654,11 +659,7 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt) get_random_bytes(&xprt->xid, sizeof(xprt->xid)); } -/* - * Initialize RPC request - */ -static void -xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) +static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) { struct rpc_rqst *req = task->tk_rqstp; @@ -670,11 +671,12 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) req, ntohl(req->rq_xid)); } -/* - * Release an RPC call slot +/** + * xprt_release - release an RPC request slot + * @task: task which is finished with the slot + * */ -void -xprt_release(struct rpc_task *task) +void xprt_release(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req; @@ -702,11 +704,14 @@ xprt_release(struct rpc_task *task) spin_unlock(&xprt->xprt_lock); } -/* - * Set constant timeout +/** + * xprt_set_timeout - set constant RPC timeout + * @to: RPC timeout parameters to set up + * @retr: number of retries + * @incr: amount of increase after each retry + * */ -void -xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) +void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) { to->to_initval = to->to_increment = incr; @@ -715,11 +720,7 @@ xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) to->to_exponential = 0; } -/* - * Initialize an RPC client - */ -static struct rpc_xprt * -xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) +static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) { int result; struct rpc_xprt *xprt; @@ -778,11 +779,14 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) return xprt; } -/* - * Create an RPC client transport given the protocol and peer address. +/** + * xprt_create_proto - create an RPC client transport + * @proto: requested transport protocol + * @sap: remote peer's address + * @to: timeout parameters for new transport + * */ -struct rpc_xprt * -xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) +struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) { struct rpc_xprt *xprt; @@ -794,11 +798,7 @@ xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) return xprt; } -/* - * Prepare for transport shutdown. - */ -static void -xprt_shutdown(struct rpc_xprt *xprt) +static void xprt_shutdown(struct rpc_xprt *xprt) { xprt->shutdown = 1; rpc_wake_up(&xprt->sending); @@ -809,21 +809,18 @@ xprt_shutdown(struct rpc_xprt *xprt) del_timer_sync(&xprt->timer); } -/* - * Clear the xprt backlog queue - */ -static int -xprt_clear_backlog(struct rpc_xprt *xprt) { +static int xprt_clear_backlog(struct rpc_xprt *xprt) { rpc_wake_up_next(&xprt->backlog); wake_up(&xprt->cong_wait); return 1; } -/* - * Destroy an RPC transport, killing off all requests. +/** + * xprt_destroy - destroy an RPC transport, killing off all requests. 
+ * @xprt: transport to destroy + * */ -int -xprt_destroy(struct rpc_xprt *xprt) +int xprt_destroy(struct rpc_xprt *xprt) { dprintk("RPC: destroying transport %p\n", xprt); xprt_shutdown(xprt); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index fa1180ac4823..80222de3afa4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -33,23 +33,21 @@ #include #include +/* + * Maximum port number to use when requesting a reserved port. + */ +#define XS_MAX_RESVPORT (800U) + #ifdef RPC_DEBUG # undef RPC_DEBUG_DATA -# define RPCDBG_FACILITY RPCDBG_XPRT +# define RPCDBG_FACILITY RPCDBG_TRANS #endif -#define XPRT_MAX_RESVPORT (800) - #ifdef RPC_DEBUG_DATA -/* - * Print the buffer contents (first 128 bytes only--just enough for - * diropres return). - */ -static void -xprt_pktdump(char *msg, u32 *packet, unsigned int count) +static void xs_pktdump(char *msg, u32 *packet, unsigned int count) { - u8 *buf = (u8 *) packet; - int j; + u8 *buf = (u8 *) packet; + int j; dprintk("RPC: %s\n", msg); for (j = 0; j < count && j < 128; j += 4) { @@ -64,25 +62,22 @@ xprt_pktdump(char *msg, u32 *packet, unsigned int count) dprintk("\n"); } #else -static inline void -xprt_pktdump(char *msg, u32 *packet, unsigned int count) +static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) { /* NOP */ } #endif -/* - * Look up RPC transport given an INET socket +/** + * xs_sendpages - write pages directly to a socket + * @sock: socket to send on + * @addr: UDP only -- address of destination + * @addrlen: UDP only -- length of destination address + * @xdr: buffer containing this request + * @base: starting position in the buffer + * */ -static inline struct rpc_xprt * -xprt_from_sock(struct sock *sk) -{ - return (struct rpc_xprt *) sk->sk_user_data; -} - -static int -xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, - struct xdr_buf *xdr, unsigned int base, int msgflags) +static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, int msgflags) { struct page **ppage = xdr->pages; unsigned int len, pglen = xdr->page_len; @@ -125,7 +120,7 @@ xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, } if (base || xdr->page_base) { pglen -= base; - base += xdr->page_base; + base += xdr->page_base; ppage += base >> PAGE_CACHE_SHIFT; base &= ~PAGE_CACHE_MASK; } @@ -176,23 +171,25 @@ out: return ret; } -/* - * Write data to socket. 
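 *
 * Rename summary for this patch, collected from the hunks in this
 * file: xprt_pktdump -> xs_pktdump, xdr_sendpages -> xs_sendpages,
 * xprt_sendmsg -> xs_sendmsg, xprt_send_request -> xs_send_request,
 * xprt_close -> xs_close, xprt_socket_destroy -> xs_destroy,
 * udp_data_ready -> xs_udp_data_ready, tcp_copy_data ->
 * xs_tcp_copy_data, tcp_read_fraghdr -> xs_tcp_read_fraghdr,
 * tcp_read_xid -> xs_tcp_read_xid, tcp_read_request ->
 * xs_tcp_read_request, tcp_read_discard -> xs_tcp_read_discard,
 * tcp_check_recm -> xs_tcp_check_recm.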
+/** + * xs_sendmsg - write an RPC request to a socket + * @xprt: generic transport + * @req: the RPC request to write + * */ -static inline int -xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) +static int xs_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) { - struct socket *sock = xprt->sock; - struct xdr_buf *xdr = &req->rq_snd_buf; + struct socket *sock = xprt->sock; + struct xdr_buf *xdr = &req->rq_snd_buf; struct sockaddr *addr = NULL; int addrlen = 0; - unsigned int skip; - int result; + unsigned int skip; + int result; if (!sock) return -ENOTCONN; - xprt_pktdump("packet data:", + xs_pktdump("packet data:", req->rq_svec->iov_base, req->rq_svec->iov_len); @@ -201,13 +198,13 @@ xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) addr = (struct sockaddr *) &xprt->addr; addrlen = sizeof(xprt->addr); } - /* Dont repeat bytes */ + /* Don't repeat bytes */ skip = req->rq_bytes_sent; clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); - result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); + result = xs_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); - dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result); + dprintk("RPC: xs_sendmsg(%d) = %d\n", xdr->len - skip, result); if (result >= 0) return result; @@ -215,8 +212,7 @@ xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) switch (result) { case -ECONNREFUSED: /* When the server has died, an ICMP port unreachable message - * prompts ECONNREFUSED. - */ + * prompts ECONNREFUSED. */ case -EAGAIN: break; case -ECONNRESET: @@ -227,13 +223,25 @@ xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) result = -ENOTCONN; break; default: - printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result); + break; } return result; } -static int -xprt_send_request(struct rpc_task *task) +/** + * xs_send_request - write an RPC request to a socket + * @task: address of RPC task that manages the state of an RPC request + * + * Return values: + * 0: The request has been sent + * EAGAIN: The socket was blocked, please call again later to + * complete the request + * other: Some other error occured, the request was not sent + * + * XXX: In the case of soft timeouts, should we eventually give up + * if the socket is not able to make progress? + */ +static int xs_send_request(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; @@ -242,18 +250,18 @@ xprt_send_request(struct rpc_task *task) /* set up everything as needed. */ /* Write the record marker */ if (xprt->stream) { - u32 *marker = req->rq_svec[0].iov_base; + u32 *marker = req->rq_svec[0].iov_base; *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); } /* Continue transmitting the packet/record. We must be careful * to cope with writespace callbacks arriving _after_ we have - * called xprt_sendmsg(). + * called sendmsg(). 
*/ while (1) { req->rq_xtime = jiffies; - status = xprt_sendmsg(xprt, req); + status = xs_sendmsg(xprt, req); if (status < 0) break; @@ -285,7 +293,7 @@ xprt_send_request(struct rpc_task *task) if (status == -EAGAIN) { if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { - /* Protect against races with xprt_write_space */ + /* Protect against races with xs_write_space */ spin_lock_bh(&xprt->sock_lock); /* Don't race with disconnect */ if (!xprt_connected(xprt)) @@ -303,65 +311,77 @@ xprt_send_request(struct rpc_task *task) return status; } -/* - * Close down a transport socket +/** + * xs_close - close a socket + * @xprt: transport + * */ -static void -xprt_close(struct rpc_xprt *xprt) +static void xs_close(struct rpc_xprt *xprt) { - struct socket *sock = xprt->sock; - struct sock *sk = xprt->inet; + struct socket *sock = xprt->sock; + struct sock *sk = xprt->inet; if (!sk) return; + dprintk("RPC: xs_close xprt %p\n", xprt); + write_lock_bh(&sk->sk_callback_lock); xprt->inet = NULL; xprt->sock = NULL; - sk->sk_user_data = NULL; - sk->sk_data_ready = xprt->old_data_ready; + sk->sk_user_data = NULL; + sk->sk_data_ready = xprt->old_data_ready; sk->sk_state_change = xprt->old_state_change; - sk->sk_write_space = xprt->old_write_space; + sk->sk_write_space = xprt->old_write_space; write_unlock_bh(&sk->sk_callback_lock); - sk->sk_no_check = 0; + sk->sk_no_check = 0; sock_release(sock); } -static void xprt_socket_destroy(struct rpc_xprt *xprt) +/** + * xs_destroy - prepare to shutdown a transport + * @xprt: doomed transport + * + */ +static void xs_destroy(struct rpc_xprt *xprt) { + dprintk("RPC: xs_destroy xprt %p\n", xprt); + cancel_delayed_work(&xprt->sock_connect); flush_scheduled_work(); xprt_disconnect(xprt); - xprt_close(xprt); + xs_close(xprt); kfree(xprt->slot); } -/* - * Input handler for RPC replies. Called from a bottom half and hence - * atomic. +static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) +{ + return (struct rpc_xprt *) sk->sk_user_data; +} + +/** + * xs_udp_data_ready - "data ready" callback for UDP sockets + * @sk: socket with data to read + * @len: how much data to read + * */ -static void -udp_data_ready(struct sock *sk, int len) +static void xs_udp_data_ready(struct sock *sk, int len) { - struct rpc_task *task; - struct rpc_xprt *xprt; + struct rpc_task *task; + struct rpc_xprt *xprt; struct rpc_rqst *rovr; - struct sk_buff *skb; + struct sk_buff *skb; int err, repsize, copied; u32 _xid, *xp; read_lock(&sk->sk_callback_lock); - dprintk("RPC: udp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) { - printk("RPC: udp_data_ready request not found!\n"); + dprintk("RPC: xs_udp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) goto out; - } - - dprintk("RPC: udp_data_ready client %p\n", xprt); if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) goto out; @@ -371,7 +391,7 @@ udp_data_ready(struct sock *sk, int len) repsize = skb->len - sizeof(struct udphdr); if (repsize < 4) { - printk("RPC: impossible RPC reply size %d!\n", repsize); + dprintk("RPC: impossible RPC reply size %d!\n", repsize); goto dropit; } @@ -410,11 +430,7 @@ udp_data_ready(struct sock *sk, int len) read_unlock(&sk->sk_callback_lock); } -/* - * Copy from an skb into memory and shrink the skb. 
- */ -static inline size_t -tcp_copy_data(skb_reader_t *desc, void *p, size_t len) +static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) { if (len > desc->count) len = desc->count; @@ -430,18 +446,14 @@ tcp_copy_data(skb_reader_t *desc, void *p, size_t len) return len; } -/* - * TCP read fragment marker - */ -static inline void -tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) +static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) { size_t len, used; char *p; p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; - used = tcp_copy_data(desc, p, len); + used = xs_tcp_copy_data(desc, p, len); xprt->tcp_offset += used; if (used != len) return; @@ -455,15 +467,15 @@ tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) xprt->tcp_offset = 0; /* Sanity check of the record length */ if (xprt->tcp_reclen < 4) { - printk(KERN_ERR "RPC: Invalid TCP record fragment length\n"); + dprintk("RPC: invalid TCP record fragment length\n"); xprt_disconnect(xprt); + return; } dprintk("RPC: reading TCP record fragment of length %d\n", xprt->tcp_reclen); } -static void -tcp_check_recm(struct rpc_xprt *xprt) +static void xs_tcp_check_recm(struct rpc_xprt *xprt) { dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); @@ -478,11 +490,7 @@ tcp_check_recm(struct rpc_xprt *xprt) } } -/* - * TCP read xid - */ -static inline void -tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) +static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) { size_t len, used; char *p; @@ -490,7 +498,7 @@ tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; dprintk("RPC: reading XID (%Zu bytes)\n", len); p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; - used = tcp_copy_data(desc, p, len); + used = xs_tcp_copy_data(desc, p, len); xprt->tcp_offset += used; if (used != len) return; @@ -499,14 +507,10 @@ tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) xprt->tcp_copied = 4; dprintk("RPC: reading reply for XID %08x\n", ntohl(xprt->tcp_xid)); - tcp_check_recm(xprt); + xs_tcp_check_recm(xprt); } -/* - * TCP read and complete request - */ -static inline void -tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) +static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) { struct rpc_rqst *req; struct xdr_buf *rcvbuf; @@ -533,12 +537,12 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) memcpy(&my_desc, desc, sizeof(my_desc)); my_desc.count = len; r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - &my_desc, tcp_copy_data); + &my_desc, xs_tcp_copy_data); desc->count -= r; desc->offset += r; } else r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - desc, tcp_copy_data); + desc, xs_tcp_copy_data); if (r > 0) { xprt->tcp_copied += r; @@ -581,14 +585,10 @@ out: xprt_complete_rqst(xprt, req, xprt->tcp_copied); } spin_unlock(&xprt->sock_lock); - tcp_check_recm(xprt); + xs_tcp_check_recm(xprt); } -/* - * TCP discard extra bytes from a short read - */ -static inline void -tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) +static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) { size_t len; @@ -599,16 +599,10 @@ tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) desc->offset += len; xprt->tcp_offset += len; dprintk("RPC: discarded %Zu 
bytes\n", len); - tcp_check_recm(xprt); + xs_tcp_check_recm(xprt); } -/* - * TCP record receive routine - * We first have to grab the record marker, then the XID, then the data. - */ -static int -tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, - unsigned int offset, size_t len) +static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) { struct rpc_xprt *xprt = rd_desc->arg.data; skb_reader_t desc = { @@ -616,64 +610,72 @@ tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, .offset = offset, .count = len, .csum = 0 - }; + }; - dprintk("RPC: tcp_data_recv\n"); + dprintk("RPC: xs_tcp_data_recv started\n"); do { /* Read in a new fragment marker if necessary */ /* Can we ever really expect to get completely empty fragments? */ if (xprt->tcp_flags & XPRT_COPY_RECM) { - tcp_read_fraghdr(xprt, &desc); + xs_tcp_read_fraghdr(xprt, &desc); continue; } /* Read in the xid if necessary */ if (xprt->tcp_flags & XPRT_COPY_XID) { - tcp_read_xid(xprt, &desc); + xs_tcp_read_xid(xprt, &desc); continue; } /* Read in the request data */ if (xprt->tcp_flags & XPRT_COPY_DATA) { - tcp_read_request(xprt, &desc); + xs_tcp_read_request(xprt, &desc); continue; } /* Skip over any trailing bytes on short reads */ - tcp_read_discard(xprt, &desc); + xs_tcp_read_discard(xprt, &desc); } while (desc.count); - dprintk("RPC: tcp_data_recv done\n"); + dprintk("RPC: xs_tcp_data_recv done\n"); return len - desc.count; } -static void tcp_data_ready(struct sock *sk, int bytes) +/** + * xs_tcp_data_ready - "data ready" callback for TCP sockets + * @sk: socket with data to read + * @bytes: how much data to read + * + */ +static void xs_tcp_data_ready(struct sock *sk, int bytes) { struct rpc_xprt *xprt; read_descriptor_t rd_desc; read_lock(&sk->sk_callback_lock); - dprintk("RPC: tcp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) { - printk("RPC: tcp_data_ready socket info not found!\n"); + dprintk("RPC: xs_tcp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) goto out; - } if (xprt->shutdown) goto out; - /* We use rd_desc to pass struct xprt to tcp_data_recv */ + /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ rd_desc.arg.data = xprt; rd_desc.count = 65536; - tcp_read_sock(sk, &rd_desc, tcp_data_recv); + tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); out: read_unlock(&sk->sk_callback_lock); } -static void -tcp_state_change(struct sock *sk) +/** + * xs_tcp_state_change - callback to handle TCP socket state changes + * @sk: socket whose state has changed + * + */ +static void xs_tcp_state_change(struct sock *sk) { - struct rpc_xprt *xprt; + struct rpc_xprt *xprt; read_lock(&sk->sk_callback_lock); if (!(xprt = xprt_from_sock(sk))) goto out; - dprintk("RPC: tcp_state_change client %p...\n", xprt); + dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); dprintk("RPC: state %x conn %d dead %d zapped %d\n", sk->sk_state, xprt_connected(xprt), sock_flag(sk, SOCK_DEAD), @@ -703,17 +705,20 @@ tcp_state_change(struct sock *sk) read_unlock(&sk->sk_callback_lock); } -/* +/** + * xs_write_space - callback invoked when socket buffer space becomes + * available + * @sk: socket whose state has changed + * * Called when more output buffer space is available for this socket. * We try not to wake our writers until they can make "significant" * progress, otherwise we'll waste resources thrashing sock_sendmsg * with a bunch of small requests. 
*/ -static void -xprt_write_space(struct sock *sk) +static void xs_write_space(struct sock *sk) { - struct rpc_xprt *xprt; - struct socket *sock; + struct rpc_xprt *xprt; + struct socket *sock; read_lock(&sk->sk_callback_lock); if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) @@ -743,11 +748,15 @@ out: read_unlock(&sk->sk_callback_lock); } -/* - * Set socket buffer length +/** + * xs_set_buffer_size - set send and receive limits + * @xprt: generic transport + * + * Set socket send and receive limits based on the + * sndsize and rcvsize fields in the generic transport + * structure. This applies only to UDP sockets. */ -static void -xprt_sock_setbufsize(struct rpc_xprt *xprt) +static void xs_set_buffer_size(struct rpc_xprt *xprt) { struct sock *sk = xprt->inet; @@ -764,15 +773,12 @@ xprt_sock_setbufsize(struct rpc_xprt *xprt) } } -/* - * Bind to a reserved port - */ -static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) +static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) { struct sockaddr_in myaddr = { .sin_family = AF_INET, }; - int err, port; + int err, port; /* Were we already bound to a given port? Try to reuse it */ port = xprt->port; @@ -782,20 +788,47 @@ static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) sizeof(myaddr)); if (err == 0) { xprt->port = port; + dprintk("RPC: xs_bindresvport bound to port %u\n", + port); return 0; } if (--port == 0) - port = XPRT_MAX_RESVPORT; + port = XS_MAX_RESVPORT; } while (err == -EADDRINUSE && port != xprt->port); - printk("RPC: Can't bind to reserved port (%d).\n", -err); + dprintk("RPC: can't bind to reserved port (%d).\n", -err); return err; } -static void -xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) +static struct socket *xs_create(struct rpc_xprt *xprt, int proto, int resvport) { - struct sock *sk = sock->sk; + struct socket *sock; + int type, err; + + dprintk("RPC: xs_create(%s %d)\n", + (proto == IPPROTO_UDP)? "udp" : "tcp", proto); + + type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; + + if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { + dprintk("RPC: can't create socket (%d).\n", -err); + return NULL; + } + + /* If the caller has the capability, bind to a reserved port */ + if (resvport && xs_bindresvport(xprt, sock) < 0) + goto failed; + + return sock; + +failed: + sock_release(sock); + return NULL; +} + +static void xs_bind(struct rpc_xprt *xprt, struct socket *sock) +{ + struct sock *sk = sock->sk; if (xprt->inet) return; @@ -806,16 +839,16 @@ xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) xprt->old_state_change = sk->sk_state_change; xprt->old_write_space = sk->sk_write_space; if (xprt->prot == IPPROTO_UDP) { - sk->sk_data_ready = udp_data_ready; + sk->sk_data_ready = xs_udp_data_ready; sk->sk_no_check = UDP_CSUM_NORCV; xprt_set_connected(xprt); } else { tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ - sk->sk_data_ready = tcp_data_ready; - sk->sk_state_change = tcp_state_change; + sk->sk_data_ready = xs_tcp_data_ready; + sk->sk_state_change = xs_tcp_state_change; xprt_clear_connected(xprt); } - sk->sk_write_space = xprt_write_space; + sk->sk_write_space = xs_write_space; /* Reset to new socket */ xprt->sock = sock; @@ -825,39 +858,13 @@ xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) return; } -/* - * Datastream sockets are created here, but xprt_connect will create - * and connect stream sockets. 
+/** + * xs_connect_worker - try to connect a socket to a remote endpoint + * @args: RPC transport to connect + * + * Invoked by a work queue tasklet. */ -static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport) -{ - struct socket *sock; - int type, err; - - dprintk("RPC: xprt_create_socket(%s %d)\n", - (proto == IPPROTO_UDP)? "udp" : "tcp", proto); - - type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; - - if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { - printk("RPC: can't create socket (%d).\n", -err); - return NULL; - } - - /* If the caller has the capability, bind to a reserved port */ - if (resvport && xprt_bindresvport(xprt, sock) < 0) { - printk("RPC: can't bind to reserved port.\n"); - goto failed; - } - - return sock; - -failed: - sock_release(sock); - return NULL; -} - -static void xprt_socket_connect(void *args) +static void xs_connect_worker(void *args) { struct rpc_xprt *xprt = (struct rpc_xprt *)args; struct socket *sock = xprt->sock; @@ -866,18 +873,20 @@ static void xprt_socket_connect(void *args) if (xprt->shutdown || xprt->addr.sin_port == 0) goto out; + dprintk("RPC: xs_connect_worker xprt %p\n", xprt); + /* * Start by resetting any existing state */ - xprt_close(xprt); - sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport); + xs_close(xprt); + sock = xs_create(xprt, xprt->prot, xprt->resvport); if (sock == NULL) { /* couldn't create socket or bind to reserved port; * this is likely a permanent error, so cause an abort */ goto out; } - xprt_bind_socket(xprt, sock); - xprt_sock_setbufsize(xprt); + xs_bind(xprt, sock); + xs_set_buffer_size(xprt); status = 0; if (!xprt->stream) @@ -908,20 +917,23 @@ out_clear: smp_mb__after_clear_bit(); } -static void -xprt_connect_sock(struct rpc_task *task) +/** + * xs_connect - connect a socket to a remote endpoint + * @task: address of RPC task that manages state of connect request + * + * TCP: If the remote end dropped the connection, delay reconnecting. + */ +static void xs_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { - /* Note: if we are here due to a dropped connection - * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ - * seconds - */ - if (xprt->sock != NULL) + if (xprt->sock != NULL) { + dprintk("RPC: xs_connect delayed xprt %p\n", xprt); schedule_delayed_work(&xprt->sock_connect, RPC_REESTABLISH_TIMEOUT); - else { + } else { + dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); schedule_work(&xprt->sock_connect); /* flush_scheduled_work can sleep... 
*/ if (!RPC_IS_ASYNC(task)) @@ -930,29 +942,23 @@ xprt_connect_sock(struct rpc_task *task) } } -/* - * Set default timeout parameters - */ -static void -xprt_default_timeout(struct rpc_timeout *to, int proto) -{ - if (proto == IPPROTO_UDP) - xprt_set_timeout(to, 5, 5 * HZ); - else - xprt_set_timeout(to, 2, 60 * HZ); -} - -static struct rpc_xprt_ops xprt_socket_ops = { - .set_buffer_size = xprt_sock_setbufsize, - .connect = xprt_connect_sock, - .send_request = xprt_send_request, - .close = xprt_close, - .destroy = xprt_socket_destroy, +static struct rpc_xprt_ops xs_ops = { + .set_buffer_size = xs_set_buffer_size, + .connect = xs_connect, + .send_request = xs_send_request, + .close = xs_close, + .destroy = xs_destroy, }; extern unsigned int xprt_udp_slot_table_entries; extern unsigned int xprt_tcp_slot_table_entries; +/** + * xs_setup_udp - Set up transport to use a UDP socket + * @xprt: transport to set up + * @to: timeout parameters + * + */ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) { size_t slot_table_size; @@ -967,7 +973,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) memset(xprt->slot, 0, slot_table_size); xprt->prot = IPPROTO_UDP; - xprt->port = XPRT_MAX_RESVPORT; + xprt->port = XS_MAX_RESVPORT; xprt->stream = 0; xprt->nocong = 0; xprt->cwnd = RPC_INITCWND; @@ -975,18 +981,24 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) /* XXX: header size can vary due to auth type, IPv6, etc. */ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); + INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt); - xprt->ops = &xprt_socket_ops; + xprt->ops = &xs_ops; if (to) xprt->timeout = *to; else - xprt_default_timeout(to, xprt->prot); + xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); return 0; } +/** + * xs_setup_tcp - Set up transport to use a TCP socket + * @xprt: transport to set up + * @to: timeout parameters + * + */ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) { size_t slot_table_size; @@ -1001,21 +1013,21 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) memset(xprt->slot, 0, slot_table_size); xprt->prot = IPPROTO_TCP; - xprt->port = XPRT_MAX_RESVPORT; + xprt->port = XS_MAX_RESVPORT; xprt->stream = 1; xprt->nocong = 1; xprt->cwnd = RPC_MAXCWND(xprt); xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; xprt->max_payload = (1U << 31) - 1; - INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); + INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt); - xprt->ops = &xprt_socket_ops; + xprt->ops = &xs_ops; if (to) xprt->timeout = *to; else - xprt_default_timeout(to, xprt->prot); + xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); return 0; } -- cgit v1.2.2 From b4b5cc85ed4ecbe4adbfbc4df028850de67a9f09 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:29 -0400 Subject: [PATCH] RPC: Reduce stack utilization in xs_sendpages Reduce stack utilization of the RPC socket transport's send path. A couple of unlikely()s are added to ensure the compiler places the tail processing at the end of the csect. Test-plan: Millions of fsx operations. Performance characterization such as "sio" or "iozone". 
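For readers unfamiliar with the hints mentioned above: in the kernel, likely()/unlikely() are thin wrappers around GCC's __builtin_expect() (see <linux/compiler.h>). The stand-alone sketch below is an illustration only, not part of this patch, and the demo_* names are invented; it shows how the hint is built and why moving a struct-heavy send setup into a small helper keeps the hot caller's stack frame lean, the same idea behind the new xs_send_head() and xs_send_tail() helpers.

/* Illustrative sketch only; demo_* names are hypothetical. */
#include <stdio.h>
#include <stddef.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

struct demo_iov {
	const char *base;
	size_t len;
};

/* The temporary iovec lives in the helper's frame, not the caller's. */
static int demo_send_head(const char *buf, size_t base, size_t len)
{
	struct demo_iov iov = { .base = buf + base, .len = len - base };
	return (int)iov.len;		/* stand-in for the real send call */
}

static int demo_sendpages(const char *buf, size_t base, size_t len, size_t pglen)
{
	int sent = 0;

	if (likely(base < len))
		sent += demo_send_head(buf, base, len);

	if (unlikely(pglen == 0))	/* rare case: hint keeps it off the hot path */
		return sent;

	return sent + (int)pglen;
}

int main(void)
{
	printf("%d\n", demo_sendpages("hello world", 0, 5, 6));
	return 0;
}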
Version: Thu, 11 Aug 2005 16:04:30 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 73 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 80222de3afa4..a5a04203a6b0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -68,6 +68,41 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) } #endif +#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) + +static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) +{ + struct kvec iov = { + .iov_base = xdr->head[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_name = addr, + .msg_namelen = addrlen, + .msg_flags = XS_SENDMSG_FLAGS, + }; + + if (xdr->len > len) + msg.msg_flags |= MSG_MORE; + + if (likely(iov.iov_len)) + return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + return kernel_sendmsg(sock, &msg, NULL, 0, 0); +} + +static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) +{ + struct kvec iov = { + .iov_base = xdr->tail[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_flags = XS_SENDMSG_FLAGS, + }; + + return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); +} + /** * xs_sendpages - write pages directly to a socket * @sock: socket to send on @@ -77,7 +112,7 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) * @base: starting position in the buffer * */ -static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, int msgflags) +static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) { struct page **ppage = xdr->pages; unsigned int len, pglen = xdr->page_len; @@ -86,35 +121,20 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, len = xdr->head[0].iov_len; if (base < len || (addr != NULL && base == 0)) { - struct kvec iov = { - .iov_base = xdr->head[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_name = addr, - .msg_namelen = addrlen, - .msg_flags = msgflags, - }; - if (xdr->len > len) - msg.msg_flags |= MSG_MORE; - - if (iov.iov_len != 0) - err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); - else - err = kernel_sendmsg(sock, &msg, NULL, 0, 0); + err = xs_send_head(sock, addr, addrlen, xdr, base, len); if (ret == 0) ret = err; else if (err > 0) ret += err; - if (err != iov.iov_len) + if (err != (len - base)) goto out; base = 0; } else base -= len; - if (pglen == 0) + if (unlikely(pglen == 0)) goto copy_tail; - if (base >= pglen) { + if (unlikely(base >= pglen)) { base -= pglen; goto copy_tail; } @@ -127,7 +147,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, sendpage = sock->ops->sendpage ? 
: sock_no_sendpage; do { - int flags = msgflags; + int flags = XS_SENDMSG_FLAGS; len = PAGE_CACHE_SIZE; if (base) @@ -154,14 +174,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, copy_tail: len = xdr->tail[0].iov_len; if (base < len) { - struct kvec iov = { - .iov_base = xdr->tail[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_flags = msgflags, - }; - err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + err = xs_send_tail(sock, xdr, base, len); if (ret == 0) ret = err; else if (err > 0) @@ -202,7 +215,7 @@ static int xs_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) skip = req->rq_bytes_sent; clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); - result = xs_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); + result = xs_sendpages(sock, addr, addrlen, xdr, skip); dprintk("RPC: xs_sendmsg(%d) = %d\n", xdr->len - skip, result); -- cgit v1.2.2 From 4a0f8c04f2ece949d54a0c4fd7490259cf23a58a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:32 -0400 Subject: [PATCH] RPC: Rename sock_lock Clean-up: replace a name reference to sockets in the generic parts of the RPC client by renaming sock_lock in the rpc_xprt structure. Test-plan: Compile kernel with CONFIG_NFS enabled. Version: Thu, 11 Aug 2005 16:05:00 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 44 ++++++++++++++++++++++---------------------- net/sunrpc/xprtsock.c | 22 +++++++++++----------- 2 files changed, 33 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 589195e630ef..1f0da8c1a3b0 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -106,9 +106,9 @@ xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) { int retval; - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); retval = __xprt_lock_write(xprt, task); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); return retval; } @@ -161,9 +161,9 @@ __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) { - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); __xprt_release_write(xprt, task); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); } /* @@ -266,9 +266,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req) req->rq_retries = 0; xprt_reset_majortimeo(req); /* Reset the RTT counters == "slow start" */ - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); pprintk("RPC: %lu timeout\n", jiffies); status = -ETIMEDOUT; } @@ -298,10 +298,10 @@ xprt_socket_autoclose(void *args) void xprt_disconnect(struct rpc_xprt *xprt) { dprintk("RPC: disconnected transport %p\n", xprt); - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); xprt_clear_connected(xprt); rpc_wake_up_status(&xprt->pending, -ENOTCONN); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); } static void @@ -309,12 +309,12 @@ xprt_init_autodisconnect(unsigned long data) { struct rpc_xprt *xprt = (struct rpc_xprt *)data; - spin_lock(&xprt->sock_lock); + spin_lock(&xprt->transport_lock); if (!list_empty(&xprt->recv) || xprt->shutdown) goto out_abort; if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) goto out_abort; - 
spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); /* Let keventd close the socket */ if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0) xprt_release_write(xprt, NULL); @@ -322,7 +322,7 @@ xprt_init_autodisconnect(unsigned long data) schedule_work(&xprt->task_cleanup); return; out_abort: - spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); } /** @@ -482,7 +482,7 @@ xprt_timer(struct rpc_task *task) struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - spin_lock(&xprt->sock_lock); + spin_lock(&xprt->transport_lock); if (req->rq_received) goto out; @@ -496,7 +496,7 @@ xprt_timer(struct rpc_task *task) out: task->tk_timeout = 0; rpc_wake_up_task(task); - spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); } /** @@ -515,7 +515,7 @@ int xprt_prepare_transmit(struct rpc_task *task) if (xprt->shutdown) return -EIO; - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); if (req->rq_received && !req->rq_bytes_sent) { err = req->rq_received; goto out_unlock; @@ -530,7 +530,7 @@ int xprt_prepare_transmit(struct rpc_task *task) goto out_unlock; } out_unlock: - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); return err; } @@ -552,13 +552,13 @@ void xprt_transmit(struct rpc_task *task) smp_rmb(); if (!req->rq_received) { if (list_empty(&req->rq_list)) { - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); /* Update the softirq receive buffer */ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(req->rq_private_buf)); /* Add request to the receive list */ list_add_tail(&req->rq_list, &xprt->recv); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); xprt_reset_majortimeo(req); /* Turn off autodisconnect */ del_singleshot_timer_sync(&xprt->timer); @@ -592,7 +592,7 @@ void xprt_transmit(struct rpc_task *task) out_receive: dprintk("RPC: %4d xmit complete\n", task->tk_pid); /* Set the task's receive timeout value */ - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); if (!xprt->nocong) { int timer = task->tk_msg.rpc_proc->p_timer; task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer); @@ -607,7 +607,7 @@ void xprt_transmit(struct rpc_task *task) else if (!req->rq_received) rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); __xprt_release_write(xprt, task); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); } static inline void do_xprt_reserve(struct rpc_task *task) @@ -683,7 +683,7 @@ void xprt_release(struct rpc_task *task) if (!(req = task->tk_rqstp)) return; - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); __xprt_release_write(xprt, task); __xprt_put_cong(xprt, req); if (!list_empty(&req->rq_list)) @@ -692,7 +692,7 @@ void xprt_release(struct rpc_task *task) if (list_empty(&xprt->recv) && !xprt->shutdown) mod_timer(&xprt->timer, xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); task->tk_rqstp = NULL; memset(req, 0, sizeof(*req)); /* mark unused */ @@ -750,7 +750,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc return ERR_PTR(result); } - spin_lock_init(&xprt->sock_lock); + spin_lock_init(&xprt->transport_lock); spin_lock_init(&xprt->xprt_lock); init_waitqueue_head(&xprt->cong_wait); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index a5a04203a6b0..bc90caab6088 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -307,7 +307,7 @@ 
static int xs_send_request(struct rpc_task *task) if (status == -EAGAIN) { if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { /* Protect against races with xs_write_space */ - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); /* Don't race with disconnect */ if (!xprt_connected(xprt)) task->tk_status = -ENOTCONN; @@ -315,7 +315,7 @@ static int xs_send_request(struct rpc_task *task) task->tk_timeout = req->rq_timeout; rpc_sleep_on(&xprt->pending, task, NULL, NULL); } - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); return status; } /* Keep holding the socket if it is blocked */ @@ -415,7 +415,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) goto dropit; /* Look up and lock the request corresponding to the given XID */ - spin_lock(&xprt->sock_lock); + spin_lock(&xprt->transport_lock); rovr = xprt_lookup_rqst(xprt, *xp); if (!rovr) goto out_unlock; @@ -436,7 +436,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) xprt_complete_rqst(xprt, rovr, copied); out_unlock: - spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); dropit: skb_free_datagram(sk, skb); out: @@ -531,13 +531,13 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc ssize_t r; /* Find and lock the request corresponding to this xid */ - spin_lock(&xprt->sock_lock); + spin_lock(&xprt->transport_lock); req = xprt_lookup_rqst(xprt, xprt->tcp_xid); if (!req) { xprt->tcp_flags &= ~XPRT_COPY_DATA; dprintk("RPC: XID %08x request not found!\n", ntohl(xprt->tcp_xid)); - spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); return; } @@ -597,7 +597,7 @@ out: req->rq_task->tk_pid); xprt_complete_rqst(xprt, req, xprt->tcp_copied); } - spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); xs_tcp_check_recm(xprt); } @@ -696,7 +696,7 @@ static void xs_tcp_state_change(struct sock *sk) switch (sk->sk_state) { case TCP_ESTABLISHED: - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); if (!xprt_test_and_set_connected(xprt)) { /* Reset TCP record info */ xprt->tcp_offset = 0; @@ -705,7 +705,7 @@ static void xs_tcp_state_change(struct sock *sk) xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; rpc_wake_up(&xprt->pending); } - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); break; case TCP_SYN_SENT: case TCP_SYN_RECV: @@ -753,10 +753,10 @@ static void xs_write_space(struct sock *sk) if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) goto out; - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); if (xprt->snd_task) rpc_wake_up_task(xprt->snd_task); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); out: read_unlock(&sk->sk_callback_lock); } -- cgit v1.2.2 From 5dc07727f86b25851e95193a0c484ea21b531c47 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:35 -0400 Subject: [PATCH] RPC: Rename xprt_lock Clean-up: Replace the xprt_lock with something more aptly named. This lock single-threads the XID and request slot reservation process. Test-plan: Compile kernel with CONFIG_NFS enabled. 
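As a rough user-space analogy (not kernel code; all demo_* names are invented), the pattern named by this lock is a dedicated mutex that serializes only slot and XID reservation, separate from the lock that protects send/receive state:

/* Illustrative sketch only; initialize the mutexes with
 * PTHREAD_MUTEX_INITIALIZER or pthread_mutex_init() before use. */
#include <pthread.h>
#include <stddef.h>

struct demo_rqst {
	struct demo_rqst *next;
	unsigned int xid;
};

struct demo_xprt {
	pthread_mutex_t reserve_lock;	/* guards free list and XID counter */
	pthread_mutex_t transport_lock;	/* guards send/receive state (unused here) */
	struct demo_rqst *free;
	unsigned int next_xid;
};

struct demo_rqst *demo_reserve(struct demo_xprt *xprt)
{
	struct demo_rqst *req;

	pthread_mutex_lock(&xprt->reserve_lock);
	req = xprt->free;
	if (req) {
		xprt->free = req->next;
		req->xid = xprt->next_xid++;
	}
	pthread_mutex_unlock(&xprt->reserve_lock);
	return req;			/* NULL means "wait on the backlog queue" */
}

void demo_release(struct demo_xprt *xprt, struct demo_rqst *req)
{
	pthread_mutex_lock(&xprt->reserve_lock);
	req->next = xprt->free;
	xprt->free = req;
	pthread_mutex_unlock(&xprt->reserve_lock);
}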
Version: Thu, 11 Aug 2005 16:05:26 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 1f0da8c1a3b0..9c45c522e3ef 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -643,9 +643,9 @@ void xprt_reserve(struct rpc_task *task) task->tk_status = -EIO; if (!xprt->shutdown) { - spin_lock(&xprt->xprt_lock); + spin_lock(&xprt->reserve_lock); do_xprt_reserve(task); - spin_unlock(&xprt->xprt_lock); + spin_unlock(&xprt->reserve_lock); } } @@ -698,10 +698,10 @@ void xprt_release(struct rpc_task *task) dprintk("RPC: %4d release request %p\n", task->tk_pid, req); - spin_lock(&xprt->xprt_lock); + spin_lock(&xprt->reserve_lock); list_add(&req->rq_list, &xprt->free); xprt_clear_backlog(xprt); - spin_unlock(&xprt->xprt_lock); + spin_unlock(&xprt->reserve_lock); } /** @@ -751,7 +751,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc } spin_lock_init(&xprt->transport_lock); - spin_lock_init(&xprt->xprt_lock); + spin_lock_init(&xprt->reserve_lock); init_waitqueue_head(&xprt->cong_wait); INIT_LIST_HEAD(&xprt->free); -- cgit v1.2.2 From 2226feb6bcd0e5e117a9be3ea3dd3ffc14f3e41e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:38 -0400 Subject: [PATCH] RPC: rename the sockstate field Clean-up: get rid of a name reference to sockets in the generic parts of the RPC client by renaming the sockstate field in the rpc_xprt structure. Test-plan: Compile kernel with CONFIG_NFS enabled. Version: Thu, 11 Aug 2005 16:05:53 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 14 +++++++------- net/sunrpc/xprtsock.c | 6 ++---- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 9c45c522e3ef..57c5e77b155e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -74,7 +74,7 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; - if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) { + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { if (task == xprt->snd_task) return 1; goto out_sleep; @@ -88,7 +88,7 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) return 1; } smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->sockstate); + clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); out_sleep: dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt); @@ -118,7 +118,7 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) { struct rpc_task *task; - if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) return; if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) goto out_unlock; @@ -139,7 +139,7 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) } out_unlock: smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->sockstate); + clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); } @@ -152,7 +152,7 @@ __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) if (xprt->snd_task == task) { xprt->snd_task = NULL; smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->sockstate); + clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); __xprt_lock_write_next(xprt); } @@ -312,11 +312,11 @@ xprt_init_autodisconnect(unsigned long data) spin_lock(&xprt->transport_lock); if (!list_empty(&xprt->recv) || xprt->shutdown) goto 
out_abort; - if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) goto out_abort; spin_unlock(&xprt->transport_lock); /* Let keventd close the socket */ - if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0) + if (xprt_connecting(xprt)) xprt_release_write(xprt, NULL); else schedule_work(&xprt->task_cleanup); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index bc90caab6088..76a33b54f436 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -925,9 +925,7 @@ out: else rpc_wake_up(&xprt->pending); out_clear: - smp_mb__before_clear_bit(); - clear_bit(XPRT_CONNECTING, &xprt->sockstate); - smp_mb__after_clear_bit(); + xprt_clear_connecting(xprt); } /** @@ -940,7 +938,7 @@ static void xs_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; - if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { + if (!xprt_test_and_set_connecting(xprt)) { if (xprt->sock != NULL) { dprintk("RPC: xs_connect delayed xprt %p\n", xprt); schedule_delayed_work(&xprt->sock_connect, -- cgit v1.2.2 From 86b9f57dfdf455763d2be73a742a9a88bb664173 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:41 -0400 Subject: [PATCH] RPC: Eliminate socket.h includes in RPC client Clean-up: get rid of unnecessary socket.h and in.h includes in the generic parts of the RPC client. Test-plan: Compile kernel with CONFIG_NFS enabled. Version: Thu, 11 Aug 2005 16:06:23 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/auth.c | 1 - net/sunrpc/auth_gss/auth_gss.c | 2 -- net/sunrpc/auth_gss/gss_krb5_mech.c | 1 - net/sunrpc/auth_gss/gss_mech_switch.c | 1 - net/sunrpc/auth_null.c | 2 -- net/sunrpc/auth_unix.c | 2 -- net/sunrpc/clnt.c | 1 - net/sunrpc/sunrpc_syms.c | 1 - 8 files changed, 11 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 505e2d4b3d62..a415d99c394d 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 2f7b867161d2..53a030acdf75 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -42,8 +42,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 606a8a82cafb..462c5b86b073 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 9dfb68377d69..58aeaddd8c79 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -35,7 +35,6 @@ #include #include -#include #include #include #include diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 9b72d3abf823..f56767aaa927 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c @@ -7,9 +7,7 @@ */ #include -#include #include -#include #include #include #include diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 4ff297a9b15b..890fb5ea0dcb 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -9,8 +9,6 @@ #include #include #include -#include -#include #include #include diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index ab50c3c9e6a8..0d1b010a4a01 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -27,7 
+27,6 @@ #include #include #include -#include #include #include diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index ed48ff022d35..2387e7b823ff 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include -- cgit v1.2.2 From 44fbac2288dfed6f1963ac00bf922c3bcd779cd1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:44 -0400 Subject: [PATCH] RPC: Add helper for waking tasks pending on a transport Clean-up: remove only reference to xprt->pending from the socket transport implementation. This makes a cleaner interface for other transport implementations as well. Test-plan: Compile kernel with CONFIG_NFS enabled. Version: Thu, 11 Aug 2005 16:06:52 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 18 ++++++++++++++++-- net/sunrpc/xprtsock.c | 7 ++----- 2 files changed, 18 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 57c5e77b155e..2f9cd468b953 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -227,6 +227,20 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) xprt->cwnd = cwnd; } +/** + * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue + * @xprt: transport with waiting tasks + * @status: result code to plant in each task before waking it + * + */ +void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) +{ + if (status < 0) + rpc_wake_up_status(&xprt->pending, status); + else + rpc_wake_up(&xprt->pending); +} + static void xprt_reset_majortimeo(struct rpc_rqst *req) { struct rpc_timeout *to = &req->rq_xprt->timeout; @@ -300,7 +314,7 @@ void xprt_disconnect(struct rpc_xprt *xprt) dprintk("RPC: disconnected transport %p\n", xprt); spin_lock_bh(&xprt->transport_lock); xprt_clear_connected(xprt); - rpc_wake_up_status(&xprt->pending, -ENOTCONN); + xprt_wake_pending_tasks(xprt, -ENOTCONN); spin_unlock_bh(&xprt->transport_lock); } @@ -803,7 +817,7 @@ static void xprt_shutdown(struct rpc_xprt *xprt) xprt->shutdown = 1; rpc_wake_up(&xprt->sending); rpc_wake_up(&xprt->resend); - rpc_wake_up(&xprt->pending); + xprt_wake_pending_tasks(xprt, -EIO); rpc_wake_up(&xprt->backlog); wake_up(&xprt->cong_wait); del_timer_sync(&xprt->timer); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 76a33b54f436..182da2edf61c 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -703,7 +703,7 @@ static void xs_tcp_state_change(struct sock *sk) xprt->tcp_reclen = 0; xprt->tcp_copied = 0; xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; - rpc_wake_up(&xprt->pending); + xprt_wake_pending_tasks(xprt, 0); } spin_unlock_bh(&xprt->transport_lock); break; @@ -920,10 +920,7 @@ static void xs_connect_worker(void *args) } } out: - if (status < 0) - rpc_wake_up_status(&xprt->pending, status); - else - rpc_wake_up(&xprt->pending); + xprt_wake_pending_tasks(xprt, status); out_clear: xprt_clear_connecting(xprt); } -- cgit v1.2.2 From 55aa4f58aa43dc9a51fb80010630d94b96053a2e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:47 -0400 Subject: [PATCH] RPC: client-side transport switch cleanup Clean-up: change some comments to reflect the realities of the new RPC transport switch mechanism. Get rid of unused xprt_receive() prototype. Also, organize function prototypes in xprt.h by usage and scope. Test-plan: Compile kernel with CONFIG_NFS enabled. 
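For context, the transport switch referred to above is essentially an ops table that the generic client calls through. A minimal sketch of that shape, using invented demo_* names rather than the kernel's actual definitions:

/* Illustrative sketch only; demo_* names are hypothetical. */
struct demo_task;
struct demo_xprt;

struct demo_xprt_ops {
	void (*connect)(struct demo_task *task);
	int  (*send_request)(struct demo_task *task);
	void (*close)(struct demo_xprt *xprt);
	void (*destroy)(struct demo_xprt *xprt);
};

struct demo_xprt {
	const struct demo_xprt_ops *ops;
	void *priv;			/* transport-private state */
};

struct demo_task {
	struct demo_xprt *xprt;
};

/* Generic code never touches sockets; it only calls through ->ops. */
static inline int demo_transmit(struct demo_task *task)
{
	return task->xprt->ops->send_request(task);
}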
Version: Thu, 11 Aug 2005 16:07:21 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 2 +- net/sunrpc/xprt.c | 26 ++++++++++++-------------- net/sunrpc/xprtsock.c | 12 +++++++----- 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 0d1b010a4a01..4677959d2834 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1,5 +1,5 @@ /* - * linux/net/sunrpc/rpcclnt.c + * linux/net/sunrpc/clnt.c * * This file contains the high-level RPC interface. * It is modeled as a finite state machine to support both synchronous diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2f9cd468b953..247fa1ec870c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -10,12 +10,12 @@ * one is available. Otherwise, it sleeps on the backlog queue * (xprt_reserve). * - Next, the caller puts together the RPC message, stuffs it into - * the request struct, and calls xprt_call(). - * - xprt_call transmits the message and installs the caller on the - * socket's wait list. At the same time, it installs a timer that + * the request struct, and calls xprt_transmit(). + * - xprt_transmit sends the message and installs the caller on the + * transport's wait list. At the same time, it installs a timer that * is run after the packet's timeout has expired. * - When a packet arrives, the data_ready handler walks the list of - * pending requests for that socket. If a matching XID is found, the + * pending requests for that transport. If a matching XID is found, the * caller is woken up, and the timer removed. * - When no reply arrives within the timeout interval, the timer is * fired by the kernel and runs xprt_timer(). It either adjusts the @@ -32,6 +32,8 @@ * tasks that rely on callbacks. * * Copyright (C) 1995-1997, Olaf Kirch + * + * Transport switch API copyright (C) 2005, Chuck Lever */ #include @@ -52,8 +54,6 @@ # define RPCDBG_FACILITY RPCDBG_XPRT #endif -#define XPRT_MAX_BACKOFF (8) - /* * Local functions */ @@ -65,9 +65,9 @@ static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); static int xprt_clear_backlog(struct rpc_xprt *xprt); /* - * Serialize write access to sockets, in order to prevent different + * Serialize write access to transports, in order to prevent different * requests from interfering with each other. - * Also prevents TCP socket connects from colliding with writes. + * Also prevents transport connects from colliding with writes. */ static int __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) @@ -91,7 +91,7 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); out_sleep: - dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt); + dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); task->tk_timeout = 0; task->tk_status = -EAGAIN; if (req && req->rq_ntrans) @@ -144,7 +144,7 @@ out_unlock: } /* - * Releases the socket for use by other requests. + * Releases the transport for use by other requests. 
*/ static void __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) @@ -294,8 +294,7 @@ int xprt_adjust_timeout(struct rpc_rqst *req) return status; } -static void -xprt_socket_autoclose(void *args) +static void xprt_autoclose(void *args) { struct rpc_xprt *xprt = (struct rpc_xprt *)args; @@ -329,7 +328,6 @@ xprt_init_autodisconnect(unsigned long data) if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) goto out_abort; spin_unlock(&xprt->transport_lock); - /* Let keventd close the socket */ if (xprt_connecting(xprt)) xprt_release_write(xprt, NULL); else @@ -770,7 +768,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); - INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt); + INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); init_timer(&xprt->timer); xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 182da2edf61c..7f0b9f7f167b 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -11,6 +11,8 @@ * Rewrite of larges part of the code in order to stabilize TCP stuff. * Fix behaviour when socket buffer is full. * (C) 1999 Trond Myklebust + * + * IP socket transport implementation, (C) 2005 Chuck Lever */ #include @@ -363,7 +365,7 @@ static void xs_destroy(struct rpc_xprt *xprt) { dprintk("RPC: xs_destroy xprt %p\n", xprt); - cancel_delayed_work(&xprt->sock_connect); + cancel_delayed_work(&xprt->connect_worker); flush_scheduled_work(); xprt_disconnect(xprt); @@ -938,11 +940,11 @@ static void xs_connect(struct rpc_task *task) if (!xprt_test_and_set_connecting(xprt)) { if (xprt->sock != NULL) { dprintk("RPC: xs_connect delayed xprt %p\n", xprt); - schedule_delayed_work(&xprt->sock_connect, + schedule_delayed_work(&xprt->connect_worker, RPC_REESTABLISH_TIMEOUT); } else { dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - schedule_work(&xprt->sock_connect); + schedule_work(&xprt->connect_worker); /* flush_scheduled_work can sleep... */ if (!RPC_IS_ASYNC(task)) flush_scheduled_work(); @@ -989,7 +991,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) /* XXX: header size can vary due to auth type, IPv6, etc. */ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt); + INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt); xprt->ops = &xs_ops; @@ -1028,7 +1030,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; xprt->max_payload = (1U << 31) - 1; - INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt); + INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt); xprt->ops = &xs_ops; -- cgit v1.2.2 From c7b2cae8a634015b72941ba2fc6c4bc9b8d3a129 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:50 -0400 Subject: [PATCH] RPC: separate TCP and UDP write space callbacks Split the socket write space callback function into a TCP version and UDP version, eliminating one dependence on the "xprt->stream" variable. Keep the common pieces of this path in xprt.c so other transports can use it too. Test-plan: Write-intensive workload on a single mount point. 
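The refactoring pattern used here, shown as a stand-alone sketch with invented demo_* names (not the patch itself): one flag-driven callback becomes two protocol-specific callbacks that funnel into a shared helper, so the generic wake-up logic can live in common code while each transport keeps its own writability test.

/* Illustrative sketch only; demo_* names are hypothetical. */
struct demo_xprt { int has_writer; };

/* Common piece: wake the waiting writer, whatever the protocol. */
void demo_write_space(struct demo_xprt *xprt)
{
	if (xprt->has_writer)
		xprt->has_writer = 0;
}

int demo_udp_writeable(int free_bytes)      { return free_bytes > 0; }
int demo_tcp_writeable(int wspace, int min) { return wspace >= min; }

void demo_udp_write_space(struct demo_xprt *xprt, int free_bytes)
{
	if (demo_udp_writeable(free_bytes))
		demo_write_space(xprt);
}

void demo_tcp_write_space(struct demo_xprt *xprt, int wspace, int min)
{
	if (demo_tcp_writeable(wspace, min))
		demo_write_space(xprt);
}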
Version: Thu, 11 Aug 2005 16:07:51 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 34 +++++++++++++++++++++ net/sunrpc/xprtsock.c | 84 ++++++++++++++++++++++++++++++++------------------- 2 files changed, 87 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 247fa1ec870c..31ef7dc7eed6 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -241,6 +241,40 @@ void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) rpc_wake_up(&xprt->pending); } +/** + * xprt_wait_for_buffer_space - wait for transport output buffer to clear + * @task: task to be put to sleep + * + */ +void xprt_wait_for_buffer_space(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + + task->tk_timeout = req->rq_timeout; + rpc_sleep_on(&xprt->pending, task, NULL, NULL); +} + +/** + * xprt_write_space - wake the task waiting for transport output buffer space + * @xprt: transport with waiting tasks + * + * Can be called in a soft IRQ context, so xprt_write_space never sleeps. + */ +void xprt_write_space(struct rpc_xprt *xprt) +{ + if (unlikely(xprt->shutdown)) + return; + + spin_lock_bh(&xprt->transport_lock); + if (xprt->snd_task) { + dprintk("RPC: write space: waking waiting task on xprt %p\n", + xprt); + rpc_wake_up_task(xprt->snd_task); + } + spin_unlock_bh(&xprt->transport_lock); +} + static void xprt_reset_majortimeo(struct rpc_rqst *req) { struct rpc_timeout *to = &req->rq_xprt->timeout; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7f0b9f7f167b..70a772d7a796 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -308,15 +308,13 @@ static int xs_send_request(struct rpc_task *task) if (status == -EAGAIN) { if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { - /* Protect against races with xs_write_space */ + /* Protect against races with write_space */ spin_lock_bh(&xprt->transport_lock); /* Don't race with disconnect */ if (!xprt_connected(xprt)) task->tk_status = -ENOTCONN; - else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) { - task->tk_timeout = req->rq_timeout; - rpc_sleep_on(&xprt->pending, task, NULL, NULL); - } + else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) + xprt_wait_for_buffer_space(task); spin_unlock_bh(&xprt->transport_lock); return status; } @@ -721,45 +719,68 @@ static void xs_tcp_state_change(struct sock *sk) } /** - * xs_write_space - callback invoked when socket buffer space becomes - * available + * xs_udp_write_space - callback invoked when socket buffer space + * becomes available * @sk: socket whose state has changed * * Called when more output buffer space is available for this socket. * We try not to wake our writers until they can make "significant" - * progress, otherwise we'll waste resources thrashing sock_sendmsg + * progress, otherwise we'll waste resources thrashing kernel_sendmsg * with a bunch of small requests. 
*/ -static void xs_write_space(struct sock *sk) +static void xs_udp_write_space(struct sock *sk) { - struct rpc_xprt *xprt; - struct socket *sock; - read_lock(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) - goto out; - if (xprt->shutdown) - goto out; - /* Wait until we have enough socket memory */ - if (xprt->stream) { - /* from net/core/stream.c:sk_stream_write_space */ - if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)) + /* from net/core/sock.c:sock_def_write_space */ + if (sock_writeable(sk)) { + struct socket *sock; + struct rpc_xprt *xprt; + + if (unlikely(!(sock = sk->sk_socket))) goto out; - } else { - /* from net/core/sock.c:sock_def_write_space */ - if (!sock_writeable(sk)) + if (unlikely(!(xprt = xprt_from_sock(sk)))) + goto out; + if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) goto out; + + xprt_write_space(xprt); } - if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) - goto out; + out: + read_unlock(&sk->sk_callback_lock); +} - spin_lock_bh(&xprt->transport_lock); - if (xprt->snd_task) - rpc_wake_up_task(xprt->snd_task); - spin_unlock_bh(&xprt->transport_lock); -out: +/** + * xs_tcp_write_space - callback invoked when socket buffer space + * becomes available + * @sk: socket whose state has changed + * + * Called when more output buffer space is available for this socket. + * We try not to wake our writers until they can make "significant" + * progress, otherwise we'll waste resources thrashing kernel_sendmsg + * with a bunch of small requests. + */ +static void xs_tcp_write_space(struct sock *sk) +{ + read_lock(&sk->sk_callback_lock); + + /* from net/core/stream.c:sk_stream_write_space */ + if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { + struct socket *sock; + struct rpc_xprt *xprt; + + if (unlikely(!(sock = sk->sk_socket))) + goto out; + if (unlikely(!(xprt = xprt_from_sock(sk)))) + goto out; + if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) + goto out; + + xprt_write_space(xprt); + } + + out: read_unlock(&sk->sk_callback_lock); } @@ -855,15 +876,16 @@ static void xs_bind(struct rpc_xprt *xprt, struct socket *sock) xprt->old_write_space = sk->sk_write_space; if (xprt->prot == IPPROTO_UDP) { sk->sk_data_ready = xs_udp_data_ready; + sk->sk_write_space = xs_udp_write_space; sk->sk_no_check = UDP_CSUM_NORCV; xprt_set_connected(xprt); } else { tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ sk->sk_data_ready = xs_tcp_data_ready; sk->sk_state_change = xs_tcp_state_change; + sk->sk_write_space = xs_tcp_write_space; xprt_clear_connected(xprt); } - sk->sk_write_space = xs_write_space; /* Reset to new socket */ xprt->sock = sock; -- cgit v1.2.2 From b0d93ad511ce2f37823a07c7a3258117a431f5fb Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:53 -0400 Subject: [PATCH] RPC: separate TCP and UDP transport connection logic Create separate connection worker functions for managing UDP and TCP transport sockets. This eliminates several dependencies on "xprt->stream". Test-plan: Destructive testing (unplugging the network temporarily). Connectathon with v2, v3, and v4. 
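A small user-space sketch (not kernel code; the names, address, and port are placeholders) of why the two paths diverge: a datagram socket is usable as soon as it exists, while a non-blocking stream connect() starts a handshake and typically returns EINPROGRESS, which is why the TCP worker needs extra completion handling.

/* Illustrative sketch only; demo_* names are hypothetical. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int demo_connect(int type, const char *ip, unsigned short port)
{
	struct sockaddr_in addr = { .sin_family = AF_INET, .sin_port = htons(port) };
	int fd = socket(AF_INET, type, 0);

	if (fd < 0)
		return -1;
	inet_pton(AF_INET, ip, &addr.sin_addr);
	fcntl(fd, F_SETFL, O_NONBLOCK);	/* sketch: real code would preserve flags */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		if (type == SOCK_STREAM && errno == EINPROGRESS)
			printf("TCP: handshake in progress, wait for writability\n");
		else
			printf("connect failed: %d\n", errno);
	} else {
		printf("%s: ready immediately\n",
		       type == SOCK_DGRAM ? "UDP" : "TCP");
	}
	return fd;
}

int main(void)
{
	int u = demo_connect(SOCK_DGRAM, "127.0.0.1", 2049);
	int t = demo_connect(SOCK_STREAM, "127.0.0.1", 2049);

	if (u >= 0)
		close(u);
	if (t >= 0)
		close(t);
	return 0;
}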
Version: Thu, 11 Aug 2005 16:08:18 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 164 ++++++++++++++++++++++++++++---------------------- 1 file changed, 91 insertions(+), 73 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 70a772d7a796..f91529787b9b 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -836,102 +836,118 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) return err; } -static struct socket *xs_create(struct rpc_xprt *xprt, int proto, int resvport) +/** + * xs_udp_connect_worker - set up a UDP socket + * @args: RPC transport to connect + * + * Invoked by a work queue tasklet. + */ +static void xs_udp_connect_worker(void *args) { - struct socket *sock; - int type, err; - - dprintk("RPC: xs_create(%s %d)\n", - (proto == IPPROTO_UDP)? "udp" : "tcp", proto); + struct rpc_xprt *xprt = (struct rpc_xprt *) args; + struct socket *sock = xprt->sock; + int err, status = -EIO; - type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; + if (xprt->shutdown || xprt->addr.sin_port == 0) + goto out; - if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { - dprintk("RPC: can't create socket (%d).\n", -err); - return NULL; - } + dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt); - /* If the caller has the capability, bind to a reserved port */ - if (resvport && xs_bindresvport(xprt, sock) < 0) - goto failed; + /* Start by resetting any existing state */ + xs_close(xprt); - return sock; + if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { + dprintk("RPC: can't create UDP transport socket (%d).\n", -err); + goto out; + } -failed: - sock_release(sock); - return NULL; -} + if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { + sock_release(sock); + goto out; + } -static void xs_bind(struct rpc_xprt *xprt, struct socket *sock) -{ - struct sock *sk = sock->sk; + if (!xprt->inet) { + struct sock *sk = sock->sk; - if (xprt->inet) - return; + write_lock_bh(&sk->sk_callback_lock); - write_lock_bh(&sk->sk_callback_lock); - sk->sk_user_data = xprt; - xprt->old_data_ready = sk->sk_data_ready; - xprt->old_state_change = sk->sk_state_change; - xprt->old_write_space = sk->sk_write_space; - if (xprt->prot == IPPROTO_UDP) { + sk->sk_user_data = xprt; + xprt->old_data_ready = sk->sk_data_ready; + xprt->old_state_change = sk->sk_state_change; + xprt->old_write_space = sk->sk_write_space; sk->sk_data_ready = xs_udp_data_ready; sk->sk_write_space = xs_udp_write_space; sk->sk_no_check = UDP_CSUM_NORCV; + xprt_set_connected(xprt); - } else { - tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ - sk->sk_data_ready = xs_tcp_data_ready; - sk->sk_state_change = xs_tcp_state_change; - sk->sk_write_space = xs_tcp_write_space; - xprt_clear_connected(xprt); - } - /* Reset to new socket */ - xprt->sock = sock; - xprt->inet = sk; - write_unlock_bh(&sk->sk_callback_lock); + /* Reset to new socket */ + xprt->sock = sock; + xprt->inet = sk; - return; + write_unlock_bh(&sk->sk_callback_lock); + } + xs_set_buffer_size(xprt); + status = 0; +out: + xprt_wake_pending_tasks(xprt, status); + xprt_clear_connecting(xprt); } /** - * xs_connect_worker - try to connect a socket to a remote endpoint + * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint * @args: RPC transport to connect * * Invoked by a work queue tasklet. 
*/ -static void xs_connect_worker(void *args) +static void xs_tcp_connect_worker(void *args) { struct rpc_xprt *xprt = (struct rpc_xprt *)args; struct socket *sock = xprt->sock; - int status = -EIO; + int err, status = -EIO; if (xprt->shutdown || xprt->addr.sin_port == 0) goto out; - dprintk("RPC: xs_connect_worker xprt %p\n", xprt); + dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt); - /* - * Start by resetting any existing state - */ + /* Start by resetting any existing socket state */ xs_close(xprt); - sock = xs_create(xprt, xprt->prot, xprt->resvport); - if (sock == NULL) { - /* couldn't create socket or bind to reserved port; - * this is likely a permanent error, so cause an abort */ + + if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { + dprintk("RPC: can't create TCP transport socket (%d).\n", -err); goto out; } - xs_bind(xprt, sock); - xs_set_buffer_size(xprt); - status = 0; - if (!xprt->stream) + if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { + sock_release(sock); goto out; + } - /* - * Tell the socket layer to start connecting... - */ + if (!xprt->inet) { + struct sock *sk = sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + + sk->sk_user_data = xprt; + xprt->old_data_ready = sk->sk_data_ready; + xprt->old_state_change = sk->sk_state_change; + xprt->old_write_space = sk->sk_write_space; + sk->sk_data_ready = xs_tcp_data_ready; + sk->sk_state_change = xs_tcp_state_change; + sk->sk_write_space = xs_tcp_write_space; + tcp_sk(sk)->nonagle = 1; + + xprt_clear_connected(xprt); + + /* Reset to new socket */ + xprt->sock = sock; + xprt->inet = sk; + + write_unlock_bh(&sk->sk_callback_lock); + } + + /* Tell the socket layer to start connecting... */ status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, sizeof(xprt->addr), O_NONBLOCK); dprintk("RPC: %p connect status %d connected %d sock state %d\n", @@ -959,18 +975,20 @@ static void xs_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; - if (!xprt_test_and_set_connecting(xprt)) { - if (xprt->sock != NULL) { - dprintk("RPC: xs_connect delayed xprt %p\n", xprt); - schedule_delayed_work(&xprt->connect_worker, + if (xprt_test_and_set_connecting(xprt)) + return; + + if (xprt->sock != NULL) { + dprintk("RPC: xs_connect delayed xprt %p\n", xprt); + schedule_delayed_work(&xprt->connect_worker, RPC_REESTABLISH_TIMEOUT); - } else { - dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - schedule_work(&xprt->connect_worker); - /* flush_scheduled_work can sleep... */ - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); - } + } else { + dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); + schedule_work(&xprt->connect_worker); + + /* flush_scheduled_work can sleep... */ + if (!RPC_IS_ASYNC(task)) + flush_scheduled_work(); } } @@ -1013,7 +1031,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) /* XXX: header size can vary due to auth type, IPv6, etc. */ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt); + INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); xprt->ops = &xs_ops; @@ -1052,7 +1070,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 
1 : 0; xprt->max_payload = (1U << 31) - 1; - INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt); + INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); xprt->ops = &xs_ops; -- cgit v1.2.2 From 262965f53defd312a294b45366ea17907b6a616b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Aug 2005 16:25:56 -0400 Subject: [PATCH] RPC: separate TCP and UDP socket write paths Split the RPC client's main socket write path into a TCP version and a UDP version to eliminate another dependency on the "xprt->stream" variable. Compiler optimization removes unneeded code from xs_sendpages, as this function is now called with some constant arguments. We can now cleanly perform transport protocol-specific return code testing and error recovery in each path. Test-plan: Millions of fsx operations. Performance characterization such as "sio" or "iozone". Examine oprofile results for any changes before and after this patch is applied. Version: Thu, 11 Aug 2005 16:08:46 -0400 Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 215 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 128 insertions(+), 87 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index f91529787b9b..57988300640a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -40,6 +40,12 @@ */ #define XS_MAX_RESVPORT (800U) +/* + * How many times to try sending a request on a socket before waiting + * for the socket buffer to clear. + */ +#define XS_SENDMSG_RETRY (10U) + #ifdef RPC_DEBUG # undef RPC_DEBUG_DATA # define RPCDBG_FACILITY RPCDBG_TRANS @@ -114,13 +120,18 @@ static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int b * @base: starting position in the buffer * */ -static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) +static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) { struct page **ppage = xdr->pages; unsigned int len, pglen = xdr->page_len; int err, ret = 0; ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); + if (unlikely(!sock)) + return -ENOTCONN; + + clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); + len = xdr->head[0].iov_len; if (base < len || (addr != NULL && base == 0)) { err = xs_send_head(sock, addr, addrlen, xdr, base, len); @@ -187,140 +198,162 @@ out: } /** - * xs_sendmsg - write an RPC request to a socket - * @xprt: generic transport - * @req: the RPC request to write + * xs_nospace - place task on wait queue if transmit was incomplete + * @task: task to put to sleep * */ -static int xs_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) +static void xs_nospace(struct rpc_task *task) { - struct socket *sock = xprt->sock; - struct xdr_buf *xdr = &req->rq_snd_buf; - struct sockaddr *addr = NULL; - int addrlen = 0; - unsigned int skip; - int result; + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; - if (!sock) - return -ENOTCONN; + dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", + task->tk_pid, req->rq_slen - req->rq_bytes_sent, + req->rq_slen); + + if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { + /* Protect against races with write_space */ + spin_lock_bh(&xprt->transport_lock); + + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) + xprt_wait_for_buffer_space(task); + + 
spin_unlock_bh(&xprt->transport_lock); + } else + /* Keep holding the socket if it is blocked */ + rpc_delay(task, HZ>>4); +} + +/** + * xs_udp_send_request - write an RPC request to a UDP socket + * @task: address of RPC task that manages the state of an RPC request + * + * Return values: + * 0: The request has been sent + * EAGAIN: The socket was blocked, please call again later to + * complete the request + * ENOTCONN: Caller needs to invoke connect logic then call again + * other: Some other error occured, the request was not sent + */ +static int xs_udp_send_request(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + struct xdr_buf *xdr = &req->rq_snd_buf; + int status; xs_pktdump("packet data:", req->rq_svec->iov_base, req->rq_svec->iov_len); - /* For UDP, we need to provide an address */ - if (!xprt->stream) { - addr = (struct sockaddr *) &xprt->addr; - addrlen = sizeof(xprt->addr); - } - /* Don't repeat bytes */ - skip = req->rq_bytes_sent; + req->rq_xtime = jiffies; + status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, + sizeof(xprt->addr), xdr, req->rq_bytes_sent); - clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); - result = xs_sendpages(sock, addr, addrlen, xdr, skip); + dprintk("RPC: xs_udp_send_request(%u) = %d\n", + xdr->len - req->rq_bytes_sent, status); - dprintk("RPC: xs_sendmsg(%d) = %d\n", xdr->len - skip, result); + if (likely(status >= (int) req->rq_slen)) + return 0; - if (result >= 0) - return result; + /* Still some bytes left; set up for a retry later. */ + if (status > 0) + status = -EAGAIN; - switch (result) { + switch (status) { + case -ENETUNREACH: + case -EPIPE: case -ECONNREFUSED: /* When the server has died, an ICMP port unreachable message * prompts ECONNREFUSED. */ - case -EAGAIN: break; - case -ECONNRESET: - case -ENOTCONN: - case -EPIPE: - /* connection broken */ - if (xprt->stream) - result = -ENOTCONN; + case -EAGAIN: + xs_nospace(task); break; default: + dprintk("RPC: sendmsg returned unrecognized error %d\n", + -status); break; } - return result; + + return status; } /** - * xs_send_request - write an RPC request to a socket + * xs_tcp_send_request - write an RPC request to a TCP socket * @task: address of RPC task that manages the state of an RPC request * * Return values: - * 0: The request has been sent - * EAGAIN: The socket was blocked, please call again later to - * complete the request - * other: Some other error occured, the request was not sent + * 0: The request has been sent + * EAGAIN: The socket was blocked, please call again later to + * complete the request + * ENOTCONN: Caller needs to invoke connect logic then call again + * other: Some other error occured, the request was not sent * * XXX: In the case of soft timeouts, should we eventually give up - * if the socket is not able to make progress? + * if sendmsg is not able to make progress? */ -static int xs_send_request(struct rpc_task *task) +static int xs_tcp_send_request(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; + struct xdr_buf *xdr = &req->rq_snd_buf; + u32 *marker = req->rq_svec[0].iov_base; int status, retry = 0; - /* set up everything as needed. 
*/ /* Write the record marker */ - if (xprt->stream) { - u32 *marker = req->rq_svec[0].iov_base; + *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); - *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); - } + xs_pktdump("packet data:", + req->rq_svec->iov_base, + req->rq_svec->iov_len); /* Continue transmitting the packet/record. We must be careful * to cope with writespace callbacks arriving _after_ we have - * called sendmsg(). - */ + * called sendmsg(). */ while (1) { req->rq_xtime = jiffies; - status = xs_sendmsg(xprt, req); + status = xs_sendpages(xprt->sock, NULL, 0, xdr, + req->rq_bytes_sent); - if (status < 0) - break; + dprintk("RPC: xs_tcp_send_request(%u) = %d\n", + xdr->len - req->rq_bytes_sent, status); - if (xprt->stream) { - req->rq_bytes_sent += status; - - /* If we've sent the entire packet, immediately - * reset the count of bytes sent. */ - if (req->rq_bytes_sent >= req->rq_slen) { - req->rq_bytes_sent = 0; - return 0; - } - } else { - if (status >= req->rq_slen) - return 0; - status = -EAGAIN; + if (unlikely(status < 0)) break; - } - dprintk("RPC: %4d xmit incomplete (%d left of %d)\n", - task->tk_pid, req->rq_slen - req->rq_bytes_sent, - req->rq_slen); + /* If we've sent the entire packet, immediately + * reset the count of bytes sent. */ + req->rq_bytes_sent += status; + if (likely(req->rq_bytes_sent >= req->rq_slen)) { + req->rq_bytes_sent = 0; + return 0; + } status = -EAGAIN; - if (retry++ > 50) + if (retry++ > XS_SENDMSG_RETRY) break; } - if (status == -EAGAIN) { - if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { - /* Protect against races with write_space */ - spin_lock_bh(&xprt->transport_lock); - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) - xprt_wait_for_buffer_space(task); - spin_unlock_bh(&xprt->transport_lock); - return status; - } - /* Keep holding the socket if it is blocked */ - rpc_delay(task, HZ>>4); + switch (status) { + case -EAGAIN: + xs_nospace(task); + break; + case -ECONNREFUSED: + case -ECONNRESET: + case -ENOTCONN: + case -EPIPE: + status = -ENOTCONN; + break; + default: + dprintk("RPC: sendmsg returned unrecognized error %d\n", + -status); + break; } + return status; } @@ -992,10 +1025,18 @@ static void xs_connect(struct rpc_task *task) } } -static struct rpc_xprt_ops xs_ops = { +static struct rpc_xprt_ops xs_udp_ops = { + .set_buffer_size = xs_set_buffer_size, + .connect = xs_connect, + .send_request = xs_udp_send_request, + .close = xs_close, + .destroy = xs_destroy, +}; + +static struct rpc_xprt_ops xs_tcp_ops = { .set_buffer_size = xs_set_buffer_size, .connect = xs_connect, - .send_request = xs_send_request, + .send_request = xs_tcp_send_request, .close = xs_close, .destroy = xs_destroy, }; @@ -1033,7 +1074,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); - xprt->ops = &xs_ops; + xprt->ops = &xs_udp_ops; if (to) xprt->timeout = *to; @@ -1072,7 +1113,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); - xprt->ops = &xs_ops; + xprt->ops = &xs_tcp_ops; if (to) xprt->timeout = *to; -- cgit v1.2.2 From 808012fbb23a52ec59352445d2076d175ad4ab26 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:49 -0700 Subject: [PATCH] RPC: skip over transport-specific heads automatically Add a generic mechanism for skipping over transport-specific 
headers when constructing an RPC request. This removes another "xprt->stream" dependency. Test-plan: Write-intensive workload on a single mount point (try both UDP and TCP). Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 6 ++---- net/sunrpc/clnt.c | 5 ++--- net/sunrpc/xprtsock.c | 24 +++++++++++++++++------- 3 files changed, 21 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 53a030acdf75..d2b08f16c257 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -844,10 +844,8 @@ gss_marshal(struct rpc_task *task, u32 *p) /* We compute the checksum for the verifier over the xdr-encoded bytes * starting with the xid and ending at the end of the credential: */ - iov.iov_base = req->rq_snd_buf.head[0].iov_base; - if (task->tk_client->cl_xprt->stream) - /* See clnt.c:call_header() */ - iov.iov_base += 4; + iov.iov_base = xprt_skip_transport_header(task->tk_xprt, + req->rq_snd_buf.head[0].iov_base); iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; xdr_buf_from_iov(&iov, &verf_buf); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 4677959d2834..cc1b773a79d3 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1075,13 +1075,12 @@ static u32 * call_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; - struct rpc_xprt *xprt = clnt->cl_xprt; struct rpc_rqst *req = task->tk_rqstp; u32 *p = req->rq_svec[0].iov_base; /* FIXME: check buffer size? */ - if (xprt->stream) - *p++ = 0; /* fill in later */ + + p = xprt_skip_transport_header(task->tk_xprt, p); *p++ = req->rq_xid; /* XID */ *p++ = htonl(RPC_CALL); /* CALL */ *p++ = htonl(RPC_VERSION); /* RPC version */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 57988300640a..aaf053b1a0c4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -282,6 +282,13 @@ static int xs_udp_send_request(struct rpc_task *task) return status; } +static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf) +{ + u32 reclen = buf->len - sizeof(rpc_fraghdr); + rpc_fraghdr *base = buf->head[0].iov_base; + *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen); +} + /** * xs_tcp_send_request - write an RPC request to a TCP socket * @task: address of RPC task that manages the state of an RPC request @@ -301,11 +308,9 @@ static int xs_tcp_send_request(struct rpc_task *task) struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; struct xdr_buf *xdr = &req->rq_snd_buf; - u32 *marker = req->rq_svec[0].iov_base; int status, retry = 0; - /* Write the record marker */ - *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); + xs_encode_tcp_record_marker(&req->rq_snd_buf); xs_pktdump("packet data:", req->rq_svec->iov_base, @@ -503,16 +508,19 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc xprt->tcp_offset += used; if (used != len) return; + xprt->tcp_reclen = ntohl(xprt->tcp_recm); - if (xprt->tcp_reclen & 0x80000000) + if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) xprt->tcp_flags |= XPRT_LAST_FRAG; else xprt->tcp_flags &= ~XPRT_LAST_FRAG; - xprt->tcp_reclen &= 0x7fffffff; + xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; + xprt->tcp_flags &= ~XPRT_COPY_RECM; xprt->tcp_offset = 0; + /* Sanity check of the record length */ - if (xprt->tcp_reclen < 4) { + if (unlikely(xprt->tcp_reclen < 4)) { dprintk("RPC: invalid TCP record fragment length\n"); xprt_disconnect(xprt); return; @@ -1065,6 +1073,7 @@ int 
xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_UDP; xprt->port = XS_MAX_RESVPORT; + xprt->tsh_size = 0; xprt->stream = 0; xprt->nocong = 0; xprt->cwnd = RPC_INITCWND; @@ -1105,11 +1114,12 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_TCP; xprt->port = XS_MAX_RESVPORT; + xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); xprt->stream = 1; xprt->nocong = 1; xprt->cwnd = RPC_MAXCWND(xprt); xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; - xprt->max_payload = (1U << 31) - 1; + xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); -- cgit v1.2.2 From 43118c29dea2b23798bd42a147015cceee7fa885 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:49 -0700 Subject: [PATCH] RPC: get rid of xprt->stream Now we can fix up the last few places that use the "xprt->stream" variable, and get rid of it from the rpc_xprt structure. Test-plan: Destructive testing (unplugging the network temporarily). Connectathon with UDP and TCP. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 3 +-- net/sunrpc/xprtsock.c | 28 ++++++++++++++++++---------- 2 files changed, 19 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 31ef7dc7eed6..43fef7626442 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -630,8 +630,7 @@ void xprt_transmit(struct rpc_task *task) case -ENOTCONN: return; default: - if (xprt->stream) - xprt_disconnect(xprt); + break; } xprt_release_write(xprt, task); return; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index aaf053b1a0c4..5bb6fed3df34 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -356,6 +356,7 @@ static int xs_tcp_send_request(struct rpc_task *task) default: dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); + xprt_disconnect(xprt); break; } @@ -826,19 +827,17 @@ static void xs_tcp_write_space(struct sock *sk) } /** - * xs_set_buffer_size - set send and receive limits + * xs_udp_set_buffer_size - set send and receive limits * @xprt: generic transport * * Set socket send and receive limits based on the * sndsize and rcvsize fields in the generic transport - * structure. This applies only to UDP sockets. + * structure. */ -static void xs_set_buffer_size(struct rpc_xprt *xprt) +static void xs_udp_set_buffer_size(struct rpc_xprt *xprt) { struct sock *sk = xprt->inet; - if (xprt->stream) - return; if (xprt->rcvsize) { sk->sk_userlocks |= SOCK_RCVBUF_LOCK; sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; @@ -850,6 +849,17 @@ static void xs_set_buffer_size(struct rpc_xprt *xprt) } } +/** + * xs_tcp_set_buffer_size - set send and receive limits + * @xprt: generic transport + * + * Nothing to do for TCP. 
+ */ +static void xs_tcp_set_buffer_size(struct rpc_xprt *xprt) +{ + return; +} + static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) { struct sockaddr_in myaddr = { @@ -928,7 +938,7 @@ static void xs_udp_connect_worker(void *args) write_unlock_bh(&sk->sk_callback_lock); } - xs_set_buffer_size(xprt); + xs_udp_set_buffer_size(xprt); status = 0; out: xprt_wake_pending_tasks(xprt, status); @@ -1034,7 +1044,7 @@ static void xs_connect(struct rpc_task *task) } static struct rpc_xprt_ops xs_udp_ops = { - .set_buffer_size = xs_set_buffer_size, + .set_buffer_size = xs_udp_set_buffer_size, .connect = xs_connect, .send_request = xs_udp_send_request, .close = xs_close, @@ -1042,7 +1052,7 @@ static struct rpc_xprt_ops xs_udp_ops = { }; static struct rpc_xprt_ops xs_tcp_ops = { - .set_buffer_size = xs_set_buffer_size, + .set_buffer_size = xs_tcp_set_buffer_size, .connect = xs_connect, .send_request = xs_tcp_send_request, .close = xs_close, @@ -1074,7 +1084,6 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_UDP; xprt->port = XS_MAX_RESVPORT; xprt->tsh_size = 0; - xprt->stream = 0; xprt->nocong = 0; xprt->cwnd = RPC_INITCWND; xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; @@ -1115,7 +1124,6 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_TCP; xprt->port = XS_MAX_RESVPORT; xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); - xprt->stream = 1; xprt->nocong = 1; xprt->cwnd = RPC_MAXCWND(xprt); xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; -- cgit v1.2.2 From fe3aca290f17ae4978bd73d02aa4029f1c9c024c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:50 -0700 Subject: [PATCH] RPC: add API to set transport-specific timeouts Prepare the way to remove the "xprt->nocong" variable by adding a callout to the RPC client transport switch API to handle setting RPC retransmit timeouts. Add a pair of generic helper functions that provide the ability to set a simple fixed timeout, or to set a timeout based on the state of a round- trip estimator. Test-plan: Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 67 ++++++++++++++++++++++++++++++++++----------------- net/sunrpc/xprtsock.c | 2 ++ 2 files changed, 47 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 43fef7626442..1ac2fbe05102 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -275,6 +275,38 @@ void xprt_write_space(struct rpc_xprt *xprt) spin_unlock_bh(&xprt->transport_lock); } +/** + * xprt_set_retrans_timeout_def - set a request's retransmit timeout + * @task: task whose timeout is to be set + * + * Set a request's retransmit timeout based on the transport's + * default timeout parameters. Used by transports that don't adjust + * the retransmit timeout based on round-trip time estimation. + */ +void xprt_set_retrans_timeout_def(struct rpc_task *task) +{ + task->tk_timeout = task->tk_rqstp->rq_timeout; +} + +/* + * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout + * @task: task whose timeout is to be set + * + * Set a request's retransmit timeout using the RTT estimator. 
+ */ +void xprt_set_retrans_timeout_rtt(struct rpc_task *task) +{ + int timer = task->tk_msg.rpc_proc->p_timer; + struct rpc_rtt *rtt = task->tk_client->cl_rtt; + struct rpc_rqst *req = task->tk_rqstp; + unsigned long max_timeout = req->rq_xprt->timeout.to_maxval; + + task->tk_timeout = rpc_calc_rto(rtt, timer); + task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; + if (task->tk_timeout > max_timeout || task->tk_timeout == 0) + task->tk_timeout = max_timeout; +} + static void xprt_reset_majortimeo(struct rpc_rqst *req) { struct rpc_timeout *to = &req->rq_xprt->timeout; @@ -588,7 +620,6 @@ out_unlock: */ void xprt_transmit(struct rpc_task *task) { - struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; int status; @@ -613,8 +644,19 @@ void xprt_transmit(struct rpc_task *task) return; status = xprt->ops->send_request(task); - if (!status) - goto out_receive; + if (status == 0) { + dprintk("RPC: %4d xmit complete\n", task->tk_pid); + spin_lock_bh(&xprt->transport_lock); + xprt->ops->set_retrans_timeout(task); + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (!req->rq_received) + rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); + __xprt_release_write(xprt, task); + spin_unlock_bh(&xprt->transport_lock); + return; + } /* Note: at this point, task->tk_sleeping has not yet been set, * hence there is no danger of the waking up task being put on @@ -634,25 +676,6 @@ void xprt_transmit(struct rpc_task *task) } xprt_release_write(xprt, task); return; - out_receive: - dprintk("RPC: %4d xmit complete\n", task->tk_pid); - /* Set the task's receive timeout value */ - spin_lock_bh(&xprt->transport_lock); - if (!xprt->nocong) { - int timer = task->tk_msg.rpc_proc->p_timer; - task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer); - task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries; - if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0) - task->tk_timeout = xprt->timeout.to_maxval; - } else - task->tk_timeout = req->rq_timeout; - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (!req->rq_received) - rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); - __xprt_release_write(xprt, task); - spin_unlock_bh(&xprt->transport_lock); } static inline void do_xprt_reserve(struct rpc_task *task) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 5bb6fed3df34..79433ffd1df0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1047,6 +1047,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .connect = xs_connect, .send_request = xs_udp_send_request, + .set_retrans_timeout = xprt_set_retrans_timeout_rtt, .close = xs_close, .destroy = xs_destroy, }; @@ -1055,6 +1056,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { .set_buffer_size = xs_tcp_set_buffer_size, .connect = xs_connect, .send_request = xs_tcp_send_request, + .set_retrans_timeout = xprt_set_retrans_timeout_def, .close = xs_close, .destroy = xs_destroy, }; -- cgit v1.2.2 From 12a804698b29d040b7cdd92e8a44b0e75164dae9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:51 -0700 Subject: [PATCH] RPC: expose API for serializing access to RPC transports The next several patches introduce an API that allows transports to choose whether the RPC client provides congestion control or whether the transport itself provides it. 
The first method we abstract is the one that serializes access to the RPC transport to prevent the bytes from different requests from mingling together. This method provides proper request serialization and the opportunity to prevent new requests from being started because the transport is congested. The normal situation is for the transport to handle congestion control itself. Although NFS over UDP was first, it has been recognized after years of experience that having the transport provide congestion control is much better than doing it in the RPC client. Thus TCP, and probably every future transport implementation, will use the default method, xprt_lock_write, provided in xprt.c, which does not provide any kind of congestion control. UDP can continue using the xprt.c-provided Van Jacobson congestion avoidance implementation. Test-plan: Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 64 +++++++++++++++++++++++++++++++++++++++++---------- net/sunrpc/xprtsock.c | 2 ++ 2 files changed, 54 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 1ac2fbe05102..2d1e8b83dd68 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -64,14 +64,56 @@ static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); static int xprt_clear_backlog(struct rpc_xprt *xprt); +/** + * xprt_reserve_xprt - serialize write access to transports + * @task: task that is requesting access to the transport + * + * This prevents mixing the payload of separate requests, and prevents + * transport connects from colliding with writes. No congestion control + * is provided. + */ +int xprt_reserve_xprt(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + struct rpc_rqst *req = task->tk_rqstp; + + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { + if (task == xprt->snd_task) + return 1; + if (task == NULL) + return 0; + goto out_sleep; + } + xprt->snd_task = task; + if (req) { + req->rq_bytes_sent = 0; + req->rq_ntrans++; + } + return 1; + +out_sleep: + dprintk("RPC: %4d failed to lock transport %p\n", + task->tk_pid, xprt); + task->tk_timeout = 0; + task->tk_status = -EAGAIN; + if (req && req->rq_ntrans) + rpc_sleep_on(&xprt->resend, task, NULL, NULL); + else + rpc_sleep_on(&xprt->sending, task, NULL, NULL); + return 0; +} + /* - * Serialize write access to transports, in order to prevent different - * requests from interfering with each other. - * Also prevents transport connects from colliding with writes. + * xprt_reserve_xprt_cong - serialize write access to transports + * @task: task that is requesting access to the transport + * + * Same as xprt_reserve_xprt, but Van Jacobson congestion control is + * integrated into the decision of whether a request is allowed to be + * woken up and given access to the transport. 
*/ -static int -__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) +int xprt_reserve_xprt_cong(struct rpc_task *task) { + struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req = task->tk_rqstp; if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { @@ -79,7 +121,7 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) return 1; goto out_sleep; } - if (xprt->nocong || __xprt_get_cong(xprt, task)) { + if (__xprt_get_cong(xprt, task)) { xprt->snd_task = task; if (req) { req->rq_bytes_sent = 0; @@ -101,20 +143,18 @@ out_sleep: return 0; } -static inline int -xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) +static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) { int retval; spin_lock_bh(&xprt->transport_lock); - retval = __xprt_lock_write(xprt, task); + retval = xprt->ops->reserve_xprt(task); spin_unlock_bh(&xprt->transport_lock); return retval; } -static void -__xprt_lock_write_next(struct rpc_xprt *xprt) +static void __xprt_lock_write_next(struct rpc_xprt *xprt) { struct rpc_task *task; @@ -598,7 +638,7 @@ int xprt_prepare_transmit(struct rpc_task *task) err = req->rq_received; goto out_unlock; } - if (!__xprt_lock_write(xprt, task)) { + if (!xprt->ops->reserve_xprt(task)) { err = -EAGAIN; goto out_unlock; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 79433ffd1df0..fc4fbe8ea346 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1045,6 +1045,7 @@ static void xs_connect(struct rpc_task *task) static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, + .reserve_xprt = xprt_reserve_xprt_cong, .connect = xs_connect, .send_request = xs_udp_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_rtt, @@ -1054,6 +1055,7 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .set_buffer_size = xs_tcp_set_buffer_size, + .reserve_xprt = xprt_reserve_xprt, .connect = xs_connect, .send_request = xs_tcp_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_def, -- cgit v1.2.2 From 49e9a89086b3cae784a4868ca852863e4f4ea3fe Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:51 -0700 Subject: [PATCH] RPC: expose API for serializing access to RPC transports The next method we abstract is the one that releases a transport, allowing another task to have access to the transport. Again, one generic version of this is provided for transports that don't need the RPC client to perform congestion control, and one version is for transports that can use the original Van Jacobson implementation in xprt.c. Test-plan: Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability. 
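Taken together with the previous patch, this makes congestion control an opt-in property selected purely through the transport's ops vector: a transport that leaves congestion control to the network protocol pairs the plain helpers, while one that wants the client-side Van Jacobson machinery pairs the _cong variants. A sketch of the two choices, using only the helpers this series introduces:

	/* transport (or its protocol) provides congestion control, TCP here: */
	.reserve_xprt	= xprt_reserve_xprt,
	.release_xprt	= xprt_release_xprt,

	/* RPC client provides Van Jacobson congestion avoidance, UDP here: */
	.reserve_xprt	= xprt_reserve_xprt_cong,
	.release_xprt	= xprt_release_xprt_cong,

The xs_tcp_ops and xs_udp_ops tables in the diffs wire up exactly these pairs.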
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 77 +++++++++++++++++++++++++++++++++++++++++---------- net/sunrpc/xprtsock.c | 2 ++ 2 files changed, 65 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2d1e8b83dd68..e92ea99dd318 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -153,14 +153,42 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) return retval; } - static void __xprt_lock_write_next(struct rpc_xprt *xprt) +{ + struct rpc_task *task; + struct rpc_rqst *req; + + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) + return; + + task = rpc_wake_up_next(&xprt->resend); + if (!task) { + task = rpc_wake_up_next(&xprt->sending); + if (!task) + goto out_unlock; + } + + req = task->tk_rqstp; + xprt->snd_task = task; + if (req) { + req->rq_bytes_sent = 0; + req->rq_ntrans++; + } + return; + +out_unlock: + smp_mb__before_clear_bit(); + clear_bit(XPRT_LOCKED, &xprt->state); + smp_mb__after_clear_bit(); +} + +static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) { struct rpc_task *task; if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) return; - if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) + if (RPCXPRT_CONGESTED(xprt)) goto out_unlock; task = rpc_wake_up_next(&xprt->resend); if (!task) { @@ -168,7 +196,7 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt) if (!task) goto out_unlock; } - if (xprt->nocong || __xprt_get_cong(xprt, task)) { + if (__xprt_get_cong(xprt, task)) { struct rpc_rqst *req = task->tk_rqstp; xprt->snd_task = task; if (req) { @@ -183,11 +211,14 @@ out_unlock: smp_mb__after_clear_bit(); } -/* - * Releases the transport for use by other requests. +/** + * xprt_release_xprt - allow other requests to use a transport + * @xprt: transport with other tasks potentially waiting + * @task: task that is releasing access to the transport + * + * Note that "task" can be NULL. No congestion control is provided. */ -static void -__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) +void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) { if (xprt->snd_task == task) { xprt->snd_task = NULL; @@ -198,11 +229,29 @@ __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) } } -static inline void -xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) +/** + * xprt_release_xprt_cong - allow other requests to use a transport + * @xprt: transport with other tasks potentially waiting + * @task: task that is releasing access to the transport + * + * Note that "task" can be NULL. Another task is awoken to use the + * transport if the transport's congestion window allows it. 
+ */ +void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) +{ + if (xprt->snd_task == task) { + xprt->snd_task = NULL; + smp_mb__before_clear_bit(); + clear_bit(XPRT_LOCKED, &xprt->state); + smp_mb__after_clear_bit(); + __xprt_lock_write_next_cong(xprt); + } +} + +static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) { spin_lock_bh(&xprt->transport_lock); - __xprt_release_write(xprt, task); + xprt->ops->release_xprt(xprt, task); spin_unlock_bh(&xprt->transport_lock); } @@ -237,7 +286,7 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) return; req->rq_cong = 0; xprt->cong -= RPC_CWNDSCALE; - __xprt_lock_write_next(xprt); + __xprt_lock_write_next_cong(xprt); } /* @@ -256,7 +305,7 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; if (cwnd > RPC_MAXCWND(xprt)) cwnd = RPC_MAXCWND(xprt); - __xprt_lock_write_next(xprt); + __xprt_lock_write_next_cong(xprt); } else if (result == -ETIMEDOUT) { cwnd >>= 1; if (cwnd < RPC_CWNDSCALE) @@ -693,7 +742,7 @@ void xprt_transmit(struct rpc_task *task) task->tk_status = -ENOTCONN; else if (!req->rq_received) rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); - __xprt_release_write(xprt, task); + xprt->ops->release_xprt(xprt, task); spin_unlock_bh(&xprt->transport_lock); return; } @@ -792,7 +841,7 @@ void xprt_release(struct rpc_task *task) if (!(req = task->tk_rqstp)) return; spin_lock_bh(&xprt->transport_lock); - __xprt_release_write(xprt, task); + xprt->ops->release_xprt(xprt, task); __xprt_put_cong(xprt, req); if (!list_empty(&req->rq_list)) list_del(&req->rq_list); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index fc4fbe8ea346..8589c1ad55e3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1046,6 +1046,7 @@ static void xs_connect(struct rpc_task *task) static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, + .release_xprt = xprt_release_xprt_cong, .connect = xs_connect, .send_request = xs_udp_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_rtt, @@ -1056,6 +1057,7 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .set_buffer_size = xs_tcp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt, + .release_xprt = xprt_release_xprt, .connect = xs_connect, .send_request = xs_tcp_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_def, -- cgit v1.2.2 From 46c0ee8bc4ad3743de05e8b8b20201df44dcb6d3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:52 -0700 Subject: [PATCH] RPC: separate xprt_timer implementations Allow transports to hook the retransmit timer interrupt. Some transports calculate their congestion window here so that a retransmit timeout has immediate effect on the congestion window. Test-plan: Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability. 
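The window arithmetic that such a timer hook feeds is the smoothed AIMD update in xprt_adjust_cwnd() above: halve on timeout, otherwise grow by roughly one request slot per full window of replies. A worked step, in the RPC_CWNDSCALE fixed-point units (1 << 8) that a later patch in this series moves into xprt.c:

	/*
	 * Illustration only.  With cwnd == 512 (two slots of 256) and a
	 * reply arriving while the window is full (cwnd <= xprt->cong):
	 *
	 *	cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
	 *	     == (256 * 256 + 256) / 512
	 *	     == 128			(integer division)
	 *
	 * i.e. about half a slot, so a full window of replies grows the
	 * window by roughly one slot.  A retransmit timeout instead does
	 * cwnd >>= 1, never dropping below RPC_CWNDSCALE; this is what the
	 * UDP timer hook added below triggers through
	 * xprt_adjust_cwnd(task, -ETIMEDOUT).
	 */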
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 45 ++++++++++++++++++++------------------------- net/sunrpc/xprtsock.c | 12 ++++++++++++ 2 files changed, 32 insertions(+), 25 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e92ea99dd318..ffc595592af3 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -289,16 +289,19 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) __xprt_lock_write_next_cong(xprt); } -/* - * Adjust RPC congestion window +/** + * xprt_adjust_cwnd - adjust transport congestion window + * @task: recently completed RPC request used to adjust window + * @result: result code of completed RPC request + * * We use a time-smoothed congestion estimator to avoid heavy oscillation. */ -static void -xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) +void xprt_adjust_cwnd(struct rpc_task *task, int result) { - unsigned long cwnd; + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = task->tk_xprt; + unsigned long cwnd = xprt->cwnd; - cwnd = xprt->cwnd; if (result >= 0 && cwnd <= xprt->cong) { /* The (cwnd >> 1) term makes sure * the result gets rounded properly. */ @@ -314,6 +317,7 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", xprt->cong, xprt->cwnd, cwnd); xprt->cwnd = cwnd; + __xprt_put_cong(xprt, req); } /** @@ -602,8 +606,7 @@ void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) /* Adjust congestion window */ if (!xprt->nocong) { unsigned timer = task->tk_msg.rpc_proc->p_timer; - xprt_adjust_cwnd(xprt, copied); - __xprt_put_cong(xprt, req); + xprt_adjust_cwnd(task, copied); if (timer) { if (req->rq_ntrans == 1) rpc_update_rtt(clnt->cl_rtt, timer, @@ -640,27 +643,19 @@ void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) return; } -/* - * RPC receive timeout handler. - */ -static void -xprt_timer(struct rpc_task *task) +static void xprt_timer(struct rpc_task *task) { - struct rpc_rqst *req = task->tk_rqstp; + struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - spin_lock(&xprt->transport_lock); - if (req->rq_received) - goto out; - - xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT); - __xprt_put_cong(xprt, req); + dprintk("RPC: %4d xprt_timer\n", task->tk_pid); - dprintk("RPC: %4d xprt_timer (%s request)\n", - task->tk_pid, req ? "pending" : "backlogged"); - - task->tk_status = -ETIMEDOUT; -out: + spin_lock(&xprt->transport_lock); + if (!req->rq_received) { + if (xprt->ops->timer) + xprt->ops->timer(task); + task->tk_status = -ETIMEDOUT; + } task->tk_timeout = 0; rpc_wake_up_task(task); spin_unlock(&xprt->transport_lock); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 8589c1ad55e3..c3658ff027a6 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -860,6 +860,17 @@ static void xs_tcp_set_buffer_size(struct rpc_xprt *xprt) return; } +/** + * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport + * @task: task that timed out + * + * Adjust the congestion window after a retransmit timeout has occurred. 
+ */ +static void xs_udp_timer(struct rpc_task *task) +{ + xprt_adjust_cwnd(task, -ETIMEDOUT); +} + static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) { struct sockaddr_in myaddr = { @@ -1050,6 +1061,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .connect = xs_connect, .send_request = xs_udp_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_rtt, + .timer = xs_udp_timer, .close = xs_close, .destroy = xs_destroy, }; -- cgit v1.2.2 From 1570c1e41eabf6b7031f3e4322a2cf1cbe319fee Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:52 -0700 Subject: [PATCH] RPC: add generic interface for adjusting the congestion window A new interface that allows transports to adjust their congestion window using the Van Jacobson implementation in xprt.c is provided. Test-plan: Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 66 ++++++++++++++++++++------------------------------- net/sunrpc/xprtsock.c | 13 ++++------ 2 files changed, 31 insertions(+), 48 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ffc595592af3..707806fe1a23 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -591,56 +591,42 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) return req; } +/** + * xprt_update_rtt - update an RPC client's RTT state after receiving a reply + * @task: RPC request that recently completed + * + */ +void xprt_update_rtt(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_rtt *rtt = task->tk_client->cl_rtt; + unsigned timer = task->tk_msg.rpc_proc->p_timer; + + if (timer) { + if (req->rq_ntrans == 1) + rpc_update_rtt(rtt, timer, + (long)jiffies - req->rq_xtime); + rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); + } +} + /** * xprt_complete_rqst - called when reply processing is complete - * @xprt: controlling transport - * @req: RPC request that just completed + * @task: RPC request that recently completed * @copied: actual number of bytes received from the transport * + * Caller holds transport lock. 
*/ -void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) -{ - struct rpc_task *task = req->rq_task; - struct rpc_clnt *clnt = task->tk_client; - - /* Adjust congestion window */ - if (!xprt->nocong) { - unsigned timer = task->tk_msg.rpc_proc->p_timer; - xprt_adjust_cwnd(task, copied); - if (timer) { - if (req->rq_ntrans == 1) - rpc_update_rtt(clnt->cl_rtt, timer, - (long)jiffies - req->rq_xtime); - rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1); - } - } +void xprt_complete_rqst(struct rpc_task *task, int copied) +{ + struct rpc_rqst *req = task->tk_rqstp; -#ifdef RPC_PROFILE - /* Profile only reads for now */ - if (copied > 1024) { - static unsigned long nextstat; - static unsigned long pkt_rtt, pkt_len, pkt_cnt; - - pkt_cnt++; - pkt_len += req->rq_slen + copied; - pkt_rtt += jiffies - req->rq_xtime; - if (time_before(nextstat, jiffies)) { - printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd); - printk("RPC: %ld %ld %ld %ld stat\n", - jiffies, pkt_cnt, pkt_len, pkt_rtt); - pkt_rtt = pkt_len = pkt_cnt = 0; - nextstat = jiffies + 5 * HZ; - } - } -#endif + dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", + task->tk_pid, ntohl(req->rq_xid), copied); - dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied); list_del_init(&req->rq_list); req->rq_received = req->rq_private_buf.len = copied; - - /* ... and wake up the process. */ rpc_wake_up_task(task); - return; } static void xprt_timer(struct rpc_task *task) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c3658ff027a6..980f26504f48 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -460,8 +460,6 @@ static void xs_udp_data_ready(struct sock *sk, int len) goto out_unlock; task = rovr->rq_task; - dprintk("RPC: %4d received reply\n", task->tk_pid); - if ((copied = rovr->rq_private_buf.buflen) > repsize) copied = repsize; @@ -472,7 +470,9 @@ static void xs_udp_data_ready(struct sock *sk, int len) /* Something worked... */ dst_confirm(skb->dst); - xprt_complete_rqst(xprt, rovr, copied); + xprt_adjust_cwnd(task, copied); + xprt_update_rtt(task); + xprt_complete_rqst(task, copied); out_unlock: spin_unlock(&xprt->transport_lock); @@ -634,11 +634,8 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc } out: - if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { - dprintk("RPC: %4d received reply complete\n", - req->rq_task->tk_pid); - xprt_complete_rqst(xprt, req, xprt->tcp_copied); - } + if (!(xprt->tcp_flags & XPRT_COPY_DATA)) + xprt_complete_rqst(req->rq_task, xprt->tcp_copied); spin_unlock(&xprt->transport_lock); xs_tcp_check_recm(xprt); } -- cgit v1.2.2 From a58dd398f5db4f73d5c581069fd70a4304cc4f0a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:53 -0700 Subject: [PATCH] RPC: add a release_rqst callout to the RPC transport switch The final place where congestion control state is adjusted is in xprt_release, where each request is finally released. Add a callout there to allow transports to perform additional processing when a request is about to be released. Test-plan: Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability. 
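As the diff below shows, the new callout is deliberately optional: xprt_release() invokes it only when a transport has installed one, so the release path stays a no-op for transports with no client-side congestion state. Condensed from the hunks that follow:

	/* xprt.c: call out, if the transport asked for it */
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);

	/* xprtsock.c: only the UDP ops table sets it, returning congestion
	 * credit through the generic helper; xs_tcp_ops leaves it unset */
	.release_request	= xprt_release_rqst_cong,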
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 14 +++++++++++++- net/sunrpc/xprtsock.c | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 707806fe1a23..e8d11bd6158e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -289,6 +289,17 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) __xprt_lock_write_next_cong(xprt); } +/** + * xprt_release_rqst_cong - housekeeping when request is complete + * @task: RPC request that recently completed + * + * Useful for transports that require congestion control. + */ +void xprt_release_rqst_cong(struct rpc_task *task) +{ + __xprt_put_cong(task->tk_xprt, task->tk_rqstp); +} + /** * xprt_adjust_cwnd - adjust transport congestion window * @task: recently completed RPC request used to adjust window @@ -823,7 +834,8 @@ void xprt_release(struct rpc_task *task) return; spin_lock_bh(&xprt->transport_lock); xprt->ops->release_xprt(xprt, task); - __xprt_put_cong(xprt, req); + if (xprt->ops->release_request) + xprt->ops->release_request(task); if (!list_empty(&req->rq_list)) list_del(&req->rq_list); xprt->last_used = jiffies; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 980f26504f48..6c2f5dcea416 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1059,6 +1059,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .send_request = xs_udp_send_request, .set_retrans_timeout = xprt_set_retrans_timeout_rtt, .timer = xs_udp_timer, + .release_request = xprt_release_rqst_cong, .close = xs_close, .destroy = xs_destroy, }; -- cgit v1.2.2 From ed63c003701a314c4893c11eceb9d68f8f46c662 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:53 -0700 Subject: [PATCH] RPC: remove xprt->nocong Get rid of the "xprt->nocong" variable. Test-plan: Use WAN simulation to cause sporadic bursty packet loss with UDP mounts. Look for significant regression in performance or client stability. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 6c2f5dcea416..7e5e020fe78d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1100,7 +1100,6 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_UDP; xprt->port = XS_MAX_RESVPORT; xprt->tsh_size = 0; - xprt->nocong = 0; xprt->cwnd = RPC_INITCWND; xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; /* XXX: header size can vary due to auth type, IPv6, etc. */ @@ -1140,7 +1139,6 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_TCP; xprt->port = XS_MAX_RESVPORT; xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); - xprt->nocong = 1; xprt->cwnd = RPC_MAXCWND(xprt); xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; -- cgit v1.2.2 From 555ee3af161b037865793bd4bebc06b58daafde6 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:54 -0700 Subject: [PATCH] RPC: clean up after nocong was removed Clean-up: Move some macros that are specific to the Van Jacobson implementation into xprt.c. Get rid of the cong_wait field in rpc_xprt, which is no longer used. Get rid of xprt_clear_backlog. Test-plan: Compile with CONFIG_NFS enabled. 
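A side effect worth noting in the diff below: with the per-transport cwnd assignments gone, window initialization becomes uniform as well. Every transport now starts from the same point in xprt_setup(), and only the congestion-controlled paths ever move the value:

	/* xprt_setup(), after this clean-up: */
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;	/* one slot; only the _cong paths grow it,
					 * and never past RPC_MAXCWND(xprt) */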
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 29 +++++++++++++++++++---------- net/sunrpc/xprtsock.c | 2 -- 2 files changed, 19 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e8d11bd6158e..0458319a1bdd 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -62,7 +62,23 @@ static inline void do_xprt_reserve(struct rpc_task *); static void xprt_connect_status(struct rpc_task *task); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); -static int xprt_clear_backlog(struct rpc_xprt *xprt); +/* + * The transport code maintains an estimate on the maximum number of out- + * standing RPC requests, using a smoothed version of the congestion + * avoidance implemented in 44BSD. This is basically the Van Jacobson + * congestion algorithm: If a retransmit occurs, the congestion window is + * halved; otherwise, it is incremented by 1/cwnd when + * + * - a reply is received and + * - a full number of requests are outstanding and + * - the congestion window hasn't been updated recently. + */ +#define RPC_CWNDSHIFT (8U) +#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) +#define RPC_INITCWND RPC_CWNDSCALE +#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) + +#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) /** * xprt_reserve_xprt - serialize write access to transports @@ -850,7 +866,7 @@ void xprt_release(struct rpc_task *task) spin_lock(&xprt->reserve_lock); list_add(&req->rq_list, &xprt->free); - xprt_clear_backlog(xprt); + rpc_wake_up_next(&xprt->backlog); spin_unlock(&xprt->reserve_lock); } @@ -902,7 +918,6 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc spin_lock_init(&xprt->transport_lock); spin_lock_init(&xprt->reserve_lock); - init_waitqueue_head(&xprt->cong_wait); INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); @@ -911,6 +926,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; xprt->last_used = jiffies; + xprt->cwnd = RPC_INITCWND; rpc_init_wait_queue(&xprt->pending, "xprt_pending"); rpc_init_wait_queue(&xprt->sending, "xprt_sending"); @@ -955,16 +971,9 @@ static void xprt_shutdown(struct rpc_xprt *xprt) rpc_wake_up(&xprt->resend); xprt_wake_pending_tasks(xprt, -EIO); rpc_wake_up(&xprt->backlog); - wake_up(&xprt->cong_wait); del_timer_sync(&xprt->timer); } -static int xprt_clear_backlog(struct rpc_xprt *xprt) { - rpc_wake_up_next(&xprt->backlog); - wake_up(&xprt->cong_wait); - return 1; -} - /** * xprt_destroy - destroy an RPC transport, killing off all requests. * @xprt: transport to destroy diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7e5e020fe78d..26402c063f00 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1100,7 +1100,6 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_UDP; xprt->port = XS_MAX_RESVPORT; xprt->tsh_size = 0; - xprt->cwnd = RPC_INITCWND; xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; /* XXX: header size can vary due to auth type, IPv6, etc. */ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); @@ -1139,7 +1138,6 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->prot = IPPROTO_TCP; xprt->port = XS_MAX_RESVPORT; xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); - xprt->cwnd = RPC_MAXCWND(xprt); xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 
1 : 0; xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; -- cgit v1.2.2 From 529b33c6db0120126b1381faa51406dc463acdc9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:54 -0700 Subject: [PATCH] RPC: allow RPC client's port range to be adjustable Select an RPC client source port between 650 and 1023 instead of between 1 and 800. The old range conflicts with a number of network services. Provide sysctls to allow admins to select a different port range. Note that this doesn't affect user-level RPC library behavior, which still uses 1 to 800. Based on a suggestion by Olaf Kirch . Test-plan: Repeated mount and unmount. Destructive testing. Idle timeouts. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/sysctl.c | 29 +++++++++++++++++++++++++++++ net/sunrpc/xprtsock.c | 23 ++++++++--------------- 2 files changed, 37 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index ef483262f17f..d0c9f460e411 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c @@ -121,9 +121,16 @@ done: unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; +unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; +EXPORT_SYMBOL(xprt_min_resvport); +unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; +EXPORT_SYMBOL(xprt_max_resvport); + static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; +static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; +static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; static ctl_table debug_table[] = { { @@ -180,6 +187,28 @@ static ctl_table debug_table[] = { .extra1 = &min_slot_table_size, .extra2 = &max_slot_table_size }, + { + .ctl_name = CTL_MIN_RESVPORT, + .procname = "min_resvport", + .data = &xprt_min_resvport, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xprt_min_resvport_limit, + .extra2 = &xprt_max_resvport_limit + }, + { + .ctl_name = CTL_MAX_RESVPORT, + .procname = "max_resvport", + .data = &xprt_max_resvport, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xprt_min_resvport_limit, + .extra2 = &xprt_max_resvport_limit + }, { .ctl_name = 0 } }; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 26402c063f00..62c2e7caa345 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -35,11 +35,6 @@ #include #include -/* - * Maximum port number to use when requesting a reserved port. - */ -#define XS_MAX_RESVPORT (800U) - /* * How many times to try sending a request on a socket before waiting * for the socket buffer to clear. @@ -873,10 +868,9 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) struct sockaddr_in myaddr = { .sin_family = AF_INET, }; - int err, port; + int err; + unsigned short port = xprt->port; - /* Were we already bound to a given port? 
Try to reuse it */ - port = xprt->port; do { myaddr.sin_port = htons(port); err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, @@ -887,8 +881,10 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) port); return 0; } - if (--port == 0) - port = XS_MAX_RESVPORT; + if (port <= xprt_min_resvport) + port = xprt_max_resvport; + else + port--; } while (err == -EADDRINUSE && port != xprt->port); dprintk("RPC: can't bind to reserved port (%d).\n", -err); @@ -1075,9 +1071,6 @@ static struct rpc_xprt_ops xs_tcp_ops = { .destroy = xs_destroy, }; -extern unsigned int xprt_udp_slot_table_entries; -extern unsigned int xprt_tcp_slot_table_entries; - /** * xs_setup_udp - Set up transport to use a UDP socket * @xprt: transport to set up @@ -1098,7 +1091,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) memset(xprt->slot, 0, slot_table_size); xprt->prot = IPPROTO_UDP; - xprt->port = XS_MAX_RESVPORT; + xprt->port = xprt_max_resvport; xprt->tsh_size = 0; xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; /* XXX: header size can vary due to auth type, IPv6, etc. */ @@ -1136,7 +1129,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) memset(xprt->slot, 0, slot_table_size); xprt->prot = IPPROTO_TCP; - xprt->port = XS_MAX_RESVPORT; + xprt->port = xprt_max_resvport; xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; -- cgit v1.2.2 From 3167e12c0c424f3c323944701615343022d86418 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:55 -0700 Subject: [PATCH] RPC: make sure to get the same local port number when reconnecting Implement a best practice: if the remote end drops our connection, try to reconnect using the same port number. This is important because the NFS server's Duplicate Reply Cache often hashes on the source port number. If the client reuses the port number when it reconnects, the server's DRC will be more effective. Based on suggestions by Mike Eisler, Olaf Kirch, and Alexey Kuznetsky. Test-plan: Destructive testing. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 65 +++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 62c2e7caa345..88ac71fcd335 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -362,6 +362,8 @@ static int xs_tcp_send_request(struct rpc_task *task) * xs_close - close a socket * @xprt: transport * + * This is used when all requests are complete; ie, no DRC state remains + * on the server we want to save. */ static void xs_close(struct rpc_xprt *xprt) { @@ -949,6 +951,30 @@ out: xprt_clear_connecting(xprt); } +/* + * We need to preserve the port number so the reply cache on the server can + * find our cached RPC replies when we get around to reconnecting. + */ +static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) +{ + int result; + struct socket *sock = xprt->sock; + struct sockaddr any; + + dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); + + /* + * Disconnect the transport socket by doing a connect operation + * with AF_UNSPEC. This should return immediately... 
+ */ + memset(&any, 0, sizeof(any)); + any.sa_family = AF_UNSPEC; + result = sock->ops->connect(sock, &any, sizeof(any), 0); + if (result) + dprintk("RPC: AF_UNSPEC connect return code %d\n", + result); +} + /** * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint * @args: RPC transport to connect @@ -966,18 +992,20 @@ static void xs_tcp_connect_worker(void *args) dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt); - /* Start by resetting any existing socket state */ - xs_close(xprt); - - if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { - dprintk("RPC: can't create TCP transport socket (%d).\n", -err); - goto out; - } + if (!xprt->sock) { + /* start from scratch */ + if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { + dprintk("RPC: can't create TCP transport socket (%d).\n", -err); + goto out; + } - if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { - sock_release(sock); - goto out; - } + if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { + sock_release(sock); + goto out; + } + } else + /* "close" the socket, preserving the local port */ + xs_tcp_reuse_connection(xprt); if (!xprt->inet) { struct sock *sk = sock->sk; @@ -991,7 +1019,12 @@ static void xs_tcp_connect_worker(void *args) sk->sk_data_ready = xs_tcp_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = xs_tcp_write_space; - tcp_sk(sk)->nonagle = 1; + + /* socket options */ + sk->sk_userlocks |= SOCK_BINDPORT_LOCK; + sock_reset_flag(sk, SOCK_LINGER); + tcp_sk(sk)->linger2 = 0; + tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; xprt_clear_connected(xprt); @@ -1012,6 +1045,14 @@ static void xs_tcp_connect_worker(void *args) case -EINPROGRESS: case -EALREADY: goto out_clear; + case -ECONNREFUSED: + case -ECONNRESET: + /* retry with existing socket, after a delay */ + break; + default: + /* get rid of existing socket, and retry */ + xs_close(xprt); + break; } } out: -- cgit v1.2.2 From 03bf4b707eee06706c9db343dd5c905b7ee47ed2 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:55 -0700 Subject: [PATCH] RPC: parametrize various transport connect timeouts Each transport implementation can now set unique bind, connect, reestablishment, and idle timeout values. These are variables, allowing the values to be modified dynamically. This permits exponential backoff of any of these values, for instance. As an example, we implement exponential backoff for the connection reestablishment timeout. Test-plan: Destructive testing (unplugging the network temporarily). Connectathon with UDP and TCP. 
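As a concrete illustration of the exponential backoff described above: with the constants this patch introduces below (XS_TCP_INIT_REEST_TO = 3*HZ, XS_TCP_MAX_REEST_TO = 5*60*HZ), successive TCP reconnect attempts are delayed roughly 3s, 6s, 12s, 24s, and so on, until the delay is clamped at 300 seconds. The update step, quoted from the xs_connect() hunk further down, is simply:

        schedule_delayed_work(&xprt->connect_worker, xprt->reestablish_timeout);
        xprt->reestablish_timeout <<= 1;
        if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
                xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;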
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 2 +- net/sunrpc/xprt.c | 5 ++-- net/sunrpc/xprtsock.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 69 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cc1b773a79d3..24b44e73f391 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -740,7 +740,7 @@ call_bind(struct rpc_task *task) task->tk_action = call_connect; if (!clnt->cl_port) { task->tk_action = call_bind_status; - task->tk_timeout = RPC_CONNECT_TIMEOUT; + task->tk_timeout = task->tk_xprt->bind_timeout; rpc_getport(task, clnt); } } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 0458319a1bdd..215be0d0ef6b 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -551,7 +551,7 @@ void xprt_connect(struct rpc_task *task) if (task->tk_rqstp) task->tk_rqstp->rq_bytes_sent = 0; - task->tk_timeout = RPC_CONNECT_TIMEOUT; + task->tk_timeout = xprt->connect_timeout; rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); xprt->ops->connect(task); } @@ -763,7 +763,6 @@ void xprt_transmit(struct rpc_task *task) switch (status) { case -ECONNREFUSED: - task->tk_timeout = RPC_REESTABLISH_TIMEOUT; rpc_sleep_on(&xprt->sending, task, NULL, NULL); case -EAGAIN: case -ENOTCONN: @@ -857,7 +856,7 @@ void xprt_release(struct rpc_task *task) xprt->last_used = jiffies; if (list_empty(&xprt->recv) && !xprt->shutdown) mod_timer(&xprt->timer, - xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT); + xprt->last_used + xprt->idle_timeout); spin_unlock_bh(&xprt->transport_lock); task->tk_rqstp = NULL; memset(req, 0, sizeof(*req)); /* mark unused */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 88ac71fcd335..06c2d95484e0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -41,6 +41,50 @@ */ #define XS_SENDMSG_RETRY (10U) +/* + * Time out for an RPC UDP socket connect. UDP socket connects are + * synchronous, but we set a timeout anyway in case of resource + * exhaustion on the local host. + */ +#define XS_UDP_CONN_TO (5U * HZ) + +/* + * Wait duration for an RPC TCP connection to be established. Solaris + * NFS over TCP uses 60 seconds, for example, which is in line with how + * long a server takes to reboot. + */ +#define XS_TCP_CONN_TO (60U * HZ) + +/* + * Wait duration for a reply from the RPC portmapper. + */ +#define XS_BIND_TO (60U * HZ) + +/* + * Delay if a UDP socket connect error occurs. This is most likely some + * kind of resource problem on the local host. + */ +#define XS_UDP_REEST_TO (2U * HZ) + +/* + * The reestablish timeout allows clients to delay for a bit before attempting + * to reconnect to a server that just dropped our connection. + * + * We implement an exponential backoff when trying to reestablish a TCP + * transport connection with the server. Some servers like to drop a TCP + * connection when they are overworked, so we start with a short timeout and + * increase over time if the server is down or not responding. + */ +#define XS_TCP_INIT_REEST_TO (3U * HZ) +#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) + +/* + * TCP idle timeout; client drops the transport socket if it is idle + * for this long. Note that we also timeout UDP sockets to prevent + * holding port numbers when there is no RPC traffic. 
+ */ +#define XS_IDLE_DISC_TO (5U * 60 * HZ) + #ifdef RPC_DEBUG # undef RPC_DEBUG_DATA # define RPCDBG_FACILITY RPCDBG_TRANS @@ -739,6 +783,7 @@ static void xs_tcp_state_change(struct sock *sk) xprt->tcp_reclen = 0; xprt->tcp_copied = 0; xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; xprt_wake_pending_tasks(xprt, 0); } spin_unlock_bh(&xprt->transport_lock); @@ -1066,6 +1111,13 @@ out_clear: * @task: address of RPC task that manages state of connect request * * TCP: If the remote end dropped the connection, delay reconnecting. + * + * UDP socket connects are synchronous, but we use a work queue anyway + * to guarantee that even unprivileged user processes can set up a + * socket on a privileged port. + * + * If a UDP socket connect fails, the delay behavior here prevents + * retry floods (hard mounts). */ static void xs_connect(struct rpc_task *task) { @@ -1075,9 +1127,13 @@ static void xs_connect(struct rpc_task *task) return; if (xprt->sock != NULL) { - dprintk("RPC: xs_connect delayed xprt %p\n", xprt); + dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", + xprt, xprt->reestablish_timeout / HZ); schedule_delayed_work(&xprt->connect_worker, - RPC_REESTABLISH_TIMEOUT); + xprt->reestablish_timeout); + xprt->reestablish_timeout <<= 1; + if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) + xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; } else { dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); schedule_work(&xprt->connect_worker); @@ -1139,6 +1195,10 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); + xprt->bind_timeout = XS_BIND_TO; + xprt->connect_timeout = XS_UDP_CONN_TO; + xprt->reestablish_timeout = XS_UDP_REEST_TO; + xprt->idle_timeout = XS_IDLE_DISC_TO; xprt->ops = &xs_udp_ops; @@ -1176,6 +1236,10 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); + xprt->bind_timeout = XS_BIND_TO; + xprt->connect_timeout = XS_TCP_CONN_TO; + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; + xprt->idle_timeout = XS_IDLE_DISC_TO; xprt->ops = &xs_tcp_ops; -- cgit v1.2.2 From 470056c288334eb0b37be26c9ff8aee37ed1cc7a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 25 Aug 2005 16:25:56 -0700 Subject: [PATCH] RPC: rationalize set_buffer_size In fact, ->set_buffer_size should be completely functionless for non-UDP. Test-plan: Check socket buffer size on UDP sockets over time. 
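For reference, after this change the buffer-size hook is optional per transport: the generic client only calls it when the transport provides one, and the UDP implementation applies the requested sizes itself. The new generic call site, quoted from the clnt.c hunk below, reduces to:

        if (xprt->ops->set_buffer_size)
                xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);

so the TCP ops table can simply drop its no-op method.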
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 10 ++-------- net/sunrpc/xprtsock.c | 30 +++++++++++++++--------------- 2 files changed, 17 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 24b44e73f391..5a8f01d726e9 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -517,14 +517,8 @@ void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { struct rpc_xprt *xprt = clnt->cl_xprt; - - xprt->sndsize = 0; - if (sndsize) - xprt->sndsize = sndsize + RPC_SLACK_SPACE; - xprt->rcvsize = 0; - if (rcvsize) - xprt->rcvsize = rcvsize + RPC_SLACK_SPACE; - xprt->ops->set_buffer_size(xprt); + if (xprt->ops->set_buffer_size) + xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); } /* diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 06c2d95484e0..2e1529217e65 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -865,15 +865,7 @@ static void xs_tcp_write_space(struct sock *sk) read_unlock(&sk->sk_callback_lock); } -/** - * xs_udp_set_buffer_size - set send and receive limits - * @xprt: generic transport - * - * Set socket send and receive limits based on the - * sndsize and rcvsize fields in the generic transport - * structure. - */ -static void xs_udp_set_buffer_size(struct rpc_xprt *xprt) +static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) { struct sock *sk = xprt->inet; @@ -889,14 +881,23 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt) } /** - * xs_tcp_set_buffer_size - set send and receive limits + * xs_udp_set_buffer_size - set send and receive limits * @xprt: generic transport + * @sndsize: requested size of send buffer, in bytes + * @rcvsize: requested size of receive buffer, in bytes * - * Nothing to do for TCP. + * Set socket send and receive buffer size limits. */ -static void xs_tcp_set_buffer_size(struct rpc_xprt *xprt) +static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) { - return; + xprt->sndsize = 0; + if (sndsize) + xprt->sndsize = sndsize + 1024; + xprt->rcvsize = 0; + if (rcvsize) + xprt->rcvsize = rcvsize + 1024; + + xs_udp_do_set_buffer_size(xprt); } /** @@ -989,7 +990,7 @@ static void xs_udp_connect_worker(void *args) write_unlock_bh(&sk->sk_callback_lock); } - xs_udp_set_buffer_size(xprt); + xs_udp_do_set_buffer_size(xprt); status = 0; out: xprt_wake_pending_tasks(xprt, status); @@ -1158,7 +1159,6 @@ static struct rpc_xprt_ops xs_udp_ops = { }; static struct rpc_xprt_ops xs_tcp_ops = { - .set_buffer_size = xs_tcp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt, .release_xprt = xprt_release_xprt, .connect = xs_connect, -- cgit v1.2.2 From 278c995c8a153bb2a9bc427e931cfb9c8034c9d7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 24 Jul 2005 23:53:01 +0100 Subject: [PATCH] RPC,NFS: new rpc_pipefs patch Currently rpc_mkdir/rpc_rmdir and rpc_mkpipe/mk_unlink have an API that's a little unfortunate. They take a path relative to the rpc_pipefs root and thus need to perform a full lookup. If you look at debugfs or usbfs they always store the dentry for directories they created and thus can pass in a dentry + single pathname component pair into their equivalents of the above functions. And in fact rpc_pipefs actually stores a dentry for all but one component so this change not only simplifies the core rpc_pipe code but also the callers. Unfortuntately this code path is only used by the NFS4 idmapper and AUTH_GSSAPI for which I don't have a test enviroment. 
Could someone give it a spin? It's the last bit needed before we can rework the lookup_hash API Signed-off-by: Christoph Hellwig Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 9 +- net/sunrpc/clnt.c | 53 +++++--- net/sunrpc/rpc_pipe.c | 268 +++++++++++++++-------------------------- 3 files changed, 133 insertions(+), 197 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index d2b08f16c257..bd2555139fa9 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -87,7 +87,6 @@ struct gss_auth { struct list_head upcalls; struct rpc_clnt *client; struct dentry *dentry; - char path[48]; spinlock_t lock; }; @@ -690,10 +689,8 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) if (err) goto err_put_mech; - snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s", - clnt->cl_pathname, - gss_auth->mech->gm_name); - gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); + gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, + clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); if (IS_ERR(gss_auth->dentry)) { err = PTR_ERR(gss_auth->dentry); goto err_put_mech; @@ -718,7 +715,7 @@ gss_destroy(struct rpc_auth *auth) auth, auth->au_flavor); gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->path); + rpc_unlink(gss_auth->dentry); gss_mech_put(gss_auth->mech); rpcauth_free_credcache(auth); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 5a8f01d726e9..63bf591310e0 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -67,26 +67,42 @@ static u32 * call_verify(struct rpc_task *task); static int rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) { - static uint32_t clntid; + static unsigned int clntid; + char name[128]; int error; if (dir_name == NULL) return 0; - for (;;) { - snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), - "%s/clnt%x", dir_name, - (unsigned int)clntid++); - clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0'; - clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt); - if (!IS_ERR(clnt->cl_dentry)) - return 0; + + retry_parent: + clnt->__cl_parent_dentry = rpc_mkdir(NULL, dir_name, NULL); + if (IS_ERR(clnt->__cl_parent_dentry)) { + error = PTR_ERR(clnt->__cl_parent_dentry); + if (error == -EEXIST) + goto retry_parent; /* XXX(hch): WTF? 
*/ + + printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", + dir_name, error); + return error; + } + + + retry_child: + snprintf(name, sizeof(name), "clnt%x", clntid++); + name[sizeof(name) - 1] = '\0'; + + clnt->cl_dentry = rpc_mkdir(clnt->__cl_parent_dentry, name, clnt); + if (IS_ERR(clnt->cl_dentry)) { error = PTR_ERR(clnt->cl_dentry); - if (error != -EEXIST) { - printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", - clnt->cl_pathname, error); - return error; - } + if (error == -EEXIST) + goto retry_child; + printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", + name, error); + rpc_rmdir(clnt->__cl_parent_dentry); + return error; } + + return 0; } /* @@ -174,7 +190,8 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, return clnt; out_no_auth: - rpc_rmdir(clnt->cl_pathname); + rpc_rmdir(clnt->cl_dentry); + rpc_rmdir(clnt->__cl_parent_dentry); out_no_path: if (clnt->cl_server != clnt->cl_inline_name) kfree(clnt->cl_server); @@ -302,8 +319,10 @@ rpc_destroy_client(struct rpc_clnt *clnt) rpc_destroy_client(clnt->cl_parent); goto out_free; } - if (clnt->cl_pathname[0]) - rpc_rmdir(clnt->cl_pathname); + if (clnt->cl_dentry) + rpc_rmdir(clnt->cl_dentry); + if (clnt->__cl_parent_dentry) + rpc_rmdir(clnt->__cl_parent_dentry); if (clnt->cl_xprt) { xprt_destroy(clnt->cl_xprt); clnt->cl_xprt = NULL; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ded6c63f11ec..b382809726d8 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -414,38 +414,6 @@ rpc_put_mount(void) simple_release_fs(&rpc_mount, &rpc_mount_count); } -static int -rpc_lookup_parent(char *path, struct nameidata *nd) -{ - if (path[0] == '\0') - return -ENOENT; - if (rpc_get_mount()) { - printk(KERN_WARNING "%s: %s failed to mount " - "pseudofilesystem \n", __FILE__, __FUNCTION__); - return -ENODEV; - } - nd->mnt = mntget(rpc_mount); - nd->dentry = dget(rpc_mount->mnt_root); - nd->last_type = LAST_ROOT; - nd->flags = LOOKUP_PARENT; - nd->depth = 0; - - if (path_walk(path, nd)) { - printk(KERN_WARNING "%s: %s failed to find path %s\n", - __FILE__, __FUNCTION__, path); - rpc_put_mount(); - return -ENOENT; - } - return 0; -} - -static void -rpc_release_path(struct nameidata *nd) -{ - path_release(nd); - rpc_put_mount(); -} - static struct inode * rpc_get_inode(struct super_block *sb, int mode) { @@ -550,197 +518,149 @@ out_bad: return -ENOMEM; } -static int -__rpc_mkdir(struct inode *dir, struct dentry *dentry) +struct dentry * +rpc_mkdir(struct dentry *parent, char *name, struct rpc_clnt *rpc_client) { + struct inode *dir; + struct dentry *dentry; struct inode *inode; - - inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR); - if (!inode) - goto out_err; - inode->i_ino = iunique(dir->i_sb, 100); - d_instantiate(dentry, inode); - dir->i_nlink++; - inode_dir_notify(dir, DN_CREATE); - rpc_get_mount(); - return 0; -out_err: - printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", - __FILE__, __FUNCTION__, dentry->d_name.name); - return -ENOMEM; -} - -static int -__rpc_rmdir(struct inode *dir, struct dentry *dentry) -{ int error; - shrink_dcache_parent(dentry); - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); - } - if ((error = simple_rmdir(dir, dentry)) != 0) - return error; - if (!error) { - inode_dir_notify(dir, DN_DELETE); - d_drop(dentry); - rpc_put_mount(); - } - return 0; -} + if (!parent) + parent = rpc_mount->mnt_root; -static struct dentry * -rpc_lookup_negative(char *path, 
struct nameidata *nd) -{ - struct dentry *dentry; - struct inode *dir; - int error; - - if ((error = rpc_lookup_parent(path, nd)) != 0) + dir = parent->d_inode; + + error = rpc_get_mount(); + if (error) return ERR_PTR(error); - dir = nd->dentry->d_inode; + down(&dir->i_sem); - dentry = lookup_hash(&nd->last, nd->dentry); + dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) - goto out_err; + goto out_unlock; if (dentry->d_inode) { - dput(dentry); dentry = ERR_PTR(-EEXIST); - goto out_err; + goto out_dput; } - return dentry; -out_err: - up(&dir->i_sem); - rpc_release_path(nd); - return dentry; -} + inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR); + if (!inode) + goto out_dput; + inode->i_ino = iunique(dir->i_sb, 100); + dir->i_nlink++; + RPC_I(dentry->d_inode)->private = rpc_client; -struct dentry * -rpc_mkdir(char *path, struct rpc_clnt *rpc_client) -{ - struct nameidata nd; - struct dentry *dentry; - struct inode *dir; - int error; + d_instantiate(dentry, inode); + dget(dentry); + up(&dir->i_sem); + + inode_dir_notify(dir, DN_CREATE); - dentry = rpc_lookup_negative(path, &nd); - if (IS_ERR(dentry)) - return dentry; - dir = nd.dentry->d_inode; - if ((error = __rpc_mkdir(dir, dentry)) != 0) - goto err_dput; - RPC_I(dentry->d_inode)->private = rpc_client; error = rpc_populate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF); if (error) - goto err_depopulate; -out: - up(&dir->i_sem); - rpc_release_path(&nd); + goto out_depopulate; + return dentry; -err_depopulate: - rpc_depopulate(dentry); - __rpc_rmdir(dir, dentry); -err_dput: + + out_depopulate: + rpc_rmdir(dentry); + out_dput: dput(dentry); - printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n", - __FILE__, __FUNCTION__, path, error); - dentry = ERR_PTR(error); - goto out; + out_unlock: + up(&dir->i_sem); + rpc_put_mount(); + return dentry; } -int -rpc_rmdir(char *path) +void +rpc_rmdir(struct dentry *dentry) { - struct nameidata nd; - struct dentry *dentry; - struct inode *dir; - int error; + struct dentry *parent = dentry->d_parent; - if ((error = rpc_lookup_parent(path, &nd)) != 0) - return error; - dir = nd.dentry->d_inode; - down(&dir->i_sem); - dentry = lookup_hash(&nd.last, nd.dentry); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_release; - } rpc_depopulate(dentry); - error = __rpc_rmdir(dir, dentry); - dput(dentry); -out_release: - up(&dir->i_sem); - rpc_release_path(&nd); - return error; + + down(&parent->d_inode->i_sem); + if (dentry->d_inode) { + rpc_close_pipes(dentry->d_inode); + rpc_inode_setowner(dentry->d_inode, NULL); + simple_rmdir(parent->d_inode, dentry); + } + up(&parent->d_inode->i_sem); + + inode_dir_notify(parent->d_inode, DN_DELETE); + rpc_put_mount(); } struct dentry * -rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) +rpc_mkpipe(struct dentry *parent, char *name, void *private, + struct rpc_pipe_ops *ops, int flags) { - struct nameidata nd; + struct inode *dir = parent->d_inode; struct dentry *dentry; - struct inode *dir, *inode; + struct inode *inode; struct rpc_inode *rpci; + int error; + + error = rpc_get_mount(); + if (error) + return ERR_PTR(error); - dentry = rpc_lookup_negative(path, &nd); + down(&parent->d_inode->i_sem); + dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) - return dentry; - dir = nd.dentry->d_inode; - inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR); - if (!inode) - goto err_dput; + goto out_unlock; + if (dentry->d_inode) { + dentry = ERR_PTR(-EEXIST); + 
goto out_dput; + } + + inode = rpc_get_inode(parent->d_inode->i_sb, + S_IFSOCK | S_IRUSR | S_IWUSR); + if (!inode) { + dentry = ERR_PTR(-ENOMEM); + goto out_dput; + } + inode->i_ino = iunique(dir->i_sb, 100); inode->i_fop = &rpc_pipe_fops; - d_instantiate(dentry, inode); + rpci = RPC_I(inode); rpci->private = private; rpci->flags = flags; rpci->ops = ops; + + d_instantiate(dentry, inode); + dget(dentry); + up(&parent->d_inode->i_sem); + inode_dir_notify(dir, DN_CREATE); -out: - up(&dir->i_sem); - rpc_release_path(&nd); return dentry; -err_dput: + + out_dput: dput(dentry); - dentry = ERR_PTR(-ENOMEM); - printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n", - __FILE__, __FUNCTION__, path, -ENOMEM); - goto out; + out_unlock: + up(&parent->d_inode->i_sem); + rpc_put_mount(); + return dentry; } -int -rpc_unlink(char *path) +void +rpc_unlink(struct dentry *dentry) { - struct nameidata nd; - struct dentry *dentry; - struct inode *dir; - int error; + struct dentry *parent = dentry->d_parent; - if ((error = rpc_lookup_parent(path, &nd)) != 0) - return error; - dir = nd.dentry->d_inode; - down(&dir->i_sem); - dentry = lookup_hash(&nd.last, nd.dentry); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_release; - } - d_drop(dentry); + down(&parent->d_inode->i_sem); if (dentry->d_inode) { rpc_close_pipes(dentry->d_inode); rpc_inode_setowner(dentry->d_inode, NULL); - error = simple_unlink(dir, dentry); + simple_unlink(parent->d_inode, dentry); } - dput(dentry); - inode_dir_notify(dir, DN_DELETE); -out_release: - up(&dir->i_sem); - rpc_release_path(&nd); - return error; + up(&parent->d_inode->i_sem); + + inode_dir_notify(parent->d_inode, DN_DELETE); + rpc_put_mount(); } /* -- cgit v1.2.2 From f134585a7343d71f9be7f0cf97e2145f21dd10c6 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 23 Sep 2005 11:08:25 -0400 Subject: Revert "[PATCH] RPC,NFS: new rpc_pipefs patch" This reverts 17f4e6febca160a9f9dd4bdece9784577a2f4524 commit. 
--- net/sunrpc/auth_gss/auth_gss.c | 9 +- net/sunrpc/clnt.c | 53 +++----- net/sunrpc/rpc_pipe.c | 268 ++++++++++++++++++++++++++--------------- 3 files changed, 197 insertions(+), 133 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index bd2555139fa9..d2b08f16c257 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -87,6 +87,7 @@ struct gss_auth { struct list_head upcalls; struct rpc_clnt *client; struct dentry *dentry; + char path[48]; spinlock_t lock; }; @@ -689,8 +690,10 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) if (err) goto err_put_mech; - gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, - clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); + snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s", + clnt->cl_pathname, + gss_auth->mech->gm_name); + gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); if (IS_ERR(gss_auth->dentry)) { err = PTR_ERR(gss_auth->dentry); goto err_put_mech; @@ -715,7 +718,7 @@ gss_destroy(struct rpc_auth *auth) auth, auth->au_flavor); gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->dentry); + rpc_unlink(gss_auth->path); gss_mech_put(gss_auth->mech); rpcauth_free_credcache(auth); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 63bf591310e0..5a8f01d726e9 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -67,42 +67,26 @@ static u32 * call_verify(struct rpc_task *task); static int rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) { - static unsigned int clntid; - char name[128]; + static uint32_t clntid; int error; if (dir_name == NULL) return 0; - - retry_parent: - clnt->__cl_parent_dentry = rpc_mkdir(NULL, dir_name, NULL); - if (IS_ERR(clnt->__cl_parent_dentry)) { - error = PTR_ERR(clnt->__cl_parent_dentry); - if (error == -EEXIST) - goto retry_parent; /* XXX(hch): WTF? 
*/ - - printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", - dir_name, error); - return error; - } - - - retry_child: - snprintf(name, sizeof(name), "clnt%x", clntid++); - name[sizeof(name) - 1] = '\0'; - - clnt->cl_dentry = rpc_mkdir(clnt->__cl_parent_dentry, name, clnt); - if (IS_ERR(clnt->cl_dentry)) { + for (;;) { + snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), + "%s/clnt%x", dir_name, + (unsigned int)clntid++); + clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0'; + clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt); + if (!IS_ERR(clnt->cl_dentry)) + return 0; error = PTR_ERR(clnt->cl_dentry); - if (error == -EEXIST) - goto retry_child; - printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", - name, error); - rpc_rmdir(clnt->__cl_parent_dentry); - return error; + if (error != -EEXIST) { + printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", + clnt->cl_pathname, error); + return error; + } } - - return 0; } /* @@ -190,8 +174,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, return clnt; out_no_auth: - rpc_rmdir(clnt->cl_dentry); - rpc_rmdir(clnt->__cl_parent_dentry); + rpc_rmdir(clnt->cl_pathname); out_no_path: if (clnt->cl_server != clnt->cl_inline_name) kfree(clnt->cl_server); @@ -319,10 +302,8 @@ rpc_destroy_client(struct rpc_clnt *clnt) rpc_destroy_client(clnt->cl_parent); goto out_free; } - if (clnt->cl_dentry) - rpc_rmdir(clnt->cl_dentry); - if (clnt->__cl_parent_dentry) - rpc_rmdir(clnt->__cl_parent_dentry); + if (clnt->cl_pathname[0]) + rpc_rmdir(clnt->cl_pathname); if (clnt->cl_xprt) { xprt_destroy(clnt->cl_xprt); clnt->cl_xprt = NULL; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index b382809726d8..ded6c63f11ec 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -414,6 +414,38 @@ rpc_put_mount(void) simple_release_fs(&rpc_mount, &rpc_mount_count); } +static int +rpc_lookup_parent(char *path, struct nameidata *nd) +{ + if (path[0] == '\0') + return -ENOENT; + if (rpc_get_mount()) { + printk(KERN_WARNING "%s: %s failed to mount " + "pseudofilesystem \n", __FILE__, __FUNCTION__); + return -ENODEV; + } + nd->mnt = mntget(rpc_mount); + nd->dentry = dget(rpc_mount->mnt_root); + nd->last_type = LAST_ROOT; + nd->flags = LOOKUP_PARENT; + nd->depth = 0; + + if (path_walk(path, nd)) { + printk(KERN_WARNING "%s: %s failed to find path %s\n", + __FILE__, __FUNCTION__, path); + rpc_put_mount(); + return -ENOENT; + } + return 0; +} + +static void +rpc_release_path(struct nameidata *nd) +{ + path_release(nd); + rpc_put_mount(); +} + static struct inode * rpc_get_inode(struct super_block *sb, int mode) { @@ -518,149 +550,197 @@ out_bad: return -ENOMEM; } -struct dentry * -rpc_mkdir(struct dentry *parent, char *name, struct rpc_clnt *rpc_client) +static int +__rpc_mkdir(struct inode *dir, struct dentry *dentry) { - struct inode *dir; - struct dentry *dentry; struct inode *inode; + + inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR); + if (!inode) + goto out_err; + inode->i_ino = iunique(dir->i_sb, 100); + d_instantiate(dentry, inode); + dir->i_nlink++; + inode_dir_notify(dir, DN_CREATE); + rpc_get_mount(); + return 0; +out_err: + printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", + __FILE__, __FUNCTION__, dentry->d_name.name); + return -ENOMEM; +} + +static int +__rpc_rmdir(struct inode *dir, struct dentry *dentry) +{ int error; - if (!parent) - parent = rpc_mount->mnt_root; + shrink_dcache_parent(dentry); + if (dentry->d_inode) { + 
rpc_close_pipes(dentry->d_inode); + rpc_inode_setowner(dentry->d_inode, NULL); + } + if ((error = simple_rmdir(dir, dentry)) != 0) + return error; + if (!error) { + inode_dir_notify(dir, DN_DELETE); + d_drop(dentry); + rpc_put_mount(); + } + return 0; +} - dir = parent->d_inode; - - error = rpc_get_mount(); - if (error) - return ERR_PTR(error); +static struct dentry * +rpc_lookup_negative(char *path, struct nameidata *nd) +{ + struct dentry *dentry; + struct inode *dir; + int error; + if ((error = rpc_lookup_parent(path, nd)) != 0) + return ERR_PTR(error); + dir = nd->dentry->d_inode; down(&dir->i_sem); - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_hash(&nd->last, nd->dentry); if (IS_ERR(dentry)) - goto out_unlock; + goto out_err; if (dentry->d_inode) { + dput(dentry); dentry = ERR_PTR(-EEXIST); - goto out_dput; + goto out_err; } - - inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR); - if (!inode) - goto out_dput; - inode->i_ino = iunique(dir->i_sb, 100); - dir->i_nlink++; - RPC_I(dentry->d_inode)->private = rpc_client; - - d_instantiate(dentry, inode); - dget(dentry); + return dentry; +out_err: up(&dir->i_sem); + rpc_release_path(nd); + return dentry; +} - inode_dir_notify(dir, DN_CREATE); +struct dentry * +rpc_mkdir(char *path, struct rpc_clnt *rpc_client) +{ + struct nameidata nd; + struct dentry *dentry; + struct inode *dir; + int error; + + dentry = rpc_lookup_negative(path, &nd); + if (IS_ERR(dentry)) + return dentry; + dir = nd.dentry->d_inode; + if ((error = __rpc_mkdir(dir, dentry)) != 0) + goto err_dput; + RPC_I(dentry->d_inode)->private = rpc_client; error = rpc_populate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF); if (error) - goto out_depopulate; - - return dentry; - - out_depopulate: - rpc_rmdir(dentry); - out_dput: - dput(dentry); - out_unlock: + goto err_depopulate; +out: up(&dir->i_sem); - rpc_put_mount(); + rpc_release_path(&nd); return dentry; +err_depopulate: + rpc_depopulate(dentry); + __rpc_rmdir(dir, dentry); +err_dput: + dput(dentry); + printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n", + __FILE__, __FUNCTION__, path, error); + dentry = ERR_PTR(error); + goto out; } -void -rpc_rmdir(struct dentry *dentry) +int +rpc_rmdir(char *path) { - struct dentry *parent = dentry->d_parent; - - rpc_depopulate(dentry); + struct nameidata nd; + struct dentry *dentry; + struct inode *dir; + int error; - down(&parent->d_inode->i_sem); - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); - simple_rmdir(parent->d_inode, dentry); + if ((error = rpc_lookup_parent(path, &nd)) != 0) + return error; + dir = nd.dentry->d_inode; + down(&dir->i_sem); + dentry = lookup_hash(&nd.last, nd.dentry); + if (IS_ERR(dentry)) { + error = PTR_ERR(dentry); + goto out_release; } - up(&parent->d_inode->i_sem); - - inode_dir_notify(parent->d_inode, DN_DELETE); - rpc_put_mount(); + rpc_depopulate(dentry); + error = __rpc_rmdir(dir, dentry); + dput(dentry); +out_release: + up(&dir->i_sem); + rpc_release_path(&nd); + return error; } struct dentry * -rpc_mkpipe(struct dentry *parent, char *name, void *private, - struct rpc_pipe_ops *ops, int flags) +rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) { - struct inode *dir = parent->d_inode; + struct nameidata nd; struct dentry *dentry; - struct inode *inode; + struct inode *dir, *inode; struct rpc_inode *rpci; - int error; - - error = rpc_get_mount(); - if (error) - return ERR_PTR(error); - down(&parent->d_inode->i_sem); 
- dentry = lookup_one_len(name, parent, strlen(name)); + dentry = rpc_lookup_negative(path, &nd); if (IS_ERR(dentry)) - goto out_unlock; - if (dentry->d_inode) { - dentry = ERR_PTR(-EEXIST); - goto out_dput; - } - - inode = rpc_get_inode(parent->d_inode->i_sb, - S_IFSOCK | S_IRUSR | S_IWUSR); - if (!inode) { - dentry = ERR_PTR(-ENOMEM); - goto out_dput; - } - + return dentry; + dir = nd.dentry->d_inode; + inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR); + if (!inode) + goto err_dput; inode->i_ino = iunique(dir->i_sb, 100); inode->i_fop = &rpc_pipe_fops; - + d_instantiate(dentry, inode); rpci = RPC_I(inode); rpci->private = private; rpci->flags = flags; rpci->ops = ops; - - d_instantiate(dentry, inode); - dget(dentry); - up(&parent->d_inode->i_sem); - inode_dir_notify(dir, DN_CREATE); +out: + up(&dir->i_sem); + rpc_release_path(&nd); return dentry; - - out_dput: +err_dput: dput(dentry); - out_unlock: - up(&parent->d_inode->i_sem); - rpc_put_mount(); - return dentry; + dentry = ERR_PTR(-ENOMEM); + printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n", + __FILE__, __FUNCTION__, path, -ENOMEM); + goto out; } -void -rpc_unlink(struct dentry *dentry) +int +rpc_unlink(char *path) { - struct dentry *parent = dentry->d_parent; + struct nameidata nd; + struct dentry *dentry; + struct inode *dir; + int error; - down(&parent->d_inode->i_sem); + if ((error = rpc_lookup_parent(path, &nd)) != 0) + return error; + dir = nd.dentry->d_inode; + down(&dir->i_sem); + dentry = lookup_hash(&nd.last, nd.dentry); + if (IS_ERR(dentry)) { + error = PTR_ERR(dentry); + goto out_release; + } + d_drop(dentry); if (dentry->d_inode) { rpc_close_pipes(dentry->d_inode); rpc_inode_setowner(dentry->d_inode, NULL); - simple_unlink(parent->d_inode, dentry); + error = simple_unlink(dir, dentry); } - up(&parent->d_inode->i_sem); - - inode_dir_notify(parent->d_inode, DN_DELETE); - rpc_put_mount(); + dput(dentry); + inode_dir_notify(dir, DN_DELETE); +out_release: + up(&dir->i_sem); + rpc_release_path(&nd); + return error; } /* -- cgit v1.2.2 From 6cd7525a00f3b926e8bd2e402954ed3e09a8e924 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 22 Sep 2005 21:24:59 -0400 Subject: SUNRPC: fix bug in patch "portmapper doesn't need a reserved port" The in-kernel portmapper does in fact need a reserved port when registering new services, but not when performing bind queries. Ensure that we distinguish between the two cases. 
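A minimal sketch of the resulting calling convention (both call sites appear verbatim in the pmap_clnt.c hunks below; the new last argument is the privileged flag):

        /* bind query: a reserved source port is not required */
        pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);

        /* service registration: keep the default reserved port */
        pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);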
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/pmap_clnt.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index d8e3f220002b..a398575f94b8 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c @@ -26,7 +26,7 @@ #define PMAP_GETPORT 3 static struct rpc_procinfo pmap_procedures[]; -static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int); +static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int); static void pmap_getport_done(struct rpc_task *); static struct rpc_program pmap_program; static DEFINE_SPINLOCK(pmap_lock); @@ -65,7 +65,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt) map->pm_binding = 1; spin_unlock(&pmap_lock); - pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot); + pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0); if (IS_ERR(pmap_clnt)) { task->tk_status = PTR_ERR(pmap_clnt); goto bailout; @@ -112,7 +112,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); - pmap_clnt = pmap_create(hostname, sin, prot); + pmap_clnt = pmap_create(hostname, sin, prot, 0); if (IS_ERR(pmap_clnt)) return PTR_ERR(pmap_clnt); @@ -171,7 +171,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) sin.sin_family = AF_INET; sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP); + pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); if (IS_ERR(pmap_clnt)) { error = PTR_ERR(pmap_clnt); dprintk("RPC: couldn't create pmap client. Error = %d\n", error); @@ -198,7 +198,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) } static struct rpc_clnt * -pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) +pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged) { struct rpc_xprt *xprt; struct rpc_clnt *clnt; @@ -208,7 +208,8 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; xprt->addr.sin_port = htons(RPC_PMAP_PORT); - xprt->resvport = 0; + if (!privileged) + xprt->resvport = 0; /* printk("pmap: create clnt\n"); */ clnt = rpc_new_client(xprt, hostname, -- cgit v1.2.2 From 7c254d3dba0fae124b1d33f784ca23572ac386b0 Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Mon, 3 Oct 2005 09:50:40 -0500 Subject: This will move the ieee80211_is_ofdm_rate function to the ieee80211.h header, and I also added the ieee80211_is_cck_rate counterpart. Various drivers currently create their own version of these functions, but I guess the ieee80211 stack is the best place to provide such routines.
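The header-side helpers themselves are outside the net/-limited diff below, which only removes the old copy from ieee80211_rx.c. As an illustration only, a CCK counterpart would presumably mirror the OFDM helper being removed here; the IEEE80211_CCK_RATE_* names in this sketch are assumptions, not taken from this patch:

        static inline int ieee80211_is_cck_rate(u8 rate)
        {
                switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
                case IEEE80211_CCK_RATE_1MB:
                case IEEE80211_CCK_RATE_2MB:
                case IEEE80211_CCK_RATE_5MB:
                case IEEE80211_CCK_RATE_11MB:
                        return 1;
                }
                return 0;
        }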
Signed-off-by: Ivo van Doorn Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_rx.c | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index fcf05bf677b8..2f4b16c1c143 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -1020,22 +1020,6 @@ static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct iee /***************************************************/ -static inline int ieee80211_is_ofdm_rate(u8 rate) -{ - switch (rate & ~IEEE80211_BASIC_RATE_MASK) { - case IEEE80211_OFDM_RATE_6MB: - case IEEE80211_OFDM_RATE_9MB: - case IEEE80211_OFDM_RATE_12MB: - case IEEE80211_OFDM_RATE_18MB: - case IEEE80211_OFDM_RATE_24MB: - case IEEE80211_OFDM_RATE_36MB: - case IEEE80211_OFDM_RATE_48MB: - case IEEE80211_OFDM_RATE_54MB: - return 1; - } - return 0; -} - static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_network *network, -- cgit v1.2.2 From e846cbb11245e648983b50349a1c715202d5ccf0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Oct 2005 10:02:14 -0500 Subject: Fix implicit nocast warnings in ieee80211 code: net/ieee80211/ieee80211_tx.c:215:9: warning: implicit cast to nocast type Signed-off-by: Randy Dunlap Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 8d87897d7eb7..e860777ab8d3 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -187,7 +187,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) } static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, - int gfp_mask) + unsigned int gfp_mask) { struct ieee80211_txb *txb; int i; -- cgit v1.2.2 From ff9e00f1b09d594004f91700a371870f729ffc02 Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Mon, 3 Oct 2005 10:19:25 -0500 Subject: Currently the info_element is parsed by 2 separate functions, which results in a lot of duplicate code. This will move the parsing stage into a separate function.
Signed-off-by: Ivo van Doorn Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_rx.c | 267 ++++++++++++++++--------------------------- 1 file changed, 99 insertions(+), 168 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 2f4b16c1c143..3bf04d6d2b13 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -917,158 +917,23 @@ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element return rc; } -static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response - *frame, struct ieee80211_rx_stats *stats) -{ - struct ieee80211_network network_resp; - struct ieee80211_network *network = &network_resp; - struct ieee80211_info_element *info_element; - struct net_device *dev = ieee->dev; - u16 left; - - network->flags = 0; - network->qos_data.active = 0; - network->qos_data.supported = 0; - network->qos_data.param_count = 0; - network->qos_data.old_param_count = 0; - - //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); - network->atim_window = le16_to_cpu(frame->aid); - network->listen_interval = le16_to_cpu(frame->status); - - info_element = frame->info_element; - left = stats->len - sizeof(*frame); - - while (left >= sizeof(struct ieee80211_info_element)) { - if (sizeof(struct ieee80211_info_element) + - info_element->len > left) { - IEEE80211_DEBUG_QOS("ASSOC RESP: parse failed: " - "info_element->len + 2 > left : " - "info_element->len+2=%zd left=%d, id=%d.\n", - info_element->len + - sizeof(struct - ieee80211_info_element), - left, info_element->id); - return 1; - } - - switch (info_element->id) { - case MFIE_TYPE_SSID: - if (ieee80211_is_empty_essid(info_element->data, - info_element->len)) { - network->flags |= NETWORK_EMPTY_ESSID; - break; - } - - network->ssid_len = min(info_element->len, - (u8) IW_ESSID_MAX_SIZE); - memcpy(network->ssid, info_element->data, - network->ssid_len); - if (network->ssid_len < IW_ESSID_MAX_SIZE) - memset(network->ssid + network->ssid_len, 0, - IW_ESSID_MAX_SIZE - network->ssid_len); - - IEEE80211_DEBUG_QOS("MFIE_TYPE_SSID: '%s' len=%d.\n", - network->ssid, network->ssid_len); - break; - - case MFIE_TYPE_TIM: - IEEE80211_DEBUG_QOS("MFIE_TYPE_TIM: ignored\n"); - break; - - case MFIE_TYPE_IBSS_SET: - IEEE80211_DEBUG_QOS("MFIE_TYPE_IBSS_SET: ignored\n"); - break; - - case MFIE_TYPE_CHALLENGE: - IEEE80211_DEBUG_QOS("MFIE_TYPE_CHALLENGE: ignored\n"); - break; - - case MFIE_TYPE_GENERIC: - IEEE80211_DEBUG_QOS("MFIE_TYPE_GENERIC: %d bytes\n", - info_element->len); - ieee80211_parse_qos_info_param_IE(info_element, - network); - break; - - case MFIE_TYPE_RSN: - IEEE80211_DEBUG_QOS("MFIE_TYPE_RSN: %d bytes\n", - info_element->len); - break; - - case MFIE_TYPE_QOS_PARAMETER: - printk("QoS Error need to parse QOS_PARAMETER IE\n"); - break; - - default: - IEEE80211_DEBUG_QOS("unsupported IE %d\n", - info_element->id); - break; - } - - left -= sizeof(struct ieee80211_info_element) + - info_element->len; - info_element = (struct ieee80211_info_element *) - &info_element->data[info_element->len]; - } - - if (ieee->handle_assoc_response != NULL) - ieee->handle_assoc_response(dev, frame, network); - - return 0; -} - -/***************************************************/ - -static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response - *beacon, - struct ieee80211_network *network, - struct ieee80211_rx_stats *stats) +static int ieee80211_parse_info_param(struct ieee80211_info_element 
*info_element, + u16 length, struct ieee80211_network *network) { + u8 i; #ifdef CONFIG_IEEE80211_DEBUG char rates_str[64]; char *p; #endif - struct ieee80211_info_element *info_element; - u16 left; - u8 i; - network->qos_data.active = 0; - network->qos_data.supported = 0; - network->qos_data.param_count = 0; - /* Pull out fixed field data */ - memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); - network->capability = le16_to_cpu(beacon->capability); - network->last_scanned = jiffies; - network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); - network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); - network->beacon_interval = le16_to_cpu(beacon->beacon_interval); - /* Where to pull this? beacon->listen_interval; */ - network->listen_interval = 0x0A; - network->rates_len = network->rates_ex_len = 0; - network->last_associate = 0; - network->ssid_len = 0; - network->flags = 0; - network->atim_window = 0; - network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? - 0x3 : 0x0; - - if (stats->freq == IEEE80211_52GHZ_BAND) { - /* for A band (No DS info) */ - network->channel = stats->received_channel; - } else - network->flags |= NETWORK_HAS_CCK; - - network->wpa_ie_len = 0; - network->rsn_ie_len = 0; - - info_element = beacon->info_element; - left = stats->len - sizeof(*beacon); - while (left >= sizeof(*info_element)) { - if (sizeof(*info_element) + info_element->len > left) { - IEEE80211_DEBUG_SCAN - ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n", - info_element->len + sizeof(*info_element), left); + while (length >= sizeof(*info_element)) { + if (sizeof(*info_element) + info_element->len > length) { + IEEE80211_DEBUG_MGMT("Info elem: parse failed: " + "info_element->len + 2 > left : " + "info_element->len+2=%zd left=%d, id=%d.\n", + info_element->len + + sizeof(*info_element), + length, info_element->id); return 1; } @@ -1088,8 +953,8 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); - IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n", - network->ssid, network->ssid_len); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", + network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: @@ -1115,7 +980,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i } } - IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; @@ -1142,47 +1007,46 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i } } - IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); - if (stats->freq == IEEE80211_24GHZ_BAND) - network->channel = info_element->data[0]; + network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n"); break; case MFIE_TYPE_TIM: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n"); break; case 
MFIE_TYPE_ERP_INFO: network->erp_value = info_element->data[0]; - IEEE80211_DEBUG_SCAN("MFIE_TYPE_ERP_SET: %d\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; case MFIE_TYPE_IBSS_SET: network->atim_window = info_element->data[0]; - IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: %d\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", network->atim_window); break; case MFIE_TYPE_CHALLENGE: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); if (!ieee80211_parse_qos_info_param_IE(info_element, network)) @@ -1201,7 +1065,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i break; case MFIE_TYPE_RSN: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); @@ -1210,21 +1074,88 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i break; case MFIE_TYPE_QOS_PARAMETER: - printk(KERN_ERR - "QoS Error need to parse QOS_PARAMETER IE\n"); + printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n"); break; default: - IEEE80211_DEBUG_SCAN("unsupported IE %d\n", - info_element->id); + IEEE80211_DEBUG_MGMT("unsupported IE %d\n", + info_element->id); break; } - left -= sizeof(*info_element) + info_element->len; - info_element = (struct ieee80211_info_element *) - &info_element->data[info_element->len]; + length -= sizeof(*info_element) + info_element->len; + info_element = (struct ieee80211_info_element *) &info_element->data[info_element->len]; } + return 0; +} + +static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response + *frame, struct ieee80211_rx_stats *stats) +{ + struct ieee80211_network network_resp; + struct ieee80211_network *network = &network_resp; + struct net_device *dev = ieee->dev; + + network->flags = 0; + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; + + //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); + network->atim_window = le16_to_cpu(frame->aid); + network->listen_interval = le16_to_cpu(frame->status); + + if(ieee80211_parse_info_param(frame->info_element, stats->len - sizeof(*frame), network)) + return 1; + + if (ieee->handle_assoc_response != NULL) + ieee->handle_assoc_response(dev, frame, network); + + return 0; +} + +/***************************************************/ + +static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response + *beacon, + struct ieee80211_network *network, + struct ieee80211_rx_stats *stats) +{ + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + + /* Pull out fixed field data */ + memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); + network->capability = le16_to_cpu(beacon->capability); + network->last_scanned = jiffies; + network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); + network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); + network->beacon_interval = le16_to_cpu(beacon->beacon_interval); + /* Where to pull this? 
beacon->listen_interval; */ + network->listen_interval = 0x0A; + network->rates_len = network->rates_ex_len = 0; + network->last_associate = 0; + network->ssid_len = 0; + network->flags = 0; + network->atim_window = 0; + network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? + 0x3 : 0x0; + + if (stats->freq == IEEE80211_52GHZ_BAND) { + /* for A band (No DS info) */ + network->channel = stats->received_channel; + } else + network->flags |= NETWORK_HAS_CCK; + + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; + + if(ieee80211_parse_info_param(beacon->info_element, stats->len - sizeof(*beacon), network)) + return 1; + network->mode = 0; if (stats->freq == IEEE80211_52GHZ_BAND) network->mode = IEEE_A; -- cgit v1.2.2 From c1bda44a4aedf13251b2704e6e67afb4909195c8 Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Mon, 3 Oct 2005 10:20:47 -0500 Subject: When an assoc_resp is received the network structure is not completely initialized which can cause problems for drivers that expect the network structure to be completely filled in. This patch will make sure the network is filled in as much as possible. Signed-off-by: Ivo van Doorn Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_rx.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 3bf04d6d2b13..8d87d66c2a34 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -1106,10 +1106,41 @@ static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct iee //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); network->atim_window = le16_to_cpu(frame->aid); network->listen_interval = le16_to_cpu(frame->status); + memcpy(network->bssid, frame->header.addr3, ETH_ALEN); + network->capability = le16_to_cpu(frame->capability); + network->last_scanned = jiffies; + network->rates_len = network->rates_ex_len = 0; + network->last_associate = 0; + network->ssid_len = 0; + network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; + + if (stats->freq == IEEE80211_52GHZ_BAND) { + /* for A band (No DS info) */ + network->channel = stats->received_channel; + } else + network->flags |= NETWORK_HAS_CCK; + + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; if(ieee80211_parse_info_param(frame->info_element, stats->len - sizeof(*frame), network)) return 1; + network->mode = 0; + if (stats->freq == IEEE80211_52GHZ_BAND) + network->mode = IEEE_A; + else { + if (network->flags & NETWORK_HAS_OFDM) + network->mode |= IEEE_G; + if (network->flags & NETWORK_HAS_CCK) + network->mode |= IEEE_B; + } + + if (ieee80211_is_empty_essid(network->ssid, network->ssid_len)) + network->flags |= NETWORK_EMPTY_ESSID; + + memcpy(&network->stats, stats, sizeof(network->stats)); + if (ieee->handle_assoc_response != NULL) ieee->handle_assoc_response(dev, frame, network); @@ -1126,6 +1157,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); -- cgit v1.2.2 From ff0037b259e8b47843176142131844bc80fd2887 Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Mon, 3 Oct 2005 10:23:42 -0500 Subject: Lindent and trailing whitespace script executed ieee80211 subsystem Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_rx.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 8d87d66c2a34..6b005cb0caa0 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -917,8 +917,9 @@ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element return rc; } -static int ieee80211_parse_info_param(struct ieee80211_info_element *info_element, - u16 length, struct ieee80211_network *network) +static int ieee80211_parse_info_param(struct ieee80211_info_element + *info_element, u16 length, + struct ieee80211_network *network) { u8 i; #ifdef CONFIG_IEEE80211_DEBUG @@ -929,11 +930,11 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element *info_elemen while (length >= sizeof(*info_element)) { if (sizeof(*info_element) + info_element->len > length) { IEEE80211_DEBUG_MGMT("Info elem: parse failed: " - "info_element->len + 2 > left : " - "info_element->len+2=%zd left=%d, id=%d.\n", - info_element->len + - sizeof(*info_element), - length, info_element->id); + "info_element->len + 2 > left : " + "info_element->len+2=%zd left=%d, id=%d.\n", + info_element->len + + sizeof(*info_element), + length, info_element->id); return 1; } @@ -954,7 +955,7 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element *info_elemen IW_ESSID_MAX_SIZE - network->ssid_len); IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", - network->ssid, network->ssid_len); + network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: @@ -1074,17 +1075,20 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element *info_elemen break; case MFIE_TYPE_QOS_PARAMETER: - printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n"); + printk(KERN_ERR + "QoS Error need to parse QOS_PARAMETER IE\n"); break; default: IEEE80211_DEBUG_MGMT("unsupported IE %d\n", - info_element->id); + info_element->id); break; } length -= sizeof(*info_element) + info_element->len; - info_element = (struct ieee80211_info_element *) 
&info_element->data[info_element->len]; + info_element = + (struct ieee80211_info_element *)&info_element-> + data[info_element->len]; } return 0; @@ -1112,7 +1116,8 @@ static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct iee network->rates_len = network->rates_ex_len = 0; network->last_associate = 0; network->ssid_len = 0; - network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0; + network->erp_value = + (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0; if (stats->freq == IEEE80211_52GHZ_BAND) { /* for A band (No DS info) */ @@ -1123,7 +1128,8 @@ static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct iee network->wpa_ie_len = 0; network->rsn_ie_len = 0; - if(ieee80211_parse_info_param(frame->info_element, stats->len - sizeof(*frame), network)) + if (ieee80211_parse_info_param + (frame->info_element, stats->len - sizeof(*frame), network)) return 1; network->mode = 0; @@ -1185,7 +1191,8 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct i network->wpa_ie_len = 0; network->rsn_ie_len = 0; - if(ieee80211_parse_info_param(beacon->info_element, stats->len - sizeof(*beacon), network)) + if (ieee80211_parse_info_param + (beacon->info_element, stats->len - sizeof(*beacon), network)) return 1; network->mode = 0; -- cgit v1.2.2 From ea635a517e350eb03ab5f01618417f31b82a9a4d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 6 Oct 2005 23:12:58 -0400 Subject: SUNRPC: Retry rpcbind requests if the server's portmapper isn't up After a server crash/reboot, rebinding should always retry, otherwise requests on "hard" mounts will fail when they shouldn't. Test plan: Run a lock-intensive workload against a server while rebooting the server repeatedly. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 5a8f01d726e9..a5f7029b1daa 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -759,7 +759,8 @@ call_bind_status(struct rpc_task *task) case -EACCES: dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", task->tk_pid); - break; + rpc_delay(task, 3*HZ); + goto retry_bind; case -ETIMEDOUT: dprintk("RPC: %4d rpcbind request timed out\n", task->tk_pid); -- cgit v1.2.2 From 5e5ce5be6f0161d2a069a4f8a1154fe639c5c02f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 18 Oct 2005 14:20:11 -0700 Subject: RPC: allow call_encode() to delay transmission of an RPC call. Currently, call_encode will cause the entire RPC call to abort if it returns an error. This is unnecessarily rigid, and gets in the way of attempts to allow the NFSv4 layer to order RPC calls that carry sequence ids. 
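For illustration, here is a minimal self-contained C model of the control flow this change gives call_transmit(): an encode error now releases the transport write lock and leaves the task eligible to retry later, instead of aborting the whole RPC call. Everything prefixed model_ is a stand-in invented for this sketch, not the kernel symbols used in the diff that follows.

#include <errno.h>
#include <stdio.h>

struct rpc_task {
	int tk_status;       /* 0 on success, negative errno on failure */
	int rq_bytes_sent;   /* 0 means nothing has been transmitted yet */
};

/* Stand-ins for call_encode(), xprt_abort_transmit() and xprt_transmit(). */
static void model_encode(struct rpc_task *task)
{
	/* Pretend the encode step failed, e.g. no pages available for wrapping. */
	task->tk_status = -ENOMEM;
}

static void model_abort_transmit(struct rpc_task *task)
{
	(void)task;
	printf("encode failed: release xprt write lock, task may retry later\n");
}

static void model_transmit(struct rpc_task *task)
{
	(void)task;
	printf("xprt_transmit\n");
}

static void model_call_transmit(struct rpc_task *task)
{
	if (task->rq_bytes_sent == 0) {
		model_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Previously fatal (rpc_exit); now just back off. */
			model_abort_transmit(task);
			return;
		}
	}
	model_transmit(task);
}

int main(void)
{
	struct rpc_task task = { .tk_status = 0, .rq_bytes_sent = 0 };

	model_call_transmit(&task);
	return 0;
}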
Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 23 ++++++++++++----------- net/sunrpc/xprt.c | 8 ++++++++ 2 files changed, 20 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index a5f7029b1daa..534274056329 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -678,13 +678,11 @@ call_allocate(struct rpc_task *task) static void call_encode(struct rpc_task *task) { - struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct xdr_buf *sndbuf = &req->rq_snd_buf; struct xdr_buf *rcvbuf = &req->rq_rcv_buf; unsigned int bufsiz; kxdrproc_t encode; - int status; u32 *p; dprintk("RPC: %4d call_encode (status %d)\n", @@ -712,12 +710,9 @@ call_encode(struct rpc_task *task) rpc_exit(task, -EIO); return; } - if (encode && (status = rpcauth_wrap_req(task, encode, req, p, - task->tk_msg.rpc_argp)) < 0) { - printk(KERN_WARNING "%s: can't encode arguments: %d\n", - clnt->cl_protname, -status); - rpc_exit(task, status); - } + if (encode != NULL) + task->tk_status = rpcauth_wrap_req(task, encode, req, p, + task->tk_msg.rpc_argp); } /* @@ -865,10 +860,12 @@ call_transmit(struct rpc_task *task) if (task->tk_status != 0) return; /* Encode here so that rpcsec_gss can use correct sequence number. */ - if (!task->tk_rqstp->rq_bytes_sent) + if (task->tk_rqstp->rq_bytes_sent == 0) { call_encode(task); - if (task->tk_status < 0) - return; + /* Did the encode result in an error condition? */ + if (task->tk_status != 0) + goto out_nosend; + } xprt_transmit(task); if (task->tk_status < 0) return; @@ -876,6 +873,10 @@ call_transmit(struct rpc_task *task) task->tk_action = NULL; rpc_wake_up_task(task); } + return; +out_nosend: + /* release socket write lock before attempting to handle error */ + xprt_abort_transmit(task); } /* diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 215be0d0ef6b..1ba55dc38b7a 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -709,6 +709,14 @@ out_unlock: return err; } +void +xprt_abort_transmit(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + + xprt_release_write(xprt, task); +} + /** * xprt_transmit - send an RPC request on a transport * @task: controlling RPC task -- cgit v1.2.2 From 757d18faee58aa4c43bcaf9a44decf17fa68adeb Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Mon, 10 Oct 2005 19:16:53 +0200 Subject: [PATCH] ieee80211: division by zero fix This fixes division by zero bug in ieee80211_wx_get_scan(). 
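For illustration, the quality scaling expression and the new guard can be modelled as a small standalone C function. The arithmetic mirrors the diff that follows, but the function and variable names here are simplified stand-ins rather than the kernel code.

#include <stdio.h>

/* Mirrors the scaled-quality arithmetic in the diff below, with the new
 * guard for perfect_rssi == worst_rssi, which previously divided by zero. */
static int scan_quality(int perfect_rssi, int worst_rssi, int rssi)
{
	int delta = perfect_rssi - worst_rssi;
	int qual;

	if (delta == 0)          /* degenerate configuration the patch guards */
		return 100;

	qual = (100 * delta * delta -
		(perfect_rssi - rssi) *
		(15 * delta + 62 * (perfect_rssi - rssi))) /
	       (delta * delta);

	if (qual > 100)
		qual = 100;
	else if (qual < 1)
		qual = 1;
	return qual;
}

int main(void)
{
	printf("qual=%d\n", scan_quality(-20, -85, -60));
	printf("qual=%d\n", scan_quality(-50, -50, -60)); /* previously divided by zero */
	return 0;
}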
Signed-off-by: Jiri Benc Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_wx.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index ee7a70a13250..1ce7af9bec35 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -152,15 +152,18 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, iwe.u.qual.level = 0; } else { iwe.u.qual.level = network->stats.rssi; - iwe.u.qual.qual = - (100 * - (ieee->perfect_rssi - ieee->worst_rssi) * - (ieee->perfect_rssi - ieee->worst_rssi) - - (ieee->perfect_rssi - network->stats.rssi) * - (15 * (ieee->perfect_rssi - ieee->worst_rssi) + - 62 * (ieee->perfect_rssi - network->stats.rssi))) / - ((ieee->perfect_rssi - ieee->worst_rssi) * - (ieee->perfect_rssi - ieee->worst_rssi)); + if (ieee->perfect_rssi == ieee->worst_rssi) + iwe.u.qual.qual = 100; + else + iwe.u.qual.qual = + (100 * + (ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi) - + (ieee->perfect_rssi - network->stats.rssi) * + (15 * (ieee->perfect_rssi - ieee->worst_rssi) + + 62 * (ieee->perfect_rssi - network->stats.rssi))) / + ((ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi)); if (iwe.u.qual.qual > 100) iwe.u.qual.qual = 100; else if (iwe.u.qual.qual < 1) -- cgit v1.2.2 From 747c5534c9a6da4aa87e7cdc2209ea98ea27f381 Mon Sep 17 00:00:00 2001 From: Steve Dickson Date: Tue, 18 Oct 2005 23:19:40 -0700 Subject: RPC: stops the release_pipe() funtion from being called twice This patch stops the release_pipe() funtion from being called twice by invalidating the ops pointer in the rpc_inode when rpc_pipe_release() is called. Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ded6c63f11ec..649d609e7d94 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -177,6 +177,8 @@ rpc_pipe_release(struct inode *inode, struct file *filp) __rpc_purge_upcall(inode, -EPIPE); if (rpci->ops->release_pipe) rpci->ops->release_pipe(inode); + if (!rpci->nreaders && !rpci->nwriters) + rpci->ops = NULL; out: up(&inode->i_sem); return 0; -- cgit v1.2.2 From 293f1eb551a77fe5c8956a559a3c0baea95cd9bc Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:54:37 -0400 Subject: SUNRPC: Add support for privacy to generic gss-api code. Add support for privacy to generic gss-api code. This is dead code until we have both a mechanism that supports privacy and code in the client or server that uses it. Signed-off-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_mech_switch.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 58aeaddd8c79..06d97cb3481a 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -276,6 +276,28 @@ gss_verify_mic(struct gss_ctx *context_handle, qstate); } +u32 +gss_wrap(struct gss_ctx *ctx_id, + u32 qop, + int offset, + struct xdr_buf *buf, + struct page **inpages) +{ + return ctx_id->mech_type->gm_ops + ->gss_wrap(ctx_id, qop, offset, buf, inpages); +} + +u32 +gss_unwrap(struct gss_ctx *ctx_id, + u32 *qop, + int offset, + struct xdr_buf *buf) +{ + return ctx_id->mech_type->gm_ops + ->gss_unwrap(ctx_id, qop, offset, buf); +} + + /* gss_delete_sec_context: free all resources associated with context_handle. * Note this differs from the RFC 2744-specified prototype in that we don't * bother returning an output token, since it would never be used anyway. */ -- cgit v1.2.2 From ead5e1c26fdcd969cf40c49cb0589d56879d240d Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:54:43 -0400 Subject: SUNRPC: Provide a callback to allow free pages allocated during xdr encoding For privacy, we need to allocate pages to store the encrypted data (passed in pages can't be used without the risk of corrupting data in the page cache). So we need a way to free that memory after the request has been transmitted. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 1ba55dc38b7a..6dda3860351f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -839,6 +839,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) req->rq_task = task; req->rq_xprt = xprt; req->rq_xid = xprt_alloc_xid(xprt); + req->rq_release_snd_buf = NULL; dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, req, ntohl(req->rq_xid)); } @@ -867,6 +868,8 @@ void xprt_release(struct rpc_task *task) xprt->last_used + xprt->idle_timeout); spin_unlock_bh(&xprt->transport_lock); task->tk_rqstp = NULL; + if (req->rq_release_snd_buf) + req->rq_release_snd_buf(req); memset(req, 0, sizeof(*req)); /* mark unused */ dprintk("RPC: %4d release request %p\n", task->tk_pid, req); -- cgit v1.2.2 From f3680312a737355ddf35c1b68af25e384d7ef0a8 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:54:48 -0400 Subject: SUNRPC: Retry wrap in case of memory allocation failure. For privacy we need to allocate extra pages to hold encrypted page data when wrapping requests. This allocation may fail, and we handle that case by waiting and retrying. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 534274056329..702ede309b06 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -710,9 +710,16 @@ call_encode(struct rpc_task *task) rpc_exit(task, -EIO); return; } - if (encode != NULL) - task->tk_status = rpcauth_wrap_req(task, encode, req, p, - task->tk_msg.rpc_argp); + if (encode == NULL) + return; + + task->tk_status = rpcauth_wrap_req(task, encode, req, p, + task->tk_msg.rpc_argp); + if (task->tk_status == -ENOMEM) { + /* XXX: Is this sane? 
*/ + rpc_delay(task, 3*HZ); + task->tk_status = -EAGAIN; + } } /* -- cgit v1.2.2 From 24b2605becc10ca63c4c30808fa59a8abbf68727 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:54:53 -0400 Subject: RPCSEC_GSS: cleanup au_rslack calculation Various xdr encode routines use au_rslack to guess where the reply argument will end up, so we can set up the xdr_buf to recieve data into the right place for zero copy. Currently we calculate the au_rslack estimate when we check the verifier. Normally this only depends on the verifier size. In the integrity case we add a few bytes to allow for a length and sequence number. It's a bit simpler to calculate only the verifier size when we check the verifier, and delay the full calculation till we unwrap. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index d2b08f16c257..dc95b797ca65 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -886,8 +886,6 @@ static u32 * gss_validate(struct rpc_task *task, u32 *p) { struct rpc_cred *cred = task->tk_msg.rpc_cred; - struct gss_cred *gss_cred = container_of(cred, struct gss_cred, - gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); u32 seq, qop_state; struct kvec iov; @@ -915,18 +913,9 @@ gss_validate(struct rpc_task *task, u32 *p) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat) goto out_bad; - switch (gss_cred->gc_service) { - case RPC_GSS_SVC_NONE: - /* verifier data, flavor, length: */ - task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2; - break; - case RPC_GSS_SVC_INTEGRITY: - /* verifier data, flavor, length, length, sequence number: */ - task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4; - break; - case RPC_GSS_SVC_PRIVACY: - goto out_bad; - } + /* We leave it to unwrap to calculate au_rslack. For now we just + * calculate the length of the verifier: */ + task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; gss_put_ctx(ctx); dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n", task->tk_pid); @@ -1067,6 +1056,7 @@ gss_unwrap_resp(struct rpc_task *task, struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); + u32 *savedp = p; int status = -EIO; if (ctx->gc_proc != RPC_GSS_PROC_DATA) @@ -1082,6 +1072,8 @@ gss_unwrap_resp(struct rpc_task *task, case RPC_GSS_SVC_PRIVACY: break; } + /* take into account extra slack for integrity and privacy cases: */ + task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp); out_decode: status = decode(rqstp, p, obj); out: -- cgit v1.2.2 From 2d2da60c63b67174add32f06e8d54c3a0c5cd9cf Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:54:58 -0400 Subject: RPCSEC_GSS: client-side privacy support Add the code to the client side to handle privacy. This is dead code until we actually add privacy support to krb5. Signed-off-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 149 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 148 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dc95b797ca65..5e4872058ec7 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -975,6 +976,114 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } +static void +priv_release_snd_buf(struct rpc_rqst *rqstp) +{ + int i; + + for (i=0; i < rqstp->rq_enc_pages_num; i++) + __free_page(rqstp->rq_enc_pages[i]); + kfree(rqstp->rq_enc_pages); +} + +static int +alloc_enc_pages(struct rpc_rqst *rqstp) +{ + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; + int first, last, i; + + if (snd_buf->page_len == 0) { + rqstp->rq_enc_pages_num = 0; + return 0; + } + + first = snd_buf->page_base >> PAGE_CACHE_SHIFT; + last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; + rqstp->rq_enc_pages_num = last - first + 1 + 1; + rqstp->rq_enc_pages + = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), + GFP_NOFS); + if (!rqstp->rq_enc_pages) + goto out; + for (i=0; i < rqstp->rq_enc_pages_num; i++) { + rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); + if (rqstp->rq_enc_pages[i] == NULL) + goto out_free; + } + rqstp->rq_release_snd_buf = priv_release_snd_buf; + return 0; +out_free: + for (i--; i >= 0; i--) { + __free_page(rqstp->rq_enc_pages[i]); + } +out: + return -EAGAIN; +} + +static inline int +gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, + kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj) +{ + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; + u32 offset; + u32 maj_stat; + int status; + u32 *opaque_len; + struct page **inpages; + int first; + int pad; + struct kvec *iov; + char *tmp; + + opaque_len = p++; + offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; + *p++ = htonl(rqstp->rq_seqno); + + status = encode(rqstp, p, obj); + if (status) + return status; + + status = alloc_enc_pages(rqstp); + if (status) + return status; + first = snd_buf->page_base >> PAGE_CACHE_SHIFT; + inpages = snd_buf->pages + first; + snd_buf->pages = rqstp->rq_enc_pages; + snd_buf->page_base -= first << PAGE_CACHE_SHIFT; + /* Give the tail its own page, in case we need extra space in the + * head when wrapping: */ + if (snd_buf->page_len || snd_buf->tail[0].iov_len) { + tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); + memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); + snd_buf->tail[0].iov_base = tmp; + } + maj_stat = gss_wrap(ctx->gc_gss_ctx, GSS_C_QOP_DEFAULT, offset, + snd_buf, inpages); + /* RPC_SLACK_SPACE should prevent this ever happening: */ + BUG_ON(snd_buf->len > snd_buf->buflen); + status = -EIO; + /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was + * done anyway, so it's safe to put the request on the wire: */ + if (maj_stat == GSS_S_CONTEXT_EXPIRED) + cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + else if (maj_stat) + return status; + + *opaque_len = htonl(snd_buf->len - offset); + /* guess whether we're in the head or the tail: */ + if (snd_buf->page_len || snd_buf->tail[0].iov_len) + iov = snd_buf->tail; + else + iov = snd_buf->head; + p = iov->iov_base + iov->iov_len; + pad = 3 - ((snd_buf->len - offset - 1) & 3); + memset(p, 0, pad); + iov->iov_len += pad; + snd_buf->len += pad; + + return 0; +} + static 
int gss_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, u32 *p, void *obj) @@ -1002,6 +1111,8 @@ gss_wrap_req(struct rpc_task *task, rqstp, p, obj); break; case RPC_GSS_SVC_PRIVACY: + status = gss_wrap_req_priv(cred, ctx, encode, + rqstp, p, obj); break; } out: @@ -1048,6 +1159,36 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } +static inline int +gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, + struct rpc_rqst *rqstp, u32 **p) +{ + struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; + u32 offset; + u32 opaque_len; + u32 maj_stat; + int status = -EIO; + + opaque_len = ntohl(*(*p)++); + offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; + if (offset + opaque_len > rcv_buf->len) + return status; + /* remove padding: */ + rcv_buf->len = offset + opaque_len; + + maj_stat = gss_unwrap(ctx->gc_gss_ctx, NULL, + offset, rcv_buf); + if (maj_stat == GSS_S_CONTEXT_EXPIRED) + cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + if (maj_stat != GSS_S_COMPLETE) + return status; + if (ntohl(*(*p)++) != rqstp->rq_seqno) + return status; + + return 0; +} + + static int gss_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, u32 *p, void *obj) @@ -1057,6 +1198,8 @@ gss_unwrap_resp(struct rpc_task *task, gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); u32 *savedp = p; + struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; + int savedlen = head->iov_len; int status = -EIO; if (ctx->gc_proc != RPC_GSS_PROC_DATA) @@ -1070,10 +1213,14 @@ gss_unwrap_resp(struct rpc_task *task, goto out; break; case RPC_GSS_SVC_PRIVACY: + status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); + if (status) + goto out; break; } /* take into account extra slack for integrity and privacy cases: */ - task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp); + task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) + + (savedlen - head->iov_len); out_decode: status = decode(rqstp, p, obj); out: -- cgit v1.2.2 From f7b3af64c653c73feb060a9f94f2df9ab4bba4c3 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:55:03 -0400 Subject: RPCSEC_GSS: Simplify rpcsec_gss crypto code Factor out some code that will be shared by privacy crypto routines Signed-off-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 106 ++++++++++++++++++++++++---------- 1 file changed, 77 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index ee6ae74cd1b2..2baf93f8b8f5 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -139,6 +139,82 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) { sg->length = len; } +static int +process_xdr_buf(struct xdr_buf *buf, int offset, int len, + int (*actor)(struct scatterlist *, void *), void *data) +{ + int i, page_len, thislen, page_offset, ret = 0; + struct scatterlist sg[1]; + + if (offset >= buf->head[0].iov_len) { + offset -= buf->head[0].iov_len; + } else { + thislen = buf->head[0].iov_len - offset; + if (thislen > len) + thislen = len; + buf_to_sg(sg, buf->head[0].iov_base + offset, thislen); + ret = actor(sg, data); + if (ret) + goto out; + offset = 0; + len -= thislen; + } + if (len == 0) + goto out; + + if (offset >= buf->page_len) { + offset -= buf->page_len; + } else { + page_len = buf->page_len - offset; + if (page_len > len) + page_len = len; + len -= page_len; + page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); + i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; + thislen = PAGE_CACHE_SIZE - page_offset; + do { + if (thislen > page_len) + thislen = page_len; + sg->page = buf->pages[i]; + sg->offset = page_offset; + sg->length = thislen; + ret = actor(sg, data); + if (ret) + goto out; + page_len -= thislen; + i++; + page_offset = 0; + thislen = PAGE_CACHE_SIZE; + } while (page_len != 0); + offset = 0; + } + if (len == 0) + goto out; + + if (offset < buf->tail[0].iov_len) { + thislen = buf->tail[0].iov_len - offset; + if (thislen > len) + thislen = len; + buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen); + ret = actor(sg, data); + len -= thislen; + } + if (len != 0) + ret = -EINVAL; +out: + return ret; +} + +static int +checksummer(struct scatterlist *sg, void *data) +{ + struct crypto_tfm *tfm = (struct crypto_tfm *)data; + + crypto_digest_update(tfm, sg, 1); + + return 0; +} + /* checksum the plaintext data and hdrlen bytes of the token header */ s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, @@ -148,8 +224,6 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, struct crypto_tfm *tfm = NULL; /* XXX add to ctx? 
*/ struct scatterlist sg[1]; u32 code = GSS_S_FAILURE; - int len, thislen, offset; - int i; switch (cksumtype) { case CKSUMTYPE_RSA_MD5: @@ -169,33 +243,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, crypto_digest_init(tfm); buf_to_sg(sg, header, hdrlen); crypto_digest_update(tfm, sg, 1); - if (body->head[0].iov_len) { - buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len); - crypto_digest_update(tfm, sg, 1); - } - - len = body->page_len; - if (len != 0) { - offset = body->page_base & (PAGE_CACHE_SIZE - 1); - i = body->page_base >> PAGE_CACHE_SHIFT; - thislen = PAGE_CACHE_SIZE - offset; - do { - if (thislen > len) - thislen = len; - sg->page = body->pages[i]; - sg->offset = offset; - sg->length = thislen; - crypto_digest_update(tfm, sg, 1); - len -= thislen; - i++; - offset = 0; - thislen = PAGE_CACHE_SIZE; - } while(len != 0); - } - if (body->tail[0].iov_len) { - buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len); - crypto_digest_update(tfm, sg, 1); - } + process_xdr_buf(body, 0, body->len, checksummer, tfm); crypto_digest_final(tfm, cksum->data); code = 0; out: -- cgit v1.2.2 From bfa91516b57483fc9c81d8d90325fd2c3c16ac48 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:55:08 -0400 Subject: RPCSEC_GSS: krb5 pre-privacy cleanup The code this was originally derived from processed wrap and mic tokens using the same functions. This required some contortions, and more would be required with the addition of xdr_buf's, so it's better to separate out the two code paths. In preparation for adding privacy support, remove the last vestiges of the old wrap token code. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 5 ++--- net/sunrpc/auth_gss/gss_krb5_seal.c | 38 ++++++----------------------------- net/sunrpc/auth_gss/gss_krb5_unseal.c | 30 ++++++--------------------- 3 files changed, 14 insertions(+), 59 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 462c5b86b073..8b9066fdfda5 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -199,8 +199,7 @@ gss_verify_mic_kerberos(struct gss_ctx *ctx, int qop_state; struct krb5_ctx *kctx = ctx->internal_ctx_id; - maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state, - KG_TOK_MIC_MSG); + maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state); if (!maj_stat && qop_state) *qstate = qop_state; @@ -216,7 +215,7 @@ gss_get_mic_kerberos(struct gss_ctx *ctx, u32 err = 0; struct krb5_ctx *kctx = ctx->internal_ctx_id; - err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG); + err = krb5_make_token(kctx, qop, message, mic_token); dprintk("RPC: gss_get_mic_kerberos returning %d\n",err); diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index afeeb8715a77..2511834e6e52 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -70,22 +70,12 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif -static inline int -gss_krb5_padding(int blocksize, int length) { - /* Most of the code is block-size independent but in practice we - * use only 8: */ - BUG_ON(blocksize != 8); - return 8 - (length & 7); -} - u32 krb5_make_token(struct krb5_ctx *ctx, int qop_req, - struct xdr_buf *text, struct xdr_netobj *token, - int toktype) + struct xdr_buf *text, struct xdr_netobj *token) { s32 checksum_type; struct xdr_netobj md5cksum = {.len = 0, 
.data = NULL}; - int blocksize = 0, tmsglen; unsigned char *ptr, *krb5_hdr, *msg_start; s32 now; @@ -111,21 +101,13 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, goto out_err; } - if (toktype == KG_TOK_WRAP_MSG) { - blocksize = crypto_tfm_alg_blocksize(ctx->enc); - tmsglen = blocksize + text->len - + gss_krb5_padding(blocksize, blocksize + text->len); - } else { - tmsglen = 0; - } - - token->len = g_token_size(&ctx->mech_used, 22 + tmsglen); + token->len = g_token_size(&ctx->mech_used, 22); ptr = token->data; - g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr); + g_make_token_header(&ctx->mech_used, 22, &ptr); - *ptr++ = (unsigned char) ((toktype>>8)&0xff); - *ptr++ = (unsigned char) (toktype&0xff); + *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff); + *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff); /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ krb5_hdr = ptr - 2; @@ -133,17 +115,9 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); memset(krb5_hdr + 4, 0xff, 4); - if (toktype == KG_TOK_WRAP_MSG) - *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg); - if (toktype == KG_TOK_WRAP_MSG) { - /* XXX removing support for now */ + if (make_checksum(checksum_type, krb5_hdr, 8, text, &md5cksum)) goto out_err; - } else { /* Sign only. */ - if (make_checksum(checksum_type, krb5_hdr, 8, text, - &md5cksum)) - goto out_err; - } switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 8767fc53183d..19eba3df6607 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -68,20 +68,13 @@ #endif -/* message_buffer is an input if toktype is MIC and an output if it is WRAP: - * If toktype is MIC: read_token is a mic token, and message_buffer is the - * data that the mic was supposedly taken over. - * If toktype is WRAP: read_token is a wrap token, and message_buffer is used - * to return the decrypted data. - */ +/* read_token is a mic token, and message_buffer is the data that the mic was + * supposedly taken over. */ -/* XXX will need to change prototype and/or just split into a separate function - * when we add privacy (because read_token will be in pages too). */ u32 krb5_read_token(struct krb5_ctx *ctx, struct xdr_netobj *read_token, - struct xdr_buf *message_buffer, - int *qop_state, int toktype) + struct xdr_buf *message_buffer, int *qop_state) { int signalg; int sealalg; @@ -100,16 +93,12 @@ krb5_read_token(struct krb5_ctx *ctx, read_token->len)) goto out; - if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff))) + if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) || + (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) ) goto out; /* XXX sanity-check bodysize?? 
*/ - if (toktype == KG_TOK_WRAP_MSG) { - /* XXX gone */ - goto out; - } - /* get the sign and seal algorithms */ signalg = ptr[0] + (ptr[1] << 8); @@ -120,14 +109,7 @@ krb5_read_token(struct krb5_ctx *ctx, if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) goto out; - if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) || - ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff))) - goto out; - - /* in the current spec, there is only one valid seal algorithm per - key type, so a simple comparison is ok */ - - if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg)) + if (sealalg != 0xffff) goto out; /* there are several mappings of seal algorithms to sign algorithms, -- cgit v1.2.2 From 14ae162c24d985593d5b19437d7f3d8fd0062b59 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:55:13 -0400 Subject: RPCSEC_GSS: Add support for privacy to krb5 rpcsec_gss mechanism. Add support for privacy to the krb5 rpcsec_gss mechanism. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/Makefile | 2 +- net/sunrpc/auth_gss/gss_krb5_crypto.c | 156 +++++++++++++- net/sunrpc/auth_gss/gss_krb5_mech.c | 7 + net/sunrpc/auth_gss/gss_krb5_seal.c | 4 +- net/sunrpc/auth_gss/gss_krb5_unseal.c | 2 +- net/sunrpc/auth_gss/gss_krb5_wrap.c | 370 ++++++++++++++++++++++++++++++++++ 6 files changed, 535 insertions(+), 6 deletions(-) create mode 100644 net/sunrpc/auth_gss/gss_krb5_wrap.c (limited to 'net') diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index fe1b874084bc..f3431a7e33da 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile @@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ - gss_krb5_seqnum.o + gss_krb5_seqnum.o gss_krb5_wrap.o obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 2baf93f8b8f5..3f3d5437f02d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -218,7 +218,7 @@ checksummer(struct scatterlist *sg, void *data) /* checksum the plaintext data and hdrlen bytes of the token header */ s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, - struct xdr_netobj *cksum) + int body_offset, struct xdr_netobj *cksum) { char *cksumname; struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ @@ -243,7 +243,8 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, crypto_digest_init(tfm); buf_to_sg(sg, header, hdrlen); crypto_digest_update(tfm, sg, 1); - process_xdr_buf(body, 0, body->len, checksummer, tfm); + process_xdr_buf(body, body_offset, body->len - body_offset, + checksummer, tfm); crypto_digest_final(tfm, cksum->data); code = 0; out: @@ -252,3 +253,154 @@ out: } EXPORT_SYMBOL(make_checksum); + +struct encryptor_desc { + u8 iv[8]; /* XXX hard-coded blocksize */ + struct crypto_tfm *tfm; + int pos; + struct xdr_buf *outbuf; + struct page **pages; + struct scatterlist infrags[4]; + struct scatterlist outfrags[4]; + int fragno; + int fraglen; +}; + +static int +encryptor(struct scatterlist *sg, void *data) +{ + struct encryptor_desc *desc = data; + struct xdr_buf *outbuf = desc->outbuf; + struct page *in_page; + int thislen = desc->fraglen + sg->length; + int fraglen, ret; + int page_pos; + + /* Worst case is 4 fragments: head, end of page 1, start + * of page 2, tail. 
Anything more is a bug. */ + BUG_ON(desc->fragno > 3); + desc->infrags[desc->fragno] = *sg; + desc->outfrags[desc->fragno] = *sg; + + page_pos = desc->pos - outbuf->head[0].iov_len; + if (page_pos >= 0 && page_pos < outbuf->page_len) { + /* pages are not in place: */ + int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; + in_page = desc->pages[i]; + } else { + in_page = sg->page; + } + desc->infrags[desc->fragno].page = in_page; + desc->fragno++; + desc->fraglen += sg->length; + desc->pos += sg->length; + + fraglen = thislen & 7; /* XXX hardcoded blocksize */ + thislen -= fraglen; + + if (thislen == 0) + return 0; + + ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, + thislen, desc->iv); + if (ret) + return ret; + if (fraglen) { + desc->outfrags[0].page = sg->page; + desc->outfrags[0].offset = sg->offset + sg->length - fraglen; + desc->outfrags[0].length = fraglen; + desc->infrags[0] = desc->outfrags[0]; + desc->infrags[0].page = in_page; + desc->fragno = 1; + desc->fraglen = fraglen; + } else { + desc->fragno = 0; + desc->fraglen = 0; + } + return 0; +} + +int +gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, + struct page **pages) +{ + int ret; + struct encryptor_desc desc; + + BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); + + memset(desc.iv, 0, sizeof(desc.iv)); + desc.tfm = tfm; + desc.pos = offset; + desc.outbuf = buf; + desc.pages = pages; + desc.fragno = 0; + desc.fraglen = 0; + + ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc); + return ret; +} + +EXPORT_SYMBOL(gss_encrypt_xdr_buf); + +struct decryptor_desc { + u8 iv[8]; /* XXX hard-coded blocksize */ + struct crypto_tfm *tfm; + struct scatterlist frags[4]; + int fragno; + int fraglen; +}; + +static int +decryptor(struct scatterlist *sg, void *data) +{ + struct decryptor_desc *desc = data; + int thislen = desc->fraglen + sg->length; + int fraglen, ret; + + /* Worst case is 4 fragments: head, end of page 1, start + * of page 2, tail. Anything more is a bug. 
*/ + BUG_ON(desc->fragno > 3); + desc->frags[desc->fragno] = *sg; + desc->fragno++; + desc->fraglen += sg->length; + + fraglen = thislen & 7; /* XXX hardcoded blocksize */ + thislen -= fraglen; + + if (thislen == 0) + return 0; + + ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, + thislen, desc->iv); + if (ret) + return ret; + if (fraglen) { + desc->frags[0].page = sg->page; + desc->frags[0].offset = sg->offset + sg->length - fraglen; + desc->frags[0].length = fraglen; + desc->fragno = 1; + desc->fraglen = fraglen; + } else { + desc->fragno = 0; + desc->fraglen = 0; + } + return 0; +} + +int +gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset) +{ + struct decryptor_desc desc; + + /* XXXJBF: */ + BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); + + memset(desc.iv, 0, sizeof(desc.iv)); + desc.tfm = tfm; + desc.fragno = 0; + desc.fraglen = 0; + return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); +} + +EXPORT_SYMBOL(gss_decrypt_xdr_buf); diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 8b9066fdfda5..37a9ad97ccd4 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -226,6 +226,8 @@ static struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, .gss_verify_mic = gss_verify_mic_kerberos, + .gss_wrap = gss_wrap_kerberos, + .gss_unwrap = gss_unwrap_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, }; @@ -240,6 +242,11 @@ static struct pf_desc gss_kerberos_pfs[] = { .service = RPC_GSS_SVC_INTEGRITY, .name = "krb5i", }, + [2] = { + .pseudoflavor = RPC_AUTH_GSS_KRB5P, + .service = RPC_GSS_SVC_PRIVACY, + .name = "krb5p", + }, }; static struct gss_api_mech gss_kerberos_mech = { diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 2511834e6e52..fb852d9ab06f 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -116,8 +116,8 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); memset(krb5_hdr + 4, 0xff, 4); - if (make_checksum(checksum_type, krb5_hdr, 8, text, &md5cksum)) - goto out_err; + if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum)) + goto out_err; switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 19eba3df6607..c3d6d1bc100c 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -136,7 +136,7 @@ krb5_read_token(struct krb5_ctx *ctx, switch (signalg) { case SGN_ALG_DES_MAC_MD5: ret = make_checksum(checksum_type, ptr - 2, 8, - message_buffer, &md5cksum); + message_buffer, 0, &md5cksum); if (ret) goto out; diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c new file mode 100644 index 000000000000..ddcde6e42b23 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -0,0 +1,370 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +static inline int +gss_krb5_padding(int blocksize, int length) +{ + /* Most of the code is block-size independent but currently we + * use only 8: */ + BUG_ON(blocksize != 8); + return 8 - (length & 7); +} + +static inline void +gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize) +{ + 
int padding = gss_krb5_padding(blocksize, buf->len - offset); + char *p; + struct kvec *iov; + + if (buf->page_len || buf->tail[0].iov_len) + iov = &buf->tail[0]; + else + iov = &buf->head[0]; + p = iov->iov_base + iov->iov_len; + iov->iov_len += padding; + buf->len += padding; + memset(p, padding, padding); +} + +static inline int +gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) +{ + u8 *ptr; + u8 pad; + int len = buf->len; + + if (len <= buf->head[0].iov_len) { + pad = *(u8 *)(buf->head[0].iov_base + len - 1); + if (pad > buf->head[0].iov_len) + return -EINVAL; + buf->head[0].iov_len -= pad; + goto out; + } else + len -= buf->head[0].iov_len; + if (len <= buf->page_len) { + int last = (buf->page_base + len - 1) + >>PAGE_CACHE_SHIFT; + int offset = (buf->page_base + len - 1) + & (PAGE_CACHE_SIZE - 1); + ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA); + pad = *(ptr + offset); + kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA); + goto out; + } else + len -= buf->page_len; + BUG_ON(len > buf->tail[0].iov_len); + pad = *(u8 *)(buf->tail[0].iov_base + len - 1); +out: + /* XXX: NOTE: we do not adjust the page lengths--they represent + * a range of data in the real filesystem page cache, and we need + * to know that range so the xdr code can properly place read data. + * However adjusting the head length, as we do above, is harmless. + * In the case of a request that fits into a single page, the server + * also uses length and head length together to determine the original + * start of the request to copy the request for deferal; so it's + * easier on the server if we adjust head and tail length in tandem. + * It's not really a problem that we don't fool with the page and + * tail lengths, though--at worst badly formed xdr might lead the + * server to attempt to parse the padding. + * XXX: Document all these weird requirements for gss mechanism + * wrap/unwrap functions. */ + if (pad > blocksize) + return -EINVAL; + if (buf->len > pad) + buf->len -= pad; + else + return -EINVAL; + return 0; +} + +static inline void +make_confounder(char *p, int blocksize) +{ + static u64 i = 0; + u64 *q = (u64 *)p; + + /* rfc1964 claims this should be "random". But all that's really + * necessary is that it be unique. And not even that is necessary in + * our case since our "gssapi" implementation exists only to support + * rpcsec_gss, so we know that the only buffers we will ever encrypt + * already begin with a unique sequence number. Just to hedge my bets + * I'll make a half-hearted attempt at something unique, but ensuring + * uniqueness would mean worrying about atomicity and rollover, and I + * don't care enough. */ + + BUG_ON(blocksize != 8); + *q = i++; +} + +/* Assumptions: the head and tail of inbuf are ours to play with. + * The pages, however, may be real pages in the page cache and we replace + * them with scratch pages from **pages before writing to them. */ +/* XXX: obviously the above should be documentation of wrap interface, + * and shouldn't be in this kerberos-specific file. */ + +/* XXX factor out common code with seal/unseal. 
*/ + +u32 +gss_wrap_kerberos(struct gss_ctx *ctx, u32 qop, int offset, + struct xdr_buf *buf, struct page **pages) +{ + struct krb5_ctx *kctx = ctx->internal_ctx_id; + s32 checksum_type; + struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + int blocksize = 0, plainlen; + unsigned char *ptr, *krb5_hdr, *msg_start; + s32 now; + int headlen; + struct page **tmp_pages; + + dprintk("RPC: gss_wrap_kerberos\n"); + + now = get_seconds(); + + if (qop != 0) + goto out_err; + + switch (kctx->signalg) { + case SGN_ALG_DES_MAC_MD5: + checksum_type = CKSUMTYPE_RSA_MD5; + break; + default: + dprintk("RPC: gss_krb5_seal: kctx->signalg %d not" + " supported\n", kctx->signalg); + goto out_err; + } + if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) { + dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n", + kctx->sealalg); + goto out_err; + } + + blocksize = crypto_tfm_alg_blocksize(kctx->enc); + gss_krb5_add_padding(buf, offset, blocksize); + BUG_ON((buf->len - offset) % blocksize); + plainlen = blocksize + buf->len - offset; + + headlen = g_token_size(&kctx->mech_used, 22 + plainlen) - + (buf->len - offset); + + ptr = buf->head[0].iov_base + offset; + /* shift data to make room for header. */ + /* XXX Would be cleverer to encrypt while copying. */ + /* XXX bounds checking, slack, etc. */ + memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); + buf->head[0].iov_len += headlen; + buf->len += headlen; + BUG_ON((buf->len - offset - headlen) % blocksize); + + g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr); + + + *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff); + *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff); + + /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ + krb5_hdr = ptr - 2; + msg_start = krb5_hdr + 24; + /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize); + + *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg); + memset(krb5_hdr + 4, 0xff, 4); + *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg); + + make_confounder(msg_start, blocksize); + + /* XXXJBF: UGH!: */ + tmp_pages = buf->pages; + buf->pages = pages; + if (make_checksum(checksum_type, krb5_hdr, 8, buf, + offset + headlen - blocksize, &md5cksum)) + goto out_err; + buf->pages = tmp_pages; + + switch (kctx->signalg) { + case SGN_ALG_DES_MAC_MD5: + if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, + md5cksum.data, md5cksum.len)) + goto out_err; + memcpy(krb5_hdr + 16, + md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH, + KRB5_CKSUM_LENGTH); + + dprintk("RPC: make_seal_token: cksum data: \n"); + print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0); + break; + default: + BUG(); + } + + kfree(md5cksum.data); + + /* XXX would probably be more efficient to compute checksum + * and encrypt at the same time: */ + if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, + kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) + goto out_err; + + if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, + pages)) + goto out_err; + + kctx->seq_send++; + + return ((kctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); +out_err: + if (md5cksum.data) kfree(md5cksum.data); + return GSS_S_FAILURE; +} + +u32 +gss_unwrap_kerberos(struct gss_ctx *ctx, u32 *qop, int offset, + struct xdr_buf *buf) +{ + struct krb5_ctx *kctx = ctx->internal_ctx_id; + int signalg; + int sealalg; + s32 checksum_type; + struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + s32 now; + int direction; + s32 seqnum; + unsigned char *ptr; + int bodysize; + u32 ret = GSS_S_DEFECTIVE_TOKEN; + void *data_start, *orig_start; + int data_len; + int blocksize; + + dprintk("RPC: gss_unwrap_kerberos\n"); + + ptr = (u8 *)buf->head[0].iov_base + offset; + if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, + buf->len - offset)) + goto out; + + if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) || + (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) ) + goto out; + + /* XXX sanity-check bodysize?? */ + + /* get the sign and seal algorithms */ + + signalg = ptr[0] + (ptr[1] << 8); + sealalg = ptr[2] + (ptr[3] << 8); + + /* Sanity checks */ + + if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) + goto out; + + if (sealalg == 0xffff) + goto out; + + /* in the current spec, there is only one valid seal algorithm per + key type, so a simple comparison is ok */ + + if (sealalg != kctx->sealalg) + goto out; + + /* there are several mappings of seal algorithms to sign algorithms, + but few enough that we can try them all. */ + + if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) || + (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || + (kctx->sealalg == SEAL_ALG_DES3KD && + signalg != SGN_ALG_HMAC_SHA1_DES3_KD)) + goto out; + + if (gss_decrypt_xdr_buf(kctx->enc, buf, + ptr + 22 - (unsigned char *)buf->head[0].iov_base)) + goto out; + + /* compute the checksum of the message */ + + /* initialize the the cksum */ + switch (signalg) { + case SGN_ALG_DES_MAC_MD5: + checksum_type = CKSUMTYPE_RSA_MD5; + break; + default: + ret = GSS_S_DEFECTIVE_TOKEN; + goto out; + } + + switch (signalg) { + case SGN_ALG_DES_MAC_MD5: + ret = make_checksum(checksum_type, ptr - 2, 8, buf, + ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum); + if (ret) + goto out; + + ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data, + md5cksum.data, md5cksum.len); + if (ret) + goto out; + + if (memcmp(md5cksum.data + 8, ptr + 14, 8)) { + ret = GSS_S_BAD_SIG; + goto out; + } + break; + default: + ret = GSS_S_DEFECTIVE_TOKEN; + goto out; + } + + /* it got through unscathed. Make sure the context is unexpired */ + + if (qop) + *qop = GSS_C_QOP_DEFAULT; + + now = get_seconds(); + + ret = GSS_S_CONTEXT_EXPIRED; + if (now > kctx->endtime) + goto out; + + /* do sequencing checks */ + + ret = GSS_S_BAD_SIG; + if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction, + &seqnum))) + goto out; + + if ((kctx->initiate && direction != 0xff) || + (!kctx->initiate && direction != 0)) + goto out; + + /* Copy the data back to the right position. XXX: Would probably be + * better to copy and encrypt at the same time. 
*/ + + blocksize = crypto_tfm_alg_blocksize(kctx->enc); + data_start = ptr + 22 + blocksize; + orig_start = buf->head[0].iov_base + offset; + data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; + memmove(orig_start, data_start, data_len); + buf->head[0].iov_len -= (data_start - orig_start); + buf->len -= (data_start - orig_start); + + ret = GSS_S_DEFECTIVE_TOKEN; + if (gss_krb5_remove_padding(buf, blocksize)) + goto out; + + ret = GSS_S_COMPLETE; +out: + if (md5cksum.data) kfree(md5cksum.data); + return ret; +} -- cgit v1.2.2 From 00fd6e14255fe7a249315746386d640bc4e9e758 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:55:18 -0400 Subject: RPCSEC_GSS remove all qop parameters Not only are the qop parameters that are passed around throughout the gssapi unused by any currently implemented mechanism, but there appears to be some doubt as to whether they will ever be used. Let's just kill them off for now. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 20 +++++++------------- net/sunrpc/auth_gss/gss_krb5_mech.c | 12 ++++-------- net/sunrpc/auth_gss/gss_krb5_seal.c | 5 +---- net/sunrpc/auth_gss/gss_krb5_unseal.c | 5 +---- net/sunrpc/auth_gss/gss_krb5_wrap.c | 11 ++--------- net/sunrpc/auth_gss/gss_mech_switch.c | 14 ++++---------- net/sunrpc/auth_gss/gss_spkm3_mech.c | 21 ++++++++------------- net/sunrpc/auth_gss/gss_spkm3_seal.c | 4 +--- net/sunrpc/auth_gss/gss_spkm3_unseal.c | 2 +- net/sunrpc/auth_gss/svcauth_gss.c | 9 ++++----- 10 files changed, 33 insertions(+), 70 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 5e4872058ec7..f44f46f1d8e0 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -854,9 +854,7 @@ gss_marshal(struct rpc_task *task, u32 *p) *p++ = htonl(RPC_AUTH_GSS); mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx->gc_gss_ctx, - GSS_C_QOP_DEFAULT, - &verf_buf, &mic); + maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; } else if (maj_stat != 0) { @@ -888,7 +886,7 @@ gss_validate(struct rpc_task *task, u32 *p) { struct rpc_cred *cred = task->tk_msg.rpc_cred; struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); - u32 seq, qop_state; + u32 seq; struct kvec iov; struct xdr_buf verf_buf; struct xdr_netobj mic; @@ -909,7 +907,7 @@ gss_validate(struct rpc_task *task, u32 *p) mic.data = (u8 *)p; mic.len = len; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state); + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat) @@ -961,8 +959,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, p = iov->iov_base + iov->iov_len; mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx->gc_gss_ctx, - GSS_C_QOP_DEFAULT, &integ_buf, &mic); + maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); status = -EIO; /* XXX? 
*/ if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; @@ -1057,8 +1054,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); snd_buf->tail[0].iov_base = tmp; } - maj_stat = gss_wrap(ctx->gc_gss_ctx, GSS_C_QOP_DEFAULT, offset, - snd_buf, inpages); + maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); /* RPC_SLACK_SPACE should prevent this ever happening: */ BUG_ON(snd_buf->len > snd_buf->buflen); status = -EIO; @@ -1150,8 +1146,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) return status; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, - &mic, NULL); + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat != GSS_S_COMPLETE) @@ -1176,8 +1171,7 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, /* remove padding: */ rcv_buf->len = offset + opaque_len; - maj_stat = gss_unwrap(ctx->gc_gss_ctx, NULL, - offset, rcv_buf); + maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat != GSS_S_COMPLETE) diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 37a9ad97ccd4..9ffac2c50b94 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -193,15 +193,12 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { static u32 gss_verify_mic_kerberos(struct gss_ctx *ctx, struct xdr_buf *message, - struct xdr_netobj *mic_token, - u32 *qstate) { + struct xdr_netobj *mic_token) +{ u32 maj_stat = 0; - int qop_state; struct krb5_ctx *kctx = ctx->internal_ctx_id; - maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state); - if (!maj_stat && qop_state) - *qstate = qop_state; + maj_stat = krb5_read_token(kctx, mic_token, message); dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat); return maj_stat; @@ -209,13 +206,12 @@ gss_verify_mic_kerberos(struct gss_ctx *ctx, static u32 gss_get_mic_kerberos(struct gss_ctx *ctx, - u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token) { u32 err = 0; struct krb5_ctx *kctx = ctx->internal_ctx_id; - err = krb5_make_token(kctx, qop, message, mic_token); + err = krb5_make_token(kctx, message, mic_token); dprintk("RPC: gss_get_mic_kerberos returning %d\n",err); diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index fb852d9ab06f..15227c727c8b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -71,7 +71,7 @@ #endif u32 -krb5_make_token(struct krb5_ctx *ctx, int qop_req, +krb5_make_token(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { s32 checksum_type; @@ -83,9 +83,6 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, now = get_seconds(); - if (qop_req != 0) - goto out_err; - switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: checksum_type = CKSUMTYPE_RSA_MD5; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index c3d6d1bc100c..bcf978627a71 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -74,7 +74,7 @@ u32 krb5_read_token(struct krb5_ctx *ctx, struct xdr_netobj *read_token, - struct xdr_buf *message_buffer, int *qop_state) + struct xdr_buf *message_buffer) { int 
signalg; int sealalg; @@ -157,9 +157,6 @@ krb5_read_token(struct krb5_ctx *ctx, /* it got through unscathed. Make sure the context is unexpired */ - if (qop_state) - *qop_state = GSS_C_QOP_DEFAULT; - now = get_seconds(); ret = GSS_S_CONTEXT_EXPIRED; diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index ddcde6e42b23..af777cf9f251 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -116,7 +116,7 @@ make_confounder(char *p, int blocksize) /* XXX factor out common code with seal/unseal. */ u32 -gss_wrap_kerberos(struct gss_ctx *ctx, u32 qop, int offset, +gss_wrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf, struct page **pages) { struct krb5_ctx *kctx = ctx->internal_ctx_id; @@ -132,9 +132,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, u32 qop, int offset, now = get_seconds(); - if (qop != 0) - goto out_err; - switch (kctx->signalg) { case SGN_ALG_DES_MAC_MD5: checksum_type = CKSUMTYPE_RSA_MD5; @@ -229,8 +226,7 @@ out_err: } u32 -gss_unwrap_kerberos(struct gss_ctx *ctx, u32 *qop, int offset, - struct xdr_buf *buf) +gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) { struct krb5_ctx *kctx = ctx->internal_ctx_id; int signalg; @@ -328,9 +324,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, u32 *qop, int offset, /* it got through unscathed. Make sure the context is unexpired */ - if (qop) - *qop = GSS_C_QOP_DEFAULT; - now = get_seconds(); ret = GSS_S_CONTEXT_EXPIRED; diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 06d97cb3481a..b048bf672da2 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -250,13 +250,11 @@ gss_import_sec_context(const void *input_token, size_t bufsize, u32 gss_get_mic(struct gss_ctx *context_handle, - u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token) { return context_handle->mech_type->gm_ops ->gss_get_mic(context_handle, - qop, message, mic_token); } @@ -266,35 +264,31 @@ gss_get_mic(struct gss_ctx *context_handle, u32 gss_verify_mic(struct gss_ctx *context_handle, struct xdr_buf *message, - struct xdr_netobj *mic_token, - u32 *qstate) + struct xdr_netobj *mic_token) { return context_handle->mech_type->gm_ops ->gss_verify_mic(context_handle, message, - mic_token, - qstate); + mic_token); } u32 gss_wrap(struct gss_ctx *ctx_id, - u32 qop, int offset, struct xdr_buf *buf, struct page **inpages) { return ctx_id->mech_type->gm_ops - ->gss_wrap(ctx_id, qop, offset, buf, inpages); + ->gss_wrap(ctx_id, offset, buf, inpages); } u32 gss_unwrap(struct gss_ctx *ctx_id, - u32 *qop, int offset, struct xdr_buf *buf) { return ctx_id->mech_type->gm_ops - ->gss_unwrap(ctx_id, qop, offset, buf); + ->gss_unwrap(ctx_id, offset, buf); } diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 6c97d61baa9b..39b3edc14694 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -224,18 +224,13 @@ gss_delete_sec_context_spkm3(void *internal_ctx) { static u32 gss_verify_mic_spkm3(struct gss_ctx *ctx, struct xdr_buf *signbuf, - struct xdr_netobj *checksum, - u32 *qstate) { + struct xdr_netobj *checksum) +{ u32 maj_stat = 0; - int qop_state = 0; struct spkm3_ctx *sctx = ctx->internal_ctx_id; dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n"); - maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state, - SPKM_MIC_TOK); - - if (!maj_stat && qop_state) - *qstate = qop_state; + maj_stat = 
spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); return maj_stat; @@ -243,15 +238,15 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx, static u32 gss_get_mic_spkm3(struct gss_ctx *ctx, - u32 qop, struct xdr_buf *message_buffer, - struct xdr_netobj *message_token) { + struct xdr_netobj *message_token) +{ u32 err = 0; struct spkm3_ctx *sctx = ctx->internal_ctx_id; dprintk("RPC: gss_get_mic_spkm3\n"); - err = spkm3_make_token(sctx, qop, message_buffer, + err = spkm3_make_token(sctx, message_buffer, message_token, SPKM_MIC_TOK); return err; } @@ -264,8 +259,8 @@ static struct gss_api_ops gss_spkm3_ops = { }; static struct pf_desc gss_spkm3_pfs[] = { - {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"}, - {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, + {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"}, + {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, }; static struct gss_api_mech gss_spkm3_mech = { diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index 25339868d462..148201e929d0 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c @@ -51,7 +51,7 @@ */ u32 -spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, +spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype) { @@ -68,8 +68,6 @@ spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, dprintk("RPC: spkm3_make_token\n"); now = jiffies; - if (qop_req != 0) - goto out_err; if (ctx->ctx_id.len != 16) { dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c index 65ce81bf0bc4..c3c0d9586103 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c @@ -52,7 +52,7 @@ u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, /* checksum */ struct xdr_buf *message_buffer, /* signbuf */ - int *qop_state, int toktype) + int toktype) { s32 code; struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e3308195374e..e4ada15ed856 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -566,8 +566,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, if (rqstp->rq_deferred) /* skip verification of revisited request */ return SVC_OK; - if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL) - != GSS_S_COMPLETE) { + if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) { *authp = rpcsec_gsserr_credproblem; return SVC_DENIED; } @@ -604,7 +603,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) xdr_buf_from_iov(&iov, &verf_data); p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic); + maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); if (maj_stat != GSS_S_COMPLETE) return -1; *p++ = htonl(mic.len); @@ -710,7 +709,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) goto out; if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) goto out; - maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL); + maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); if (maj_stat != GSS_S_COMPLETE) goto out; if (ntohl(svc_getu32(&buf->head[0])) != seq) @@ -1012,7 +1011,7 @@ svcauth_gss_release(struct svc_rqst *rqstp) resv = &resbuf->tail[0]; } 
mic.data = (u8 *)resv->iov_base + resv->iov_len + 4; - if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic)) + if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic)) goto out_err; svc_putu32(resv, htonl(mic.len)); memset(mic.data + mic.len, 0, -- cgit v1.2.2 From a0857d03b21fa54653c9d2fe7a315381176015b4 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Oct 2005 16:55:23 -0400 Subject: RPCSEC_GSS: krb5 cleanup Remove some senseless wrappers. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 28 ---------------------------- net/sunrpc/auth_gss/gss_krb5_seal.c | 5 +++-- net/sunrpc/auth_gss/gss_krb5_unseal.c | 6 +++--- 3 files changed, 6 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 9ffac2c50b94..5f1f806a0b11 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -190,34 +190,6 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { kfree(kctx); } -static u32 -gss_verify_mic_kerberos(struct gss_ctx *ctx, - struct xdr_buf *message, - struct xdr_netobj *mic_token) -{ - u32 maj_stat = 0; - struct krb5_ctx *kctx = ctx->internal_ctx_id; - - maj_stat = krb5_read_token(kctx, mic_token, message); - - dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat); - return maj_stat; -} - -static u32 -gss_get_mic_kerberos(struct gss_ctx *ctx, - struct xdr_buf *message, - struct xdr_netobj *mic_token) { - u32 err = 0; - struct krb5_ctx *kctx = ctx->internal_ctx_id; - - err = krb5_make_token(kctx, message, mic_token); - - dprintk("RPC: gss_get_mic_kerberos returning %d\n",err); - - return err; -} - static struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 15227c727c8b..13f8ae979454 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -71,9 +71,10 @@ #endif u32 -krb5_make_token(struct krb5_ctx *ctx, - struct xdr_buf *text, struct xdr_netobj *token) +gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, + struct xdr_netobj *token) { + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; s32 checksum_type; struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; unsigned char *ptr, *krb5_hdr, *msg_start; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index bcf978627a71..2030475d98ed 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -72,10 +72,10 @@ * supposedly taken over. */ u32 -krb5_read_token(struct krb5_ctx *ctx, - struct xdr_netobj *read_token, - struct xdr_buf *message_buffer) +gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, + struct xdr_buf *message_buffer, struct xdr_netobj *read_token) { + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; int signalg; int sealalg; s32 checksum_type; -- cgit v1.2.2 From 5b74eda78db410b979b7d450221c971fdebf5d29 Mon Sep 17 00:00:00 2001 From: Hong Liu Date: Wed, 19 Oct 2005 16:31:34 -0500 Subject: Fixed problem with not being able to decrypt/encrypt broadcast packets. 
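[Illustrative sketch, not part of the patch: the change below treats broadcast frames like other group-addressed traffic when deciding whether the host must perform the crypto. A minimal standalone restatement of that decision, mirroring the TX-side test and assuming the 2.6-era <linux/etherdevice.h> helpers — as the added test implies, is_multicast_ether_addr() at the time did not match the all-ones broadcast address, so broadcast frames fell through to the unicast policy.]

    /* sketch only -- "dest" is the frame's destination address */
    static int host_must_decrypt(struct ieee80211_device *ieee, const u8 *dest)
    {
            if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest))
                    return ieee->host_mc_decrypt;   /* group-addressed frames */
            return ieee->host_decrypt;              /* unicast frames */
    }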
Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_rx.c | 3 ++- net/ieee80211/ieee80211_tx.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 6b005cb0caa0..ce694cf5c160 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -409,7 +409,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, return 1; } - if (is_multicast_ether_addr(hdr->addr1) ? ieee->host_mc_decrypt : + if ((is_multicast_ether_addr(hdr->addr1) || + is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt : ieee->host_decrypt) { int idx = 0; if (skb->len >= hdrlen + 3) diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 42c50619aa8e..2955b7aa5a38 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -338,7 +338,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) if (host_encrypt || ieee->host_open_frag) { /* Determine fragmentation size based on destination (multicast * and broadcast are not fragmented) */ - if (is_multicast_ether_addr(dest)) + if (is_multicast_ether_addr(dest) || + is_broadcast_ether_addr(dest)) frag_size = MAX_FRAG_THRESHOLD; else frag_size = ieee->fts; -- cgit v1.2.2 From f0f15ab5542f73d07e35eeee713df289599427b2 Mon Sep 17 00:00:00 2001 From: Hong Liu Date: Thu, 20 Oct 2005 11:06:36 -0500 Subject: Fixed oops if an uninitialized key is used for encryption. Without this patch, if you try and use a key that has not been configured, for example: % iwconfig eth1 key deadbeef00 [2] without having configured key [1], then the active key will still be [1], but privacy will now be enabled. Transmission of a packet in this situation will result in a kernel oops. Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_tx.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 2955b7aa5a38..f4f2a33973a7 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -157,11 +157,14 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx]; int res; + if (crypt == NULL) + return -1; + /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ atomic_inc(&crypt->refcnt); res = 0; - if (crypt->ops->encrypt_mpdu) + if (crypt->ops && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); @@ -264,9 +267,9 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && ieee->sec.encrypt; - host_encrypt = ieee->host_encrypt && encrypt; - host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt; - host_build_iv = ieee->host_build_iv && encrypt; + host_encrypt = ieee->host_encrypt && encrypt && crypt; + host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; + host_build_iv = ieee->host_build_iv && encrypt && crypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { -- cgit v1.2.2 From d3f7bf4fa9626d371f26cd310477202628a8126a Mon Sep 17 00:00:00 2001 From: Michael Buesch Date: Fri, 21 Oct 2005 12:39:52 -0500 Subject: ieee80211 subsystem: * Use GFP mask on TX skb allocation. * Use the tx_headroom and reserve requested space. 
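[Illustrative sketch, not code from the patch, of the allocation pattern the two bullet points describe: pass the caller's GFP mask straight through to the skb allocator and reserve the requested headroom up front, so a driver can later push its own hardware header without reallocating or copying. frag_size and headroom stand in for txb_size and ieee->tx_headroom.]

    static struct sk_buff *alloc_tx_frag(unsigned int frag_size,
                                         unsigned int headroom, gfp_t gfp_mask)
    {
            struct sk_buff *skb = __dev_alloc_skb(frag_size + headroom, gfp_mask);

            if (!skb)
                    return NULL;
            skb_reserve(skb, headroom);     /* leave room for the driver's header */
            return skb;
    }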
Signed-off-by: Michael Buesch Signed-off-by: James Ketrenos --- net/ieee80211/ieee80211_tx.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index f4f2a33973a7..fb4509032402 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -190,7 +190,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) } static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, - gfp_t gfp_mask) + int headroom, gfp_t gfp_mask) { struct ieee80211_txb *txb; int i; @@ -204,11 +204,13 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, txb->frag_size = txb_size; for (i = 0; i < nr_frags; i++) { - txb->fragments[i] = dev_alloc_skb(txb_size); + txb->fragments[i] = __dev_alloc_skb(txb_size + headroom, + gfp_mask); if (unlikely(!txb->fragments[i])) { i--; break; } + skb_reserve(txb->fragments[i], headroom); } if (unlikely(i != nr_frags)) { while (i >= 0) @@ -384,7 +386,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, * postfix, header, FCS, etc.) */ - txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC); + txb = ieee80211_alloc_txb(nr_frags, frag_size, + ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); -- cgit v1.2.2 From 900e0143a575406146ac531fcb91790f166ce52f Mon Sep 17 00:00:00 2001 From: Patrick Caulfield Date: Tue, 11 Oct 2005 08:22:33 +0100 Subject: [DECNET]: Remove some redundant ifdeffed code Signed-off-by: Patrick Caulfield Signed-off-by: Steven Whitehouse Signed-off-by: David S. Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/decnet/af_decnet.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'net') diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 1186dc44cdff..3f25cadccddd 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (saddr->sdn_flags & ~SDF_WILD) return -EINVAL; -#if 1 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))) return -EACCES; -#else - /* - * Maybe put the default actions in the default security ops for - * dn_prot_sock ? Would be nice if the capable call would go there - * too. - */ - if (security_dn_prot_sock(saddr) && - !capable(CAP_NET_BIND_SERVICE) || - saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD)) - return -EACCES; -#endif - if (!(saddr->sdn_flags & SDF_WILD)) { if (dn_ntohs(saddr->sdn_nodeaddrl)) { -- cgit v1.2.2 From 670c02c2bfd2c8a305a90f5285409a7b0a8fd630 Mon Sep 17 00:00:00 2001 From: John Hawkes Date: Thu, 13 Oct 2005 09:30:31 -0700 Subject: [NET]: Wider use of for_each_*cpu() In 'net' change the explicit use of for-loops and NR_CPUS into the general for_each_cpu() or for_each_online_cpu() constructs, as appropriate. This widens the scope of potential future optimizations of the general constructs, as well as takes advantage of the existing optimizations of first_cpu() and next_cpu(), which is advantageous when the true CPU count is much smaller than NR_CPUS. Signed-off-by: John Hawkes Signed-off-by: David S. 
Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/core/neighbour.c | 5 +---- net/core/pktgen.c | 5 +---- net/ipv4/icmp.c | 5 +---- net/ipv4/proc.c | 4 +--- net/ipv6/icmp.c | 9 ++------- net/ipv6/proc.c | 4 +--- net/sctp/proc.c | 4 +--- 7 files changed, 8 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 1dcf7fa1f0fe..e68700f950a5 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1625,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb, memset(&ndst, 0, sizeof(ndst)); - for (cpu = 0; cpu < NR_CPUS; cpu++) { + for_each_cpu(cpu) { struct neigh_statistics *st; - if (!cpu_possible(cpu)) - continue; - st = per_cpu_ptr(tbl->stats, cpu); ndst.ndts_allocs += st->allocs; ndst.ndts_destroys += st->destroys; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 5f043d346694..00116d8b3b28 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3065,12 +3065,9 @@ static int __init pg_init(void) /* Register us to receive netdevice events */ register_netdevice_notifier(&pktgen_notifier_block); - for (cpu = 0; cpu < NR_CPUS ; cpu++) { + for_each_online_cpu(cpu) { char buf[30]; - if (!cpu_online(cpu)) - continue; - sprintf(buf, "kpktgend_%i", cpu); pktgen_create_thread(buf, cpu); } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 90dca711ac9f..175e093ec564 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops) struct inet_sock *inet; int i; - for (i = 0; i < NR_CPUS; i++) { + for_each_cpu(i) { int err; - if (!cpu_possible(i)) - continue; - err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, &per_cpu(__icmp_socket, i)); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index f7943ba1f43c..a65e508fbd40 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -90,9 +90,7 @@ fold_field(void *mib[], int offt) unsigned long res = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b7185fb3377c..23e540365a14 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops) struct sock *sk; int err, i, j; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - + for_each_cpu(i) { err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &per_cpu(__icmpv6_socket, i)); if (err < 0) { @@ -749,9 +746,7 @@ void icmpv6_cleanup(void) { int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { sock_release(per_cpu(__icmpv6_socket, i)); } inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 334a5967831e..50a13e75d70e 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -140,9 +140,7 @@ fold_field(void *mib[], int offt) unsigned long res = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); } diff --git a/net/sctp/proc.c b/net/sctp/proc.c index b74f7772b576..6e4dc28874d7 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -69,9 +69,7 @@ fold_field(void *mib[], int nr) unsigned long res = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + 
for_each_cpu(i) { res += *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + sizeof (unsigned long) * nr)); -- cgit v1.2.2 From b7c8921bf1a8a9c1907b1eeb029d3f167be226f3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 14 Oct 2005 15:26:34 -0700 Subject: [PKTGEN]: Sleeping function called under lock pktgen is calling kmalloc GFP_KERNEL and vmalloc with lock held. The simplest fix is to turn the lock into a semaphore, since the thread lock is only used for admin control from user context. Signed-off-by: Stephen Hemminger Signed-off-by: Robert Olsson Signed-off-by: David S. Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/core/pktgen.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 00116d8b3b28..8a90bf79261a 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -177,8 +177,8 @@ #define T_REMDEV (1<<3) /* Remove all devs */ /* Locks */ -#define thread_lock() spin_lock(&_thread_lock) -#define thread_unlock() spin_unlock(&_thread_lock) +#define thread_lock() down(&pktgen_sem) +#define thread_unlock() up(&pktgen_sem) /* If lock -- can be removed after some work */ #define if_lock(t) spin_lock(&(t->if_lock)); @@ -503,7 +503,7 @@ static int pg_delay_d = 0; static int pg_clone_skb_d = 0; static int debug = 0; -static DEFINE_SPINLOCK(_thread_lock); +static DECLARE_MUTEX(pktgen_sem); static struct pktgen_thread *pktgen_threads = NULL; static char module_fname[128]; -- cgit v1.2.2 From 2845b63b504b051a9cb4d78bed8b3594451a1f6f Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 14 Oct 2005 15:29:48 -0700 Subject: [PKTGEN]: Use kzalloc These are cleanup patches for pktgen that can go in 2.6.15 Can use kzalloc in a couple of places. Signed-off-by: Stephen Hemminger Signed-off-by: Robert Olsson Signed-off-by: David S. Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/core/pktgen.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8a90bf79261a..b597ef1771f4 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2869,12 +2869,10 @@ static int pktgen_add_device(struct pktgen_thread *t, const char* ifname) if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) { - pkt_dev = kmalloc(sizeof(struct pktgen_dev), GFP_KERNEL); + pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); if (!pkt_dev) return -ENOMEM; - memset(pkt_dev, 0, sizeof(struct pktgen_dev)); - pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); if (pkt_dev->flows == NULL) { kfree(pkt_dev); @@ -2958,13 +2956,12 @@ static int pktgen_create_thread(const char* name, int cpu) return -EINVAL; } - t = (struct pktgen_thread*)(kmalloc(sizeof(struct pktgen_thread), GFP_KERNEL)); + t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); if (!t) { printk("pktgen: ERROR: out of memory, can't create new thread.\n"); return -ENOMEM; } - memset(t, 0, sizeof(struct pktgen_thread)); strcpy(t->name, name); spin_lock_init(&t->if_lock); t->cpu = cpu; -- cgit v1.2.2 From b4099fab75d5e3eae8d207c0d7159e2f3348686e Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 14 Oct 2005 15:32:22 -0700 Subject: [PKTGEN]: Spelling and white space Fix some cosmetic issues. Indentation, spelling errors, and some whitespace. Signed-off-by: Stephen Hemminger Signed-off-by: Robert Olsson Signed-off-by: David S. 
Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/core/pktgen.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b597ef1771f4..ad053c8cb7fc 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -75,7 +75,7 @@ * By design there should only be *one* "controlling" process. In practice * multiple write accesses gives unpredictable result. Understood by "write" * to /proc gives result code thats should be read be the "writer". - * For pratical use this should be no problem. + * For practical use this should be no problem. * * Note when adding devices to a specific CPU there good idea to also assign * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. @@ -96,7 +96,7 @@ * New xmit() return, do_div and misc clean up by Stephen Hemminger * 040923 * - * Rany Dunlap fixed u64 printk compiler waring + * Randy Dunlap fixed u64 printk compiler waring * * Remove FCS from BW calculation. Lennert Buytenhek * New time handling. Lennert Buytenhek 041213 @@ -244,7 +244,7 @@ struct pktgen_dev { __u32 seq_num; int clone_skb; /* Use multiple SKBs during packet gen. If this number - * is greater than 1, then that many coppies of the same + * is greater than 1, then that many copies of the same * packet will be sent before a new packet is allocated. * For instance, if you want to send 1024 identical packets * before creating a new packet, set clone_skb to 1024. @@ -396,7 +396,7 @@ static inline s64 divremdi3(s64 x, s64 y, int type) /* End of hacks to deal with 64-bit math on x86 */ -/** Convert to miliseconds */ +/** Convert to milliseconds */ static inline __u64 tv_to_ms(const struct timeval* tv) { __u64 ms = tv->tv_usec / 1000; @@ -425,7 +425,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base) { __u64 tmp = n; /* - * How do we know if the architectrure we are running on + * How do we know if the architecture we are running on * supports division with 64 bit base? 
* */ @@ -544,12 +544,12 @@ static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, len = strlen(data); if(len > count) { - len =-EFAULT; + len = -EFAULT; goto out; } if (copy_to_user(buf, data, len)) { - len =-EFAULT; + len = -EFAULT; goto out; } @@ -578,7 +578,7 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, goto out; } if (copy_from_user(data, buf, count)) { - err =-EFAULT; + err = -EFAULT; goto out_free; } data[count-1] = 0; /* Make string */ @@ -1702,7 +1702,7 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) start = now = getCurUs(); printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); while (now < spin_until_us) { - /* TODO: optimise sleeping behavior */ + /* TODO: optimize sleeping behavior */ if (spin_until_us - now > jiffies_to_usecs(1)+1) schedule_timeout_interruptible(1); else if (spin_until_us - now > 100) { @@ -2361,7 +2361,7 @@ static void pktgen_stop_all_threads_ifs(void) pktgen_stop(t); t = t->next; } - thread_unlock(); + thread_unlock(); } static int thread_is_running(struct pktgen_thread *t ) @@ -2555,7 +2555,7 @@ static void pktgen_rem_thread(struct pktgen_thread *t) if (strlen(t->fname)) remove_proc_entry(t->fname, NULL); - thread_lock(); + thread_lock(); if (tmp == t) pktgen_threads = tmp->next; @@ -2929,7 +2929,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name) { struct pktgen_thread *t = NULL; - thread_lock(); + thread_lock(); t = pktgen_threads; while (t) { -- cgit v1.2.2 From d50a6b56f0f239cf061630c85add121dc3555339 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 14 Oct 2005 15:42:33 -0700 Subject: [PKTGEN]: proc interface revision The code to handle the /proc interface can be cleaned up in several places: * use seq_file for read * don't need to remember all the filenames separately * use for_online_cpu's * don't vmalloc a buffer for small command from user. Committer note: This patch clashed with John Hawkes's "[NET]: Wider use of for_each_*cpu()", so I fixed it up manually. Signed-off-by: Stephen Hemminger Signed-off-by: Robert Olsson Signed-off-by: David S. Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/core/pktgen.c | 472 +++++++++++++++++++++++++----------------------------- 1 file changed, 215 insertions(+), 257 deletions(-) (limited to 'net') diff --git a/net/core/pktgen.c b/net/core/pktgen.c index ad053c8cb7fc..7fc3e9e28c34 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -137,6 +137,7 @@ #include #include #include +#include #include #include #include @@ -151,7 +152,7 @@ #include -#define VERSION "pktgen v2.62: Packet Generator for packet performance testing.\n" +#define VERSION "pktgen v2.63: Packet Generator for packet performance testing.\n" /* #define PG_DEBUG(a) a */ #define PG_DEBUG(a) @@ -186,7 +187,9 @@ /* Used to help with determining the pkts on receive */ #define PKTGEN_MAGIC 0xbe9be955 -#define PG_PROC_DIR "net/pktgen" +#define PG_PROC_DIR "pktgen" +#define PGCTRL "pgctrl" +static struct proc_dir_entry *pg_proc_dir = NULL; #define MAX_CFLOWS 65536 @@ -202,11 +205,8 @@ struct pktgen_dev { * Try to keep frequent/infrequent used vars. separated. 
*/ - char ifname[32]; - struct proc_dir_entry *proc_ent; + char ifname[IFNAMSIZ]; char result[512]; - /* proc file names */ - char fname[80]; struct pktgen_thread* pg_thread; /* the owner */ struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */ @@ -330,8 +330,6 @@ struct pktgen_thread { struct pktgen_dev *if_list; /* All device here */ struct pktgen_thread* next; char name[32]; - char fname[128]; /* name of proc file */ - struct proc_dir_entry *proc_ent; char result[512]; u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ @@ -473,16 +471,6 @@ static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b) static char version[] __initdata = VERSION; -static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, size_t count, loff_t *ppos); -static ssize_t proc_pgctrl_write(struct file* file, const char __user * buf, size_t count, loff_t *ppos); -static int proc_if_read(char *buf , char **start, off_t offset, int len, int *eof, void *data); - -static int proc_thread_read(char *buf , char **start, off_t offset, int len, int *eof, void *data); -static int proc_if_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data); -static int proc_thread_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data); -static int create_proc_dir(void); -static int remove_proc_dir(void); - static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i); static int pktgen_add_device(struct pktgen_thread* t, const char* ifname); static struct pktgen_thread* pktgen_find_thread(const char* name); @@ -506,80 +494,38 @@ static int debug = 0; static DECLARE_MUTEX(pktgen_sem); static struct pktgen_thread *pktgen_threads = NULL; -static char module_fname[128]; -static struct proc_dir_entry *module_proc_ent = NULL; - static struct notifier_block pktgen_notifier_block = { .notifier_call = pktgen_device_event, }; -static struct file_operations pktgen_fops = { - .read = proc_pgctrl_read, - .write = proc_pgctrl_write, - /* .ioctl = pktgen_ioctl, later maybe */ -}; - /* * /proc handling functions * */ -static struct proc_dir_entry *pg_proc_dir = NULL; -static int proc_pgctrl_read_eof=0; - -static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, - size_t count, loff_t *ppos) +static int pgctrl_show(struct seq_file *seq, void *v) { - char data[200]; - int len = 0; - - if(proc_pgctrl_read_eof) { - proc_pgctrl_read_eof=0; - len = 0; - goto out; - } - - sprintf(data, "%s", VERSION); - - len = strlen(data); - - if(len > count) { - len = -EFAULT; - goto out; - } - - if (copy_to_user(buf, data, len)) { - len = -EFAULT; - goto out; - } - - *ppos += len; - proc_pgctrl_read_eof=1; /* EOF next call */ - - out: - return len; + seq_puts(seq, VERSION); + return 0; } -static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, - size_t count, loff_t *ppos) +static ssize_t pgctrl_write(struct file* file,const char __user * buf, + size_t count, loff_t *ppos) { - char *data = NULL; int err = 0; + char data[128]; if (!capable(CAP_NET_ADMIN)){ err = -EPERM; goto out; } - data = (void*)vmalloc ((unsigned int)count); + if (count > sizeof(data)) + count = sizeof(data); - if(!data) { - err = -ENOMEM; - goto out; - } if (copy_from_user(data, buf, count)) { err = -EFAULT; - goto out_free; + goto out; } data[count-1] = 0; /* Make string */ @@ -594,31 +540,40 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, err = count; - out_free: - vfree (data); 
out: return err; } -static int proc_if_read(char *buf , char **start, off_t offset, - int len, int *eof, void *data) +static int pgctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, pgctrl_show, PDE(inode)->data); +} + +static struct file_operations pktgen_fops = { + .owner = THIS_MODULE, + .open = pgctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pgctrl_write, + .release = single_release, +}; + +static int pktgen_if_show(struct seq_file *seq, void *v) { - char *p; int i; - struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); + struct pktgen_dev *pkt_dev = seq->private; __u64 sa; __u64 stopped; __u64 now = getCurUs(); - p = buf; - p += sprintf(p, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", - (unsigned long long) pkt_dev->count, - pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); + seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", + (unsigned long long) pkt_dev->count, + pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); - p += sprintf(p, " frags: %d delay: %u clone_skb: %d ifname: %s\n", - pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); + seq_printf(seq, " frags: %d delay: %u clone_skb: %d ifname: %s\n", + pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); - p += sprintf(p, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); + seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); if(pkt_dev->flags & F_IPV6) { @@ -626,19 +581,19 @@ static int proc_if_read(char *buf , char **start, off_t offset, fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); - p += sprintf(p, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); + seq_printf(seq, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); - p += sprintf(p, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); + seq_printf(seq, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); } else - p += sprintf(p, " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", - pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); + seq_printf(seq," dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", + pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); - p += sprintf(p, " src_mac: "); + seq_puts(seq, " src_mac: "); if ((pkt_dev->src_mac[0] == 0) && (pkt_dev->src_mac[1] == 0) && @@ -648,89 +603,89 @@ static int proc_if_read(char *buf , char **start, off_t offset, (pkt_dev->src_mac[5] == 0)) for (i = 0; i < 6; i++) - p += sprintf(p, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); + seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); else for (i = 0; i < 6; i++) - p += sprintf(p, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); + seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); - p += sprintf(p, "dst_mac: "); + seq_printf(seq, "dst_mac: "); for (i = 0; i < 6; i++) - p += sprintf(p, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":"); + seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? 
"\n" : ":"); - p += sprintf(p, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", - pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, - pkt_dev->udp_dst_max); + seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", + pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, + pkt_dev->udp_dst_max); - p += sprintf(p, " src_mac_count: %d dst_mac_count: %d \n Flags: ", - pkt_dev->src_mac_count, pkt_dev->dst_mac_count); + seq_printf(seq, " src_mac_count: %d dst_mac_count: %d \n Flags: ", + pkt_dev->src_mac_count, pkt_dev->dst_mac_count); if (pkt_dev->flags & F_IPV6) - p += sprintf(p, "IPV6 "); + seq_printf(seq, "IPV6 "); if (pkt_dev->flags & F_IPSRC_RND) - p += sprintf(p, "IPSRC_RND "); + seq_printf(seq, "IPSRC_RND "); if (pkt_dev->flags & F_IPDST_RND) - p += sprintf(p, "IPDST_RND "); + seq_printf(seq, "IPDST_RND "); if (pkt_dev->flags & F_TXSIZE_RND) - p += sprintf(p, "TXSIZE_RND "); + seq_printf(seq, "TXSIZE_RND "); if (pkt_dev->flags & F_UDPSRC_RND) - p += sprintf(p, "UDPSRC_RND "); + seq_printf(seq, "UDPSRC_RND "); if (pkt_dev->flags & F_UDPDST_RND) - p += sprintf(p, "UDPDST_RND "); + seq_printf(seq, "UDPDST_RND "); if (pkt_dev->flags & F_MACSRC_RND) - p += sprintf(p, "MACSRC_RND "); + seq_printf(seq, "MACSRC_RND "); if (pkt_dev->flags & F_MACDST_RND) - p += sprintf(p, "MACDST_RND "); + seq_printf(seq, "MACDST_RND "); - p += sprintf(p, "\n"); + seq_puts(seq, "\n"); sa = pkt_dev->started_at; stopped = pkt_dev->stopped_at; if (pkt_dev->running) stopped = now; /* not really stopped, more like last-running-at */ - p += sprintf(p, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", - (unsigned long long) pkt_dev->sofar, - (unsigned long long) pkt_dev->errors, - (unsigned long long) sa, - (unsigned long long) stopped, - (unsigned long long) pkt_dev->idle_acc); + seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", + (unsigned long long) pkt_dev->sofar, + (unsigned long long) pkt_dev->errors, + (unsigned long long) sa, + (unsigned long long) stopped, + (unsigned long long) pkt_dev->idle_acc); - p += sprintf(p, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", - pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset); + seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", + pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, + pkt_dev->cur_src_mac_offset); if(pkt_dev->flags & F_IPV6) { char b1[128], b2[128]; fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); - p += sprintf(p, " cur_saddr: %s cur_daddr: %s\n", b2, b1); + seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1); } else - p += sprintf(p, " cur_saddr: 0x%x cur_daddr: 0x%x\n", - pkt_dev->cur_saddr, pkt_dev->cur_daddr); + seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", + pkt_dev->cur_saddr, pkt_dev->cur_daddr); - p += sprintf(p, " cur_udp_dst: %d cur_udp_src: %d\n", - pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); + seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", + pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); - p += sprintf(p, " flows: %u\n", pkt_dev->nflows); + seq_printf(seq, " flows: %u\n", pkt_dev->nflows); if (pkt_dev->result[0]) - p += sprintf(p, "Result: %s\n", pkt_dev->result); + seq_printf(seq, "Result: %s\n", pkt_dev->result); else - p += sprintf(p, "Result: Idle\n"); - *eof = 1; + seq_printf(seq, "Result: Idle\n"); - return p - buf; + return 0; } @@ 
-802,13 +757,14 @@ done_str: return i; } -static int proc_if_write(struct file *file, const char __user *user_buffer, - unsigned long count, void *data) +static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer, + size_t count, loff_t *offset) { + struct seq_file *seq = (struct seq_file *) file->private_data; + struct pktgen_dev *pkt_dev = seq->private; int i = 0, max, len; char name[16], valstr[32]; unsigned long value = 0; - struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); char* pg_result = NULL; int tmp = 0; char buf[128]; @@ -849,7 +805,8 @@ static int proc_if_write(struct file *file, const char __user *user_buffer, if (copy_from_user(tb, user_buffer, count)) return -EFAULT; tb[count] = 0; - printk("pktgen: %s,%lu buffer -:%s:-\n", name, count, tb); + printk("pktgen: %s,%lu buffer -:%s:-\n", name, + (unsigned long) count, tb); } if (!strcmp(name, "min_pkt_size")) { @@ -1335,92 +1292,98 @@ static int proc_if_write(struct file *file, const char __user *user_buffer, return -EINVAL; } -static int proc_thread_read(char *buf , char **start, off_t offset, - int len, int *eof, void *data) +static int pktgen_if_open(struct inode *inode, struct file *file) { - char *p; - struct pktgen_thread *t = (struct pktgen_thread*)(data); - struct pktgen_dev *pkt_dev = NULL; + return single_open(file, pktgen_if_show, PDE(inode)->data); +} +static struct file_operations pktgen_if_fops = { + .owner = THIS_MODULE, + .open = pktgen_if_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pktgen_if_write, + .release = single_release, +}; - if (!t) { - printk("pktgen: ERROR: could not find thread in proc_thread_read\n"); - return -EINVAL; - } +static int pktgen_thread_show(struct seq_file *seq, void *v) +{ + struct pktgen_thread *t = seq->private; + struct pktgen_dev *pkt_dev = NULL; + + BUG_ON(!t); - p = buf; - p += sprintf(p, "Name: %s max_before_softirq: %d\n", + seq_printf(seq, "Name: %s max_before_softirq: %d\n", t->name, t->max_before_softirq); - p += sprintf(p, "Running: "); + seq_printf(seq, "Running: "); if_lock(t); for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) if(pkt_dev->running) - p += sprintf(p, "%s ", pkt_dev->ifname); + seq_printf(seq, "%s ", pkt_dev->ifname); - p += sprintf(p, "\nStopped: "); + seq_printf(seq, "\nStopped: "); for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) if(!pkt_dev->running) - p += sprintf(p, "%s ", pkt_dev->ifname); + seq_printf(seq, "%s ", pkt_dev->ifname); if (t->result[0]) - p += sprintf(p, "\nResult: %s\n", t->result); + seq_printf(seq, "\nResult: %s\n", t->result); else - p += sprintf(p, "\nResult: NA\n"); - - *eof = 1; + seq_printf(seq, "\nResult: NA\n"); if_unlock(t); - return p - buf; + return 0; } -static int proc_thread_write(struct file *file, const char __user *user_buffer, - unsigned long count, void *data) +static ssize_t pktgen_thread_write(struct file *file, + const char __user *user_buffer, + size_t count, loff_t *offset) { + struct seq_file *seq = (struct seq_file *) file->private_data; + struct pktgen_thread *t = seq->private; int i = 0, max, len, ret; char name[40]; - struct pktgen_thread *t; char *pg_result; unsigned long value = 0; - + if (count < 1) { // sprintf(pg_result, "Wrong command format"); return -EINVAL; } - + max = count - i; len = count_trail_chars(&user_buffer[i], max); - if (len < 0) - return len; - + if (len < 0) + return len; + i += len; - + /* Read variable name */ len = strn_len(&user_buffer[i], sizeof(name) - 1); - if (len < 0) - return len; + if (len < 0) + return len; 
memset(name, 0, sizeof(name)); if (copy_from_user(name, &user_buffer[i], len)) return -EFAULT; i += len; - + max = count -i; len = count_trail_chars(&user_buffer[i], max); - if (len < 0) - return len; - + if (len < 0) + return len; + i += len; - if (debug) - printk("pktgen: t=%s, count=%lu\n", name, count); - + if (debug) + printk("pktgen: t=%s, count=%lu\n", name, + (unsigned long) count); - t = (struct pktgen_thread*)(data); if(!t) { printk("pktgen: ERROR: No thread\n"); ret = -EINVAL; @@ -1474,21 +1437,19 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer, return ret; } -static int create_proc_dir(void) +static int pktgen_thread_open(struct inode *inode, struct file *file) { - pg_proc_dir = proc_mkdir(PG_PROC_DIR, NULL); - - if (!pg_proc_dir) - return -ENODEV; - - return 0; + return single_open(file, pktgen_thread_show, PDE(inode)->data); } -static int remove_proc_dir(void) -{ - remove_proc_entry(PG_PROC_DIR, NULL); - return 0; -} +static struct file_operations pktgen_thread_fops = { + .owner = THIS_MODULE, + .open = pktgen_thread_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pktgen_thread_write, + .release = single_release, +}; /* Think find or remove for NN */ static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove) @@ -2552,8 +2513,7 @@ static void pktgen_rem_thread(struct pktgen_thread *t) struct pktgen_thread *tmp = pktgen_threads; - if (strlen(t->fname)) - remove_proc_entry(t->fname, NULL); + remove_proc_entry(t->name, pg_proc_dir); thread_lock(); @@ -2825,7 +2785,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* i if_lock(t); for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) { - if (strcmp(pkt_dev->ifname, ifname) == 0) { + if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) { break; } } @@ -2864,65 +2824,63 @@ static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev static int pktgen_add_device(struct pktgen_thread *t, const char* ifname) { struct pktgen_dev *pkt_dev; + struct proc_dir_entry *pe; /* We don't allow a device to be on several threads */ - if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) { - - pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); - if (!pkt_dev) - return -ENOMEM; - - pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); - if (pkt_dev->flows == NULL) { - kfree(pkt_dev); - return -ENOMEM; - } - memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); - - pkt_dev->min_pkt_size = ETH_ZLEN; - pkt_dev->max_pkt_size = ETH_ZLEN; - pkt_dev->nfrags = 0; - pkt_dev->clone_skb = pg_clone_skb_d; - pkt_dev->delay_us = pg_delay_d / 1000; - pkt_dev->delay_ns = pg_delay_d % 1000; - pkt_dev->count = pg_count_d; - pkt_dev->sofar = 0; - pkt_dev->udp_src_min = 9; /* sink port */ - pkt_dev->udp_src_max = 9; - pkt_dev->udp_dst_min = 9; - pkt_dev->udp_dst_max = 9; - - strncpy(pkt_dev->ifname, ifname, 31); - sprintf(pkt_dev->fname, "%s/%s", PG_PROC_DIR, ifname); - - if (! 
pktgen_setup_dev(pkt_dev)) { - printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); - if (pkt_dev->flows) - vfree(pkt_dev->flows); - kfree(pkt_dev); - return -ENODEV; - } - - pkt_dev->proc_ent = create_proc_entry(pkt_dev->fname, 0600, NULL); - if (!pkt_dev->proc_ent) { - printk("pktgen: cannot create %s procfs entry.\n", pkt_dev->fname); - if (pkt_dev->flows) - vfree(pkt_dev->flows); - kfree(pkt_dev); - return -EINVAL; - } - pkt_dev->proc_ent->read_proc = proc_if_read; - pkt_dev->proc_ent->write_proc = proc_if_write; - pkt_dev->proc_ent->data = (void*)(pkt_dev); - pkt_dev->proc_ent->owner = THIS_MODULE; - - return add_dev_to_thread(t, pkt_dev); - } - else { + pkt_dev = __pktgen_NN_threads(ifname, FIND); + if (pkt_dev) { printk("pktgen: ERROR: interface already used.\n"); return -EBUSY; } + + pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); + if (!pkt_dev) + return -ENOMEM; + + pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); + if (pkt_dev->flows == NULL) { + kfree(pkt_dev); + return -ENOMEM; + } + memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); + + pkt_dev->min_pkt_size = ETH_ZLEN; + pkt_dev->max_pkt_size = ETH_ZLEN; + pkt_dev->nfrags = 0; + pkt_dev->clone_skb = pg_clone_skb_d; + pkt_dev->delay_us = pg_delay_d / 1000; + pkt_dev->delay_ns = pg_delay_d % 1000; + pkt_dev->count = pg_count_d; + pkt_dev->sofar = 0; + pkt_dev->udp_src_min = 9; /* sink port */ + pkt_dev->udp_src_max = 9; + pkt_dev->udp_dst_min = 9; + pkt_dev->udp_dst_max = 9; + + strncpy(pkt_dev->ifname, ifname, IFNAMSIZ); + + if (! pktgen_setup_dev(pkt_dev)) { + printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); + if (pkt_dev->flows) + vfree(pkt_dev->flows); + kfree(pkt_dev); + return -ENODEV; + } + + pe = create_proc_entry(ifname, 0600, pg_proc_dir); + if (!pe) { + printk("pktgen: cannot create %s/%s procfs entry.\n", + PG_PROC_DIR, ifname); + if (pkt_dev->flows) + vfree(pkt_dev->flows); + kfree(pkt_dev); + return -EINVAL; + } + pe->proc_fops = &pktgen_if_fops; + pe->data = pkt_dev; + + return add_dev_to_thread(t, pkt_dev); } static struct pktgen_thread *pktgen_find_thread(const char* name) @@ -2945,6 +2903,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name) static int pktgen_create_thread(const char* name, int cpu) { struct pktgen_thread *t = NULL; + struct proc_dir_entry *pe; if (strlen(name) > 31) { printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); @@ -2966,17 +2925,16 @@ static int pktgen_create_thread(const char* name, int cpu) spin_lock_init(&t->if_lock); t->cpu = cpu; - sprintf(t->fname, "%s/%s", PG_PROC_DIR, t->name); - t->proc_ent = create_proc_entry(t->fname, 0600, NULL); - if (!t->proc_ent) { - printk("pktgen: cannot create %s procfs entry.\n", t->fname); + pe = create_proc_entry(t->name, 0600, pg_proc_dir); + if (!pe) { + printk("pktgen: cannot create %s/%s procfs entry.\n", + PG_PROC_DIR, t->name); kfree(t); return -EINVAL; } - t->proc_ent->read_proc = proc_thread_read; - t->proc_ent->write_proc = proc_thread_write; - t->proc_ent->data = (void*)(t); - t->proc_ent->owner = THIS_MODULE; + + pe->proc_fops = &pktgen_thread_fops; + pe->data = t; t->next = pktgen_threads; pktgen_threads = t; @@ -3031,8 +2989,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_ /* Clean up proc file system */ - if (strlen(pkt_dev->fname)) - remove_proc_entry(pkt_dev->fname, NULL); + remove_proc_entry(pkt_dev->ifname, pg_proc_dir); if (pkt_dev->flows) vfree(pkt_dev->flows); @@ -3043,21 +3000,24 @@ static int 
pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_ static int __init pg_init(void) { int cpu; - printk(version); + struct proc_dir_entry *pe; - module_fname[0] = 0; + printk(version); - create_proc_dir(); + pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); + if (!pg_proc_dir) + return -ENODEV; + pg_proc_dir->owner = THIS_MODULE; - sprintf(module_fname, "%s/pgctrl", PG_PROC_DIR); - module_proc_ent = create_proc_entry(module_fname, 0600, NULL); - if (!module_proc_ent) { - printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname); + pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); + if (pe == NULL) { + printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL); + proc_net_remove(PG_PROC_DIR); return -EINVAL; } - module_proc_ent->proc_fops = &pktgen_fops; - module_proc_ent->data = NULL; + pe->proc_fops = &pktgen_fops; + pe->data = NULL; /* Register us to receive netdevice events */ register_netdevice_notifier(&pktgen_notifier_block); @@ -3089,10 +3049,8 @@ static void __exit pg_cleanup(void) unregister_netdevice_notifier(&pktgen_notifier_block); /* Clean up proc file system */ - - remove_proc_entry(module_fname, NULL); - - remove_proc_dir(); + remove_proc_entry(PGCTRL, pg_proc_dir); + proc_net_remove(PG_PROC_DIR); } -- cgit v1.2.2 From eed75f191d8318a2b144da8aae9774e1cfcae492 Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Sun, 16 Oct 2005 14:22:59 +0200 Subject: [NETFILTER] ip_conntrack: Make "hashsize" conntrack parameter writable It's fairly simple to resize the hash table, but currently you need to remove and reinsert the module. That's bad (we lose connection state). Harald has even offered to write a daemon which sets this based on load. Signed-off-by: Rusty Russell Signed-off-by: Harald Welte Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/netfilter/ip_conntrack_core.c | 132 ++++++++++++++++++++++++--------- 1 file changed, 95 insertions(+), 37 deletions(-) (limited to 'net') diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 07a80b56e8dc..422ab68ee7fb 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -50,7 +50,7 @@ #include #include -#define IP_CONNTRACK_VERSION "2.3" +#define IP_CONNTRACK_VERSION "2.4" #if 0 #define DEBUGP printk @@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); static int ip_conntrack_hash_rnd_initted; static unsigned int ip_conntrack_hash_rnd; -static u_int32_t -hash_conntrack(const struct ip_conntrack_tuple *tuple) +static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple, + unsigned int size, unsigned int rnd) { -#if 0 - dump_tuple(tuple); -#endif return (jhash_3words(tuple->src.ip, (tuple->dst.ip ^ tuple->dst.protonum), (tuple->src.u.all | (tuple->dst.u.all << 16)), - ip_conntrack_hash_rnd) % ip_conntrack_htable_size); + rnd) % size); +} + +static u_int32_t +hash_conntrack(const struct ip_conntrack_tuple *tuple) +{ + return __hash_conntrack(tuple, ip_conntrack_htable_size, + ip_conntrack_hash_rnd); } int @@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data) return 1; } -static void free_conntrack_hash(void) +static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size) { - if (ip_conntrack_vmalloc) - vfree(ip_conntrack_hash); + if (vmalloced) + vfree(hash); else - free_pages((unsigned long)ip_conntrack_hash, - get_order(sizeof(struct list_head) - * ip_conntrack_htable_size)); + free_pages((unsigned long)hash, + 
get_order(sizeof(struct list_head) * size)); } void ip_conntrack_flush() @@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void) ip_conntrack_flush(); kmem_cache_destroy(ip_conntrack_cachep); kmem_cache_destroy(ip_conntrack_expect_cachep); - free_conntrack_hash(); + free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, + ip_conntrack_htable_size); nf_unregister_sockopt(&so_getorigdst); } -static int hashsize; -module_param(hashsize, int, 0400); +static struct list_head *alloc_hashtable(int size, int *vmalloced) +{ + struct list_head *hash; + unsigned int i; + + *vmalloced = 0; + hash = (void*)__get_free_pages(GFP_KERNEL, + get_order(sizeof(struct list_head) + * size)); + if (!hash) { + *vmalloced = 1; + printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n"); + hash = vmalloc(sizeof(struct list_head) * size); + } + + if (hash) + for (i = 0; i < size; i++) + INIT_LIST_HEAD(&hash[i]); + + return hash; +} + +int set_hashsize(const char *val, struct kernel_param *kp) +{ + int i, bucket, hashsize, vmalloced; + int old_vmalloced, old_size; + int rnd; + struct list_head *hash, *old_hash; + struct ip_conntrack_tuple_hash *h; + + /* On boot, we can set this without any fancy locking. */ + if (!ip_conntrack_htable_size) + return param_set_int(val, kp); + + hashsize = simple_strtol(val, NULL, 0); + if (!hashsize) + return -EINVAL; + + hash = alloc_hashtable(hashsize, &vmalloced); + if (!hash) + return -ENOMEM; + + /* We have to rehash for the new table anyway, so we also can + * use a new random seed */ + get_random_bytes(&rnd, 4); + + write_lock_bh(&ip_conntrack_lock); + for (i = 0; i < ip_conntrack_htable_size; i++) { + while (!list_empty(&ip_conntrack_hash[i])) { + h = list_entry(ip_conntrack_hash[i].next, + struct ip_conntrack_tuple_hash, list); + list_del(&h->list); + bucket = __hash_conntrack(&h->tuple, hashsize, rnd); + list_add_tail(&h->list, &hash[bucket]); + } + } + old_size = ip_conntrack_htable_size; + old_vmalloced = ip_conntrack_vmalloc; + old_hash = ip_conntrack_hash; + + ip_conntrack_htable_size = hashsize; + ip_conntrack_vmalloc = vmalloced; + ip_conntrack_hash = hash; + ip_conntrack_hash_rnd = rnd; + write_unlock_bh(&ip_conntrack_lock); + + free_conntrack_hash(old_hash, old_vmalloced, old_size); + return 0; +} + +module_param_call(hashsize, set_hashsize, param_get_uint, + &ip_conntrack_htable_size, 0600); int __init ip_conntrack_init(void) { @@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void) /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ - if (hashsize) { - ip_conntrack_htable_size = hashsize; - } else { + if (!ip_conntrack_htable_size) { ip_conntrack_htable_size = (((num_physpages << PAGE_SHIFT) / 16384) / sizeof(struct list_head)); @@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void) return ret; } - /* AK: the hash table is twice as big than needed because it - uses list_head. it would be much nicer to caches to use a - single pointer list head here. 
*/ - ip_conntrack_vmalloc = 0; - ip_conntrack_hash - =(void*)__get_free_pages(GFP_KERNEL, - get_order(sizeof(struct list_head) - *ip_conntrack_htable_size)); - if (!ip_conntrack_hash) { - ip_conntrack_vmalloc = 1; - printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n"); - ip_conntrack_hash = vmalloc(sizeof(struct list_head) - * ip_conntrack_htable_size); - } + ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size, + &ip_conntrack_vmalloc); if (!ip_conntrack_hash) { printk(KERN_ERR "Unable to create ip_conntrack_hash\n"); goto err_unreg_sockopt; @@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void) ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp; write_unlock_bh(&ip_conntrack_lock); - for (i = 0; i < ip_conntrack_htable_size; i++) - INIT_LIST_HEAD(&ip_conntrack_hash[i]); - /* For use by ipt_REJECT */ ip_ct_attach = ip_conntrack_attach; @@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void) err_free_conntrack_slab: kmem_cache_destroy(ip_conntrack_cachep); err_free_hash: - free_conntrack_hash(); + free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, + ip_conntrack_htable_size); err_unreg_sockopt: nf_unregister_sockopt(&so_getorigdst); -- cgit v1.2.2 From 1371e37da299d4df6267ad0ddf010435782c28e9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Oct 2005 09:42:39 +1000 Subject: [IPV4]: Kill redundant rcu_dereference on fa_info This patch kills a redundant rcu_dereference on fa->fa_info in fib_trie.c. As this dereference directly follows a list_for_each_entry_rcu line, we have already taken a read barrier with respect to getting an entry from the list. This read barrier guarantees that all values read out of fa are valid. In particular, the contents of structure pointed to by fa->fa_info is initialised before fa->fa_info is actually set (see fn_trie_insert); the setting of fa->fa_info itself is further separated with a write barrier from the insertion of fa into the list. Therefore by taking a read barrier after obtaining fa from the list (which is given by list_for_each_entry_rcu), we can be sure that fa->fa_info contains a valid pointer, as well as the fact that the data pointed to by fa->fa_info is itself valid. Signed-off-by: Herbert Xu Acked-by: Paul E. McKenney Signed-off-by: David S. Miller Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/fib_trie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 0093ea08c7f5..66247f38b371 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2404,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) prefix = htonl(l->key); list_for_each_entry_rcu(fa, &li->falh, fa_list) { - const struct fib_info *fi = rcu_dereference(fa->fa_info); + const struct fib_info *fi = fa->fa_info; unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); if (fa->fa_type == RTN_BROADCAST -- cgit v1.2.2 From 80b30c1023dbd795faf948dee0cfb3b270b56d47 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Oct 2005 10:58:30 +1000 Subject: [IPSEC]: Kill obsolete get_mss function Now that we've switched over to storing MTUs in the xfrm_dst entries, we no longer need the dst's get_mss methods. This patch gets rid of them. It also documents the fact that our MTU calculation is not optimal for ESP. 
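[A rough, illustrative reading of the "not optimal for ESP" remark, with assumed numbers rather than values taken from the code: the encrypted portion of an ESP packet is padded up to the cipher block size and always ends with two mandatory trailer bytes (pad length, next header), so with block size b = 8 the largest payload that fits in n = 181 blocks is

    181 * 8 - 2 = 1448 - 2 = 1446 bytes,

whereas any calculation that works purely in whole cipher blocks sits two bytes away from that optimum — presumably the "usually two bytes short of being optimal" noted in the comment added to xfrm_state_mtu() below — while still returning a multiple of 4 whenever its input is one.]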
Signed-off-by: Herbert Xu Signed-off-by: Arnaldo Carvalho de Melo --- net/xfrm/xfrm_policy.c | 43 ------------------------------------------- net/xfrm/xfrm_state.c | 6 ++++++ 2 files changed, 6 insertions(+), 43 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cbb0ba34a600..0db9e57013fd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family) EXPORT_SYMBOL(xfrm_bundle_ok); -/* Well... that's _TASK_. We need to scan through transformation - * list and figure out what mss tcp should generate in order to - * final datagram fit to mtu. Mama mia... :-) - * - * Apparently, some easy way exists, but we used to choose the most - * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta. - * - * Consider this function as something like dark humour. :-) - */ -static int xfrm_get_mss(struct dst_entry *dst, u32 mtu) -{ - int res = mtu - dst->header_len; - - for (;;) { - struct dst_entry *d = dst; - int m = res; - - do { - struct xfrm_state *x = d->xfrm; - if (x) { - spin_lock_bh(&x->lock); - if (x->km.state == XFRM_STATE_VALID && - x->type && x->type->get_max_size) - m = x->type->get_max_size(d->xfrm, m); - else - m += x->props.header_len; - spin_unlock_bh(&x->lock); - } - } while ((d = d->child) != NULL); - - if (m <= mtu) - break; - res -= (m - mtu); - if (res < 88) - return mtu; - } - - return res + dst->header_len; -} - int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { int err = 0; @@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->negative_advice = xfrm_negative_advice; if (likely(dst_ops->link_failure == NULL)) dst_ops->link_failure = xfrm_link_failure; - if (likely(dst_ops->get_mss == NULL)) - dst_ops->get_mss = xfrm_get_mss; if (likely(afinfo->garbage_collect == NULL)) afinfo->garbage_collect = __xfrm_garbage_collect; xfrm_policy_afinfo[afinfo->family] = afinfo; @@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->check = NULL; dst_ops->negative_advice = NULL; dst_ops->link_failure = NULL; - dst_ops->get_mss = NULL; afinfo->garbage_collect = NULL; } } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9d206c282cf1..8b9a4747417d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_delete_tunnel); +/* + * This function is NOT optimal. For example, with ESP it will give an + * MTU that's usually two bytes short of being optimal. However, it will + * usually give an answer that's a multiple of 4 provided the input is + * also a multiple of 4. + */ int xfrm_state_mtu(struct xfrm_state *x, int mtu) { int res = mtu; -- cgit v1.2.2 From ea7ce406490cb248f44f510f7c0bcc357184640a Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Thu, 13 Oct 2005 11:43:05 -0700 Subject: [NETLINK]: Remove dead code in af_netlink.c Remove the variable nlk & call to nlk_sk as it does not have any side effect. Signed-off-by: Jayachandran C. 
Acked-by: James Morris Signed-off-by: Arnaldo Carvalho de Melo --- net/netlink/af_netlink.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 678c3f2c0d0b..eab4942ac97d 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol) { - struct netlink_sock *nlk; int len = skb->len; - nlk = nlk_sk(sk); - skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); sock_put(sk); -- cgit v1.2.2 From 0d0d2bba97cb091218ea0bcb3d8debcc7bf48397 Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Thu, 13 Oct 2005 11:43:02 -0700 Subject: [IPV4]: Remove dead code from ip_output.c skb_prev is assigned from skb, which cannot be NULL. This patch removes the unnecessary NULL check. Signed-off-by: Jayachandran C. Acked-by: James Morris Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/ip_output.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 1ad5202e556b..87e350069abb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1023,10 +1023,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, int alloclen; skb_prev = skb; - if (skb_prev) - fraggap = skb_prev->len - maxfraglen; - else - fraggap = 0; + fraggap = skb_prev->len - maxfraglen; alloclen = fragheaderlen + hh_len + fraggap + 15; skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); -- cgit v1.2.2 From c83c24861882758b9731e8550225cd1e52a4cd1c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 18 Oct 2005 22:07:41 -0700 Subject: [SK_BUFF] kernel-doc: fix skbuff warnings Add kernel-doc to skbuff.h, skbuff.c to eliminate kernel-doc warnings. Signed-off-by: Randy Dunlap Signed-off-by: Arnaldo Carvalho de Melo --- net/core/skbuff.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 02cd4cde2112..ef9d46b91eb9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -122,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) * __alloc_skb - allocate a network buffer * @size: size to allocate * @gfp_mask: allocation mask + * @fclone: allocate from fclone cache instead of head cache + * and allocate a cloned (child) skb * * Allocate a new &sk_buff. The returned buffer has no headroom and a * tail room of size bytes. The object has a reference count of one. 
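For reference, a minimal kernel-doc sketch (a hypothetical helper, not taken from this patch) showing the "name - summary" line and the per-parameter @name: lines that the hunk above completes for __alloc_skb(); scripts/kernel-doc warns about any parameter that lacks such a line, which is the class of warning this commit removes.

#include <linux/slab.h>

/**
 *	example_alloc_buf - allocate a scratch buffer (hypothetical helper)
 *	@size: number of bytes to allocate
 *	@gfp_mask: allocation mask passed straight through to kmalloc()
 *
 *	Allocate @size bytes. Returns the new buffer, or %NULL on failure.
 */
static void *example_alloc_buf(unsigned int size, gfp_t gfp_mask)
{
	return kmalloc(size, gfp_mask);
}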
-- cgit v1.2.2 From 95df1c04ab3f7ca617774930df62c0893a188c2c Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 18 Oct 2005 21:39:33 +0100 Subject: [AX.25]: Use constant instead of magic number Signed-off-by: Ralf Baechle DL5RB Signed-off-by: Arnaldo Carvalho de Melo --- net/rose/rose_route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index e556d92c0bc4..b18fe5043019 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg) } if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ return -EINVAL; - if (rose_route.ndigis > 8) /* No more than 8 digipeats */ + if (rose_route.ndigis > AX25_MAX_DIGIS) return -EINVAL; err = rose_add_node(&rose_route, dev); dev_put(dev); -- cgit v1.2.2 From dcab5e1eeccf5e226c771ecc013631cde157435f Mon Sep 17 00:00:00 2001 From: David Engel Date: Fri, 21 Oct 2005 22:09:16 -0500 Subject: [IPV4]: Fix setting broadcast for SIOCSIFNETMASK Fix setting of the broadcast address when the netmask is set via SIOCSIFNETMASK in Linux 2.6. The code wanted the old value of ifa->ifa_mask but used it after it had already been overwritten with the new value. Signed-off-by: David Engel Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/devinet.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 74f2207e131a..4ec4b2ca6ab1 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) break; ret = 0; if (ifa->ifa_mask != sin->sin_addr.s_addr) { + u32 old_mask = ifa->ifa_mask; inet_del_ifa(in_dev, ifap, 0); ifa->ifa_mask = sin->sin_addr.s_addr; ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); @@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) if ((dev->flags & IFF_BROADCAST) && (ifa->ifa_prefixlen < 31) && (ifa->ifa_broadcast == - (ifa->ifa_local|~ifa->ifa_mask))) { + (ifa->ifa_local|~old_mask))) { ifa->ifa_broadcast = (ifa->ifa_local | ~sin->sin_addr.s_addr); } -- cgit v1.2.2 From 077783f87708b24054452e5c07685ead2c28b1eb Mon Sep 17 00:00:00 2001 From: James Ketrenos Date: Mon, 24 Oct 2005 10:27:46 -0500 Subject: [PATCH] ieee80211 build fix James Ketrenos wrote: > [3/4] Use the tx_headroom and reserve requested space. This patch introduced a compile problem; patch below corrects this. Fixed compilation error due to not passing tx_headroom in ieee80211_tx_frame. Signed-off-by: James Ketrenos Signed-off-by: Jeff Garzik --- net/ieee80211/ieee80211_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index fb4509032402..95ccbadbf55b 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -541,7 +541,7 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee, /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, * postfix, header, FCS, etc.) 
*/ - txb = ieee80211_alloc_txb(1, len, GFP_ATOMIC); + txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); -- cgit v1.2.2 From 6fa05b17367f04ada501e89d3b9cb56adec0d930 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 27 Oct 2005 19:08:18 -0400 Subject: Revert "RPC: stops the release_pipe() funtion from being called twice" This reverts 747c5534c9a6da4aa87e7cdc2209ea98ea27f381 commit. --- net/sunrpc/rpc_pipe.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 649d609e7d94..ded6c63f11ec 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -177,8 +177,6 @@ rpc_pipe_release(struct inode *inode, struct file *filp) __rpc_purge_upcall(inode, -EPIPE); if (rpci->ops->release_pipe) rpci->ops->release_pipe(inode); - if (!rpci->nreaders && !rpci->nwriters) - rpci->ops = NULL; out: up(&inode->i_sem); return 0; -- cgit v1.2.2 From 6070fe6f82c67021367092855c0812a98becf0fa Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 27 Oct 2005 22:12:46 -0400 Subject: RPC: Ensure that nobody can queue up new upcalls after rpc_close_pipes() Signed-off-by: Trond Myklebust --- net/sunrpc/rpc_pipe.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ded6c63f11ec..4f188d0a5d11 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -76,25 +76,35 @@ int rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) { struct rpc_inode *rpci = RPC_I(inode); - int res = 0; + int res = -EPIPE; down(&inode->i_sem); + if (rpci->ops == NULL) + goto out; if (rpci->nreaders) { list_add_tail(&msg->list, &rpci->pipe); rpci->pipelen += msg->len; + res = 0; } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { if (list_empty(&rpci->pipe)) schedule_delayed_work(&rpci->queue_timeout, RPC_UPCALL_TIMEOUT); list_add_tail(&msg->list, &rpci->pipe); rpci->pipelen += msg->len; - } else - res = -EPIPE; + res = 0; + } +out: up(&inode->i_sem); wake_up(&rpci->waitq); return res; } +static inline void +rpc_inode_setowner(struct inode *inode, void *private) +{ + RPC_I(inode)->private = private; +} + static void rpc_close_pipes(struct inode *inode) { @@ -111,15 +121,10 @@ rpc_close_pipes(struct inode *inode) rpci->ops->release_pipe(inode); rpci->ops = NULL; } + rpc_inode_setowner(inode, NULL); up(&inode->i_sem); } -static inline void -rpc_inode_setowner(struct inode *inode, void *private) -{ - RPC_I(inode)->private = private; -} - static struct inode * rpc_alloc_inode(struct super_block *sb) { @@ -501,7 +506,6 @@ repeat: dentry = dvec[--n]; if (dentry->d_inode) { rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); simple_unlink(dir, dentry); } dput(dentry); @@ -576,10 +580,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) int error; shrink_dcache_parent(dentry); - if (dentry->d_inode) { + if (dentry->d_inode) rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); - } if ((error = simple_rmdir(dir, dentry)) != 0) return error; if (!error) { @@ -732,7 +734,6 @@ rpc_unlink(char *path) d_drop(dentry); if (dentry->d_inode) { rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); error = simple_unlink(dir, dentry); } dput(dentry); -- cgit v1.2.2 From 7d877f3bda870ab5f001bd92528654471d5966b3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 21 Oct 
2005 03:20:43 -0400 Subject: [PATCH] gfp_t: net/* Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- net/core/sock.c | 2 +- net/dccp/output.c | 2 +- net/netlink/af_netlink.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/sock.c b/net/core/sock.c index 1c52fe809eda..9602ceb3bac9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -940,7 +940,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk, int noblock, int *errcode) { struct sk_buff *skb; - unsigned int gfp_mask; + gfp_t gfp_mask; long timeo; int err; diff --git a/net/dccp/output.c b/net/dccp/output.c index 29250749f16f..d59f86f7ceab 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -495,7 +495,7 @@ void dccp_send_close(struct sock *sk, const int active) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; - const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC; + const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC; skb = alloc_skb(sk->sk_prot->max_header, prio); if (skb == NULL) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 678c3f2c0d0b..291df2e4c492 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -827,7 +827,7 @@ struct netlink_broadcast_data { int failure; int congested; int delivered; - unsigned int allocation; + gfp_t allocation; struct sk_buff *skb, *skb2; }; -- cgit v1.2.2 From 34abf91f4036c01669e298e649b7ba85cadf82eb Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 15 Sep 2005 02:01:40 -0500 Subject: [PATCH] Input: convert net/bluetooth to dynamic input_dev allocation Input: convert net/bluetooth to dynamic input_dev allocation This is required for input_dev sysfs integration Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/hidp/core.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index de8af5f42394..860444a7fc0f 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -520,7 +520,7 @@ static int hidp_session(void *arg) if (session->input) { input_unregister_device(session->input); - kfree(session->input); + session->input = NULL; } up_write(&hidp_session_sem); @@ -536,6 +536,8 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co input->private = session; + input->name = "Bluetooth HID Boot Protocol Device"; + input->id.bustype = BUS_BLUETOOTH; input->id.vendor = req->vendor; input->id.product = req->product; @@ -582,16 +584,15 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, return -ENOTUNIQ; session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL); - if (!session) + if (!session) return -ENOMEM; memset(session, 0, sizeof(struct hidp_session)); - session->input = kmalloc(sizeof(struct input_dev), GFP_KERNEL); + session->input = input_allocate_device(); if (!session->input) { kfree(session); return -ENOMEM; } - memset(session->input, 0, sizeof(struct input_dev)); down_write(&hidp_session_sem); @@ -651,8 +652,10 @@ unlink: __hidp_unlink_session(session); - if (session->input) + if (session->input) { input_unregister_device(session->input); + session->input = NULL; /* don't try to free it here */ + } failed: up_write(&hidp_session_sem); -- cgit v1.2.2 From 408c1ce2716c7a004851c93f9f9dcf3d763bc240 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Fri, 28 Oct 2005 19:20:36 +0200 Subject: [Bluetooth] Move CRC table into RFCOMM core This patch moves 
rfcomm_crc_table[] into the RFCOMM core, because there is no need to keep it in a separate file. Signed-off-by: Marcel Holtmann --- net/bluetooth/rfcomm/Makefile | 2 +- net/bluetooth/rfcomm/core.c | 43 ++++++++++++++++++++++++++ net/bluetooth/rfcomm/crc.c | 71 ------------------------------------------- 3 files changed, 44 insertions(+), 72 deletions(-) delete mode 100644 net/bluetooth/rfcomm/crc.c (limited to 'net') diff --git a/net/bluetooth/rfcomm/Makefile b/net/bluetooth/rfcomm/Makefile index aecec45ec68d..fe07988a3705 100644 --- a/net/bluetooth/rfcomm/Makefile +++ b/net/bluetooth/rfcomm/Makefile @@ -4,5 +4,5 @@ obj-$(CONFIG_BT_RFCOMM) += rfcomm.o -rfcomm-y := core.o sock.o crc.o +rfcomm-y := core.o sock.o rfcomm-$(CONFIG_BT_RFCOMM_TTY) += tty.o diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 35adce6482b6..c3d56ead840c 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -133,6 +133,49 @@ static inline void rfcomm_session_put(struct rfcomm_session *s) /* ---- RFCOMM FCS computation ---- */ +/* reversed, 8-bit, poly=0x07 */ +static unsigned char rfcomm_crc_table[256] = { + 0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75, + 0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b, + 0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69, + 0x12, 0x83, 0xf1, 0x60, 0x15, 0x84, 0xf6, 0x67, + + 0x38, 0xa9, 0xdb, 0x4a, 0x3f, 0xae, 0xdc, 0x4d, + 0x36, 0xa7, 0xd5, 0x44, 0x31, 0xa0, 0xd2, 0x43, + 0x24, 0xb5, 0xc7, 0x56, 0x23, 0xb2, 0xc0, 0x51, + 0x2a, 0xbb, 0xc9, 0x58, 0x2d, 0xbc, 0xce, 0x5f, + + 0x70, 0xe1, 0x93, 0x02, 0x77, 0xe6, 0x94, 0x05, + 0x7e, 0xef, 0x9d, 0x0c, 0x79, 0xe8, 0x9a, 0x0b, + 0x6c, 0xfd, 0x8f, 0x1e, 0x6b, 0xfa, 0x88, 0x19, + 0x62, 0xf3, 0x81, 0x10, 0x65, 0xf4, 0x86, 0x17, + + 0x48, 0xd9, 0xab, 0x3a, 0x4f, 0xde, 0xac, 0x3d, + 0x46, 0xd7, 0xa5, 0x34, 0x41, 0xd0, 0xa2, 0x33, + 0x54, 0xc5, 0xb7, 0x26, 0x53, 0xc2, 0xb0, 0x21, + 0x5a, 0xcb, 0xb9, 0x28, 0x5d, 0xcc, 0xbe, 0x2f, + + 0xe0, 0x71, 0x03, 0x92, 0xe7, 0x76, 0x04, 0x95, + 0xee, 0x7f, 0x0d, 0x9c, 0xe9, 0x78, 0x0a, 0x9b, + 0xfc, 0x6d, 0x1f, 0x8e, 0xfb, 0x6a, 0x18, 0x89, + 0xf2, 0x63, 0x11, 0x80, 0xf5, 0x64, 0x16, 0x87, + + 0xd8, 0x49, 0x3b, 0xaa, 0xdf, 0x4e, 0x3c, 0xad, + 0xd6, 0x47, 0x35, 0xa4, 0xd1, 0x40, 0x32, 0xa3, + 0xc4, 0x55, 0x27, 0xb6, 0xc3, 0x52, 0x20, 0xb1, + 0xca, 0x5b, 0x29, 0xb8, 0xcd, 0x5c, 0x2e, 0xbf, + + 0x90, 0x01, 0x73, 0xe2, 0x97, 0x06, 0x74, 0xe5, + 0x9e, 0x0f, 0x7d, 0xec, 0x99, 0x08, 0x7a, 0xeb, + 0x8c, 0x1d, 0x6f, 0xfe, 0x8b, 0x1a, 0x68, 0xf9, + 0x82, 0x13, 0x61, 0xf0, 0x85, 0x14, 0x66, 0xf7, + + 0xa8, 0x39, 0x4b, 0xda, 0xaf, 0x3e, 0x4c, 0xdd, + 0xa6, 0x37, 0x45, 0xd4, 0xa1, 0x30, 0x42, 0xd3, + 0xb4, 0x25, 0x57, 0xc6, 0xb3, 0x22, 0x50, 0xc1, + 0xba, 0x2b, 0x59, 0xc8, 0xbd, 0x2c, 0x5e, 0xcf +}; + /* CRC on 2 bytes */ #define __crc(data) (rfcomm_crc_table[rfcomm_crc_table[0xff ^ data[0]] ^ data[1]]) diff --git a/net/bluetooth/rfcomm/crc.c b/net/bluetooth/rfcomm/crc.c deleted file mode 100644 index 1011bc4a8692..000000000000 --- a/net/bluetooth/rfcomm/crc.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - RFCOMM implementation for Linux Bluetooth stack (BlueZ). 
- Copyright (C) 2002 Maxim Krasnyansky - Copyright (C) 2002 Marcel Holtmann - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 as - published by the Free Software Foundation; - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS - SOFTWARE IS DISCLAIMED. -*/ - -/* - * RFCOMM FCS calculation. - * - * $Id: crc.c,v 1.2 2002/09/21 09:54:32 holtmann Exp $ - */ - -/* reversed, 8-bit, poly=0x07 */ -unsigned char rfcomm_crc_table[256] = { - 0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75, - 0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b, - 0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69, - 0x12, 0x83, 0xf1, 0x60, 0x15, 0x84, 0xf6, 0x67, - - 0x38, 0xa9, 0xdb, 0x4a, 0x3f, 0xae, 0xdc, 0x4d, - 0x36, 0xa7, 0xd5, 0x44, 0x31, 0xa0, 0xd2, 0x43, - 0x24, 0xb5, 0xc7, 0x56, 0x23, 0xb2, 0xc0, 0x51, - 0x2a, 0xbb, 0xc9, 0x58, 0x2d, 0xbc, 0xce, 0x5f, - - 0x70, 0xe1, 0x93, 0x02, 0x77, 0xe6, 0x94, 0x05, - 0x7e, 0xef, 0x9d, 0x0c, 0x79, 0xe8, 0x9a, 0x0b, - 0x6c, 0xfd, 0x8f, 0x1e, 0x6b, 0xfa, 0x88, 0x19, - 0x62, 0xf3, 0x81, 0x10, 0x65, 0xf4, 0x86, 0x17, - - 0x48, 0xd9, 0xab, 0x3a, 0x4f, 0xde, 0xac, 0x3d, - 0x46, 0xd7, 0xa5, 0x34, 0x41, 0xd0, 0xa2, 0x33, - 0x54, 0xc5, 0xb7, 0x26, 0x53, 0xc2, 0xb0, 0x21, - 0x5a, 0xcb, 0xb9, 0x28, 0x5d, 0xcc, 0xbe, 0x2f, - - 0xe0, 0x71, 0x03, 0x92, 0xe7, 0x76, 0x04, 0x95, - 0xee, 0x7f, 0x0d, 0x9c, 0xe9, 0x78, 0x0a, 0x9b, - 0xfc, 0x6d, 0x1f, 0x8e, 0xfb, 0x6a, 0x18, 0x89, - 0xf2, 0x63, 0x11, 0x80, 0xf5, 0x64, 0x16, 0x87, - - 0xd8, 0x49, 0x3b, 0xaa, 0xdf, 0x4e, 0x3c, 0xad, - 0xd6, 0x47, 0x35, 0xa4, 0xd1, 0x40, 0x32, 0xa3, - 0xc4, 0x55, 0x27, 0xb6, 0xc3, 0x52, 0x20, 0xb1, - 0xca, 0x5b, 0x29, 0xb8, 0xcd, 0x5c, 0x2e, 0xbf, - - 0x90, 0x01, 0x73, 0xe2, 0x97, 0x06, 0x74, 0xe5, - 0x9e, 0x0f, 0x7d, 0xec, 0x99, 0x08, 0x7a, 0xeb, - 0x8c, 0x1d, 0x6f, 0xfe, 0x8b, 0x1a, 0x68, 0xf9, - 0x82, 0x13, 0x61, 0xf0, 0x85, 0x14, 0x66, 0xf7, - - 0xa8, 0x39, 0x4b, 0xda, 0xaf, 0x3e, 0x4c, 0xdd, - 0xa6, 0x37, 0x45, 0xd4, 0xa1, 0x30, 0x42, 0xd3, - 0xb4, 0x25, 0x57, 0xc6, 0xb3, 0x22, 0x50, 0xc1, - 0xba, 0x2b, 0x59, 0xc8, 0xbd, 0x2c, 0x5e, 0xcf -}; -- cgit v1.2.2 From 6516455d3b42b33759a33a8102c1b8b48af4d9c9 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Fri, 28 Oct 2005 19:20:48 +0200 Subject: [Bluetooth] Make more functions static This patch makes another bunch of functions static. 
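A minimal sketch of the pattern (hypothetical names, not from the patch): a helper that is only ever called from its own file is given internal linkage with "static", so no prototype needs to be exported and ad-hoc extern declarations elsewhere, like the ones deleted from af_bluetooth.c below, can go away.

#include <linux/kernel.h>

/* Only called from this file; intentionally not declared in any header. */
static void example_notify(int event)
{
	printk(KERN_INFO "example: event %d\n", event);
}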
Signed-off-by: Marcel Holtmann --- net/bluetooth/af_bluetooth.c | 6 ------ net/bluetooth/hci_core.c | 4 ++-- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 12b43345b54f..03532062a46a 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -308,12 +308,6 @@ static struct net_proto_family bt_sock_family_ops = { .create = bt_sock_create, }; -extern int hci_sock_init(void); -extern int hci_sock_cleanup(void); - -extern int bt_sysfs_init(void); -extern int bt_sysfs_cleanup(void); - static int __init bt_init(void) { BT_INFO("Core ver %s", VERSION); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 55dc42eac92c..cf0df1c8c933 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -87,7 +87,7 @@ int hci_unregister_notifier(struct notifier_block *nb) return notifier_chain_unregister(&hci_notifier, nb); } -void hci_notify(struct hci_dev *hdev, int event) +static void hci_notify(struct hci_dev *hdev, int event) { notifier_call_chain(&hci_notifier, event, hdev); } @@ -1347,7 +1347,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(skb); } -void hci_rx_task(unsigned long arg) +static void hci_rx_task(unsigned long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff *skb; -- cgit v1.2.2 From dd7f5527b3e68a7b2f715ae1a21164383f418013 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Fri, 28 Oct 2005 19:20:53 +0200 Subject: [Bluetooth] Update security filter for Extended Inquiry Response This patch updates the HCI security filter with support for the Extended Inquiry Response (EIR) feature. Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_sock.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 32ef7975a139..799e448750ad 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -66,20 +66,20 @@ static struct hci_sec_filter hci_sec_filter = { /* Packet types */ 0x10, /* Events */ - { 0x1000d9fe, 0x0000300c }, + { 0x1000d9fe, 0x0000b00c }, /* Commands */ { { 0x0 }, /* OGF_LINK_CTL */ - { 0xbe000006, 0x00000001, 0x0000, 0x00 }, + { 0xbe000006, 0x00000001, 0x000000, 0x00 }, /* OGF_LINK_POLICY */ - { 0x00005200, 0x00000000, 0x0000, 0x00 }, + { 0x00005200, 0x00000000, 0x000000, 0x00 }, /* OGF_HOST_CTL */ - { 0xaab00200, 0x2b402aaa, 0x0154, 0x00 }, + { 0xaab00200, 0x2b402aaa, 0x020154, 0x00 }, /* OGF_INFO_PARAM */ - { 0x000002be, 0x00000000, 0x0000, 0x00 }, + { 0x000002be, 0x00000000, 0x000000, 0x00 }, /* OGF_STATUS_PARAM */ - { 0x000000ea, 0x00000000, 0x0000, 0x00 } + { 0x000000ea, 0x00000000, 0x000000, 0x00 } } }; -- cgit v1.2.2 From e89e9cf539a28df7d0eb1d0a545368e9920b34ac Mon Sep 17 00:00:00 2001 From: Ananda Raju Date: Tue, 18 Oct 2005 15:46:41 -0700 Subject: [IPv4/IPv6]: UFO Scatter-gather approach Attached is kernel patch for UDP Fragmentation Offload (UFO) feature. 1. This patch incorporate the review comments by Jeff Garzik. 2. Renamed USO as UFO (UDP Fragmentation Offload) 3. udp sendfile support with UFO This patches uses scatter-gather feature of skb to generate large UDP datagram. Below is a "how-to" on changes required in network device driver to use the UFO interface. 
UDP Fragmentation Offload (UFO) Interface: ------------------------------------------- UFO is a feature wherein the Linux kernel network stack will offload the IP fragmentation functionality of large UDP datagram to hardware. This will reduce the overhead of stack in fragmenting the large UDP datagram to MTU sized packets 1) Drivers indicate their capability of UFO using dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG NETIF_F_HW_CSUM is required for UFO over ipv6. 2) UFO packet will be submitted for transmission using driver xmit routine. UFO packet will have a non-zero value for "skb_shinfo(skb)->ufo_size" skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP fragment going out of the adapter after IP fragmentation by hardware. skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[] contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW indicating that hardware has to do checksum calculation. Hardware should compute the UDP checksum of complete datagram and also ip header checksum of each fragmented IP packet. For IPV6 the UFO provides the fragment identification-id in skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating IPv6 fragments. Signed-off-by: Ananda Raju Signed-off-by: Rusty Russell (forwarded) Signed-off-by: Arnaldo Carvalho de Melo --- net/core/dev.c | 14 +++++++++ net/core/ethtool.c | 53 ++++++++++++++++++++++++++++++++ net/core/skbuff.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/ip_output.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++---- net/ipv6/ip6_output.c | 71 ++++++++++++++++++++++++++++++++++++++++++- 5 files changed, 290 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index a44eeef24edf..8d1541595277 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2717,6 +2717,20 @@ int register_netdevice(struct net_device *dev) dev->name); dev->features &= ~NETIF_F_TSO; } + if (dev->features & NETIF_F_UFO) { + if (!(dev->features & NETIF_F_HW_CSUM)) { + printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " + "NETIF_F_HW_CSUM feature.\n", + dev->name); + dev->features &= ~NETIF_F_UFO; + } + if (!(dev->features & NETIF_F_SG)) { + printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no " + "NETIF_F_SG feature.\n", + dev->name); + dev->features &= ~NETIF_F_UFO; + } + } /* * nil rebuild_header routine, diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 404b761e82ce..0350586e9195 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -93,6 +93,20 @@ int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *a } +u32 ethtool_op_get_ufo(struct net_device *dev) +{ + return (dev->features & NETIF_F_UFO) != 0; +} + +int ethtool_op_set_ufo(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_UFO; + else + dev->features &= ~NETIF_F_UFO; + return 0; +} + /* Handlers for each ethtool command */ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) @@ -483,6 +497,11 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data) return err; } + if (!data && dev->ethtool_ops->set_ufo) { + err = dev->ethtool_ops->set_ufo(dev, 0); + if (err) + return err; + } return dev->ethtool_ops->set_sg(dev, data); } @@ -569,6 +588,32 @@ static int ethtool_set_tso(struct net_device *dev, char __user *useraddr) return dev->ethtool_ops->set_tso(dev, edata.data); } +static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_value edata = { 
ETHTOOL_GTSO }; + + if (!dev->ethtool_ops->get_ufo) + return -EOPNOTSUPP; + edata.data = dev->ethtool_ops->get_ufo(dev); + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} +static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) +{ + struct ethtool_value edata; + + if (!dev->ethtool_ops->set_ufo) + return -EOPNOTSUPP; + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + if (edata.data && !(dev->features & NETIF_F_SG)) + return -EINVAL; + if (edata.data && !(dev->features & NETIF_F_HW_CSUM)) + return -EINVAL; + return dev->ethtool_ops->set_ufo(dev, edata.data); +} + static int ethtool_self_test(struct net_device *dev, char __user *useraddr) { struct ethtool_test test; @@ -854,6 +899,12 @@ int dev_ethtool(struct ifreq *ifr) case ETHTOOL_GPERMADDR: rc = ethtool_get_perm_addr(dev, useraddr); break; + case ETHTOOL_GUFO: + rc = ethtool_get_ufo(dev, useraddr); + break; + case ETHTOOL_SUFO: + rc = ethtool_set_ufo(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } @@ -882,3 +933,5 @@ EXPORT_SYMBOL(ethtool_op_set_sg); EXPORT_SYMBOL(ethtool_op_set_tso); EXPORT_SYMBOL(ethtool_op_set_tx_csum); EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); +EXPORT_SYMBOL(ethtool_op_set_ufo); +EXPORT_SYMBOL(ethtool_op_get_ufo); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ef9d46b91eb9..95501e40100e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -176,6 +176,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, skb_shinfo(skb)->tso_size = 0; skb_shinfo(skb)->tso_segs = 0; skb_shinfo(skb)->frag_list = NULL; + skb_shinfo(skb)->ufo_size = 0; + skb_shinfo(skb)->ip6_frag_id = 0; out: return skb; nodata: @@ -1696,6 +1698,78 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, return textsearch_find(config, state); } +/** + * skb_append_datato_frags: - append the user data to a skb + * @sk: sock structure + * @skb: skb structure to be appened with user data. + * @getfrag: call back function to be used for getting the user data + * @from: pointer to user message iov + * @length: length of the iov message + * + * Description: This procedure append the user data in the fragment part + * of the skb if any page alloc fails user this procedure returns -ENOMEM + */ +int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, + int getfrag(void *from, char *to, int offset, + int len, int odd, struct sk_buff *skb), + void *from, int length) +{ + int frg_cnt = 0; + skb_frag_t *frag = NULL; + struct page *page = NULL; + int copy, left; + int offset = 0; + int ret; + + do { + /* Return error if we don't have space for new frag */ + frg_cnt = skb_shinfo(skb)->nr_frags; + if (frg_cnt >= MAX_SKB_FRAGS) + return -EFAULT; + + /* allocate a new page for next frag */ + page = alloc_pages(sk->sk_allocation, 0); + + /* If alloc_page fails just return failure and caller will + * free previous allocated pages by doing kfree_skb() + */ + if (page == NULL) + return -ENOMEM; + + /* initialize the next frag */ + sk->sk_sndmsg_page = page; + sk->sk_sndmsg_off = 0; + skb_fill_page_desc(skb, frg_cnt, page, 0, 0); + skb->truesize += PAGE_SIZE; + atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); + + /* get the new initialized frag */ + frg_cnt = skb_shinfo(skb)->nr_frags; + frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; + + /* copy the user data to page */ + left = PAGE_SIZE - frag->page_offset; + copy = (length > left)? 
left : length; + + ret = getfrag(from, (page_address(frag->page) + + frag->page_offset + frag->size), + offset, copy, 0, skb); + if (ret < 0) + return -EFAULT; + + /* copy was successful so update the size parameters */ + sk->sk_sndmsg_off += copy; + frag->size += copy; + skb->len += copy; + skb->data_len += copy; + offset += copy; + length -= copy; + + } while (length > 0); + + return 0; +} + void __init skb_init(void) { skbuff_head_cache = kmem_cache_create("skbuff_head_cache", @@ -1747,3 +1821,4 @@ EXPORT_SYMBOL(skb_prepare_seq_read); EXPORT_SYMBOL(skb_seq_read); EXPORT_SYMBOL(skb_abort_seq_read); EXPORT_SYMBOL(skb_find_text); +EXPORT_SYMBOL(skb_append_datato_frags); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 87e350069abb..17758234a3e3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -275,7 +275,8 @@ int ip_output(struct sk_buff *skb) { IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS); - if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->tso_size) + if (skb->len > dst_mtu(skb->dst) && + !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) return ip_fragment(skb, ip_finish_output); else return ip_finish_output(skb); @@ -688,6 +689,60 @@ csum_page(struct page *page, int offset, int copy) return csum; } +inline int ip_ufo_append_data(struct sock *sk, + int getfrag(void *from, char *to, int offset, int len, + int odd, struct sk_buff *skb), + void *from, int length, int hh_len, int fragheaderlen, + int transhdrlen, int mtu,unsigned int flags) +{ + struct sk_buff *skb; + int err; + + /* There is support for UDP fragmentation offload by network + * device, so create one single skb packet containing complete + * udp datagram + */ + if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { + skb = sock_alloc_send_skb(sk, + hh_len + fragheaderlen + transhdrlen + 20, + (flags & MSG_DONTWAIT), &err); + + if (skb == NULL) + return err; + + /* reserve space for Hardware header */ + skb_reserve(skb, hh_len); + + /* create space for UDP/IP header */ + skb_put(skb,fragheaderlen + transhdrlen); + + /* initialize network header pointer */ + skb->nh.raw = skb->data; + + /* initialize protocol header pointer */ + skb->h.raw = skb->data + fragheaderlen; + + skb->ip_summed = CHECKSUM_HW; + skb->csum = 0; + sk->sk_sndmsg_off = 0; + } + + err = skb_append_datato_frags(sk,skb, getfrag, from, + (length - transhdrlen)); + if (!err) { + /* specify the length of each IP datagram fragment*/ + skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen); + __skb_queue_tail(&sk->sk_write_queue, skb); + + return 0; + } + /* There is not enough support do UFO , + * so follow normal path + */ + kfree_skb(skb); + return err; +} + /* * ip_append_data() and ip_append_page() can make one large IP datagram * from many pieces of data. Each pieces will be holded on the socket @@ -777,6 +832,15 @@ int ip_append_data(struct sock *sk, csummode = CHECKSUM_HW; inet->cork.length += length; + if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && + (rt->u.dst.dev->features & NETIF_F_UFO)) { + + if(ip_ufo_append_data(sk, getfrag, from, length, hh_len, + fragheaderlen, transhdrlen, mtu, flags)) + goto error; + + return 0; + } /* So, what's going on in the loop below? 
* @@ -1008,14 +1072,23 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, return -EINVAL; inet->cork.length += size; + if ((sk->sk_protocol == IPPROTO_UDP) && + (rt->u.dst.dev->features & NETIF_F_UFO)) + skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen); + while (size > 0) { int i; - /* Check if the remaining data fits into current packet. */ - len = mtu - skb->len; - if (len < size) - len = maxfraglen - skb->len; + if (skb_shinfo(skb)->ufo_size) + len = size; + else { + + /* Check if the remaining data fits into current packet. */ + len = mtu - skb->len; + if (len < size) + len = maxfraglen - skb->len; + } if (len <= 0) { struct sk_buff *skb_prev; char *data; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 563b442ffab8..614296a920c6 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -147,7 +147,8 @@ static int ip6_output2(struct sk_buff *skb) int ip6_output(struct sk_buff *skb) { - if (skb->len > dst_mtu(skb->dst) || dst_allfrag(skb->dst)) + if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) || + dst_allfrag(skb->dst)) return ip6_fragment(skb, ip6_output2); else return ip6_output2(skb); @@ -768,6 +769,65 @@ out_err_release: *dst = NULL; return err; } +inline int ip6_ufo_append_data(struct sock *sk, + int getfrag(void *from, char *to, int offset, int len, + int odd, struct sk_buff *skb), + void *from, int length, int hh_len, int fragheaderlen, + int transhdrlen, int mtu,unsigned int flags) + +{ + struct sk_buff *skb; + int err; + + /* There is support for UDP large send offload by network + * device, so create one single skb packet containing complete + * udp datagram + */ + if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { + skb = sock_alloc_send_skb(sk, + hh_len + fragheaderlen + transhdrlen + 20, + (flags & MSG_DONTWAIT), &err); + if (skb == NULL) + return -ENOMEM; + + /* reserve space for Hardware header */ + skb_reserve(skb, hh_len); + + /* create space for UDP/IP header */ + skb_put(skb,fragheaderlen + transhdrlen); + + /* initialize network header pointer */ + skb->nh.raw = skb->data; + + /* initialize protocol header pointer */ + skb->h.raw = skb->data + fragheaderlen; + + skb->ip_summed = CHECKSUM_HW; + skb->csum = 0; + sk->sk_sndmsg_off = 0; + } + + err = skb_append_datato_frags(sk,skb, getfrag, from, + (length - transhdrlen)); + if (!err) { + struct frag_hdr fhdr; + + /* specify the length of each IP datagram fragment*/ + skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) - + sizeof(struct frag_hdr); + ipv6_select_ident(skb, &fhdr); + skb_shinfo(skb)->ip6_frag_id = fhdr.identification; + __skb_queue_tail(&sk->sk_write_queue, skb); + + return 0; + } + /* There is not enough support do UPD LSO, + * so follow normal path + */ + kfree_skb(skb); + + return err; +} int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), @@ -860,6 +920,15 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, */ inet->cork.length += length; + if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && + (rt->u.dst.dev->features & NETIF_F_UFO)) { + + if(ip6_ufo_append_data(sk, getfrag, from, length, hh_len, + fragheaderlen, transhdrlen, mtu, flags)) + goto error; + + return 0; + } if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) goto alloc_new_skb; -- cgit v1.2.2 From f12baeab9d65e2fe1b43b09b666f5efcb81b9369 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Sat, 29 Oct 2005 00:02:32 +0800 Subject: [MCAST] IPv6: Fix algorithm to compute Querier's Query 
Interval 5.1.3. Maximum Response Code The Maximum Response Code field specifies the maximum time allowed before sending a responding Report. The actual time allowed, called the Maximum Response Delay, is represented in units of milliseconds, and is derived from the Maximum Response Code as follows: If Maximum Response Code < 32768, Maximum Response Delay = Maximum Response Code If Maximum Response Code >=32768, Maximum Response Code represents a floating-point value as follows: 0 1 2 3 4 5 6 7 8 9 A B C D E F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| exp | mant | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Maximum Response Delay = (mant | 0x1000) << (exp+3) 5.1.9. QQIC (Querier's Query Interval Code) The Querier's Query Interval Code field specifies the [Query Interval] used by the Querier. The actual interval, called the Querier's Query Interval (QQI), is represented in units of seconds, and is derived from the Querier's Query Interval Code as follows: If QQIC < 128, QQI = QQIC If QQIC >= 128, QQIC represents a floating-point value as follows: 0 1 2 3 4 5 6 7 +-+-+-+-+-+-+-+-+ |1| exp | mant | +-+-+-+-+-+-+-+-+ QQI = (mant | 0x10) << (exp + 3) -- rfc3810 #define MLDV2_QQIC(value) MLDV2_EXP(0x80, 4, 3, value) #define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) Above macro are defined in mcast.c. but 1 << 4 == 0x10 and 1 << 12 == 0x1000. So the result computed by original Macro is larger. Signed-off-by: Yan Zheng Acked-by: David L Stevens Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv6/mcast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 39a96c768102..c4f2a0ef7489 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -164,7 +164,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, #define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) #define MLDV2_EXP(thresh, nbmant, nbexp, value) \ ((value) < (thresh) ? (value) : \ - ((MLDV2_MASK(value, nbmant) | (1<<(nbmant+nbexp))) << \ + ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) #define MLDV2_QQIC(value) MLDV2_EXP(0x80, 4, 3, value) -- cgit v1.2.2 From eaa5c54dbec70e2a93d6ed412bb589bbf9c90a17 Mon Sep 17 00:00:00 2001 From: Ivan Skytte Jorgensen Date: Fri, 28 Oct 2005 15:10:00 -0700 Subject: [SCTP] Rename SCTP specific control message flags. Rename SCTP specific control message flags to use SCTP_ prefix rather than MSG_ prefix as per the latest sctp sockets API draft. Signed-off-by: Ivan Skytte Jorgensen Signed-off-by: Sridhar Samudrala --- net/sctp/sm_make_chunk.c | 2 +- net/sctp/socket.c | 32 ++++++++++++++++---------------- net/sctp/ulpevent.c | 6 +++--- 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 10e82ec2ebd3..660c61bdf164 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -554,7 +554,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, dp.ppid = sinfo->sinfo_ppid; /* Set the flags for an unordered send. 
*/ - if (sinfo->sinfo_flags & MSG_UNORDERED) { + if (sinfo->sinfo_flags & SCTP_UNORDERED) { flags |= SCTP_DATA_UNORDERED; dp.ssn = 0; } else diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 02e068d3450d..170045b6ee98 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1389,27 +1389,27 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n", msg_len, sinfo_flags); - /* MSG_EOF or MSG_ABORT cannot be set on a TCP-style socket. */ - if (sctp_style(sk, TCP) && (sinfo_flags & (MSG_EOF | MSG_ABORT))) { + /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ + if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { err = -EINVAL; goto out_nounlock; } - /* If MSG_EOF is set, no data can be sent. Disallow sending zero - * length messages when MSG_EOF|MSG_ABORT is not set. - * If MSG_ABORT is set, the message length could be non zero with + /* If SCTP_EOF is set, no data can be sent. Disallow sending zero + * length messages when SCTP_EOF|SCTP_ABORT is not set. + * If SCTP_ABORT is set, the message length could be non zero with * the msg_iov set to the user abort reason. */ - if (((sinfo_flags & MSG_EOF) && (msg_len > 0)) || - (!(sinfo_flags & (MSG_EOF|MSG_ABORT)) && (msg_len == 0))) { + if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || + (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { err = -EINVAL; goto out_nounlock; } - /* If MSG_ADDR_OVER is set, there must be an address + /* If SCTP_ADDR_OVER is set, there must be an address * specified in msg_name. */ - if ((sinfo_flags & MSG_ADDR_OVER) && (!msg->msg_name)) { + if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { err = -EINVAL; goto out_nounlock; } @@ -1458,14 +1458,14 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } - if (sinfo_flags & MSG_EOF) { + if (sinfo_flags & SCTP_EOF) { SCTP_DEBUG_PRINTK("Shutting down association: %p\n", asoc); sctp_primitive_SHUTDOWN(asoc, NULL); err = 0; goto out_unlock; } - if (sinfo_flags & MSG_ABORT) { + if (sinfo_flags & SCTP_ABORT) { SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); sctp_primitive_ABORT(asoc, msg); err = 0; @@ -1477,7 +1477,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, if (!asoc) { SCTP_DEBUG_PRINTK("There is no association yet.\n"); - if (sinfo_flags & (MSG_EOF | MSG_ABORT)) { + if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { err = -EINVAL; goto out_unlock; } @@ -1611,10 +1611,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, /* If an address is passed with the sendto/sendmsg call, it is used * to override the primary destination address in the TCP model, or - * when MSG_ADDR_OVER flag is set in the UDP model. + * when SCTP_ADDR_OVER flag is set in the UDP model. */ if ((sctp_style(sk, TCP) && msg_name) || - (sinfo_flags & MSG_ADDR_OVER)) { + (sinfo_flags & SCTP_ADDR_OVER)) { chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); if (!chunk_tp) { err = -EINVAL; @@ -4640,8 +4640,8 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, /* Minimally, validate the sinfo_flags. 
*/ if (cmsgs->info->sinfo_flags & - ~(MSG_UNORDERED | MSG_ADDR_OVER | - MSG_ABORT | MSG_EOF)) + ~(SCTP_UNORDERED | SCTP_ADDR_OVER | + SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 057e7fac3af0..e049f41faa47 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -698,7 +698,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, event->ssn = ntohs(chunk->subh.data_hdr->ssn); event->ppid = chunk->subh.data_hdr->ppid; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { - event->flags |= MSG_UNORDERED; + event->flags |= SCTP_UNORDERED; event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); } event->tsn = ntohl(chunk->subh.data_hdr->tsn); @@ -824,7 +824,7 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, * * recvmsg() flags: * - * MSG_UNORDERED - This flag is present when the message was sent + * SCTP_UNORDERED - This flag is present when the message was sent * non-ordered. */ sinfo.sinfo_flags = event->flags; @@ -839,7 +839,7 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, * This field will hold the current cumulative TSN as * known by the underlying SCTP layer. Note this field is * ignored when sending and only valid for a receive - * operation when sinfo_flags are set to MSG_UNORDERED. + * operation when sinfo_flags are set to SCTP_UNORDERED. */ sinfo.sinfo_cumtsn = event->cumtsn; /* sinfo_assoc_id: sizeof (sctp_assoc_t) -- cgit v1.2.2 From a1ab3582699def352dab1355adcadd3d47f79f0f Mon Sep 17 00:00:00 2001 From: Ivan Skytte Jorgensen Date: Fri, 28 Oct 2005 15:33:24 -0700 Subject: [SCTP] Fix SCTP_SETADAPTION sockopt to use the correct structure. Signed-off-by: Ivan Skytte Jorgensen Signed-off-by: Sridhar Samudrala --- net/sctp/socket.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 170045b6ee98..c66c161908c0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2384,14 +2384,14 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva static int sctp_setsockopt_adaption_layer(struct sock *sk, char __user *optval, int optlen) { - __u32 val; + struct sctp_setadaption adaption; - if (optlen < sizeof(__u32)) + if (optlen != sizeof(struct sctp_setadaption)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(__u32))) + if (copy_from_user(&adaption, optval, optlen)) return -EFAULT; - sctp_sk(sk)->adaption_ind = val; + sctp_sk(sk)->adaption_ind = adaption.ssb_adaption_ind; return 0; } @@ -3672,17 +3672,15 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, static int sctp_getsockopt_adaption_layer(struct sock *sk, int len, char __user *optval, int __user *optlen) { - __u32 val; + struct sctp_setadaption adaption; - if (len < sizeof(__u32)) + if (len != sizeof(struct sctp_setadaption)) return -EINVAL; - len = sizeof(__u32); - val = sctp_sk(sk)->adaption_ind; - if (put_user(len, optlen)) - return -EFAULT; - if (copy_to_user(optval, &val, len)) + adaption.ssb_adaption_ind = sctp_sk(sk)->adaption_ind; + if (copy_to_user(optval, &adaption, len)) return -EFAULT; + return 0; } -- cgit v1.2.2 From 96a339985d4c6874d32909e8f1903e6e6c141399 Mon Sep 17 00:00:00 2001 From: Ivan Skytte Jorgensen Date: Fri, 28 Oct 2005 15:36:12 -0700 Subject: [SCTP] Allow SCTP_MAXSEG to revert to default frag point with a '0' value. 
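A hedged user-space sketch of what this permits (assumes fd is an already-created SCTP socket and SCTP_MAXSEG from <netinet/sctp.h>; error handling omitted): a value of 0 now reverts to the default fragmentation point instead of failing with EINVAL.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int sctp_reset_maxseg(int fd)
{
	int val = 0;	/* 0 = go back to the default frag point */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &val, sizeof(val));
}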
Signed-off-by: Ivan Skytte Jorgensen Signed-off-by: Sridhar Samudrala --- net/sctp/socket.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c66c161908c0..97b556c1c450 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2306,16 +2306,14 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; - if ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)) + if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) return -EINVAL; sp->user_frag = val; - if (val) { - /* Update the frag_point of the existing associations. */ - list_for_each(pos, &(sp->ep->asocs)) { - asoc = list_entry(pos, struct sctp_association, asocs); - asoc->frag_point = sctp_frag_point(sp, asoc->pmtu); - } + /* Update the frag_point of the existing associations. */ + list_for_each(pos, &(sp->ep->asocs)) { + asoc = list_entry(pos, struct sctp_association, asocs); + asoc->frag_point = sctp_frag_point(sp, asoc->pmtu); } return 0; -- cgit v1.2.2 From 64a0c1c81e300f0f56f26604c81040784e3717f0 Mon Sep 17 00:00:00 2001 From: Ivan Skytte Jorgensen Date: Fri, 28 Oct 2005 15:39:02 -0700 Subject: [SCTP] Do not allow unprivileged programs initiating new associations on privileged ports. Signed-off-by: Ivan Skytte Jorgensen Signed-off-by: Sridhar Samudrala --- net/sctp/socket.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'net') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 97b556c1c450..b529af5e6f2a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1010,6 +1010,19 @@ static int __sctp_connect(struct sock* sk, err = -EAGAIN; goto out_free; } + } else { + /* + * If an unprivileged user inherits a 1-many + * style socket with open associations on a + * privileged port, it MAY be permitted to + * accept new associations, but it SHOULD NOT + * be permitted to open new associations. + */ + if (ep->base.bind_addr.port < PROT_SOCK && + !capable(CAP_NET_BIND_SERVICE)) { + err = -EACCES; + goto out_free; + } } scope = sctp_scope(&to); @@ -1515,6 +1528,19 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, err = -EAGAIN; goto out_unlock; } + } else { + /* + * If an unprivileged user inherits a one-to-many + * style socket with open associations on a privileged + * port, it MAY be permitted to accept new associations, + * but it SHOULD NOT be permitted to open new + * associations. + */ + if (ep->base.bind_addr.port < PROT_SOCK && + !capable(CAP_NET_BIND_SERVICE)) { + err = -EACCES; + goto out_unlock; + } } scope = sctp_scope(&to); -- cgit v1.2.2 From 360ac8e2f1a38c3497739636c3b702352d1ad0ae Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 25 Oct 2005 15:03:41 -0700 Subject: [ETH]: ether address compare Expose faster ether compare for use by protocols and other driver. 
And change name to be more consistent with other ether address manipulation routines in same file Signed-off-by: Stephen Hemminger Signed-off-by: Arnaldo Carvalho de Melo --- net/ethernet/eth.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 68a5ca866442..e24577367274 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -146,19 +146,6 @@ int eth_rebuild_header(struct sk_buff *skb) return 0; } -static inline unsigned int compare_eth_addr(const unsigned char *__a, const unsigned char *__b) -{ - const unsigned short *dest = (unsigned short *) __a; - const unsigned short *devaddr = (unsigned short *) __b; - unsigned int res; - - BUILD_BUG_ON(ETH_ALEN != 6); - res = ((dest[0] ^ devaddr[0]) | - (dest[1] ^ devaddr[1]) | - (dest[2] ^ devaddr[2])) != 0; - - return res; -} /* * Determine the packet's protocol ID. The rule here is that we @@ -176,7 +163,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) eth = eth_hdr(skb); if (*eth->h_dest&1) { - if (!compare_eth_addr(eth->h_dest, dev->broadcast)) + if (!compare_ether_addr(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; @@ -191,7 +178,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) */ else if(1 /*dev->flags&IFF_PROMISC*/) { - if (unlikely(compare_eth_addr(eth->h_dest, dev->dev_addr))) + if (unlikely(compare_ether_addr(eth->h_dest, dev->dev_addr))) skb->pkt_type = PACKET_OTHERHOST; } -- cgit v1.2.2 From 9fcc2e8a752f7d3d889114221b67c459557823e9 Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Thu, 27 Oct 2005 15:10:01 -0700 Subject: [IPV4]: Fix issue reported by Coverity in ipv4/fib_frontend.c fib_del_ifaddr() dereferences ifa->ifa_dev, so the code already assumes that ifa->ifa_dev is non-NULL, the check is unnecessary. Signed-off-by: Jayachandran C. Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/fib_frontend.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index e61bc7177eb1..990633c09dfe 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -591,7 +591,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, break; case NETDEV_DOWN: fib_del_ifaddr(ifa); - if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) { + if (ifa->ifa_dev->ifa_list == NULL) { /* Last address was deleted from this interface. Disable IP. */ -- cgit v1.2.2 From a6e0eb3791dcefc6dd4db53a23de5cfb18fe9a97 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 29 Oct 2005 12:02:00 +0100 Subject: [PATCH] bluetooth hidp is broken on s390 Bluetooth HIDP selects INPUT and it really needs it to be there - module depends on input core. And input core is never built on s390... Marked as broken on s390, for now; if somebody has better ideas, feel free to fix it and remove dependency... 
Signed-off-by: Al Viro Acked-by: Marcel Holtmann Signed-off-by: Linus Torvalds --- net/bluetooth/hidp/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index 4e958f7d9418..edfea772fb67 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig @@ -1,6 +1,6 @@ config BT_HIDP tristate "HIDP protocol support" - depends on BT && BT_L2CAP + depends on BT && BT_L2CAP && (BROKEN || !S390) select INPUT help HIDP (Human Interface Device Protocol) is a transport layer -- cgit v1.2.2 From 378f058cc49bcda7fa63d3cd86d2f9a0a5188b1c Mon Sep 17 00:00:00 2001 From: David Hardeman Date: Sat, 17 Sep 2005 17:55:31 +1000 Subject: [PATCH] Use sg_set_buf/sg_init_one where applicable This patch uses sg_set_buf/sg_init_one in some places where it was duplicated. Signed-off-by: David Hardeman Cc: James Bottomley Cc: Greg KH Cc: "David S. Miller" Cc: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Herbert Xu --- net/ipv6/addrconf.c | 10 +++------- net/sunrpc/auth_gss/gss_krb5_crypto.c | 23 ++++++----------------- 2 files changed, 9 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index a970b4727ce8..41edc14851e8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -75,7 +75,7 @@ #ifdef CONFIG_IPV6_PRIVACY #include #include -#include +#include #endif #include @@ -1217,12 +1217,8 @@ static int __ipv6_regen_rndid(struct inet6_dev *idev) struct net_device *dev; struct scatterlist sg[2]; - sg[0].page = virt_to_page(idev->entropy); - sg[0].offset = offset_in_page(idev->entropy); - sg[0].length = 8; - sg[1].page = virt_to_page(idev->work_eui64); - sg[1].offset = offset_in_page(idev->work_eui64); - sg[1].length = 8; + sg_set_buf(&sg[0], idev->entropy, 8); + sg_set_buf(&sg[1], idev->work_eui64, 8); dev = idev->dev; diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 3f3d5437f02d..e65e1f979275 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include #include @@ -75,9 +75,7 @@ krb5_encrypt( memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm)); memcpy(out, in, length); - sg[0].page = virt_to_page(out); - sg[0].offset = offset_in_page(out); - sg[0].length = length; + sg_set_buf(&sg[0], out, length); ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv); @@ -117,9 +115,7 @@ krb5_decrypt( memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm)); memcpy(out, in, length); - sg[0].page = virt_to_page(out); - sg[0].offset = offset_in_page(out); - sg[0].length = length; + sg_set_buf(&sg[0], out, length); ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv); @@ -132,13 +128,6 @@ out: EXPORT_SYMBOL(krb5_decrypt); -static void -buf_to_sg(struct scatterlist *sg, char *ptr, int len) { - sg->page = virt_to_page(ptr); - sg->offset = offset_in_page(ptr); - sg->length = len; -} - static int process_xdr_buf(struct xdr_buf *buf, int offset, int len, int (*actor)(struct scatterlist *, void *), void *data) @@ -152,7 +141,7 @@ process_xdr_buf(struct xdr_buf *buf, int offset, int len, thislen = buf->head[0].iov_len - offset; if (thislen > len) thislen = len; - buf_to_sg(sg, buf->head[0].iov_base + offset, thislen); + sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); ret = actor(sg, data); if (ret) goto out; @@ -195,7 +184,7 @@ process_xdr_buf(struct xdr_buf *buf, int offset, int len, thislen = 
buf->tail[0].iov_len - offset; if (thislen > len) thislen = len; - buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen); + sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen); ret = actor(sg, data); len -= thislen; } @@ -241,7 +230,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, goto out; crypto_digest_init(tfm); - buf_to_sg(sg, header, hdrlen); + sg_set_buf(sg, header, hdrlen); crypto_digest_update(tfm, sg, 1); process_xdr_buf(body, body_offset, body->len - body_offset, checksummer, tfm); -- cgit v1.2.2 From 6df5b9f48dd0e77fa796b9b7d3fde7cc5f1237f2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 19 Sep 2005 22:30:11 +1000 Subject: [CRYPTO] Simplify one-member scatterlist expressions This patch rewrites various occurences of &sg[0] where sg is an array of length one to simply sg. Signed-off-by: Herbert Xu --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index e65e1f979275..97c981fa6b8e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -75,7 +75,7 @@ krb5_encrypt( memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm)); memcpy(out, in, length); - sg_set_buf(&sg[0], out, length); + sg_set_buf(sg, out, length); ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv); @@ -115,7 +115,7 @@ krb5_decrypt( memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm)); memcpy(out, in, length); - sg_set_buf(&sg[0], out, length); + sg_set_buf(sg, out, length); ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv); -- cgit v1.2.2 From 3fa63c7d82ab9a12a5d0a299069f8df9f35aa011 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Sun, 30 Oct 2005 15:02:23 -0800 Subject: [PATCH] Typo fix: dot after newline in printk strings Typo fix: dots appearing after a newline in printk strings. Signed-off-by: Jean Delvare Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- net/ipv4/netfilter/ipt_addrtype.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c index f5909a4c3fc7..e19c2a52d00c 100644 --- a/net/ipv4/netfilter/ipt_addrtype.c +++ b/net/ipv4/netfilter/ipt_addrtype.c @@ -48,7 +48,7 @@ static int checkentry(const char *tablename, const struct ipt_ip *ip, unsigned int hook_mask) { if (matchsize != IPT_ALIGN(sizeof(struct ipt_addrtype_info))) { - printk(KERN_ERR "ipt_addrtype: invalid size (%u != %Zu)\n.", + printk(KERN_ERR "ipt_addrtype: invalid size (%u != %Zu)\n", matchsize, IPT_ALIGN(sizeof(struct ipt_addrtype_info))); return 0; } -- cgit v1.2.2
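To make the sg_set_buf()/sg_init_one() conversions above concrete, a minimal before/after sketch (hypothetical buffer; assumes sg_set_buf()/sg_init_one() from <linux/scatterlist.h> as used in the hunks above, and the era's page/offset/length scatterlist fields shown in the removed lines):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void example_fill_sg(struct scatterlist *sg, void *buf, unsigned int len)
{
	/* open-coded form that the patches above replace */
	sg->page   = virt_to_page(buf);
	sg->offset = offset_in_page(buf);
	sg->length = len;

	/* equivalent one-element setup using the helper */
	sg_init_one(sg, buf, len);
}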