Diffstat (limited to 'net')
53 files changed, 703 insertions, 282 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9bf0b737aa51..b2e07f0dd298 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,6 +43,7 @@
 #include <net/9p/transport.h>
 #include <linux/scatterlist.h>
 #include <linux/virtio.h>
+#include <linux/virtio_ids.h>
 #include <linux/virtio_9p.h>
 
 #define VIRTQUEUE_NUM	128
@@ -200,7 +201,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 
 	req->status = REQ_STATUS_SENT;
 
-	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc)) {
+	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
 		P9_DPRINTK(P9_DEBUG_TRANS,
 			"9p debug: virtio rpc add_buf returned failure");
 		return -EIO;
@@ -334,8 +335,6 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 	}
 }
 
-#define VIRTIO_ID_9P	9
-
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
 	{ 0 },
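Note on the second hunk above: the add_buf() result is now compared against zero rather than treated as a plain boolean, which suggests the virtqueue operation may return a non-negative value (for example, remaining ring capacity) on success. A minimal, hedged sketch of that calling convention; p9_submit_example() and the kick call are illustrative assumptions, not part of the patch:

/* Sketch: only a negative add_buf() return is treated as failure. */
static int p9_submit_example(struct virtio_chan *chan, int out, int in,
			     void *data)
{
	int err = chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, data);

	if (err < 0)
		return -EIO;			/* ring full or transport broken */
	chan->vq->vq_ops->kick(chan->vq);	/* assumed notify hook */
	return 0;
}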
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 95f7a7a544b4..7f939ce29801 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -68,7 +68,7 @@ static struct attribute_group bt_link_group = {
 	.attrs = bt_link_attrs,
 };
 
-static struct attribute_group *bt_link_groups[] = {
+static const struct attribute_group *bt_link_groups[] = {
 	&bt_link_group,
 	NULL
 };
@@ -392,7 +392,7 @@ static struct attribute_group bt_host_group = {
 	.attrs = bt_host_attrs,
 };
 
-static struct attribute_group *bt_host_groups[] = {
+static const struct attribute_group *bt_host_groups[] = {
 	&bt_host_group,
 	NULL
 };
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 09bedeb5579c..49d8495d69be 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -577,11 +577,6 @@ static int hidp_session(void *arg)
 	}
 
 	if (session->hid) {
-		if (session->hid->claimed & HID_CLAIMED_INPUT)
-			hidinput_disconnect(session->hid);
-		if (session->hid->claimed & HID_CLAIMED_HIDRAW)
-			hidraw_disconnect(session->hid);
-
 		hid_destroy_device(session->hid);
 		session->hid = NULL;
 	}
@@ -747,8 +742,6 @@ static void hidp_stop(struct hid_device *hid)
 	skb_queue_purge(&session->ctrl_transmit);
 	skb_queue_purge(&session->intr_transmit);
 
-	if (hid->claimed & HID_CLAIMED_INPUT)
-		hidinput_disconnect(hid);
 	hid->claimed = 0;
 }
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 907a82e9023d..a16a2342f6bf 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -965,12 +965,12 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
 
 #ifdef CONFIG_SYSCTL
 static
-int brnf_sysctl_call_tables(ctl_table * ctl, int write, struct file *filp,
+int brnf_sysctl_call_tables(ctl_table * ctl, int write,
 			void __user * buffer, size_t * lenp, loff_t * ppos)
 {
 	int ret;
 
-	ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
 	if (write && *(int *)(ctl->data))
 		*(int *)(ctl->data) = 1;
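This hunk is the first of many in this series that drop the struct file * argument from sysctl proc handlers: the helpers are now called as proc_dointvec(ctl, write, buffer, lenp, ppos) and so on. The same mechanical change repeats in the decnet, ipv4, ipv6, irda, netfilter, phonet and nf_log hunks below. A small sketch of a handler written against the new signature (the clamping logic is a made-up example, not from the patch):

/* Sketch of the post-patch proc handler shape. */
static int example_sysctl_handler(ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	/* Post-process only after a successful write, as the handlers in
	 * this series do (forcing flags, flushing caches, ...). */
	if (write && ret == 0 && *(int *)ctl->data < 0)
		*(int *)ctl->data = 0;	/* example: clamp negative values */
	return ret;
}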
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index ad91e9e5f475..7d4c57523b09 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -493,7 +493,7 @@ void netdev_unregister_kobject(struct net_device * net)
 int netdev_register_kobject(struct net_device *net)
 {
 	struct device *dev = &(net->dev);
-	struct attribute_group **groups = net->sysfs_groups;
+	const struct attribute_group **groups = net->sysfs_groups;
 
 	dev->class = &net_class;
 	dev->platform_data = net;
diff --git a/net/core/sock.c b/net/core/sock.c
index 30d5446512f9..524712a7b154 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1206,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
 
 void __init sk_init(void)
 {
-	if (num_physpages <= 4096) {
+	if (totalram_pages <= 4096) {
 		sysctl_wmem_max = 32767;
 		sysctl_rmem_max = 32767;
 		sysctl_wmem_default = 32767;
 		sysctl_rmem_default = 32767;
-	} else if (num_physpages >= 131072) {
+	} else if (totalram_pages >= 131072) {
 		sysctl_wmem_max = 131071;
 		sysctl_rmem_max = 131071;
 	}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 923db06c7e55..bc4467082a00 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		goal = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		goal = num_physpages >> (23 - PAGE_SHIFT);
+		goal = totalram_pages >> (23 - PAGE_SHIFT);
 
 	if (thash_entries)
 		goal = (thash_entries *
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 1c6a5bb6f0c8..6e1f085db06a 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -164,7 +164,7 @@ static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MU
 static int min_priority[1];
 static int max_priority[] = { 127 }; /* From DECnet spec */
 
-static int dn_forwarding_proc(ctl_table *, int, struct file *,
+static int dn_forwarding_proc(ctl_table *, int,
 			void __user *, size_t *, loff_t *);
 static int dn_forwarding_sysctl(ctl_table *table,
 			void __user *oldval, size_t __user *oldlenp,
@@ -274,7 +274,6 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
 }
 
 static int dn_forwarding_proc(ctl_table *table, int write,
-				struct file *filep,
 				void __user *buffer,
 				size_t *lenp, loff_t *ppos)
 {
@@ -290,7 +289,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
 	dn_db = dev->dn_ptr;
 	old = dn_db->parms.forwarding;
 
-	err = proc_dointvec(table, write, filep, buffer, lenp, ppos);
+	err = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if ((err >= 0) && write) {
 		if (dn_db->parms.forwarding < 0)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9383d3e5a1ab..57662cabaf9b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
 	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
 	add_timer(&dn_route_timer);
 
-	goal = num_physpages >> (26 - PAGE_SHIFT);
+	goal = totalram_pages >> (26 - PAGE_SHIFT);
 
 	for(order = 0; (1UL << order) < goal; order++)
 		/* NOTHING */;
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 5bcd592ae6dd..26b0ab1e9f56 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -165,7 +165,6 @@ static int dn_node_address_strategy(ctl_table *table,
 }
 
 static int dn_node_address_handler(ctl_table *table, int write,
-				struct file *filp,
 				void __user *buffer,
 				size_t *lenp, loff_t *ppos)
 {
@@ -276,7 +275,6 @@ static int dn_def_dev_strategy(ctl_table *table,
 
 
 static int dn_def_dev_handler(ctl_table *table, int write,
-				struct file * filp,
 				void __user *buffer,
 				size_t *lenp, loff_t *ppos)
 {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 07336c6201f0..e92f1fd28aa5 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1270,10 +1270,10 @@ static void inet_forward_change(struct net *net)
 }
 
 static int devinet_conf_proc(ctl_table *ctl, int write,
-			     struct file *filp, void __user *buffer,
+			     void __user *buffer,
 			     size_t *lenp, loff_t *ppos)
 {
-	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
 	if (write) {
 		struct ipv4_devconf *cnf = ctl->extra1;
@@ -1342,12 +1342,12 @@ static int devinet_conf_sysctl(ctl_table *table,
 }
 
 static int devinet_sysctl_forward(ctl_table *ctl, int write,
-				  struct file *filp, void __user *buffer,
+				  void __user *buffer,
 				  size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
-	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
 	if (write && *valp != val) {
 		struct net *net = ctl->extra2;
@@ -1372,12 +1372,12 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
 }
 
 int ipv4_doint_and_flush(ctl_table *ctl, int write,
-			 struct file *filp, void __user *buffer,
+			 void __user *buffer,
 			 size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
-	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 	struct net *net = ctl->extra2;
 
 	if (write && *valp != val)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 91867d3e6328..bb4199252026 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3036,7 +3036,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 
 #ifdef CONFIG_SYSCTL
 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
-					struct file *filp, void __user *buffer,
+					void __user *buffer,
 					size_t *lenp, loff_t *ppos)
 {
 	if (write) {
@@ -3046,7 +3046,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
 
 		memcpy(&ctl, __ctl, sizeof(ctl));
 		ctl.data = &flush_delay;
-		proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);
+		proc_dointvec(&ctl, write, buffer, lenp, ppos);
 
 		net = (struct net *)__ctl->extra1;
 		rt_cache_flush(net, flush_delay);
@@ -3106,12 +3106,11 @@ static void rt_secret_reschedule(int old)
 }
 
 static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
-					   struct file *filp,
 					   void __user *buffer, size_t *lenp,
 					   loff_t *ppos)
 {
 	int old = ip_rt_secret_interval;
-	int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);
+	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
 
 	rt_secret_reschedule(old);
 
@@ -3414,7 +3413,7 @@ int __init ip_rt_init(void)
 		alloc_large_system_hash("IP route cache",
 					sizeof(struct rt_hash_bucket),
 					rhash_entries,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					15 : 17,
 					0,
 					&rt_hash_log,
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index cd2b97f1b6e1..a6e0e077ac33 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -37,12 +37,13 @@ __initcall(init_syncookies);
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+		      ipv4_cookie_scratch);
 
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
 		       u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(cookie_scratch);
+	__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
 
 	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
 	tmp[0] = (__force u32)saddr;
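The hunk above (and its IPv6 counterpart later in this diff) moves the array dimension inside DEFINE_PER_CPU(), so the per-CPU variable is declared with an array type rather than by appending the dimension after the macro, and renames it so the IPv4 and IPv6 copies no longer share the cookie_scratch identifier. A brief sketch of the same pattern with a hypothetical scratch buffer:

/* Sketch only; MY_SCRATCH_WORDS and my_scratch are illustrative names. */
#define MY_SCRATCH_WORDS	8

static DEFINE_PER_CPU(__u32 [MY_SCRATCH_WORDS], my_scratch);

static void my_scratch_clear(void)
{
	__u32 *tmp = __get_cpu_var(my_scratch);	/* this CPU's array */

	memset(tmp, 0, MY_SCRATCH_WORDS * sizeof(__u32));
}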
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4710d219f06a..2dcf04d9b005 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,7 +36,7 @@ static void set_local_port_range(int range[2])
 }
 
 /* Validate changes from /proc interface. */
-static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp,
+static int ipv4_local_port_range(ctl_table *table, int write,
 				 void __user *buffer,
 				 size_t *lenp, loff_t *ppos)
 {
@@ -51,7 +51,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp,
 	};
 
 	inet_get_local_port_range(range, range + 1);
-	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
 	if (write && ret == 0) {
 		if (range[1] < range[0])
@@ -91,7 +91,7 @@ static int ipv4_sysctl_local_port_range(ctl_table *table,
 }
 
 
-static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file * filp,
+static int proc_tcp_congestion_control(ctl_table *ctl, int write,
 				       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	char val[TCP_CA_NAME_MAX];
@@ -103,7 +103,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file *
 
 	tcp_get_default_congestion_control(val);
 
-	ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 	if (write && ret == 0)
 		ret = tcp_set_default_congestion_control(val);
 	return ret;
@@ -129,7 +129,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table,
 }
 
 static int proc_tcp_available_congestion_control(ctl_table *ctl,
-						 int write, struct file * filp,
+						 int write,
 						 void __user *buffer, size_t *lenp,
 						 loff_t *ppos)
 {
@@ -140,13 +140,13 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
 	if (!tbl.data)
 		return -ENOMEM;
 	tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
-	ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 	kfree(tbl.data);
 	return ret;
 }
 
 static int proc_allowed_congestion_control(ctl_table *ctl,
-					   int write, struct file * filp,
+					   int write,
 					   void __user *buffer, size_t *lenp,
 					   loff_t *ppos)
 {
@@ -158,7 +158,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
 		return -ENOMEM;
 
 	tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
-	ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 	if (write && ret == 0)
 		ret = tcp_set_allowed_congestion_control(tbl.data);
 	kfree(tbl.data);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19a0612b8a20..21387ebabf00 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
 		alloc_large_system_hash("TCP established",
 					sizeof(struct inet_ehash_bucket),
 					thash_entries,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					13 : 15,
 					0,
 					&tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
 		alloc_large_system_hash("TCP bind",
 					sizeof(struct inet_bind_hashbucket),
 					tcp_hashinfo.ehash_size,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					13 : 15,
 					0,
 					&tcp_hashinfo.bhash_size,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 55f486d89c88..1fd0a3d775d2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3986,14 +3986,14 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 #ifdef CONFIG_SYSCTL
 
 static
-int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
+int addrconf_sysctl_forward(ctl_table *ctl, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
 	int ret;
 
-	ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
 	if (write)
 		ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4090,14 +4090,14 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
 }
 
 static
-int addrconf_sysctl_disable(ctl_table *ctl, int write, struct file * filp,
+int addrconf_sysctl_disable(ctl_table *ctl, int write,
 			    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
 	int ret;
 
-	ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
 	if (write)
 		ret = addrconf_disable_ipv6(ctl, valp, val);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3907510c2ce3..090675e269ee 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -324,7 +324,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-static struct seq_operations ipmr_mfc_seq_ops = {
+static const struct seq_operations ipmr_mfc_seq_ops = {
 	.start = ipmr_mfc_seq_start,
 	.next = ipmr_mfc_seq_next,
 	.stop = ipmr_mfc_seq_stop,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7015478797f6..498b9b0b0fad 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1735,7 +1735,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
 	}
 }
 
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net_device *dev = ctl->extra1;
 	struct inet6_dev *idev;
@@ -1746,16 +1746,16 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
 		ndisc_warn_deprecated_sysctl(ctl, "syscall", dev ? dev->name : "default");
 
 	if (strcmp(ctl->procname, "retrans_time") == 0)
-		ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+		ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
 	else if (strcmp(ctl->procname, "base_reachable_time") == 0)
 		ret = proc_dointvec_jiffies(ctl, write,
-					    filp, buffer, lenp, ppos);
+					    buffer, lenp, ppos);
 
 	else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
 		 (strcmp(ctl->procname, "base_reachable_time_ms") == 0))
 		ret = proc_dointvec_ms_jiffies(ctl, write,
-					       filp, buffer, lenp, ppos);
+					       buffer, lenp, ppos);
 	else
 		ret = -1;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 77aecbe8ff6c..d6fe7646a8ff 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2524,13 +2524,13 @@ static const struct file_operations rt6_stats_seq_fops = {
 #ifdef CONFIG_SYSCTL
 
 static
-int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
+int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
 			      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
 	int delay = net->ipv6.sysctl.flush_delay;
 	if (write) {
-		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+		proc_dointvec(ctl, write, buffer, lenp, ppos);
 		fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
 		return 0;
 	} else
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8c2513982b61..6b6ae913b5d4 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 	return child;
 }
 
-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+		      ipv6_cookie_scratch);
 
 static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
 		       __be16 sport, __be16 dport, u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(cookie_scratch);
+	__u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
 
 	/*
 	 * we have 320 bits of information to hash, copy in the remaining
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 57f8817c3979..5c86567e5a78 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -73,12 +73,12 @@ static int min_lap_keepalive_time = 100;	/* 100us */
 /* For other sysctl, I've no idea of the range. Maybe Dag could help
  * us on that - Jean II */
 
-static int do_devname(ctl_table *table, int write, struct file *filp,
+static int do_devname(ctl_table *table, int write,
 		      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
-	ret = proc_dostring(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dostring(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write) {
 		struct ias_value *val;
 
@@ -90,12 +90,12 @@ static int do_devname(ctl_table *table, int write, struct file *filp,
 }
 
 
-static int do_discovery(ctl_table *table, int write, struct file *filp,
+static int do_discovery(ctl_table *table, int write,
 			void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
-	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 	if (ret)
 		return ret;
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index fba2892b99e1..446e9bd4b4bc 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1496,14 +1496,14 @@ static int ip_vs_zero_all(void)
 
 
 static int
-proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
+proc_do_defense_mode(ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = table->data;
 	int val = *valp;
 	int rc;
 
-	rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	rc = proc_dointvec(table, write, buffer, lenp, ppos);
 	if (write && (*valp != val)) {
 		if ((*valp < 0) || (*valp > 3)) {
 			/* Restore the correct value */
@@ -1517,7 +1517,7 @@ proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
 
 
 static int
-proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
+proc_do_sync_threshold(ctl_table *table, int write,
 		       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = table->data;
@@ -1527,7 +1527,7 @@ proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
 	/* backup the value first */
 	memcpy(val, valp, sizeof(val));
 
-	rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	rc = proc_dointvec(table, write, buffer, lenp, ppos);
 	if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
 		/* Restore the correct value */
 		memcpy(valp, val, sizeof(val));
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b37109817a98..7c9ec3dee96e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
 	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
 	if (!nf_conntrack_htable_size) {
 		nf_conntrack_htable_size
-			= (((num_physpages << PAGE_SHIFT) / 16384)
+			= (((totalram_pages << PAGE_SHIFT) / 16384)
 			   / sizeof(struct hlist_head));
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 			nf_conntrack_htable_size = 16384;
 		if (nf_conntrack_htable_size < 32)
 			nf_conntrack_htable_size = 32;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 4e620305f28c..c93494fef8ef 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -226,7 +226,7 @@ static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 static struct ctl_table_header *nf_log_dir_header;
 
-static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
+static int nf_log_proc_dostring(ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	const struct nf_logger *logger;
@@ -260,7 +260,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
 			table->data = "NONE";
 		else
 			table->data = logger->name;
-		r = proc_dostring(table, write, filp, buffer, lenp, ppos);
+		r = proc_dostring(table, write, buffer, lenp, ppos);
 		mutex_unlock(&nf_log_mutex);
 	}
 
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a6ac83a93348..f01955cce314 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	int cpu;
 
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 		return NULL;
 
 	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 219dcdbe388c..dd16e404424f 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
 	if (minfo->cfg.size)
 		size = minfo->cfg.size;
 	else {
-		size = ((num_physpages << PAGE_SHIFT) / 16384) /
+		size = ((totalram_pages << PAGE_SHIFT) / 16384) /
 		       sizeof(struct list_head);
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 			size = 8192;
 		if (size < 16)
 			size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
 	if (minfo->cfg.size) {
 		size = minfo->cfg.size;
 	} else {
-		size = (num_physpages << PAGE_SHIFT) / 16384 /
+		size = (totalram_pages << PAGE_SHIFT) / 16384 /
 		       sizeof(struct list_head);
-		if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
 			size = 8192;
 		if (size < 16)
 			size = 16;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c5aab6a368ce..55180b99562a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2091,10 +2091,10 @@ static int __init netlink_proto_init(void)
 	if (!nl_table)
 		goto panic;
 
-	if (num_physpages >= (128 * 1024))
-		limit = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		limit = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		limit = num_physpages >> (23 - PAGE_SHIFT);
+		limit = totalram_pages >> (23 - PAGE_SHIFT);
 
 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
 	limit = (1UL << order) / sizeof(struct hlist_head);
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index 7b5749ee2765..2220f3322326 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -56,7 +56,7 @@ void phonet_get_local_port_range(int *min, int *max)
 	} while (read_seqretry(&local_port_range_lock, seq));
 }
 
-static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
+static int proc_local_port_range(ctl_table *table, int write,
 				void __user *buffer,
 				size_t *lenp, loff_t *ppos)
 {
@@ -70,7 +70,7 @@ static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
 		.extra2 = &local_port_range_max,
 	};
 
-	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
 	if (write && ret == 0) {
 		if (range[1] < range[0])
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 8d8488306fe4..d2c904dd6fbc 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -37,7 +37,7 @@
 #include "rds.h"
 #include "ib.h"
 
-DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 
 static const char *const rds_ib_stat_names[] = {
 	"ib_connect_raced",
diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c
index d33ea790484e..5fe67f6a1d80 100644
--- a/net/rds/iw_stats.c
+++ b/net/rds/iw_stats.c
@@ -37,7 +37,7 @@
 #include "rds.h"
 #include "iw.h"
 
-DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats);
 
 static const char *const rds_iw_stat_names[] = {
 	"iw_connect_raced",
diff --git a/net/rds/page.c b/net/rds/page.c
index 55c21efdb62e..36790122dfd4 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -39,7 +39,7 @@ struct rds_page_remainder {
 	unsigned long	r_offset;
 };
 
-DEFINE_PER_CPU(struct rds_page_remainder, rds_page_remainders) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
 
 /*
  * returns 0 on success or -errno on failure.
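The three RDS hunks above replace an open-coded ____cacheline_aligned annotation on DEFINE_PER_CPU() with DEFINE_PER_CPU_SHARED_ALIGNED(), which declares the variable in the per-CPU area set aside for cacheline-aligned data. A hedged sketch of the pattern with a hypothetical per-CPU counter structure:

/* Sketch only; my_example_stats is an illustrative name. */
struct my_example_stats {
	u64	rx_packets;
	u64	tx_packets;
};

/* Cacheline-aligned per-CPU definition: avoids false sharing between CPUs
 * without hand-annotating the definition. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct my_example_stats, my_example_stats);

static inline void my_example_stats_rx(void)
{
	__get_cpu_var(my_example_stats).rx_packets++;
}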
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index d9231245a79a..bc0019f704fe 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -96,7 +96,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 }
 
 /*
- * allocate a new client call and attempt to to get a connection slot for it
+ * allocate a new client call and attempt to get a connection slot for it
 */
 static struct rxrpc_call *rxrpc_alloc_client_call(
 	struct rxrpc_sock *rx,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 375d64cb1a3d..2c5c76be18f8 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -77,7 +77,7 @@
  * The service curve parameters are converted to the internal
  * representation. The slope values are scaled to avoid overflow.
  * the inverse slope values as well as the y-projection of the 1st
- * segment are kept in order to to avoid 64-bit divide operations
+ * segment are kept in order to avoid 64-bit divide operations
  * that are expensive on 32-bit architectures.
  */
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c557f1fb1c66..612dc878e05c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1184,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Size and allocate the association hash table.
 	 * The methodology is similar to that of the tcp hash tables.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (22 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		goal = totalram_pages >> (22 - PAGE_SHIFT);
 	else
-		goal = num_physpages >> (24 - PAGE_SHIFT);
+		goal = totalram_pages >> (24 - PAGE_SHIFT);
 
 	for (order = 0; (1UL << order) < goal; order++)
 		;
diff --git a/net/socket.c b/net/socket.c
index 2a022c00d85c..49917a1cac7d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -86,6 +86,7 @@
 #include <linux/audit.h>
 #include <linux/wireless.h>
 #include <linux/nsproxy.h>
+#include <linux/magic.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -235,8 +236,6 @@ int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr,
 	return __put_user(klen, ulen);
 }
 
-#define SOCKFS_MAGIC 0x534F434B
-
 static struct kmem_cache *sock_inode_cachep __read_mostly;
 
 static struct inode *sock_alloc_inode(struct super_block *sb)
@@ -285,7 +284,7 @@ static int init_inodecache(void)
 	return 0;
 }
 
-static struct super_operations sockfs_ops = {
+static const struct super_operations sockfs_ops = {
 	.alloc_inode =	sock_alloc_inode,
 	.destroy_inode =sock_destroy_inode,
 	.statfs =	simple_statfs,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 0c431c277af5..54a4e042f104 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -385,7 +385,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
 EXPORT_SYMBOL_GPL(rpcauth_init_cred);
 
 void
-rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
+rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
 {
 	task->tk_msg.rpc_cred = get_rpccred(cred);
 	dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
@@ -394,7 +394,7 @@ rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
 EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
 
 static void
-rpcauth_bind_root_cred(struct rpc_task *task)
+rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
 {
 	struct rpc_auth *auth = task->tk_client->cl_auth;
 	struct auth_cred acred = {
@@ -405,7 +405,7 @@ rpcauth_bind_root_cred(struct rpc_task *task)
 
 	dprintk("RPC: %5u looking up %s cred\n",
 		task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
-	ret = auth->au_ops->lookup_cred(auth, &acred, 0);
+	ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
@@ -413,14 +413,14 @@ rpcauth_bind_root_cred(struct rpc_task *task)
 }
 
 static void
-rpcauth_bind_new_cred(struct rpc_task *task)
+rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
 {
 	struct rpc_auth *auth = task->tk_client->cl_auth;
 	struct rpc_cred *ret;
 
 	dprintk("RPC: %5u looking up %s cred\n",
 		task->tk_pid, auth->au_ops->au_name);
-	ret = rpcauth_lookupcred(auth, 0);
+	ret = rpcauth_lookupcred(auth, lookupflags);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
@@ -430,12 +430,16 @@ rpcauth_bind_new_cred(struct rpc_task *task)
 void
 rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
 {
+	int lookupflags = 0;
+
+	if (flags & RPC_TASK_ASYNC)
+		lookupflags |= RPCAUTH_LOOKUP_NEW;
 	if (cred != NULL)
-		cred->cr_ops->crbind(task, cred);
+		cred->cr_ops->crbind(task, cred, lookupflags);
 	else if (flags & RPC_TASK_ROOTCREDS)
-		rpcauth_bind_root_cred(task);
+		rpcauth_bind_root_cred(task, lookupflags);
 	else
-		rpcauth_bind_new_cred(task);
+		rpcauth_bind_new_cred(task, lookupflags);
 }
 
 void
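rpcauth_bindcred() now derives a lookupflags value from the task flags (RPCAUTH_LOOKUP_NEW for RPC_TASK_ASYNC tasks) and passes it down to the ->crbind operation and the credential lookup helpers instead of a hard-coded 0. A hedged sketch of a ->crbind implementation under the new convention, modelled on the auth_generic change that follows; my_bind_cred() itself is hypothetical:

/* Sketch: forward the caller-supplied lookup flags to lookup_cred(). */
static void
my_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
{
	struct rpc_auth *auth = task->tk_client->cl_auth;
	struct auth_cred *acred =
		&container_of(cred, struct generic_cred, gc_base)->acred;
	struct rpc_cred *ret;

	ret = auth->au_ops->lookup_cred(auth, acred, lookupflags);
	if (!IS_ERR(ret))
		task->tk_msg.rpc_cred = ret;	/* bound successfully */
	else
		task->tk_status = PTR_ERR(ret);	/* assumed error path */
}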
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 4028502f0528..bf88bf8e9365 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -55,13 +55,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void)
 EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
 
 static void
-generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
+generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
 {
 	struct rpc_auth *auth = task->tk_client->cl_auth;
 	struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
 	struct rpc_cred *ret;
 
-	ret = auth->au_ops->lookup_cred(auth, acred, 0);
+	ret = auth->au_ops->lookup_cred(auth, acred, lookupflags);
 	if (!IS_ERR(ret))
 		task->tk_msg.rpc_cred = ret;
 	else
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 2e6a148d277c..f6c51e562a02 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1374,8 +1374,10 @@ svcauth_gss_release(struct svc_rqst *rqstp)
 			if (stat)
 				goto out_err;
 			break;
-	default:
-		goto out_err;
+	/*
+	 * For any other gc_svc value, svcauth_gss_accept() already set
+	 * the auth_error appropriately; just fall through:
+	 */
 	}
 
 out:
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index c70dd7f5258e..1db618f56ecb 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -8,7 +8,6 @@
 
 #include <linux/types.h>
 #include <linux/module.h>
-#include <linux/utsname.h>
 #include <linux/sunrpc/clnt.h>
 
 #ifdef RPC_DEBUG
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 45cdaff9b361..d6eee291a0e2 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -103,23 +103,21 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
103 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); | 103 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); |
104 | 104 | ||
105 | 105 | ||
106 | static void queue_loose(struct cache_detail *detail, struct cache_head *ch); | 106 | static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); |
107 | 107 | ||
108 | static int cache_fresh_locked(struct cache_head *head, time_t expiry) | 108 | static void cache_fresh_locked(struct cache_head *head, time_t expiry) |
109 | { | 109 | { |
110 | head->expiry_time = expiry; | 110 | head->expiry_time = expiry; |
111 | head->last_refresh = get_seconds(); | 111 | head->last_refresh = get_seconds(); |
112 | return !test_and_set_bit(CACHE_VALID, &head->flags); | 112 | set_bit(CACHE_VALID, &head->flags); |
113 | } | 113 | } |
114 | 114 | ||
115 | static void cache_fresh_unlocked(struct cache_head *head, | 115 | static void cache_fresh_unlocked(struct cache_head *head, |
116 | struct cache_detail *detail, int new) | 116 | struct cache_detail *detail) |
117 | { | 117 | { |
118 | if (new) | ||
119 | cache_revisit_request(head); | ||
120 | if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { | 118 | if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { |
121 | cache_revisit_request(head); | 119 | cache_revisit_request(head); |
122 | queue_loose(detail, head); | 120 | cache_dequeue(detail, head); |
123 | } | 121 | } |
124 | } | 122 | } |
125 | 123 | ||
@@ -132,7 +130,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
132 | */ | 130 | */ |
133 | struct cache_head **head; | 131 | struct cache_head **head; |
134 | struct cache_head *tmp; | 132 | struct cache_head *tmp; |
135 | int is_new; | ||
136 | 133 | ||
137 | if (!test_bit(CACHE_VALID, &old->flags)) { | 134 | if (!test_bit(CACHE_VALID, &old->flags)) { |
138 | write_lock(&detail->hash_lock); | 135 | write_lock(&detail->hash_lock); |
@@ -141,9 +138,9 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
141 | set_bit(CACHE_NEGATIVE, &old->flags); | 138 | set_bit(CACHE_NEGATIVE, &old->flags); |
142 | else | 139 | else |
143 | detail->update(old, new); | 140 | detail->update(old, new); |
144 | is_new = cache_fresh_locked(old, new->expiry_time); | 141 | cache_fresh_locked(old, new->expiry_time); |
145 | write_unlock(&detail->hash_lock); | 142 | write_unlock(&detail->hash_lock); |
146 | cache_fresh_unlocked(old, detail, is_new); | 143 | cache_fresh_unlocked(old, detail); |
147 | return old; | 144 | return old; |
148 | } | 145 | } |
149 | write_unlock(&detail->hash_lock); | 146 | write_unlock(&detail->hash_lock); |
@@ -167,11 +164,11 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
167 | *head = tmp; | 164 | *head = tmp; |
168 | detail->entries++; | 165 | detail->entries++; |
169 | cache_get(tmp); | 166 | cache_get(tmp); |
170 | is_new = cache_fresh_locked(tmp, new->expiry_time); | 167 | cache_fresh_locked(tmp, new->expiry_time); |
171 | cache_fresh_locked(old, 0); | 168 | cache_fresh_locked(old, 0); |
172 | write_unlock(&detail->hash_lock); | 169 | write_unlock(&detail->hash_lock); |
173 | cache_fresh_unlocked(tmp, detail, is_new); | 170 | cache_fresh_unlocked(tmp, detail); |
174 | cache_fresh_unlocked(old, detail, 0); | 171 | cache_fresh_unlocked(old, detail); |
175 | cache_put(old, detail); | 172 | cache_put(old, detail); |
176 | return tmp; | 173 | return tmp; |
177 | } | 174 | } |
@@ -184,6 +181,22 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) | |||
184 | return cd->cache_upcall(cd, h); | 181 | return cd->cache_upcall(cd, h); |
185 | } | 182 | } |
186 | 183 | ||
184 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) | ||
185 | { | ||
186 | if (!test_bit(CACHE_VALID, &h->flags) || | ||
187 | h->expiry_time < get_seconds()) | ||
188 | return -EAGAIN; | ||
189 | else if (detail->flush_time > h->last_refresh) | ||
190 | return -EAGAIN; | ||
191 | else { | ||
192 | /* entry is valid */ | ||
193 | if (test_bit(CACHE_NEGATIVE, &h->flags)) | ||
194 | return -ENOENT; | ||
195 | else | ||
196 | return 0; | ||
197 | } | ||
198 | } | ||
199 | |||
187 | /* | 200 | /* |
188 | * This is the generic cache management routine for all | 201 | * This is the generic cache management routine for all |
189 | * the authentication caches. | 202 | * the authentication caches. |
@@ -192,8 +205,10 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) | |||
192 | * | 205 | * |
193 | * | 206 | * |
194 | * Returns 0 if the cache_head can be used, or cache_puts it and returns | 207 | * Returns 0 if the cache_head can be used, or cache_puts it and returns |
195 | * -EAGAIN if upcall is pending, | 208 | * -EAGAIN if upcall is pending and request has been queued |
196 | * -ETIMEDOUT if upcall failed and should be retried, | 209 | * -ETIMEDOUT if upcall failed or request could not be queued or |
210 | * upcall completed but item is still invalid (implying that | ||
211 | * the cache item has been replaced with a newer one). | ||
197 | * -ENOENT if cache entry was negative | 212 | * -ENOENT if cache entry was negative |
198 | */ | 213 | */ |
199 | int cache_check(struct cache_detail *detail, | 214 | int cache_check(struct cache_detail *detail, |
@@ -203,17 +218,7 @@ int cache_check(struct cache_detail *detail, | |||
203 | long refresh_age, age; | 218 | long refresh_age, age; |
204 | 219 | ||
205 | /* First decide return status as best we can */ | 220 | /* First decide return status as best we can */ |
206 | if (!test_bit(CACHE_VALID, &h->flags) || | 221 | rv = cache_is_valid(detail, h); |
207 | h->expiry_time < get_seconds()) | ||
208 | rv = -EAGAIN; | ||
209 | else if (detail->flush_time > h->last_refresh) | ||
210 | rv = -EAGAIN; | ||
211 | else { | ||
212 | /* entry is valid */ | ||
213 | if (test_bit(CACHE_NEGATIVE, &h->flags)) | ||
214 | rv = -ENOENT; | ||
215 | else rv = 0; | ||
216 | } | ||
217 | 222 | ||
218 | /* now see if we want to start an upcall */ | 223 | /* now see if we want to start an upcall */ |
219 | refresh_age = (h->expiry_time - h->last_refresh); | 224 | refresh_age = (h->expiry_time - h->last_refresh); |
@@ -229,10 +234,11 @@ int cache_check(struct cache_detail *detail, | |||
229 | switch (cache_make_upcall(detail, h)) { | 234 | switch (cache_make_upcall(detail, h)) { |
230 | case -EINVAL: | 235 | case -EINVAL: |
231 | clear_bit(CACHE_PENDING, &h->flags); | 236 | clear_bit(CACHE_PENDING, &h->flags); |
237 | cache_revisit_request(h); | ||
232 | if (rv == -EAGAIN) { | 238 | if (rv == -EAGAIN) { |
233 | set_bit(CACHE_NEGATIVE, &h->flags); | 239 | set_bit(CACHE_NEGATIVE, &h->flags); |
234 | cache_fresh_unlocked(h, detail, | 240 | cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY); |
235 | cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY)); | 241 | cache_fresh_unlocked(h, detail); |
236 | rv = -ENOENT; | 242 | rv = -ENOENT; |
237 | } | 243 | } |
238 | break; | 244 | break; |
@@ -245,10 +251,14 @@ int cache_check(struct cache_detail *detail, | |||
245 | } | 251 | } |
246 | } | 252 | } |
247 | 253 | ||
248 | if (rv == -EAGAIN) | 254 | if (rv == -EAGAIN) { |
249 | if (cache_defer_req(rqstp, h) != 0) | 255 | if (cache_defer_req(rqstp, h) < 0) { |
250 | rv = -ETIMEDOUT; | 256 | /* Request is not deferred */ |
251 | 257 | rv = cache_is_valid(detail, h); | |
258 | if (rv == -EAGAIN) | ||
259 | rv = -ETIMEDOUT; | ||
260 | } | ||
261 | } | ||
252 | if (rv) | 262 | if (rv) |
253 | cache_put(h, detail); | 263 | cache_put(h, detail); |
254 | return rv; | 264 | return rv; |
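The hunks above redefine cache_check()'s contract: -EAGAIN now means the request really was deferred behind a pending upcall, and a failed deferral re-validates the entry so only a still-unusable entry yields -ETIMEDOUT. A hypothetical caller (illustrative only, not from this patch; the svcauth return codes are just one common choice) would consume the result roughly like this:

	switch (cache_check(detail, h, &rqstp->rq_chandle)) {
	case 0:			/* positive, valid entry: use it */
		break;
	case -EAGAIN:		/* upcall pending, request deferred: drop and retry later */
		return SVC_DROP;
	case -ENOENT:		/* negative entry */
	case -ETIMEDOUT:	/* could not defer and entry is still not valid */
	default:
		return SVC_DENIED;
	}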
@@ -396,7 +406,7 @@ static int cache_clean(void) | |||
396 | ) | 406 | ) |
397 | continue; | 407 | continue; |
398 | if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) | 408 | if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) |
399 | queue_loose(current_detail, ch); | 409 | cache_dequeue(current_detail, ch); |
400 | 410 | ||
401 | if (atomic_read(&ch->ref.refcount) == 1) | 411 | if (atomic_read(&ch->ref.refcount) == 1) |
402 | break; | 412 | break; |
@@ -412,8 +422,10 @@ static int cache_clean(void) | |||
412 | if (!ch) | 422 | if (!ch) |
413 | current_index ++; | 423 | current_index ++; |
414 | spin_unlock(&cache_list_lock); | 424 | spin_unlock(&cache_list_lock); |
415 | if (ch) | 425 | if (ch) { |
426 | cache_revisit_request(ch); | ||
416 | cache_put(ch, d); | 427 | cache_put(ch, d); |
428 | } | ||
417 | } else | 429 | } else |
418 | spin_unlock(&cache_list_lock); | 430 | spin_unlock(&cache_list_lock); |
419 | 431 | ||
@@ -488,7 +500,7 @@ static int cache_defer_cnt; | |||
488 | 500 | ||
489 | static int cache_defer_req(struct cache_req *req, struct cache_head *item) | 501 | static int cache_defer_req(struct cache_req *req, struct cache_head *item) |
490 | { | 502 | { |
491 | struct cache_deferred_req *dreq; | 503 | struct cache_deferred_req *dreq, *discard; |
492 | int hash = DFR_HASH(item); | 504 | int hash = DFR_HASH(item); |
493 | 505 | ||
494 | if (cache_defer_cnt >= DFR_MAX) { | 506 | if (cache_defer_cnt >= DFR_MAX) { |
@@ -496,11 +508,11 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
496 | * or continue and drop the oldest below | 508 | * or continue and drop the oldest below |
497 | */ | 509 | */ |
498 | if (net_random()&1) | 510 | if (net_random()&1) |
499 | return -ETIMEDOUT; | 511 | return -ENOMEM; |
500 | } | 512 | } |
501 | dreq = req->defer(req); | 513 | dreq = req->defer(req); |
502 | if (dreq == NULL) | 514 | if (dreq == NULL) |
503 | return -ETIMEDOUT; | 515 | return -ENOMEM; |
504 | 516 | ||
505 | dreq->item = item; | 517 | dreq->item = item; |
506 | 518 | ||
@@ -513,23 +525,24 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
513 | list_add(&dreq->hash, &cache_defer_hash[hash]); | 525 | list_add(&dreq->hash, &cache_defer_hash[hash]); |
514 | 526 | ||
515 | /* it is in, now maybe clean up */ | 527 | /* it is in, now maybe clean up */ |
516 | dreq = NULL; | 528 | discard = NULL; |
517 | if (++cache_defer_cnt > DFR_MAX) { | 529 | if (++cache_defer_cnt > DFR_MAX) { |
518 | dreq = list_entry(cache_defer_list.prev, | 530 | discard = list_entry(cache_defer_list.prev, |
519 | struct cache_deferred_req, recent); | 531 | struct cache_deferred_req, recent); |
520 | list_del(&dreq->recent); | 532 | list_del_init(&discard->recent); |
521 | list_del(&dreq->hash); | 533 | list_del_init(&discard->hash); |
522 | cache_defer_cnt--; | 534 | cache_defer_cnt--; |
523 | } | 535 | } |
524 | spin_unlock(&cache_defer_lock); | 536 | spin_unlock(&cache_defer_lock); |
525 | 537 | ||
526 | if (dreq) { | 538 | if (discard) |
527 | /* there was one too many */ | 539 | /* there was one too many */ |
528 | dreq->revisit(dreq, 1); | 540 | discard->revisit(discard, 1); |
529 | } | 541 | |
530 | if (!test_bit(CACHE_PENDING, &item->flags)) { | 542 | if (!test_bit(CACHE_PENDING, &item->flags)) { |
531 | /* must have just been validated... */ | 543 | /* must have just been validated... */ |
532 | cache_revisit_request(item); | 544 | cache_revisit_request(item); |
545 | return -EAGAIN; | ||
533 | } | 546 | } |
534 | return 0; | 547 | return 0; |
535 | } | 548 | } |
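The switch from list_del() to list_del_init() here and in the two hunks that follow leaves an unhashed deferral entry self-linked rather than poisoned. A minimal illustration of that property (not from the patch):

	LIST_HEAD(queue);
	struct list_head node;

	list_add(&node, &queue);
	list_del_init(&node);		/* node now points at itself */
	WARN_ON(!list_empty(&node));	/* list_empty() stays meaningful, so a later
					 * unlink of the same entry can be detected safely */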
@@ -551,7 +564,7 @@ static void cache_revisit_request(struct cache_head *item) | |||
551 | dreq = list_entry(lp, struct cache_deferred_req, hash); | 564 | dreq = list_entry(lp, struct cache_deferred_req, hash); |
552 | lp = lp->next; | 565 | lp = lp->next; |
553 | if (dreq->item == item) { | 566 | if (dreq->item == item) { |
554 | list_del(&dreq->hash); | 567 | list_del_init(&dreq->hash); |
555 | list_move(&dreq->recent, &pending); | 568 | list_move(&dreq->recent, &pending); |
556 | cache_defer_cnt--; | 569 | cache_defer_cnt--; |
557 | } | 570 | } |
@@ -577,7 +590,7 @@ void cache_clean_deferred(void *owner) | |||
577 | 590 | ||
578 | list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { | 591 | list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { |
579 | if (dreq->owner == owner) { | 592 | if (dreq->owner == owner) { |
580 | list_del(&dreq->hash); | 593 | list_del_init(&dreq->hash); |
581 | list_move(&dreq->recent, &pending); | 594 | list_move(&dreq->recent, &pending); |
582 | cache_defer_cnt--; | 595 | cache_defer_cnt--; |
583 | } | 596 | } |
@@ -887,7 +900,7 @@ static int cache_release(struct inode *inode, struct file *filp, | |||
887 | 900 | ||
888 | 901 | ||
889 | 902 | ||
890 | static void queue_loose(struct cache_detail *detail, struct cache_head *ch) | 903 | static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch) |
891 | { | 904 | { |
892 | struct cache_queue *cq; | 905 | struct cache_queue *cq; |
893 | spin_lock(&queue_lock); | 906 | spin_lock(&queue_lock); |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index fac0ca93f06b..a417d5ab5dd7 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -288,6 +288,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
288 | .srcaddr = args->saddress, | 288 | .srcaddr = args->saddress, |
289 | .dstaddr = args->address, | 289 | .dstaddr = args->address, |
290 | .addrlen = args->addrsize, | 290 | .addrlen = args->addrsize, |
291 | .bc_xprt = args->bc_xprt, | ||
291 | }; | 292 | }; |
292 | char servername[48]; | 293 | char servername[48]; |
293 | 294 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 7f676bdf70d3..49278f830367 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -860,7 +860,8 @@ static void rpc_clntdir_depopulate(struct dentry *dentry) | |||
860 | 860 | ||
861 | /** | 861 | /** |
862 | * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs | 862 | * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs |
863 | * @path: path from the rpc_pipefs root to the new directory | 863 | * @dentry: dentry from the rpc_pipefs root to the new directory |
864 | * @name: &struct qstr for the name | ||
864 | * @rpc_client: rpc client to associate with this directory | 865 | * @rpc_client: rpc client to associate with this directory |
865 | * | 866 | * |
866 | * This creates a directory at the given @path associated with | 867 | * This creates a directory at the given @path associated with |
@@ -930,7 +931,7 @@ void rpc_remove_cache_dir(struct dentry *dentry) | |||
930 | /* | 931 | /* |
931 | * populate the filesystem | 932 | * populate the filesystem |
932 | */ | 933 | */ |
933 | static struct super_operations s_ops = { | 934 | static const struct super_operations s_ops = { |
934 | .alloc_inode = rpc_alloc_inode, | 935 | .alloc_inode = rpc_alloc_inode, |
935 | .destroy_inode = rpc_destroy_inode, | 936 | .destroy_inode = rpc_destroy_inode, |
936 | .statfs = simple_statfs, | 937 | .statfs = simple_statfs, |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 8f459abe97cf..cef74ba0666c 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -21,6 +21,8 @@ | |||
21 | 21 | ||
22 | #include <linux/sunrpc/clnt.h> | 22 | #include <linux/sunrpc/clnt.h> |
23 | 23 | ||
24 | #include "sunrpc.h" | ||
25 | |||
24 | #ifdef RPC_DEBUG | 26 | #ifdef RPC_DEBUG |
25 | #define RPCDBG_FACILITY RPCDBG_SCHED | 27 | #define RPCDBG_FACILITY RPCDBG_SCHED |
26 | #define RPC_TASK_MAGIC_ID 0xf00baa | 28 | #define RPC_TASK_MAGIC_ID 0xf00baa |
@@ -711,11 +713,6 @@ static void rpc_async_schedule(struct work_struct *work) | |||
711 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); | 713 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
712 | } | 714 | } |
713 | 715 | ||
714 | struct rpc_buffer { | ||
715 | size_t len; | ||
716 | char data[]; | ||
717 | }; | ||
718 | |||
719 | /** | 716 | /** |
720 | * rpc_malloc - allocate an RPC buffer | 717 | * rpc_malloc - allocate an RPC buffer |
721 | * @task: RPC task that will use this buffer | 718 | * @task: RPC task that will use this buffer |
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h index 5d9dd742264b..90c292e2738b 100644 --- a/net/sunrpc/sunrpc.h +++ b/net/sunrpc/sunrpc.h | |||
@@ -27,11 +27,25 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
27 | #ifndef _NET_SUNRPC_SUNRPC_H | 27 | #ifndef _NET_SUNRPC_SUNRPC_H |
28 | #define _NET_SUNRPC_SUNRPC_H | 28 | #define _NET_SUNRPC_SUNRPC_H |
29 | 29 | ||
30 | #include <linux/net.h> | ||
31 | |||
32 | /* | ||
33 | * Header for dynamically allocated rpc buffers. | ||
34 | */ | ||
35 | struct rpc_buffer { | ||
36 | size_t len; | ||
37 | char data[]; | ||
38 | }; | ||
39 | |||
30 | static inline int rpc_reply_expected(struct rpc_task *task) | 40 | static inline int rpc_reply_expected(struct rpc_task *task) |
31 | { | 41 | { |
32 | return (task->tk_msg.rpc_proc != NULL) && | 42 | return (task->tk_msg.rpc_proc != NULL) && |
33 | (task->tk_msg.rpc_proc->p_decode != NULL); | 43 | (task->tk_msg.rpc_proc->p_decode != NULL); |
34 | } | 44 | } |
35 | 45 | ||
46 | int svc_send_common(struct socket *sock, struct xdr_buf *xdr, | ||
47 | struct page *headpage, unsigned long headoffset, | ||
48 | struct page *tailpage, unsigned long tailoffset); | ||
49 | |||
36 | #endif /* _NET_SUNRPC_SUNRPC_H */ | 50 | #endif /* _NET_SUNRPC_SUNRPC_H */ |
37 | 51 | ||
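Moving struct rpc_buffer into this shared header lets both rpc_malloc()/rpc_free() in sched.c and the new page-backed backchannel allocator in xprtsock.c use the same length-prefix trick. A sketch of the pairing (illustrative, not from the patch):

	static void *example_buf_alloc(size_t size)
	{
		struct rpc_buffer *buf;

		buf = kmalloc(sizeof(*buf) + size, GFP_NOFS);
		if (!buf)
			return NULL;
		buf->len = size;
		return buf->data;	/* caller only ever sees the payload */
	}

	static void example_buf_free(void *data)
	{
		/* recover the header hidden in front of the payload */
		struct rpc_buffer *buf = container_of(data, struct rpc_buffer, data);

		kfree(buf);
	}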
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 27d44332f017..df124f78ee48 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -160,6 +160,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt, | |||
160 | mutex_init(&xprt->xpt_mutex); | 160 | mutex_init(&xprt->xpt_mutex); |
161 | spin_lock_init(&xprt->xpt_lock); | 161 | spin_lock_init(&xprt->xpt_lock); |
162 | set_bit(XPT_BUSY, &xprt->xpt_flags); | 162 | set_bit(XPT_BUSY, &xprt->xpt_flags); |
163 | rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); | ||
163 | } | 164 | } |
164 | EXPORT_SYMBOL_GPL(svc_xprt_init); | 165 | EXPORT_SYMBOL_GPL(svc_xprt_init); |
165 | 166 | ||
@@ -710,10 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
710 | spin_unlock_bh(&pool->sp_lock); | 711 | spin_unlock_bh(&pool->sp_lock); |
711 | 712 | ||
712 | len = 0; | 713 | len = 0; |
713 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 714 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { |
714 | dprintk("svc_recv: found XPT_CLOSE\n"); | ||
715 | svc_delete_xprt(xprt); | ||
716 | } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | ||
717 | struct svc_xprt *newxpt; | 715 | struct svc_xprt *newxpt; |
718 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | 716 | newxpt = xprt->xpt_ops->xpo_accept(xprt); |
719 | if (newxpt) { | 717 | if (newxpt) { |
@@ -739,7 +737,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
739 | svc_xprt_received(newxpt); | 737 | svc_xprt_received(newxpt); |
740 | } | 738 | } |
741 | svc_xprt_received(xprt); | 739 | svc_xprt_received(xprt); |
742 | } else { | 740 | } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
743 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 741 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
744 | rqstp, pool->sp_id, xprt, | 742 | rqstp, pool->sp_id, xprt, |
745 | atomic_read(&xprt->xpt_ref.refcount)); | 743 | atomic_read(&xprt->xpt_ref.refcount)); |
@@ -752,6 +750,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
752 | dprintk("svc: got len=%d\n", len); | 750 | dprintk("svc: got len=%d\n", len); |
753 | } | 751 | } |
754 | 752 | ||
753 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | ||
754 | dprintk("svc_recv: found XPT_CLOSE\n"); | ||
755 | svc_delete_xprt(xprt); | ||
756 | } | ||
757 | |||
755 | /* No data, incomplete (TCP) read, or accept() */ | 758 | /* No data, incomplete (TCP) read, or accept() */ |
756 | if (len == 0 || len == -EAGAIN) { | 759 | if (len == 0 || len == -EAGAIN) { |
757 | rqstp->rq_res.len = 0; | 760 | rqstp->rq_res.len = 0; |
@@ -808,6 +811,7 @@ int svc_send(struct svc_rqst *rqstp) | |||
808 | else | 811 | else |
809 | len = xprt->xpt_ops->xpo_sendto(rqstp); | 812 | len = xprt->xpt_ops->xpo_sendto(rqstp); |
810 | mutex_unlock(&xprt->xpt_mutex); | 813 | mutex_unlock(&xprt->xpt_mutex); |
814 | rpc_wake_up(&xprt->xpt_bc_pending); | ||
811 | svc_xprt_release(rqstp); | 815 | svc_xprt_release(rqstp); |
812 | 816 | ||
813 | if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) | 817 | if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) |
@@ -1166,11 +1170,6 @@ static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos) | |||
1166 | 1170 | ||
1167 | dprintk("svc_pool_stats_start, *pidx=%u\n", pidx); | 1171 | dprintk("svc_pool_stats_start, *pidx=%u\n", pidx); |
1168 | 1172 | ||
1169 | lock_kernel(); | ||
1170 | /* bump up the pseudo refcount while traversing */ | ||
1171 | svc_get(serv); | ||
1172 | unlock_kernel(); | ||
1173 | |||
1174 | if (!pidx) | 1173 | if (!pidx) |
1175 | return SEQ_START_TOKEN; | 1174 | return SEQ_START_TOKEN; |
1176 | return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]); | 1175 | return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]); |
@@ -1198,12 +1197,6 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos) | |||
1198 | 1197 | ||
1199 | static void svc_pool_stats_stop(struct seq_file *m, void *p) | 1198 | static void svc_pool_stats_stop(struct seq_file *m, void *p) |
1200 | { | 1199 | { |
1201 | struct svc_serv *serv = m->private; | ||
1202 | |||
1203 | lock_kernel(); | ||
1204 | /* this function really, really should have been called svc_put() */ | ||
1205 | svc_destroy(serv); | ||
1206 | unlock_kernel(); | ||
1207 | } | 1200 | } |
1208 | 1201 | ||
1209 | static int svc_pool_stats_show(struct seq_file *m, void *p) | 1202 | static int svc_pool_stats_show(struct seq_file *m, void *p) |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 6caffa34ac01..117f68a8aa40 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -668,6 +668,7 @@ static int unix_gid_find(uid_t uid, struct group_info **gip, | |||
668 | case 0: | 668 | case 0: |
669 | *gip = ug->gi; | 669 | *gip = ug->gi; |
670 | get_group_info(*gip); | 670 | get_group_info(*gip); |
671 | cache_put(&ug->h, &unix_gid_cache); | ||
671 | return 0; | 672 | return 0; |
672 | default: | 673 | default: |
673 | return -EAGAIN; | 674 | return -EAGAIN; |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 23128ee191ae..ccc5e83cae5d 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/sunrpc/msg_prot.h> | 49 | #include <linux/sunrpc/msg_prot.h> |
50 | #include <linux/sunrpc/svcsock.h> | 50 | #include <linux/sunrpc/svcsock.h> |
51 | #include <linux/sunrpc/stats.h> | 51 | #include <linux/sunrpc/stats.h> |
52 | #include <linux/sunrpc/xprt.h> | ||
52 | 53 | ||
53 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 54 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
54 | 55 | ||
@@ -153,49 +154,27 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | |||
153 | } | 154 | } |
154 | 155 | ||
155 | /* | 156 | /* |
156 | * Generic sendto routine | 157 | * send routine intended to be shared by the fore- and back-channel |
157 | */ | 158 | */ |
158 | static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) | 159 | int svc_send_common(struct socket *sock, struct xdr_buf *xdr, |
160 | struct page *headpage, unsigned long headoffset, | ||
161 | struct page *tailpage, unsigned long tailoffset) | ||
159 | { | 162 | { |
160 | struct svc_sock *svsk = | ||
161 | container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); | ||
162 | struct socket *sock = svsk->sk_sock; | ||
163 | int slen; | ||
164 | union { | ||
165 | struct cmsghdr hdr; | ||
166 | long all[SVC_PKTINFO_SPACE / sizeof(long)]; | ||
167 | } buffer; | ||
168 | struct cmsghdr *cmh = &buffer.hdr; | ||
169 | int len = 0; | ||
170 | int result; | 163 | int result; |
171 | int size; | 164 | int size; |
172 | struct page **ppage = xdr->pages; | 165 | struct page **ppage = xdr->pages; |
173 | size_t base = xdr->page_base; | 166 | size_t base = xdr->page_base; |
174 | unsigned int pglen = xdr->page_len; | 167 | unsigned int pglen = xdr->page_len; |
175 | unsigned int flags = MSG_MORE; | 168 | unsigned int flags = MSG_MORE; |
176 | RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); | 169 | int slen; |
170 | int len = 0; | ||
177 | 171 | ||
178 | slen = xdr->len; | 172 | slen = xdr->len; |
179 | 173 | ||
180 | if (rqstp->rq_prot == IPPROTO_UDP) { | ||
181 | struct msghdr msg = { | ||
182 | .msg_name = &rqstp->rq_addr, | ||
183 | .msg_namelen = rqstp->rq_addrlen, | ||
184 | .msg_control = cmh, | ||
185 | .msg_controllen = sizeof(buffer), | ||
186 | .msg_flags = MSG_MORE, | ||
187 | }; | ||
188 | |||
189 | svc_set_cmsg_data(rqstp, cmh); | ||
190 | |||
191 | if (sock_sendmsg(sock, &msg, 0) < 0) | ||
192 | goto out; | ||
193 | } | ||
194 | |||
195 | /* send head */ | 174 | /* send head */ |
196 | if (slen == xdr->head[0].iov_len) | 175 | if (slen == xdr->head[0].iov_len) |
197 | flags = 0; | 176 | flags = 0; |
198 | len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, | 177 | len = kernel_sendpage(sock, headpage, headoffset, |
199 | xdr->head[0].iov_len, flags); | 178 | xdr->head[0].iov_len, flags); |
200 | if (len != xdr->head[0].iov_len) | 179 | if (len != xdr->head[0].iov_len) |
201 | goto out; | 180 | goto out; |
@@ -219,16 +198,58 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) | |||
219 | base = 0; | 198 | base = 0; |
220 | ppage++; | 199 | ppage++; |
221 | } | 200 | } |
201 | |||
222 | /* send tail */ | 202 | /* send tail */ |
223 | if (xdr->tail[0].iov_len) { | 203 | if (xdr->tail[0].iov_len) { |
224 | result = kernel_sendpage(sock, rqstp->rq_respages[0], | 204 | result = kernel_sendpage(sock, tailpage, tailoffset, |
225 | ((unsigned long)xdr->tail[0].iov_base) | 205 | xdr->tail[0].iov_len, 0); |
226 | & (PAGE_SIZE-1), | ||
227 | xdr->tail[0].iov_len, 0); | ||
228 | |||
229 | if (result > 0) | 206 | if (result > 0) |
230 | len += result; | 207 | len += result; |
231 | } | 208 | } |
209 | |||
210 | out: | ||
211 | return len; | ||
212 | } | ||
213 | |||
214 | |||
215 | /* | ||
216 | * Generic sendto routine | ||
217 | */ | ||
218 | static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) | ||
219 | { | ||
220 | struct svc_sock *svsk = | ||
221 | container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); | ||
222 | struct socket *sock = svsk->sk_sock; | ||
223 | union { | ||
224 | struct cmsghdr hdr; | ||
225 | long all[SVC_PKTINFO_SPACE / sizeof(long)]; | ||
226 | } buffer; | ||
227 | struct cmsghdr *cmh = &buffer.hdr; | ||
228 | int len = 0; | ||
229 | unsigned long tailoff; | ||
230 | unsigned long headoff; | ||
231 | RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); | ||
232 | |||
233 | if (rqstp->rq_prot == IPPROTO_UDP) { | ||
234 | struct msghdr msg = { | ||
235 | .msg_name = &rqstp->rq_addr, | ||
236 | .msg_namelen = rqstp->rq_addrlen, | ||
237 | .msg_control = cmh, | ||
238 | .msg_controllen = sizeof(buffer), | ||
239 | .msg_flags = MSG_MORE, | ||
240 | }; | ||
241 | |||
242 | svc_set_cmsg_data(rqstp, cmh); | ||
243 | |||
244 | if (sock_sendmsg(sock, &msg, 0) < 0) | ||
245 | goto out; | ||
246 | } | ||
247 | |||
248 | tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1); | ||
249 | headoff = 0; | ||
250 | len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff, | ||
251 | rqstp->rq_respages[0], tailoff); | ||
252 | |||
232 | out: | 253 | out: |
233 | dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", | 254 | dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", |
234 | svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, | 255 | svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, |
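With the refactoring above, svc_send_common() is the single sender for both channels, and callers differ only in which pages hold the head and tail of the xdr_buf. A sketch of the two call shapes (illustrative; the backchannel caller added later in this series differs slightly in detail):

	/* forechannel: head and tail both live in the first response page */
	tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE - 1);
	len = svc_send_common(sock, xdr, rqstp->rq_respages[0], 0,
			      rqstp->rq_respages[0], tailoff);

	/* backchannel: head and tail are ordinary kernel buffers, so their
	 * pages are looked up from the virtual addresses */
	headoff = ((unsigned long)xdr->head[0].iov_base) & ~PAGE_MASK;
	tailoff = ((unsigned long)xdr->tail[0].iov_base) & ~PAGE_MASK;
	len = svc_send_common(sock, xdr,
			      virt_to_page(xdr->head[0].iov_base), headoff,
			      virt_to_page(xdr->tail[0].iov_base), tailoff);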
@@ -432,29 +453,49 @@ static void svc_tcp_write_space(struct sock *sk) | |||
432 | } | 453 | } |
433 | 454 | ||
434 | /* | 455 | /* |
456 | * See net/ipv4/ip_sockglue.c : ip_cmsg_recv_pktinfo | ||
457 | */ | ||
458 | static int svc_udp_get_dest_address4(struct svc_rqst *rqstp, | ||
459 | struct cmsghdr *cmh) | ||
460 | { | ||
461 | struct in_pktinfo *pki = CMSG_DATA(cmh); | ||
462 | if (cmh->cmsg_type != IP_PKTINFO) | ||
463 | return 0; | ||
464 | rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; | ||
465 | return 1; | ||
466 | } | ||
467 | |||
468 | /* | ||
469 | * See net/ipv6/datagram.c : datagram_recv_ctl | ||
470 | */ | ||
471 | static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, | ||
472 | struct cmsghdr *cmh) | ||
473 | { | ||
474 | struct in6_pktinfo *pki = CMSG_DATA(cmh); | ||
475 | if (cmh->cmsg_type != IPV6_PKTINFO) | ||
476 | return 0; | ||
477 | ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); | ||
478 | return 1; | ||
479 | } | ||
480 | |||
481 | /* | ||
435 | * Copy the UDP datagram's destination address to the rqstp structure. | 482 | * Copy the UDP datagram's destination address to the rqstp structure. |
436 | * The 'destination' address in this case is the address to which the | 483 | * The 'destination' address in this case is the address to which the |
437 | * peer sent the datagram, i.e. our local address. For multihomed | 484 | * peer sent the datagram, i.e. our local address. For multihomed |
438 | * hosts, this can change from msg to msg. Note that only the IP | 485 | * hosts, this can change from msg to msg. Note that only the IP |
439 | * address changes, the port number should remain the same. | 486 | * address changes, the port number should remain the same. |
440 | */ | 487 | */ |
441 | static void svc_udp_get_dest_address(struct svc_rqst *rqstp, | 488 | static int svc_udp_get_dest_address(struct svc_rqst *rqstp, |
442 | struct cmsghdr *cmh) | 489 | struct cmsghdr *cmh) |
443 | { | 490 | { |
444 | struct svc_sock *svsk = | 491 | switch (cmh->cmsg_level) { |
445 | container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); | 492 | case SOL_IP: |
446 | switch (svsk->sk_sk->sk_family) { | 493 | return svc_udp_get_dest_address4(rqstp, cmh); |
447 | case AF_INET: { | 494 | case SOL_IPV6: |
448 | struct in_pktinfo *pki = CMSG_DATA(cmh); | 495 | return svc_udp_get_dest_address6(rqstp, cmh); |
449 | rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; | ||
450 | break; | ||
451 | } | ||
452 | case AF_INET6: { | ||
453 | struct in6_pktinfo *pki = CMSG_DATA(cmh); | ||
454 | ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); | ||
455 | break; | ||
456 | } | ||
457 | } | 496 | } |
497 | |||
498 | return 0; | ||
458 | } | 499 | } |
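The dispatcher above only inspects a control message that svc_udp_recvfrom() has already peeked off the socket; the PKTINFO data shows up because of the IP_PKTINFO/IPV6_RECVPKTINFO options enabled in svc_udp_init() below. A context sketch of the receive side (illustrative, not part of the patch):

	int err;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_control	= cmh,
		.msg_controllen	= sizeof(buffer),
		.msg_flags	= MSG_DONTWAIT,
	};

	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, 0, 0,
			     MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0 && !svc_udp_get_dest_address(rqstp, cmh))
		return 0;	/* unknown cmsg level/type: drop the datagram,
				 * as the svc_udp_recvfrom() hunk below does */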
459 | 500 | ||
460 | /* | 501 | /* |
@@ -531,16 +572,15 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
531 | 572 | ||
532 | rqstp->rq_prot = IPPROTO_UDP; | 573 | rqstp->rq_prot = IPPROTO_UDP; |
533 | 574 | ||
534 | if (cmh->cmsg_level != IPPROTO_IP || | 575 | if (!svc_udp_get_dest_address(rqstp, cmh)) { |
535 | cmh->cmsg_type != IP_PKTINFO) { | ||
536 | if (net_ratelimit()) | 576 | if (net_ratelimit()) |
537 | printk("rpcsvc: received unknown control message:" | 577 | printk(KERN_WARNING |
538 | "%d/%d\n", | 578 | "svc: received unknown control message %d/%d; " |
539 | cmh->cmsg_level, cmh->cmsg_type); | 579 | "dropping RPC reply datagram\n", |
580 | cmh->cmsg_level, cmh->cmsg_type); | ||
540 | skb_free_datagram(svsk->sk_sk, skb); | 581 | skb_free_datagram(svsk->sk_sk, skb); |
541 | return 0; | 582 | return 0; |
542 | } | 583 | } |
543 | svc_udp_get_dest_address(rqstp, cmh); | ||
544 | 584 | ||
545 | if (skb_is_nonlinear(skb)) { | 585 | if (skb_is_nonlinear(skb)) { |
546 | /* we have to copy */ | 586 | /* we have to copy */ |
@@ -651,8 +691,7 @@ static struct svc_xprt_class svc_udp_class = { | |||
651 | 691 | ||
652 | static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) | 692 | static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) |
653 | { | 693 | { |
654 | int one = 1; | 694 | int err, level, optname, one = 1; |
655 | mm_segment_t oldfs; | ||
656 | 695 | ||
657 | svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv); | 696 | svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv); |
658 | clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); | 697 | clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); |
@@ -671,12 +710,22 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) | |||
671 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 710 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
672 | set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); | 711 | set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); |
673 | 712 | ||
674 | oldfs = get_fs(); | ||
675 | set_fs(KERNEL_DS); | ||
676 | /* make sure we get destination address info */ | 713 | /* make sure we get destination address info */ |
677 | svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO, | 714 | switch (svsk->sk_sk->sk_family) { |
678 | (char __user *)&one, sizeof(one)); | 715 | case AF_INET: |
679 | set_fs(oldfs); | 716 | level = SOL_IP; |
717 | optname = IP_PKTINFO; | ||
718 | break; | ||
719 | case AF_INET6: | ||
720 | level = SOL_IPV6; | ||
721 | optname = IPV6_RECVPKTINFO; | ||
722 | break; | ||
723 | default: | ||
724 | BUG(); | ||
725 | } | ||
726 | err = kernel_setsockopt(svsk->sk_sock, level, optname, | ||
727 | (char *)&one, sizeof(one)); | ||
728 | dprintk("svc: kernel_setsockopt returned %d\n", err); | ||
680 | } | 729 | } |
681 | 730 | ||
682 | /* | 731 | /* |
@@ -826,21 +875,15 @@ failed: | |||
826 | } | 875 | } |
827 | 876 | ||
828 | /* | 877 | /* |
829 | * Receive data from a TCP socket. | 878 | * Receive data. |
879 | * If we haven't gotten the record length yet, get the next four bytes. | ||
880 | * Otherwise try to gobble up as much as possible up to the complete | ||
881 | * record length. | ||
830 | */ | 882 | */ |
831 | static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | 883 | static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) |
832 | { | 884 | { |
833 | struct svc_sock *svsk = | ||
834 | container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); | ||
835 | struct svc_serv *serv = svsk->sk_xprt.xpt_server; | 885 | struct svc_serv *serv = svsk->sk_xprt.xpt_server; |
836 | int len; | 886 | int len; |
837 | struct kvec *vec; | ||
838 | int pnum, vlen; | ||
839 | |||
840 | dprintk("svc: tcp_recv %p data %d conn %d close %d\n", | ||
841 | svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), | ||
842 | test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), | ||
843 | test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); | ||
844 | 887 | ||
845 | if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) | 888 | if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) |
846 | /* sndbuf needs to have room for one request | 889 | /* sndbuf needs to have room for one request |
@@ -861,10 +904,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
861 | 904 | ||
862 | clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 905 | clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
863 | 906 | ||
864 | /* Receive data. If we haven't got the record length yet, get | ||
865 | * the next four bytes. Otherwise try to gobble up as much as | ||
866 | * possible up to the complete record length. | ||
867 | */ | ||
868 | if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { | 907 | if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { |
869 | int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; | 908 | int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; |
870 | struct kvec iov; | 909 | struct kvec iov; |
@@ -879,7 +918,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
879 | dprintk("svc: short recvfrom while reading record " | 918 | dprintk("svc: short recvfrom while reading record " |
880 | "length (%d of %d)\n", len, want); | 919 | "length (%d of %d)\n", len, want); |
881 | svc_xprt_received(&svsk->sk_xprt); | 920 | svc_xprt_received(&svsk->sk_xprt); |
882 | return -EAGAIN; /* record header not complete */ | 921 | goto err_again; /* record header not complete */ |
883 | } | 922 | } |
884 | 923 | ||
885 | svsk->sk_reclen = ntohl(svsk->sk_reclen); | 924 | svsk->sk_reclen = ntohl(svsk->sk_reclen); |
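The four bytes converted here are the RPC-over-TCP record marker: a big-endian word whose top bit flags the last fragment of a record and whose low 31 bits carry the fragment length, which is why the masking below uses RPC_FRAGMENT_SIZE_MASK. A sketch of the decoding, given the raw header in an assumed local fraghdr:

	u32 marker = ntohl(fraghdr);				/* to host order */
	int last   = !!(marker & RPC_LAST_STREAM_FRAGMENT);	/* final fragment? */
	u32 length = marker & RPC_FRAGMENT_SIZE_MASK;		/* payload bytes */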
@@ -894,6 +933,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
894 | "per record not supported\n"); | 933 | "per record not supported\n"); |
895 | goto err_delete; | 934 | goto err_delete; |
896 | } | 935 | } |
936 | |||
897 | svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; | 937 | svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; |
898 | dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); | 938 | dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); |
899 | if (svsk->sk_reclen > serv->sv_max_mesg) { | 939 | if (svsk->sk_reclen > serv->sv_max_mesg) { |
@@ -914,17 +954,121 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
914 | dprintk("svc: incomplete TCP record (%d of %d)\n", | 954 | dprintk("svc: incomplete TCP record (%d of %d)\n", |
915 | len, svsk->sk_reclen); | 955 | len, svsk->sk_reclen); |
916 | svc_xprt_received(&svsk->sk_xprt); | 956 | svc_xprt_received(&svsk->sk_xprt); |
917 | return -EAGAIN; /* record not complete */ | 957 | goto err_again; /* record not complete */ |
918 | } | 958 | } |
919 | len = svsk->sk_reclen; | 959 | len = svsk->sk_reclen; |
920 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 960 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
921 | 961 | ||
962 | return len; | ||
963 | error: | ||
964 | if (len == -EAGAIN) { | ||
965 | dprintk("RPC: TCP recv_record got EAGAIN\n"); | ||
966 | svc_xprt_received(&svsk->sk_xprt); | ||
967 | } | ||
968 | return len; | ||
969 | err_delete: | ||
970 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | ||
971 | err_again: | ||
972 | return -EAGAIN; | ||
973 | } | ||
974 | |||
975 | static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, | ||
976 | struct rpc_rqst **reqpp, struct kvec *vec) | ||
977 | { | ||
978 | struct rpc_rqst *req = NULL; | ||
979 | u32 *p; | ||
980 | u32 xid; | ||
981 | u32 calldir; | ||
982 | int len; | ||
983 | |||
984 | len = svc_recvfrom(rqstp, vec, 1, 8); | ||
985 | if (len < 0) | ||
986 | goto error; | ||
987 | |||
988 | p = (u32 *)rqstp->rq_arg.head[0].iov_base; | ||
989 | xid = *p++; | ||
990 | calldir = *p; | ||
991 | |||
992 | if (calldir == 0) { | ||
993 | /* REQUEST is the most common case */ | ||
994 | vec[0] = rqstp->rq_arg.head[0]; | ||
995 | } else { | ||
996 | /* REPLY */ | ||
997 | if (svsk->sk_bc_xprt) | ||
998 | req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid); | ||
999 | |||
1000 | if (!req) { | ||
1001 | printk(KERN_NOTICE | ||
1002 | "%s: Got unrecognized reply: " | ||
1003 | "calldir 0x%x sk_bc_xprt %p xid %08x\n", | ||
1004 | __func__, ntohl(calldir), | ||
1005 | svsk->sk_bc_xprt, xid); | ||
1006 | vec[0] = rqstp->rq_arg.head[0]; | ||
1007 | goto out; | ||
1008 | } | ||
1009 | |||
1010 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, | ||
1011 | sizeof(struct xdr_buf)); | ||
1012 | /* copy the xid and call direction */ | ||
1013 | memcpy(req->rq_private_buf.head[0].iov_base, | ||
1014 | rqstp->rq_arg.head[0].iov_base, 8); | ||
1015 | vec[0] = req->rq_private_buf.head[0]; | ||
1016 | } | ||
1017 | out: | ||
1018 | vec[0].iov_base += 8; | ||
1019 | vec[0].iov_len -= 8; | ||
1020 | len = svsk->sk_reclen - 8; | ||
1021 | error: | ||
1022 | *reqpp = req; | ||
1023 | return len; | ||
1024 | } | ||
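svc_process_calldir() relies on every record starting with the xid followed by the call direction; that is all that is needed to tell a forward CALL from a reply to a backchannel callback. An equivalent stand-alone sketch (illustrative, not part of the patch):

	static int example_is_bc_reply(struct kvec *head)
	{
		__be32 *p = head->iov_base;

		if (head->iov_len < 8)
			return 0;		/* malformed; svc_process() will reject it */
		return p[1] == htonl(RPC_REPLY);/* p[0] is the xid, p[1] the direction */
	}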
1025 | |||
1026 | /* | ||
1027 | * Receive data from a TCP socket. | ||
1028 | */ | ||
1029 | static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | ||
1030 | { | ||
1031 | struct svc_sock *svsk = | ||
1032 | container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); | ||
1033 | struct svc_serv *serv = svsk->sk_xprt.xpt_server; | ||
1034 | int len; | ||
1035 | struct kvec *vec; | ||
1036 | int pnum, vlen; | ||
1037 | struct rpc_rqst *req = NULL; | ||
1038 | |||
1039 | dprintk("svc: tcp_recv %p data %d conn %d close %d\n", | ||
1040 | svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), | ||
1041 | test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), | ||
1042 | test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); | ||
1043 | |||
1044 | len = svc_tcp_recv_record(svsk, rqstp); | ||
1045 | if (len < 0) | ||
1046 | goto error; | ||
1047 | |||
922 | vec = rqstp->rq_vec; | 1048 | vec = rqstp->rq_vec; |
923 | vec[0] = rqstp->rq_arg.head[0]; | 1049 | vec[0] = rqstp->rq_arg.head[0]; |
924 | vlen = PAGE_SIZE; | 1050 | vlen = PAGE_SIZE; |
1051 | |||
1052 | /* | ||
1053 | * We have enough data for the whole tcp record. Let's try and read the | ||
1054 | * first 8 bytes to get the xid and the call direction. We can use this | ||
1055 | * to figure out if this is a call or a reply to a callback. If | ||
1056 | * sk_reclen is < 8 (xid and calldir), then this is a malformed packet. | ||
1057 | * In that case, don't bother with the calldir and just read the data. | ||
1058 | * It will be rejected in svc_process. | ||
1059 | */ | ||
1060 | if (len >= 8) { | ||
1061 | len = svc_process_calldir(svsk, rqstp, &req, vec); | ||
1062 | if (len < 0) | ||
1063 | goto err_again; | ||
1064 | vlen -= 8; | ||
1065 | } | ||
1066 | |||
925 | pnum = 1; | 1067 | pnum = 1; |
926 | while (vlen < len) { | 1068 | while (vlen < len) { |
927 | vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]); | 1069 | vec[pnum].iov_base = (req) ? |
1070 | page_address(req->rq_private_buf.pages[pnum - 1]) : | ||
1071 | page_address(rqstp->rq_pages[pnum]); | ||
928 | vec[pnum].iov_len = PAGE_SIZE; | 1072 | vec[pnum].iov_len = PAGE_SIZE; |
929 | pnum++; | 1073 | pnum++; |
930 | vlen += PAGE_SIZE; | 1074 | vlen += PAGE_SIZE; |
@@ -934,8 +1078,18 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
934 | /* Now receive data */ | 1078 | /* Now receive data */ |
935 | len = svc_recvfrom(rqstp, vec, pnum, len); | 1079 | len = svc_recvfrom(rqstp, vec, pnum, len); |
936 | if (len < 0) | 1080 | if (len < 0) |
937 | goto error; | 1081 | goto err_again; |
938 | 1082 | ||
1083 | /* | ||
1084 | * Account for the 8 bytes we read earlier | ||
1085 | */ | ||
1086 | len += 8; | ||
1087 | |||
1088 | if (req) { | ||
1089 | xprt_complete_rqst(req->rq_task, len); | ||
1090 | len = 0; | ||
1091 | goto out; | ||
1092 | } | ||
939 | dprintk("svc: TCP complete record (%d bytes)\n", len); | 1093 | dprintk("svc: TCP complete record (%d bytes)\n", len); |
940 | rqstp->rq_arg.len = len; | 1094 | rqstp->rq_arg.len = len; |
941 | rqstp->rq_arg.page_base = 0; | 1095 | rqstp->rq_arg.page_base = 0; |
@@ -949,6 +1103,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
949 | rqstp->rq_xprt_ctxt = NULL; | 1103 | rqstp->rq_xprt_ctxt = NULL; |
950 | rqstp->rq_prot = IPPROTO_TCP; | 1104 | rqstp->rq_prot = IPPROTO_TCP; |
951 | 1105 | ||
1106 | out: | ||
952 | /* Reset TCP read info */ | 1107 | /* Reset TCP read info */ |
953 | svsk->sk_reclen = 0; | 1108 | svsk->sk_reclen = 0; |
954 | svsk->sk_tcplen = 0; | 1109 | svsk->sk_tcplen = 0; |
@@ -960,21 +1115,19 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) | |||
960 | 1115 | ||
961 | return len; | 1116 | return len; |
962 | 1117 | ||
963 | err_delete: | 1118 | err_again: |
964 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | ||
965 | return -EAGAIN; | ||
966 | |||
967 | error: | ||
968 | if (len == -EAGAIN) { | 1119 | if (len == -EAGAIN) { |
969 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | 1120 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); |
970 | svc_xprt_received(&svsk->sk_xprt); | 1121 | svc_xprt_received(&svsk->sk_xprt); |
971 | } else { | 1122 | return len; |
1123 | } | ||
1124 | error: | ||
1125 | if (len != -EAGAIN) { | ||
972 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", | 1126 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", |
973 | svsk->sk_xprt.xpt_server->sv_name, -len); | 1127 | svsk->sk_xprt.xpt_server->sv_name, -len); |
974 | goto err_delete; | 1128 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
975 | } | 1129 | } |
976 | 1130 | return -EAGAIN; | |
977 | return len; | ||
978 | } | 1131 | } |
979 | 1132 | ||
980 | /* | 1133 | /* |
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index 5231f7aaac0e..42f9748ae093 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c | |||
@@ -56,7 +56,7 @@ rpc_unregister_sysctl(void) | |||
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | static int proc_do_xprt(ctl_table *table, int write, struct file *file, | 59 | static int proc_do_xprt(ctl_table *table, int write, |
60 | void __user *buffer, size_t *lenp, loff_t *ppos) | 60 | void __user *buffer, size_t *lenp, loff_t *ppos) |
61 | { | 61 | { |
62 | char tmpbuf[256]; | 62 | char tmpbuf[256]; |
@@ -71,7 +71,7 @@ static int proc_do_xprt(ctl_table *table, int write, struct file *file, | |||
71 | } | 71 | } |
72 | 72 | ||
73 | static int | 73 | static int |
74 | proc_dodebug(ctl_table *table, int write, struct file *file, | 74 | proc_dodebug(ctl_table *table, int write, |
75 | void __user *buffer, size_t *lenp, loff_t *ppos) | 75 | void __user *buffer, size_t *lenp, loff_t *ppos) |
76 | { | 76 | { |
77 | char tmpbuf[20], c, *s; | 77 | char tmpbuf[20], c, *s; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f412a852bc73..fd46d42afa89 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -832,6 +832,11 @@ static void xprt_timer(struct rpc_task *task) | |||
832 | spin_unlock_bh(&xprt->transport_lock); | 832 | spin_unlock_bh(&xprt->transport_lock); |
833 | } | 833 | } |
834 | 834 | ||
835 | static inline int xprt_has_timer(struct rpc_xprt *xprt) | ||
836 | { | ||
837 | return xprt->idle_timeout != 0; | ||
838 | } | ||
839 | |||
835 | /** | 840 | /** |
836 | * xprt_prepare_transmit - reserve the transport before sending a request | 841 | * xprt_prepare_transmit - reserve the transport before sending a request |
837 | * @task: RPC task about to send a request | 842 | * @task: RPC task about to send a request |
@@ -1013,7 +1018,7 @@ void xprt_release(struct rpc_task *task) | |||
1013 | if (!list_empty(&req->rq_list)) | 1018 | if (!list_empty(&req->rq_list)) |
1014 | list_del(&req->rq_list); | 1019 | list_del(&req->rq_list); |
1015 | xprt->last_used = jiffies; | 1020 | xprt->last_used = jiffies; |
1016 | if (list_empty(&xprt->recv)) | 1021 | if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) |
1017 | mod_timer(&xprt->timer, | 1022 | mod_timer(&xprt->timer, |
1018 | xprt->last_used + xprt->idle_timeout); | 1023 | xprt->last_used + xprt->idle_timeout); |
1019 | spin_unlock_bh(&xprt->transport_lock); | 1024 | spin_unlock_bh(&xprt->transport_lock); |
@@ -1082,8 +1087,11 @@ found: | |||
1082 | #endif /* CONFIG_NFS_V4_1 */ | 1087 | #endif /* CONFIG_NFS_V4_1 */ |
1083 | 1088 | ||
1084 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); | 1089 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
1085 | setup_timer(&xprt->timer, xprt_init_autodisconnect, | 1090 | if (xprt_has_timer(xprt)) |
1086 | (unsigned long)xprt); | 1091 | setup_timer(&xprt->timer, xprt_init_autodisconnect, |
1092 | (unsigned long)xprt); | ||
1093 | else | ||
1094 | init_timer(&xprt->timer); | ||
1087 | xprt->last_used = jiffies; | 1095 | xprt->last_used = jiffies; |
1088 | xprt->cwnd = RPC_INITCWND; | 1096 | xprt->cwnd = RPC_INITCWND; |
1089 | xprt->bind_index = 0; | 1097 | xprt->bind_index = 0; |
@@ -1102,7 +1110,6 @@ found: | |||
1102 | 1110 | ||
1103 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1111 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
1104 | xprt->max_reqs); | 1112 | xprt->max_reqs); |
1105 | |||
1106 | return xprt; | 1113 | return xprt; |
1107 | } | 1114 | } |
1108 | 1115 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index 87101177825b..35fb68b9c8ec 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c | |||
@@ -80,7 +80,7 @@ struct kmem_cache *svc_rdma_ctxt_cachep; | |||
80 | * current value. | 80 | * current value. |
81 | */ | 81 | */ |
82 | static int read_reset_stat(ctl_table *table, int write, | 82 | static int read_reset_stat(ctl_table *table, int write, |
83 | struct file *filp, void __user *buffer, size_t *lenp, | 83 | void __user *buffer, size_t *lenp, |
84 | loff_t *ppos) | 84 | loff_t *ppos) |
85 | { | 85 | { |
86 | atomic_t *stat = (atomic_t *)table->data; | 86 | atomic_t *stat = (atomic_t *)table->data; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 5151f9f6c573..0cf5e8c27a10 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -730,12 +730,12 @@ static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt) | |||
730 | goto err; | 730 | goto err; |
731 | 731 | ||
732 | mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); | 732 | mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); |
733 | if (!mr) | 733 | if (IS_ERR(mr)) |
734 | goto err_free_frmr; | 734 | goto err_free_frmr; |
735 | 735 | ||
736 | pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, | 736 | pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, |
737 | RPCSVC_MAXPAGES); | 737 | RPCSVC_MAXPAGES); |
738 | if (!pl) | 738 | if (IS_ERR(pl)) |
739 | goto err_free_mr; | 739 | goto err_free_mr; |
740 | 740 | ||
741 | frmr->mr = mr; | 741 | frmr->mr = mr; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 62438f3a914d..37c5475ba258 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/tcp.h> | 32 | #include <linux/tcp.h> |
33 | #include <linux/sunrpc/clnt.h> | 33 | #include <linux/sunrpc/clnt.h> |
34 | #include <linux/sunrpc/sched.h> | 34 | #include <linux/sunrpc/sched.h> |
35 | #include <linux/sunrpc/svcsock.h> | ||
35 | #include <linux/sunrpc/xprtsock.h> | 36 | #include <linux/sunrpc/xprtsock.h> |
36 | #include <linux/file.h> | 37 | #include <linux/file.h> |
37 | #ifdef CONFIG_NFS_V4_1 | 38 | #ifdef CONFIG_NFS_V4_1 |
@@ -43,6 +44,7 @@ | |||
43 | #include <net/udp.h> | 44 | #include <net/udp.h> |
44 | #include <net/tcp.h> | 45 | #include <net/tcp.h> |
45 | 46 | ||
47 | #include "sunrpc.h" | ||
46 | /* | 48 | /* |
47 | * xprtsock tunables | 49 | * xprtsock tunables |
48 | */ | 50 | */ |
@@ -771,6 +773,7 @@ static void xs_close(struct rpc_xprt *xprt) | |||
771 | dprintk("RPC: xs_close xprt %p\n", xprt); | 773 | dprintk("RPC: xs_close xprt %p\n", xprt); |
772 | 774 | ||
773 | xs_reset_transport(transport); | 775 | xs_reset_transport(transport); |
776 | xprt->reestablish_timeout = 0; | ||
774 | 777 | ||
775 | smp_mb__before_clear_bit(); | 778 | smp_mb__before_clear_bit(); |
776 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); | 779 | clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); |
@@ -1262,6 +1265,12 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes) | |||
1262 | if (xprt->shutdown) | 1265 | if (xprt->shutdown) |
1263 | goto out; | 1266 | goto out; |
1264 | 1267 | ||
1268 | /* Any data means we had a useful conversation, so | ||
1269 | * we don't need to delay the next reconnect | ||
1270 | */ | ||
1271 | if (xprt->reestablish_timeout) | ||
1272 | xprt->reestablish_timeout = 0; | ||
1273 | |||
1265 | /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ | 1274 | /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ |
1266 | rd_desc.arg.data = xprt; | 1275 | rd_desc.arg.data = xprt; |
1267 | do { | 1276 | do { |
@@ -2032,6 +2041,8 @@ static void xs_connect(struct rpc_task *task) | |||
2032 | &transport->connect_worker, | 2041 | &transport->connect_worker, |
2033 | xprt->reestablish_timeout); | 2042 | xprt->reestablish_timeout); |
2034 | xprt->reestablish_timeout <<= 1; | 2043 | xprt->reestablish_timeout <<= 1; |
2044 | if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) | ||
2045 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | ||
2035 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 2046 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
2036 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 2047 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
2037 | } else { | 2048 | } else { |
@@ -2098,6 +2109,134 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
2098 | xprt->stat.bklog_u); | 2109 | xprt->stat.bklog_u); |
2099 | } | 2110 | } |
2100 | 2111 | ||
2112 | /* | ||
2113 | * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason | ||
2114 | * we allocate pages instead of doing a kmalloc like rpc_malloc is because we want | ||
2115 | * to use the server side send routines. | ||
2116 | */ | ||
2117 | void *bc_malloc(struct rpc_task *task, size_t size) | ||
2118 | { | ||
2119 | struct page *page; | ||
2120 | struct rpc_buffer *buf; | ||
2121 | |||
2122 | BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer)); | ||
2123 | page = alloc_page(GFP_KERNEL); | ||
2124 | |||
2125 | if (!page) | ||
2126 | return NULL; | ||
2127 | |||
2128 | buf = page_address(page); | ||
2129 | buf->len = PAGE_SIZE; | ||
2130 | |||
2131 | return buf->data; | ||
2132 | } | ||
2133 | |||
2134 | /* | ||
2135 | * Free the space allocated in the bc_malloc routine | ||
2136 | */ | ||
2137 | void bc_free(void *buffer) | ||
2138 | { | ||
2139 | struct rpc_buffer *buf; | ||
2140 | |||
2141 | if (!buffer) | ||
2142 | return; | ||
2143 | |||
2144 | buf = container_of(buffer, struct rpc_buffer, data); | ||
2145 | free_page((unsigned long)buf); | ||
2146 | } | ||
2147 | |||
2148 | /* | ||
2149 | * Use the svc_sock to send the callback. Must be called with the xpt_mutex | ||
2150 | * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request. | ||
2151 | */ | ||
2152 | static int bc_sendto(struct rpc_rqst *req) | ||
2153 | { | ||
2154 | int len; | ||
2155 | struct xdr_buf *xbufp = &req->rq_snd_buf; | ||
2156 | struct rpc_xprt *xprt = req->rq_xprt; | ||
2157 | struct sock_xprt *transport = | ||
2158 | container_of(xprt, struct sock_xprt, xprt); | ||
2159 | struct socket *sock = transport->sock; | ||
2160 | unsigned long headoff; | ||
2161 | unsigned long tailoff; | ||
2162 | |||
2163 | /* | ||
2164 | * Set up the rpc header and record marker stuff | ||
2165 | */ | ||
2166 | xs_encode_tcp_record_marker(xbufp); | ||
2167 | |||
2168 | tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; | ||
2169 | headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; | ||
2170 | len = svc_send_common(sock, xbufp, | ||
2171 | virt_to_page(xbufp->head[0].iov_base), headoff, | ||
2172 | virt_to_page(xbufp->tail[0].iov_base), tailoff); | ||
2173 | |||
2174 | if (len != xbufp->len) { | ||
2175 | printk(KERN_NOTICE "Error sending entire callback!\n"); | ||
2176 | len = -EAGAIN; | ||
2177 | } | ||
2178 | |||
2179 | return len; | ||
2180 | } | ||
2181 | |||
2182 | /* | ||
2183 | * The send routine. Borrows from svc_send | ||
2184 | */ | ||
2185 | static int bc_send_request(struct rpc_task *task) | ||
2186 | { | ||
2187 | struct rpc_rqst *req = task->tk_rqstp; | ||
2188 | struct svc_xprt *xprt; | ||
2189 | struct svc_sock *svsk; | ||
2190 | int len; | ||
2191 | |||
2192 | dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); | ||
2193 | /* | ||
2194 | * Get the server socket associated with this callback xprt | ||
2195 | */ | ||
2196 | xprt = req->rq_xprt->bc_xprt; | ||
2197 | svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
2198 | |||
2199 | /* | ||
2200 | * Grab the mutex to serialize data as the connection is shared | ||
2201 | * with the fore channel | ||
2202 | */ | ||
2203 | if (!mutex_trylock(&xprt->xpt_mutex)) { | ||
2204 | rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL); | ||
2205 | if (!mutex_trylock(&xprt->xpt_mutex)) | ||
2206 | return -EAGAIN; | ||
2207 | rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task); | ||
2208 | } | ||
2209 | if (test_bit(XPT_DEAD, &xprt->xpt_flags)) | ||
2210 | len = -ENOTCONN; | ||
2211 | else | ||
2212 | len = bc_sendto(req); | ||
2213 | mutex_unlock(&xprt->xpt_mutex); | ||
2214 | |||
2215 | if (len > 0) | ||
2216 | len = 0; | ||
2217 | |||
2218 | return len; | ||
2219 | } | ||
2220 | |||
2221 | /* | ||
2222 | * The close routine. Since this is client initiated, we do nothing | ||
2223 | */ | ||
2224 | |||
2225 | static void bc_close(struct rpc_xprt *xprt) | ||
2226 | { | ||
2227 | return; | ||
2228 | } | ||
2229 | |||
2230 | /* | ||
2231 | * The xprt destroy routine. Again, because this connection is client | ||
2232 | * initiated, we do nothing | ||
2233 | */ | ||
2234 | |||
2235 | static void bc_destroy(struct rpc_xprt *xprt) | ||
2236 | { | ||
2237 | return; | ||
2238 | } | ||
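The backchannel sender avoids blocking on the shared connection's mutex in the send path; instead it pairs mutex_trylock() with the xpt_bc_pending wait queue that svc_xprt_init() now sets up and that svc_send() wakes after releasing xpt_mutex (both hunks earlier in this diff). The general shape of the pattern (illustrative sketch, names taken from the patch):

	static int example_try_send(struct rpc_task *task, struct svc_xprt *xprt)
	{
		if (!mutex_trylock(&xprt->xpt_mutex)) {
			/* park on the queue; svc_send() will rpc_wake_up() us */
			rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
			if (!mutex_trylock(&xprt->xpt_mutex))
				return -EAGAIN;		/* stay queued, retry later */
			/* got the mutex after all: leave the wait queue */
			rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
		}
		/* ... transmit on the shared socket under xpt_mutex ... */
		mutex_unlock(&xprt->xpt_mutex);
		return 0;
	}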
2239 | |||
2101 | static struct rpc_xprt_ops xs_udp_ops = { | 2240 | static struct rpc_xprt_ops xs_udp_ops = { |
2102 | .set_buffer_size = xs_udp_set_buffer_size, | 2241 | .set_buffer_size = xs_udp_set_buffer_size, |
2103 | .reserve_xprt = xprt_reserve_xprt_cong, | 2242 | .reserve_xprt = xprt_reserve_xprt_cong, |
@@ -2134,6 +2273,22 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2134 | .print_stats = xs_tcp_print_stats, | 2273 | .print_stats = xs_tcp_print_stats, |
2135 | }; | 2274 | }; |
2136 | 2275 | ||
2276 | /* | ||
2277 | * The rpc_xprt_ops for the server backchannel | ||
2278 | */ | ||
2279 | |||
2280 | static struct rpc_xprt_ops bc_tcp_ops = { | ||
2281 | .reserve_xprt = xprt_reserve_xprt, | ||
2282 | .release_xprt = xprt_release_xprt, | ||
2283 | .buf_alloc = bc_malloc, | ||
2284 | .buf_free = bc_free, | ||
2285 | .send_request = bc_send_request, | ||
2286 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | ||
2287 | .close = bc_close, | ||
2288 | .destroy = bc_destroy, | ||
2289 | .print_stats = xs_tcp_print_stats, | ||
2290 | }; | ||
2291 | |||
2137 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, | 2292 | static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, |
2138 | unsigned int slot_table_size) | 2293 | unsigned int slot_table_size) |
2139 | { | 2294 | { |
@@ -2322,11 +2477,93 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2322 | return ERR_PTR(-EINVAL); | 2477 | return ERR_PTR(-EINVAL); |
2323 | } | 2478 | } |
2324 | 2479 | ||
2480 | /** | ||
2481 | * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket | ||
2482 | * @args: rpc transport creation arguments | ||
2483 | * | ||
2484 | */ | ||
2485 | static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | ||
2486 | { | ||
2487 | struct sockaddr *addr = args->dstaddr; | ||
2488 | struct rpc_xprt *xprt; | ||
2489 | struct sock_xprt *transport; | ||
2490 | struct svc_sock *bc_sock; | ||
2491 | |||
2492 | if (!args->bc_xprt) | ||
2493 | return ERR_PTR(-EINVAL); | ||
2494 | |||
2495 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | ||
2496 | if (IS_ERR(xprt)) | ||
2497 | return xprt; | ||
2498 | transport = container_of(xprt, struct sock_xprt, xprt); | ||
2499 | |||
2500 | xprt->prot = IPPROTO_TCP; | ||
2501 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | ||
2502 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | ||
2503 | xprt->timeout = &xs_tcp_default_timeout; | ||
2504 | |||
2505 | /* backchannel */ | ||
2506 | xprt_set_bound(xprt); | ||
2507 | xprt->bind_timeout = 0; | ||
2508 | xprt->connect_timeout = 0; | ||
2509 | xprt->reestablish_timeout = 0; | ||
2510 | xprt->idle_timeout = 0; | ||
2511 | |||
2512 | /* | ||
2513 | * The backchannel uses the same socket connection as the | ||
2514 | * forechannel | ||
2515 | */ | ||
2516 | xprt->bc_xprt = args->bc_xprt; | ||
2517 | bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); | ||
2518 | bc_sock->sk_bc_xprt = xprt; | ||
2519 | transport->sock = bc_sock->sk_sock; | ||
2520 | transport->inet = bc_sock->sk_sk; | ||
2521 | |||
2522 | xprt->ops = &bc_tcp_ops; | ||
2523 | |||
2524 | switch (addr->sa_family) { | ||
2525 | case AF_INET: | ||
2526 | xs_format_peer_addresses(xprt, "tcp", | ||
2527 | RPCBIND_NETID_TCP); | ||
2528 | break; | ||
2529 | case AF_INET6: | ||
2530 | xs_format_peer_addresses(xprt, "tcp", | ||
2531 | RPCBIND_NETID_TCP6); | ||
2532 | break; | ||
2533 | default: | ||
2534 | kfree(xprt); | ||
2535 | return ERR_PTR(-EAFNOSUPPORT); | ||
2536 | } | ||
2537 | |||
2538 | if (xprt_bound(xprt)) | ||
2539 | dprintk("RPC: set up xprt to %s (port %s) via %s\n", | ||
2540 | xprt->address_strings[RPC_DISPLAY_ADDR], | ||
2541 | xprt->address_strings[RPC_DISPLAY_PORT], | ||
2542 | xprt->address_strings[RPC_DISPLAY_PROTO]); | ||
2543 | else | ||
2544 | dprintk("RPC: set up xprt to %s (autobind) via %s\n", | ||
2545 | xprt->address_strings[RPC_DISPLAY_ADDR], | ||
2546 | xprt->address_strings[RPC_DISPLAY_PROTO]); | ||
2547 | |||
2548 | /* | ||
2549 | * Since we don't want connections for the backchannel, we set | ||
2550 | * the xprt status to connected | ||
2551 | */ | ||
2552 | xprt_set_connected(xprt); | ||
2553 | |||
2554 | |||
2555 | if (try_module_get(THIS_MODULE)) | ||
2556 | return xprt; | ||
2557 | kfree(xprt->slot); | ||
2558 | kfree(xprt); | ||
2559 | return ERR_PTR(-EINVAL); | ||
2560 | } | ||
2561 | |||
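Setup of a backchannel transport is driven through rpc_create(), which now forwards args->bc_xprt (see the clnt.c hunk near the top of this diff) so that xs_setup_bc_tcp() can piggy-back on the forechannel's svc_sock. A hypothetical caller sketch (not part of this patch; the program, timeout and address values are illustrative):

	struct rpc_create_args args = {
		.protocol	= XPRT_TRANSPORT_BC_TCP,
		.address	= (struct sockaddr *)&clientaddr,
		.addrsize	= sizeof(clientaddr),
		.timeout	= &bc_timeout,
		.servername	= "nfs41-callback",
		.program	= &cb_program,		/* assumed callback RPC program */
		.version	= 0,
		.authflavor	= RPC_AUTH_UNIX,
		.bc_xprt	= fore_channel_xprt,	/* svc_xprt of the shared connection */
	};
	struct rpc_clnt *clnt = rpc_create(&args);

	if (IS_ERR(clnt))
		return PTR_ERR(clnt);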
2325 | static struct xprt_class xs_udp_transport = { | 2562 | static struct xprt_class xs_udp_transport = { |
2326 | .list = LIST_HEAD_INIT(xs_udp_transport.list), | 2563 | .list = LIST_HEAD_INIT(xs_udp_transport.list), |
2327 | .name = "udp", | 2564 | .name = "udp", |
2328 | .owner = THIS_MODULE, | 2565 | .owner = THIS_MODULE, |
2329 | .ident = IPPROTO_UDP, | 2566 | .ident = XPRT_TRANSPORT_UDP, |
2330 | .setup = xs_setup_udp, | 2567 | .setup = xs_setup_udp, |
2331 | }; | 2568 | }; |
2332 | 2569 | ||
@@ -2334,10 +2571,18 @@ static struct xprt_class xs_tcp_transport = { | |||
2334 | .list = LIST_HEAD_INIT(xs_tcp_transport.list), | 2571 | .list = LIST_HEAD_INIT(xs_tcp_transport.list), |
2335 | .name = "tcp", | 2572 | .name = "tcp", |
2336 | .owner = THIS_MODULE, | 2573 | .owner = THIS_MODULE, |
2337 | .ident = IPPROTO_TCP, | 2574 | .ident = XPRT_TRANSPORT_TCP, |
2338 | .setup = xs_setup_tcp, | 2575 | .setup = xs_setup_tcp, |
2339 | }; | 2576 | }; |
2340 | 2577 | ||
2578 | static struct xprt_class xs_bc_tcp_transport = { | ||
2579 | .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), | ||
2580 | .name = "tcp NFSv4.1 backchannel", | ||
2581 | .owner = THIS_MODULE, | ||
2582 | .ident = XPRT_TRANSPORT_BC_TCP, | ||
2583 | .setup = xs_setup_bc_tcp, | ||
2584 | }; | ||
2585 | |||
2341 | /** | 2586 | /** |
2342 | * init_socket_xprt - set up xprtsock's sysctls, register with RPC client | 2587 | * init_socket_xprt - set up xprtsock's sysctls, register with RPC client |
2343 | * | 2588 | * |
@@ -2351,6 +2596,7 @@ int init_socket_xprt(void) | |||
2351 | 2596 | ||
2352 | xprt_register_transport(&xs_udp_transport); | 2597 | xprt_register_transport(&xs_udp_transport); |
2353 | xprt_register_transport(&xs_tcp_transport); | 2598 | xprt_register_transport(&xs_tcp_transport); |
2599 | xprt_register_transport(&xs_bc_tcp_transport); | ||
2354 | 2600 | ||
2355 | return 0; | 2601 | return 0; |
2356 | } | 2602 | } |
@@ -2370,6 +2616,7 @@ void cleanup_socket_xprt(void) | |||
2370 | 2616 | ||
2371 | xprt_unregister_transport(&xs_udp_transport); | 2617 | xprt_unregister_transport(&xs_udp_transport); |
2372 | xprt_unregister_transport(&xs_tcp_transport); | 2618 | xprt_unregister_transport(&xs_tcp_transport); |
2619 | xprt_unregister_transport(&xs_bc_tcp_transport); | ||
2373 | } | 2620 | } |
2374 | 2621 | ||
2375 | static int param_set_uint_minmax(const char *val, struct kernel_param *kp, | 2622 | static int param_set_uint_minmax(const char *val, struct kernel_param *kp, |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 429dd06a4ecc..561a45cf2a6a 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -834,7 +834,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev, | |||
834 | return 0; | 834 | return 0; |
835 | } | 835 | } |
836 | 836 | ||
837 | return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);; | 837 | return rdev->ops->set_tx_power(wdev->wiphy, type, dbm); |
838 | } | 838 | } |
839 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); | 839 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); |
840 | 840 | ||