-rw-r--r--  arch/x86/Kconfig.x86_64             1
-rw-r--r--  drivers/dma/Kconfig                 1
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c     55
-rw-r--r--  drivers/pci/intel-iommu.c           4
-rw-r--r--  drivers/rtc/rtc-s3c.c               2
-rw-r--r--  drivers/serial/serial_txx9.c       10
-rw-r--r--  drivers/video/Kconfig               4
-rw-r--r--  drivers/video/aty/radeon_pm.c       4
-rw-r--r--  drivers/video/omap/Kconfig          2
-rw-r--r--  fs/ufs/super.c                     15
-rw-r--r--  include/linux/Kbuild                1
-rw-r--r--  include/linux/ipmi_smi.h            2
-rw-r--r--  kernel/signal.c                     2
-rw-r--r--  mm/shmem.c                         15
-rw-r--r--  mm/sparse-vmemmap.c                 1
-rw-r--r--  net/core/dev.c                     10
-rw-r--r--  net/core/netpoll.c                 37
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c          5
-rw-r--r--  net/ipv4/proc.c                     2
-rw-r--r--  net/ipv4/tcp.c                      2
-rw-r--r--  net/ipv4/tcp_ipv4.c                10
-rw-r--r--  net/ipv4/tcp_vegas.c               37
-rw-r--r--  net/ipv6/ndisc.c                    2
-rw-r--r--  net/ipv6/tcp_ipv6.c                10
-rw-r--r--  net/netfilter/nf_conntrack_core.c   2
-rw-r--r--  net/socket.c                        5
-rw-r--r--  net/sunrpc/sysctl.c                 3
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c     10
28 files changed, 157 insertions, 97 deletions
diff --git a/arch/x86/Kconfig.x86_64 b/arch/x86/Kconfig.x86_64
index b45855c368a9..cc468ea61240 100644
--- a/arch/x86/Kconfig.x86_64
+++ b/arch/x86/Kconfig.x86_64
@@ -749,7 +749,6 @@ config PCI_DOMAINS
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
 	depends on PCI_MSI && ACPI && EXPERIMENTAL
-	default y
 	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9c91b0fd134f..6a7d25fc2470 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -43,7 +43,6 @@ comment "DMA Clients"
 config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
-	default y
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 5ea877221f46..64eb57893602 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -142,9 +142,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
	 * To date internally, just copy this out to the user.
	 */
	case LMCIOCGINFO: /*fold01*/
-		if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof (lmc_ctl_t)))
-			return -EFAULT;
-		ret = 0;
+		if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
+			ret = -EFAULT;
+		else
+			ret = 0;
 		break;
 
 	case LMCIOCSINFO: /*fold01*/
@@ -159,8 +160,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 			break;
 		}
 
-		if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
-			return -EFAULT;
+		if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+			ret = -EFAULT;
+			break;
+		}
 
 		sc->lmc_media->set_status (sc, &ctl);
 
@@ -190,8 +193,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 			break;
 		}
 
-		if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))
-			return -EFAULT;
+		if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) {
+			ret = -EFAULT;
+			break;
+		}
 
 
 		if (new_type == old_type)
@@ -229,9 +234,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 		sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 
 		if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
-				 sizeof (struct lmc_xinfo)))
-			return -EFAULT;
-		ret = 0;
+				 sizeof(struct lmc_xinfo))) {
+			ret = -EFAULT;
+		else
+			ret = 0;
 
 		break;
 
@@ -262,9 +268,9 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 
 		if (copy_to_user(ifr->ifr_data, &sc->stats,
 				 sizeof (struct lmc_statistics)))
-			return -EFAULT;
-
+			ret = -EFAULT;
+		else
 			ret = 0;
 		break;
 
 	case LMCIOCCLEARLMCSTATS: /*fold01*/
@@ -292,8 +298,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 			break;
 		}
 
-		if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
-			return -EFAULT;
+		if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+			ret = -EFAULT;
+			break;
+		}
 		sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
 		sc->ictl.circuit_type = ctl.circuit_type;
 		ret = 0;
@@ -318,12 +326,15 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 
 #ifdef DEBUG
 	case LMCIOCDUMPEVENTLOG:
-		if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof (u32)))
-			return -EFAULT;
+		if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
+			ret = -EFAULT;
+			break;
+		}
 		if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
-			return -EFAULT;
+			ret = -EFAULT;
+		else
+			ret = 0;
 
-		ret = 0;
 		break;
 #endif /* end ifdef _DBG_EVENTLOG */
 	case LMCIOCT1CONTROL: /*fold01*/
@@ -346,8 +357,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 		 */
 		netif_stop_queue(dev);
 
-		if (copy_from_user(&xc, ifr->ifr_data, sizeof (struct lmc_xilinx_control)))
-			return -EFAULT;
+		if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
+			ret = -EFAULT;
+			break;
+		}
 		switch(xc.command){
 		case lmc_xilinx_reset: /*fold02*/
 		{
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8af1d9a261e5..e079a5237c94 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -749,8 +749,8 @@ static char *fault_reason_strings[] =
 
 char *dmar_get_fault_reason(u8 fault_reason)
 {
-	if (fault_reason > MAX_FAULT_REASON_IDX)
-		return fault_reason_strings[MAX_FAULT_REASON_IDX];
+	if (fault_reason >= MAX_FAULT_REASON_IDX)
+		return fault_reason_strings[MAX_FAULT_REASON_IDX - 1];
 	else
 		return fault_reason_strings[fault_reason];
 }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 8c1012b432bb..e2041b4d0c85 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -542,8 +542,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
 
 /* RTC Power management control */
 
-static struct timespec s3c_rtc_delta;
-
 static int ticnt_save;
 
 static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 6846a6c38b6d..7ad21925869a 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -657,7 +657,15 @@ static void
 serial_txx9_pm(struct uart_port *port, unsigned int state,
 	       unsigned int oldstate)
 {
-	if (state == 0)
+	/*
+	 * If oldstate was -1 this is called from
+	 * uart_configure_port(). In this case do not initialize the
+	 * port now, because the port was already initialized (for
+	 * non-console port) or should not be initialized here (for
+	 * console port). If we initialized the port here we lose
+	 * serial console settings.
+	 */
+	if (state == 0 && oldstate != -1)
 		serial_txx9_initialize(port);
 }
 
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index fb9d8d0b2c04..61717fa1afb9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1892,9 +1892,7 @@ config FB_VIRTUAL
 
 	  If unsure, say N.
 
-if ARCH_OMAP
-	source "drivers/video/omap/Kconfig"
-endif
+source "drivers/video/omap/Kconfig"
 
 source "drivers/video/backlight/Kconfig"
 source "drivers/video/display/Kconfig"
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index be1d57bf9dc8..83ee3e75386c 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -27,8 +27,6 @@
 
 #include "ati_ids.h"
 
-static void radeon_reinitialize_M10(struct radeonfb_info *rinfo);
-
 /*
  * Workarounds for bugs in PC laptops:
  * - enable D2 sleep in some IBM Thinkpads
@@ -39,6 +37,8 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo);
  */
 
 #if defined(CONFIG_PM) && defined(CONFIG_X86)
+static void radeon_reinitialize_M10(struct radeonfb_info *rinfo);
+
 struct radeon_device_id {
 	const char *ident;			/* (arbitrary) Name */
 	const unsigned short subsystem_vendor;	/* Subsystem Vendor ID */
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index f4fcf11b290d..44408850e2eb 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -1,6 +1,6 @@
 config FB_OMAP
 	tristate "OMAP frame buffer support (EXPERIMENTAL)"
-	depends on FB
+	depends on FB && ARCH_OMAP
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 584cf12cc40f..c78c04fd993f 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -933,20 +933,19 @@ magic_found:
 		goto again;
 	}
 
-	/* Set sbi->s_flags here, used by ufs_get_fs_state() below */
-	sbi->s_flags = flags;
+	sbi->s_flags = flags;/*after that line some functions use s_flags*/
 	ufs_print_super_stuff(sb, usb1, usb2, usb3);
 
 	/*
 	 * Check, if file system was correctly unmounted.
 	 * If not, make it read only.
 	 */
-	if ((((flags & UFS_ST_MASK) == UFS_ST_44BSD) ||
+	if (((flags & UFS_ST_MASK) == UFS_ST_44BSD) ||
 	    ((flags & UFS_ST_MASK) == UFS_ST_OLD) ||
-	    ((flags & UFS_ST_MASK) == UFS_ST_SUN) ||
-	    ((flags & UFS_ST_MASK) == UFS_ST_SUNOS) ||
-	    ((flags & UFS_ST_MASK) == UFS_ST_SUNx86)) &&
-	    (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)))) {
+	    (((flags & UFS_ST_MASK) == UFS_ST_SUN ||
+	      (flags & UFS_ST_MASK) == UFS_ST_SUNOS ||
+	      (flags & UFS_ST_MASK) == UFS_ST_SUNx86) &&
+	     (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) {
 		switch(usb1->fs_clean) {
 		case UFS_FSCLEAN:
 			UFSD("fs is clean\n");
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 6a65231bc785..bd33c22315c1 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -149,6 +149,7 @@ header-y += ticable.h
 header-y += times.h
 header-y += tiocl.h
 header-y += tipc.h
+header-y += tipc_config.h
 header-y += toshiba.h
 header-y += ultrasound.h
 header-y += un.h
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 56ae438ae510..6e8cec503380 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -173,7 +173,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
 	id->firmware_revision_2 = data[3];
 	id->ipmi_version = data[4];
 	id->additional_device_support = data[5];
-	if (data_len >= 6) {
+	if (data_len >= 11) {
 		id->manufacturer_id = (data[6] | (data[7] << 8) |
 				       (data[8] << 16));
 		id->product_id = data[9] | (data[10] << 8);
diff --git a/kernel/signal.c b/kernel/signal.c
index 4537bdda1ebf..909a0cc6bc70 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-	if (!recalc_sigpending_tsk(current))
+	if (!recalc_sigpending_tsk(current) && !freezing(current))
 		clear_thread_flag(TIF_SIGPENDING);
 
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 404e53bb2127..253d205914ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -915,6 +915,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode;
 
 	BUG_ON(!PageLocked(page));
+	/*
+	 * shmem_backing_dev_info's capabilities prevent regular writeback or
+	 * sync from ever calling shmem_writepage; but a stacking filesystem
+	 * may use the ->writepage of its underlying filesystem, in which case
+	 * we want to do nothing when that underlying filesystem is tmpfs
+	 * (writing out to swap is useful as a response to memory pressure, but
+	 * of no use to stabilize the data) - just redirty the page, unlock it
+	 * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the
+	 * page_mapped check below, must be avoided unless we're in reclaim.
+	 */
+	if (!wbc->for_reclaim) {
+		set_page_dirty(page);
+		unlock_page(page);
+		return 0;
+	}
 	BUG_ON(page_mapped(page));
 
 	mapping = page->mapping;
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d3b718b0c20a..22620f6a976b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <linux/sched.h>
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index 853c8b575f1d..02e7d8377c4a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2172,7 +2172,15 @@ static void net_rx_action(struct softirq_action *h)
 
 		weight = n->weight;
 
-		work = n->poll(n, weight);
+		/* This NAPI_STATE_SCHED test is for avoiding a race
+		 * with netpoll's poll_napi(). Only the entity which
+		 * obtains the lock and sees NAPI_STATE_SCHED set will
+		 * actually make the ->poll() call. Therefore we avoid
+		 * accidently calling ->poll() when NAPI is not scheduled.
+		 */
+		work = 0;
+		if (test_bit(NAPI_STATE_SCHED, &n->state))
+			work = n->poll(n, weight);
 
 		WARN_ON_ONCE(work > weight);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index bf8d18f1b013..c499b5c69bed 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * network adapter, forcing superfluous retries and possibly timeouts.
  * Thus, we set our budget to greater than 1.
  */
+static int poll_one_napi(struct netpoll_info *npinfo,
+			 struct napi_struct *napi, int budget)
+{
+	int work;
+
+	/* net_rx_action's ->poll() invocations and our's are
+	 * synchronized by this test which is only made while
+	 * holding the napi->poll_lock.
+	 */
+	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+		return budget;
+
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
+	atomic_inc(&trapped);
+
+	work = napi->poll(napi, budget);
+
+	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
+	return budget - work;
+}
+
 static void poll_napi(struct netpoll *np)
 {
 	struct netpoll_info *npinfo = np->dev->npinfo;
@@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np)
 	int budget = 16;
 
 	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
-		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
-		    napi->poll_owner != smp_processor_id() &&
+		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			npinfo->rx_flags |= NETPOLL_RX_DROP;
-			atomic_inc(&trapped);
-
-			napi->poll(napi, budget);
-
-			atomic_dec(&trapped);
-			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+			budget = poll_one_napi(npinfo, napi, budget);
 			spin_unlock(&napi->poll_lock);
+
+			if (!budget)
+				break;
 		}
 	}
 }
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index c99f2a33fb9e..0d4d9721cbd4 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data {
 	int state;
 };
 
-#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
 (sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
@@ -284,6 +283,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
 	struct ip_vs_sync_conn *s;
 	struct ip_vs_sync_conn_options *opt;
 	struct ip_vs_conn *cp;
+	struct ip_vs_protocol *pp;
 	char *p;
 	int i;
 
@@ -342,7 +342,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
 			p += SIMPLE_CONN_SIZE;
 
 		atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
-		cp->timeout = IP_VS_SYNC_CONN_TIMEOUT;
+		pp = ip_vs_proto_get(s->protocol);
+		cp->timeout = pp->timeout_table[cp->state];
 		ip_vs_conn_put(cp);
 
 		if (p > buffer+buflen) {
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9be0daa9c0ec..ffdccc0972e0 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -304,7 +304,7 @@ static void icmp_put(struct seq_file *seq)
 	for (i=0; icmpmibmap[i].name != NULL; i++)
 		seq_printf(seq, " %lu",
 			   snmp_fold_field((void **) icmpmsg_statistics,
-					   icmpmibmap[i].index));
+					   icmpmibmap[i].index | 0x100));
 }
 
 /*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2e6ad6dbba6c..c64072bb504b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2453,7 +2453,7 @@ void __init tcp_init(void)
 					0,
 					&tcp_hashinfo.ehash_size,
 					NULL,
-					0);
+					thash_entries ? 0 : 512 * 1024);
 	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
 	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
 		rwlock_init(&tcp_hashinfo.ehash[i].lock);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad759f1c3777..d3d8d5dfcee3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -858,16 +858,16 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 			      u8 *newkey, u8 newkeylen)
 {
 	/* Add Key to the list */
-	struct tcp4_md5sig_key *key;
+	struct tcp_md5sig_key *key;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp4_md5sig_key *keys;
 
-	key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+	key = tcp_v4_md5_do_lookup(sk, addr);
 	if (key) {
 		/* Pre-existing entry - just update that one. */
-		kfree(key->base.key);
-		key->base.key = newkey;
-		key->base.keylen = newkeylen;
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
 	} else {
 		struct tcp_md5sig_info *md5sig;
 
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index b49dedcda52d..007304e99842 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -266,26 +266,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 	 */
 	diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* Slow start. */
-		if (diff > gamma) {
-			/* Going too fast. Time to slow down
-			 * and switch to congestion avoidance.
-			 */
-			tp->snd_ssthresh = 2;
-
-			/* Set cwnd to match the actual rate
-			 * exactly:
-			 *   cwnd = (actual rate) * baseRTT
-			 * Then we add 1 because the integer
-			 * truncation robs us of full link
-			 * utilization.
-			 */
-			tp->snd_cwnd = min(tp->snd_cwnd,
-					   (target_cwnd >>
-					    V_PARAM_SHIFT)+1);
+	if (diff > gamma && tp->snd_ssthresh > 2 ) {
+		/* Going too fast. Time to slow down
+		 * and switch to congestion avoidance.
+		 */
+		tp->snd_ssthresh = 2;
+
+		/* Set cwnd to match the actual rate
+		 * exactly:
+		 *   cwnd = (actual rate) * baseRTT
+		 * Then we add 1 because the integer
+		 * truncation robs us of full link
+		 * utilization.
+		 */
+		tp->snd_cwnd = min(tp->snd_cwnd,
+				   (target_cwnd >>
+				    V_PARAM_SHIFT)+1);
 
-		}
+	} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+		/* Slow start. */
 		tcp_slow_start(tp);
 	} else {
 		/* Congestion avoidance. */
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 20cfc90d5597..36f7dbfb6dbb 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1670,7 +1670,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
 					       filp, buffer, lenp, ppos);
 
 	else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
-		 (strcmp(ctl->procname, "base_reacable_time_ms") == 0))
+		 (strcmp(ctl->procname, "base_reachable_time_ms") == 0))
 		ret = proc_dointvec_ms_jiffies(ctl, write,
 					       filp, buffer, lenp, ppos);
 	else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 85208026278b..f1523b82cac1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -561,16 +561,16 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
 			      char *newkey, u8 newkeylen)
 {
 	/* Add key to the list */
-	struct tcp6_md5sig_key *key;
+	struct tcp_md5sig_key *key;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp6_md5sig_key *keys;
 
-	key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+	key = tcp_v6_md5_do_lookup(sk, peer);
 	if (key) {
 		/* modify existing entry - just update that one */
-		kfree(key->base.key);
-		key->base.key = newkey;
-		key->base.keylen = newkeylen;
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
 	} else {
 		/* reallocate new list if current one is full. */
 		if (!tp->md5sig_info) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4d6171bc0829..000c2fb462d0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -999,7 +999,7 @@ struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
 		*vmalloced = 0;
 
 	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
-	hash = (void*)__get_free_pages(GFP_KERNEL,
+	hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
 				       get_order(sizeof(struct hlist_head)
 						 * size));
 	if (!hash) {
diff --git a/net/socket.c b/net/socket.c
index 540013ea8620..5d879fd3d01d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1250,11 +1250,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
 		goto out_release_both;
 
 	fd1 = sock_alloc_fd(&newfile1);
-	if (unlikely(fd1 < 0))
+	if (unlikely(fd1 < 0)) {
+		err = fd1;
 		goto out_release_both;
+	}
 
 	fd2 = sock_alloc_fd(&newfile2);
 	if (unlikely(fd2 < 0)) {
+		err = fd2;
 		put_filp(newfile1);
 		put_unused_fd(fd1);
 		goto out_release_both;
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 864b541bbf51..2be714e9b382 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -87,9 +87,8 @@ proc_dodebug(ctl_table *table, int write, struct file *file,
 			left--, s++;
 		*(unsigned int *) table->data = value;
 		/* Display the RPC tasks on writing to rpc_debug */
-		if (table->ctl_name == CTL_RPCDEBUG) {
+		if (strcmp(table->procname, "rpc_debug") == 0)
 			rpc_show_tasks();
-		}
 	} else {
 		if (!access_ok(VERIFY_WRITE, buffer, left))
 			return -EFAULT;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index f877b88091ce..9e11ce715958 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -221,8 +221,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 						 seg->mr_base);
 			dprintk("RPC:       %s: read chunk "
 				"elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
-				seg->mr_len, seg->mr_base, seg->mr_rkey, pos,
-				n < nsegs ? "more" : "last");
+				seg->mr_len, (unsigned long long)seg->mr_base,
+				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
 			cur_rchunk++;
 			r_xprt->rx_stats.read_chunk_count++;
 		} else {	/* write/reply */
@@ -234,8 +234,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 			dprintk("RPC:       %s: %s chunk "
 				"elem %d@0x%llx:0x%x (%s)\n", __func__,
 				(type == rpcrdma_replych) ? "reply" : "write",
-				seg->mr_len, seg->mr_base, seg->mr_rkey,
-				n < nsegs ? "more" : "last");
+				seg->mr_len, (unsigned long long)seg->mr_base,
+				seg->mr_rkey, n < nsegs ? "more" : "last");
 			cur_wchunk++;
 			if (type == rpcrdma_replych)
 				r_xprt->rx_stats.reply_chunk_count++;
@@ -577,7 +577,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **ipt
 			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
 				__func__,
 				ntohl(seg->rs_length),
-				off,
+				(unsigned long long)off,
 				ntohl(seg->rs_handle));
 		}
 		total_len += ntohl(seg->rs_length);