-rw-r--r--  Documentation/networking/ip-sysctl.txt | 8
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 2
-rw-r--r--  arch/microblaze/include/asm/io.h | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c | 27
-rw-r--r--  arch/mips/bcm47xx/prom.c | 8
-rw-r--r--  arch/mips/mm/highmem.c | 1
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/kernel/pci.c | 7
-rw-r--r--  arch/sparc/include/asm/stat.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 30
-rw-r--r--  arch/x86/kernel/ptrace.c | 7
-rw-r--r--  block/blk-core.c | 11
-rw-r--r--  drivers/acpi/processor_idle.c | 28
-rw-r--r--  drivers/acpi/processor_perflib.c | 6
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 58
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 108
-rw-r--r--  drivers/input/input-polldev.c | 6
-rw-r--r--  drivers/input/touchscreen/usbtouchscreen.c | 8
-rw-r--r--  drivers/net/e1000/e1000_main.c | 19
-rw-r--r--  drivers/net/tc35815.c | 1
-rw-r--r--  drivers/net/usb/cdc_ether.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 22
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 2
-rw-r--r--  drivers/video/efifb.c | 11
-rw-r--r--  drivers/watchdog/bfin_wdt.c | 13
-rw-r--r--  fs/exec.c | 1
-rw-r--r--  include/linux/blkdev.h | 6
-rw-r--r--  kernel/perf_event.c | 11
-rw-r--r--  kernel/sys.c | 2
-rw-r--r--  lib/idr.c | 4
-rw-r--r--  mm/migrate.c | 36
-rw-r--r--  mm/oom_kill.c | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  security/integrity/ima/ima_iint.c | 3
-rw-r--r--  tools/perf/util/probe-event.c | 3
47 files changed, 402 insertions(+), 237 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 006b39dec87d..e87f3cdc8a6a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
 	Default: 5
 
 max_addresses - INTEGER
-	Number of maximum addresses per interface. 0 disables limitation.
-	It is recommended not set too large value (or 0) because it would
-	be too easy way to crash kernel to allow to create too much of
-	autoconfigured addresses.
+	Maximum number of autoconfigured addresses per interface. Setting
+	to zero disables the limitation. It is not recommended to set this
+	value too large (or to zero) because it would be an easy way to
+	crash the kernel by allowing too many addresses to be created.
 	Default: 16
 
 disable_ipv6 - BOOLEAN
diff --git a/MAINTAINERS b/MAINTAINERS
index c95f727236e1..2533fc45a44a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3489,9 +3489,9 @@ S: Maintained
 F:	drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M:	Lennert Buytenhek <buytenh@marvell.com>
+M:	Lennert Buytenhek <buytenh@wantstofly.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	drivers/net/mv643xx_eth.*
 F:	include/linux/mv643xx.h
 
diff --git a/Makefile b/Makefile
index 12b1aa1103ee..1b24895212d8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ece1bf994499..e456f062f241 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index fc9997b73c09..267c7c779e53 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
  * Little endian
  */
 
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2f..2a56bccce4e0 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do { \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
 do { \
-	int step = -line_length; \
-	int count = end - start; \
-	BUG_ON(count <= 0); \
+	int volatile temp; \
+	BUG_ON(end - start <= 0); \
 	\
-	__asm__ __volatile__ (" 1: addk %0, %0, %1; \
-	" #op " %0, r0; \
-	bgtid %1, 1b; \
-	addk %1, %1, %2; \
-	" : : "r" (start), "r" (count), \
-	"r" (step) : "memory"); \
+	__asm__ __volatile__ (" 1: " #op " %1, r0; \
+	cmpu %0, %1, %2; \
+	bgtid %0, 1b; \
+	addk %1, %1, %3; \
+	" : : "r" (temp), "r" (start), "r" (end),\
+	"r" (line_length) : "memory"); \
 } while (0);
 
 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
 	pr_debug("%s\n", __func__);
 	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 			wdc.clear)
-
-#if 0
-	unsigned int i;
-
-	pr_debug("%s\n", __func__);
-
-	/* Just loop through cache size and invalidate it */
-	for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
-		__invalidate_dcache(0, i);
-#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,
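
A note on the rewritten range loop above: the old assembly counted down from end - start with a negative step, while the new version walks the address itself from start and compares it against end on every iteration. A minimal C sketch of the equivalent logic, assuming a hypothetical per-line op callback (the real macro must stay in inline assembly so the wic/wdc instruction is issued directly):

/* Sketch only: walk [start, end) one cache line at a time and issue
 * the cache op (wic/wdc in the real macro) on each line address. */
static inline void cache_range_loop_sketch(unsigned long start,
					   unsigned long end,
					   int line_length,
					   void (*op)(unsigned long addr))
{
	unsigned long addr;

	BUG_ON(end - start <= 0);
	for (addr = start; addr < end; addr += line_length)
		op(addr);	/* one cache line per iteration */
}
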
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index c51405e57921..29d3cbf9555f 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
 		break;
 	}
 
+	/* Ignoring the last page when ddr size is 128M. Cached
+	 * accesses to last page is causing the processor to prefetch
+	 * using address above 128M stepping out of the ddr address
+	 * space.
+	 */
+	if (mem == 0x8000000)
+		mem -= 0x1000;
+
 	add_memory_region(0, mem, BOOT_MEM_RAM);
 }
 
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f4..127d732474bf 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 524d9352f17e..f388dc68f605 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,7 +18,6 @@ config PARISC
 	select BUG
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
-	select HAVE_ARCH_TRACEHOOK
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index f7064abc3bb6..9e74bfe071dc 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -18,7 +18,6 @@
 
 #include <asm/io.h>
 #include <asm/system.h>
-#include <asm/cache.h>		/* for L1_CACHE_BYTES */
 #include <asm/superio.h>
 
 #define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
 	} else {
 		printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
 	}
+
+	/* Set the CLS for PCI as early as possible. */
+	pci_cache_line_size = pci_dfl_cache_line_size;
+
 	return 0;
 }
 
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
 	** upper byte is PCI_LATENCY_TIMER.
 	*/
 	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
-			      (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+			      (0x80 << 8) | pci_cache_line_size);
 }
 
 
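
For context on the pcibios_set_master() change above: the PCI cache line size register is programmed in 32-bit words, not bytes, which is why the old code divided L1_CACHE_BYTES by sizeof(u32); pci_dfl_cache_line_size is the PCI core's arch default already in that unit. A sketch of the register layout the write relies on (the wrapper name is illustrative, not driver code):

/* Sketch: the 16-bit config word at PCI_CACHE_LINE_SIZE holds the
 * latency timer in its upper byte and the cache line size (in
 * 32-bit words) in its lower byte. */
static void set_master_sketch(struct pci_dev *dev, u8 cls_dwords)
{
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
			      (0x80 << 8) | cls_dwords);
}
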
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 55db5eca08e2..39327d6a57eb 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
 	ino_t		st_ino;
 	mode_t		st_mode;
 	short		st_nlink;
-	uid_t		st_uid;
-	gid_t		st_gid;
+	uid16_t		st_uid;
+	gid16_t		st_gid;
 	unsigned short	st_rdev;
 	off_t		st_size;
 	time_t		st_atime;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fc801bab1b3b..b753ea59703a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -450,6 +450,8 @@ struct thread_struct {
 	struct perf_event	*ptrace_bps[HBP_NUM];
 	/* Debug status used for traps, single steps, etc... */
 	unsigned long		debugreg6;
+	/* Keep track of the exact dr7 value set by the user */
+	unsigned long		ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
 	unsigned long		trap_no;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 05d5fec64a94..bb6006e3e295 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
 	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
 
-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-				kallsyms_lookup_name(info->name);
-	if (info->address)
-		return 0;
-
-	return -EINVAL;
-}
-
 int arch_bp_generic_fields(int x86_len, int x86_type,
 			   int *gen_len, int *gen_type)
 {
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 		return ret;
 	}
 
-	ret = arch_store_info(bp);
-
-	if (ret < 0)
-		return ret;
+	/*
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)
+				kallsyms_lookup_name(info->name);
 	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 017d937639fe..0c1033d61e59 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 	} else if (n == 6) {
 		val = thread->debugreg6;
 	} else if (n == 7) {
-		val = ptrace_get_dr7(thread->ptrace_bps);
+		val = thread->ptrace_dr7;
 	}
 	return val;
 }
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
 		return rc;
 	}
 	/* All that's left is DR7 */
-	if (n == 7)
+	if (n == 7) {
 		rc = ptrace_write_dr7(tsk, val);
+		if (!rc)
+			thread->ptrace_dr7 = val;
+	}
 
 ret_path:
 	return rc;
diff --git a/block/blk-core.c b/block/blk-core.c
index 44b6d691728d..36c0deebc2dc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1856,15 +1856,8 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq)) {
+	if (blk_account_rq(rq))
 		q->in_flight[rq_is_sync(rq)]++;
-		/*
-		 * Mark this device as supporting hardware queuing, if
-		 * we have more IOs in flight than 4.
-		 */
-		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-	}
 }
 
 /**
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e88e8ae04fdb..cc978a8c00b7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -880,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		return(acpi_idle_enter_c1(dev, state));
 
 	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
+	if (cx->entry_method != ACPI_CSTATE_FFH) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+	}
 
 	if (unlikely(need_resched())) {
 		current_thread_info()->status |= TS_POLLING;
@@ -965,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	}
 
 	local_irq_disable();
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
+	if (cx->entry_method != ACPI_CSTATE_FFH) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+	}
 
 	if (unlikely(need_resched())) {
 		current_thread_info()->status |= TS_POLLING;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2cabadcc4d8c..a959f6a07508 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
 	if (result)
 		goto update_bios;
 
-	return 0;
+	/* We need to call _PPC once when cpufreq starts */
+	if (ignore_ppc != 1)
+		result = acpi_processor_get_platform_limit(pr);
+
+	return result;
 
 	/*
 	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fac19d1..b314a999aabe 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@
 
 #define DRV_NAME "cs5535-clockevt"
 
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 79beffcf5936..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -176,6 +176,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int i915_drm_freeze(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
@@ -191,17 +193,12 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
-	return 0;
-}
-
-static void i915_drm_suspend(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	intel_opregion_free(dev, 1);
 
 	/* Modeset on resume, not lid events */
 	dev_priv->modeset_on_lid = 0;
+
+	return 0;
 }
 
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
@@ -221,8 +218,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 	if (error)
 		return error;
 
-	i915_drm_suspend(dev);
-
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
@@ -237,6 +232,10 @@ static int i915_drm_thaw(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	i915_restore_state(dev);
+
+	intel_opregion_init(dev, 1);
+
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
@@ -263,10 +262,6 @@ static int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
-
 	return i915_drm_thaw(dev);
 }
 
@@ -423,8 +418,6 @@ static int i915_pm_suspend(struct device *dev)
 	if (error)
 		return error;
 
-	i915_drm_suspend(drm_dev);
-
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 
@@ -464,13 +457,8 @@ static int i915_pm_poweroff(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int error;
-
-	error = i915_drm_freeze(drm_dev);
-	if (!error)
-		i915_drm_suspend(drm_dev);
 
-	return error;
+	return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5445cefdd03e..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
 	uint64_t vm_end;
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
+	uint64_t vram_sys_base;
 
 	/* the mtrr covering the FB */
 	int fb_mtrr;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			uint32_t flags, uint64_t phys)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj **pgt;
-	unsigned psz, pfl, pages;
-
-	if (virt >= dev_priv->vm_gart_base &&
-	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-		psz = 12;
-		pgt = &dev_priv->gart_info.sg_ctxdma;
-		pfl = 0x21;
-		virt -= dev_priv->vm_gart_base;
-	} else
-	if (virt >= dev_priv->vm_vram_base &&
-	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-		psz = 16;
-		pgt = dev_priv->vm_vram_pt;
-		pfl = 0x01;
-		virt -= dev_priv->vm_vram_base;
-	} else {
-		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-			 virt, virt + size - 1);
-		return -EINVAL;
-	}
+	struct nouveau_gpuobj *pgt;
+	unsigned block;
+	int i;
 
-	pages = size >> psz;
+	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+	size = (size >> 16) << 1;
+
+	phys |= ((uint64_t)flags << 32);
+	phys |= 1;
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+	while (size) {
+		unsigned offset_h = upper_32_bits(phys);
+		unsigned offset_l = lower_32_bits(phys);
+		unsigned pte, end;
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 1);
+			if (size >= block && !(virt & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
 
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
+		phys += block << 15;
+		size -= block;
 
-			virt += (1 << psz);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
+		while (block) {
+			pgt = dev_priv->vm_vram_pt[virt >> 14];
+			pte = virt & 0x3ffe;
 
-			nv_wo32(dev, pt, pte++, offset_l | pfl);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
+			end = pte + block;
+			if (end > 16384)
+				end = 16384;
+			block -= (end - pte);
+			virt += (end - pte);
 
-			phys += (1 << psz);
-			virt += (1 << psz);
+			while (pte < end) {
+				nv_wo32(dev, pgt, pte++, offset_l);
+				nv_wo32(dev, pgt, pte++, offset_h);
+			}
 		}
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }
 
 /*
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 				       struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
 	int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	/* only implemented for head A for now */
 	NVSetOwner(dev, 0);
 
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
 	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
 		NV_INFO(dev, "Load detected on head A\n");
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
 	/* Reserve the last MiB of VRAM, we should probably try to avoid
 	 * setting up the below tables over the top of the VBIOS image at
 	 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
 	 * We map the entire fake channel into the start of the PRAMIN BAR
 	 */
 	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
 				     0, &priv->pramin_pt);
 	if (ret)
 		return ret;
 
-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
 		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
 	}
 
 	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;
 
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->im_backing_start;
 
 	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-		pte += 8;
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (gpuobj->im_bound == 0)
 		return -EINVAL;
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 
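
Common to the nouveau hunks above is a switch from byte-offset PTE writes to word-indexed writes of two 32-bit halves. A small illustrative sketch of that pattern (nv_wo32, lower_32_bits and upper_32_bits are the calls the diff itself uses; the wrapper is hypothetical):

/* Sketch: an NV50 page table entry here is two 32-bit words,
 * written low half first; pte now counts 32-bit words, so one
 * entry advances it by 2 rather than by 8 bytes. */
static void write_pte64_sketch(struct drm_device *dev,
			       struct nouveau_gpuobj *pgt,
			       uint32_t pte, uint64_t entry)
{
	nv_wo32(dev, pgt, pte + 0, lower_32_bits(entry));
	nv_wo32(dev, pgt, pte + 1, upper_32_bits(entry));
}
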
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
-static int vmw_cmd_dma(struct vmw_private *dev_priv,
-		       struct vmw_sw_context *sw_context,
-		       SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_dma_buffer **vmw_bo_p)
 {
-	uint32_t handle;
 	struct vmw_dma_buffer *vmw_bo = NULL;
 	struct ttm_buffer_object *bo;
-	struct vmw_surface *srf = NULL;
-	struct vmw_dma_cmd {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSurfaceDMA dma;
-	} *cmd;
+	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
-	int ret;
 	uint32_t cur_validate_node;
 	struct ttm_validate_buffer *val_buf;
+	int ret;
 
-	cmd = container_of(header, struct vmw_dma_cmd, header);
-	handle = cmd->dma.guest.ptr.gmrId;
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number of DMA commands per submission"
+		DRM_ERROR("Max number relocations per submission"
 			  " exceeded\n");
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->location = &cmd->dma.guest.ptr;
+	reloc->location = ptr;
 
 	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
 	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_p = NULL;
+	return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
 
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	struct vmw_surface *srf = NULL;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->dma.guest.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	bo = &vmw_bo->base;
 	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
 					     cmd->dma.host.sid, &srf);
 	if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
 		    &vmw_cmd_blt_surf_screen_check)
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index aa6713b4a988..291d9393d359 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
 	struct input_polled_dev *dev = input_get_drvdata(input);
 
 	cancel_delayed_work_sync(&dev->work);
+	/*
+	 * Clean up work struct to remove references to the workqueue.
+	 * It may be destroyed by the next call. This causes problems
+	 * at next device open-close in case of poll_interval == 0.
+	 */
+	INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
 	input_polldev_stop_workqueue();
 
 	if (dev->close)
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 09a5e7341bd5..5256123a5228 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
 static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 {
-	dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ;
-	dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ;
+	dev->x = (pkt[2] << 8) | pkt[1];
+	dev->y = (pkt[4] << 8) | pkt[3];
 	dev->press = pkt[5] & 0xff;
 	dev->touch = pkt[0] & 0x01;
 
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
 	[DEVTYPE_GENERAL_TOUCH] = {
 		.min_xc		= 0x0,
-		.max_xc		= 0x0500,
+		.max_xc		= 0x7fff,
 		.min_yc		= 0x0,
-		.max_yc		= 0x0500,
+		.max_yc		= 0x7fff,
 		.rept_size	= 7,
 		.read_data	= general_touch_read_data,
 	},
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532eccf..765543663a4f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4006,11 +4006,21 @@ check_page:
 			}
 		}
 
-		if (!buffer_info->dma)
+		if (!buffer_info->dma) {
 			buffer_info->dma = pci_map_page(pdev,
 			                                buffer_info->page, 0,
 			                                buffer_info->length,
 			                                PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+				put_page(buffer_info->page);
+				dev_kfree_skb(skb);
+				buffer_info->page = NULL;
+				buffer_info->skb = NULL;
+				buffer_info->dma = 0;
+				adapter->alloc_rx_buff_failed++;
+				break; /* while !buffer_info->skb */
+			}
+		}
 
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4101,6 +4111,13 @@ map_skb:
 						  skb->data,
 						  buffer_info->length,
 						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+			dev_kfree_skb(skb);
+			buffer_info->skb = NULL;
+			buffer_info->dma = 0;
+			adapter->alloc_rx_buff_failed++;
+			break; /* while !buffer_info->skb */
+		}
 
 		/*
 		 * XXX if it was allocated cleanly it will never map to a
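
Both e1000 hunks add the same defensive pattern: a PCI DMA mapping can fail and must be checked before the address is handed to the NIC. A minimal sketch of that pattern with the 2.6.33-era API (the helper and its return convention are illustrative, not driver code):

/* Sketch: map an rx buffer and unwind cleanly if the IOMMU/DMA
 * layer cannot provide a mapping. */
static int rx_map_sketch(struct pci_dev *pdev, struct sk_buff *skb,
			 unsigned int len, dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		dev_kfree_skb(skb);	/* free the unmappable buffer */
		*dma = 0;
		return -ENOMEM;		/* caller accounts the failure */
	}
	return 0;
}
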
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..d71c1976072e 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
 			/* Transmit complete. */
 			lp->lstats.tx_ints++;
 			tc35815_txdone(dev);
-			netif_wake_queue(dev);
 			if (ret < 0)
 				ret = 0;
 		}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4f27f022fbf7..5f3b9eaeb04f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = {
584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
585 .driver_info = (unsigned long) &mbm_info, 585 .driver_info = (unsigned long) &mbm_info,
586}, { 586}, {
587 /* Ericsson C3607w ver 2 */
588 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
590 .driver_info = (unsigned long) &mbm_info,
591}, {
587 /* Toshiba F3507g */ 592 /* Toshiba F3507g */
588 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 593 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c7574..31462813bac0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2009 "%d index %d\n", scd_ssn , index); 2009 "%d index %d\n", scd_ssn , index);
2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2011 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2011 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2012 2012
2013 if (priv->mac80211_registered && 2013 if (priv->mac80211_registered &&
2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b744..cffaae772d51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 				   scd_ssn , index, txq_id, txq->swq_id);
 
 			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 			if (priv->mac80211_registered &&
 			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 			   tx_resp->failure_frame);
 
 		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if (priv->mac80211_registered &&
 		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
 			iwl_wake_queue(priv, txq_id);
 	}
 
-	if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index d10bea64fce3..f36f804804fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2744,8 +2744,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2744 if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) 2744 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
2745 priv->staging_rxon.flags = 0; 2745 priv->staging_rxon.flags = 0;
2746 2746
2747 iwl_set_rxon_ht(priv, ht_conf);
2748 iwl_set_rxon_channel(priv, conf->channel); 2747 iwl_set_rxon_channel(priv, conf->channel);
2748 iwl_set_rxon_ht(priv, ht_conf);
2749 2749
2750 iwl_set_flags_for_band(priv, conf->channel->band); 2750 iwl_set_flags_for_band(priv, conf->channel->band);
2751 spin_unlock_irqrestore(&priv->lock, flags); 2751 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..b69e972671b2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
 int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 			 struct iwl_tx_queue *txq);
 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		      int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 87ce2bd292c7..8f4071562857 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
 
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed)
+{
+	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+			priv->stations[sta_id].tid[tid].tfds_in_queue,
+			freed);
+		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	struct iwl_queue *q = &txq->q;
 	struct iwl_tx_info *tx_info;
 	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
 
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
 		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 
 		tx_info = &txq->txb[txq->q.read_ptr];
 		iwl_tx_status(priv, tx_info->skb[0]);
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
 		tx_info->skb[0] = NULL;
 
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-		nfreed++;
 	}
 	return nfreed;
 }
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
 		/* calculate mac80211 ampdu sw queue to wake */
 		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
 		    priv->mac80211_registered &&
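
The new iwl_free_tfds_in_queue() above is, at bottom, a saturating decrement: tfds_in_queue is unsigned, so subtracting more than the current value would wrap to a huge count instead of going negative. The guard in isolation, as an illustrative helper only:

/* Sketch: clamp an unsigned counter at zero rather than letting
 * it wrap on underflow. */
static inline u32 saturating_sub(u32 counter, u32 freed)
{
	return (counter >= freed) ? counter - freed : 0;
}
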
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 07d14dfdf0b4..226b3e93498c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
934 acer_backlight_device = bd; 934 acer_backlight_device = bd;
935 935
936 bd->props.power = FB_BLANK_UNBLANK; 936 bd->props.power = FB_BLANK_UNBLANK;
937 bd->props.brightness = max_brightness; 937 bd->props.brightness = read_brightness(bd);
938 bd->props.max_brightness = max_brightness; 938 bd->props.max_brightness = max_brightness;
939 backlight_update_status(bd); 939 backlight_update_status(bd);
940 return 0; 940 return 0;
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index eb12182b2059..d25df51bb0d2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
161 return 0; 161 return 0;
162} 162}
163 163
164static void efifb_destroy(struct fb_info *info)
165{
166 if (info->screen_base)
167 iounmap(info->screen_base);
168 release_mem_region(info->aperture_base, info->aperture_size);
169 framebuffer_release(info);
170}
171
164static struct fb_ops efifb_ops = { 172static struct fb_ops efifb_ops = {
165 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
174 .fb_destroy = efifb_destroy,
166 .fb_setcolreg = efifb_setcolreg, 175 .fb_setcolreg = efifb_setcolreg,
167 .fb_fillrect = cfb_fillrect, 176 .fb_fillrect = cfb_fillrect,
168 .fb_copyarea = cfb_copyarea, 177 .fb_copyarea = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
 	info->par = NULL;
 
 	info->aperture_base = efifb_fix.smem_start;
-	info->aperture_size = size_total;
+	info->aperture_size = size_remap;
 
 	info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
 	if (!info->screen_base) {
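The new .fb_destroy hook releases resources in reverse order of acquisition, and the aperture_size change makes release_mem_region() release exactly the region the driver reserved. A rough userspace analogue of that guarded, reverse-order teardown; all names below are stand-ins for illustration, not the fbdev API:

#include <stdlib.h>

struct fake_fb {
	void *screen_base;    /* stands in for the ioremap()ed framebuffer */
	void *aperture;       /* stands in for the reserved mem region */
	size_t aperture_size;
};

static void fake_destroy(struct fake_fb *info)
{
	if (info->screen_base)        /* the mapping step may have failed */
		free(info->screen_base); /* ~ iounmap() */
	free(info->aperture);            /* ~ release_mem_region() */
	free(info);                      /* ~ framebuffer_release() */
}

int main(void)
{
	struct fake_fb *info = calloc(1, sizeof(*info));

	if (!info)
		return 1;
	info->aperture_size = 4096;   /* record what was actually reserved */
	info->aperture = malloc(info->aperture_size);
	info->screen_base = malloc(info->aperture_size);
	fake_destroy(info);           /* releases exactly what setup took */
	return 0;
}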
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index c7b3f9df2317..2159e668751c 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -1,9 +1,8 @@
 /*
  * Blackfin On-Chip Watchdog Driver
- * Supports BF53[123]/BF53[467]/BF54[2489]/BF561
  *
  * Originally based on softdog.c
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
  * Copyright 2006-2007 Michele d'Amico
  * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
  */
 static int bfin_wdt_set_timeout(unsigned long t)
 {
-	u32 cnt;
+	u32 cnt, max_t, sclk;
 	unsigned long flags;
 
-	stampit();
+	sclk = get_sclk();
+	max_t = -1 / sclk;
+	cnt = t * sclk;
+	stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
 
-	cnt = t * get_sclk();
-	if (cnt < get_sclk()) {
+	if (t > max_t) {
 		printk(KERN_WARNING PFX "timeout value is too large\n");
 		return -EINVAL;
 	}
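The old check multiplied first and could wrap the 32-bit count before testing it; the new code derives the largest representable timeout up front (max_t = -1 / sclk, which in u32 arithmetic is 0xffffffff / sclk) and compares the request against that bound. A minimal userspace sketch of the same idea, assuming a 125 MHz system clock:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sclk = 125000000;          /* assumed 125 MHz system clock */
	uint32_t max_t = UINT32_MAX / sclk; /* same result as -1 / sclk in u32 */
	unsigned long t = 40;               /* requested timeout in seconds */

	/* bound check happens before the multiplication can wrap */
	if (t > max_t) {
		printf("timeout value is too large (max %us)\n",
		       (unsigned)max_t);
		return 1;
	}
	printf("cnt = %u ticks\n", (unsigned)(t * sclk));
	return 0;
}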
diff --git a/fs/exec.c b/fs/exec.c
index e95c692ef0e4..cce6bbdbdbb1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -637,7 +637,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
 	 * will align it up.
 	 */
 	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
-	rlim_stack = min(rlim_stack, stack_size);
 #ifdef CONFIG_STACK_GROWSUP
 	if (stack_size + stack_expand > rlim_stack)
 		stack_base = vma->vm_start + rlim_stack;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f71f5c58620c..2f17793048e7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -461,9 +461,8 @@ struct request_queue
 #define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_CQ		16	/* hardware does queuing */
-#define QUEUE_FLAG_DISCARD	17	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	18	/* No extended merges */
+#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
 				 (1 << QUEUE_FLAG_CLUSTER) |	\
@@ -587,7 +586,6 @@ enum {
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
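Each QUEUE_FLAG_* value is a bit index consumed by test_bit()-style helpers, so dropping QUEUE_FLAG_CQ renumbers the later flags to keep the index space dense. A small stand-in (plain C, not the kernel's bitops) showing how the renumbered indices are consumed:

#include <stdio.h>

#define QUEUE_FLAG_IO_STAT	15
#define QUEUE_FLAG_DISCARD	16	/* was 17 before QUEUE_FLAG_CQ went away */
#define QUEUE_FLAG_NOXMERGES	17	/* was 18 */

/* simplified analogue of the kernel's test_bit() */
static int test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

int main(void)
{
	unsigned long queue_flags = (1UL << QUEUE_FLAG_IO_STAT) |
				    (1UL << QUEUE_FLAG_DISCARD);

	printf("discard=%d noxmerges=%d\n",
	       test_bit(QUEUE_FLAG_DISCARD, &queue_flags),
	       test_bit(QUEUE_FLAG_NOXMERGES, &queue_flags));
	return 0;
}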
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2b19297742cb..2ae7409bf38f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
 	task_event->event_id.tid = perf_event_tid(event, task);
 	task_event->event_id.ptid = perf_event_tid(event, current);
 
-	task_event->event_id.time = perf_clock();
-
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	if (!ctx)
-		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+		ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
 	put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
 			/* .ppid */
 			/* .tid  */
 			/* .ptid */
+			.time = perf_clock(),
 		},
 	};
 
@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
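All three match functions switch from "!= ACTIVE" to "< INACTIVE", which keeps filtering out OFF and ERROR events while now letting INACTIVE ones match too. A sketch of that comparison, with enum values assumed from the perf headers of this era:

#include <stdio.h>

enum perf_event_state {
	PERF_EVENT_STATE_ERROR    = -2,	/* assumed ordering: ERROR < OFF */
	PERF_EVENT_STATE_OFF      = -1,	/* < INACTIVE < ACTIVE */
	PERF_EVENT_STATE_INACTIVE =  0,
	PERF_EVENT_STATE_ACTIVE   =  1,
};

static int old_match(enum perf_event_state s)
{
	return s != PERF_EVENT_STATE_ACTIVE ? 0 : 1;
}

static int new_match(enum perf_event_state s)
{
	return s < PERF_EVENT_STATE_INACTIVE ? 0 : 1;
}

int main(void)
{
	enum perf_event_state s;

	/* INACTIVE now matches (new=1) while OFF/ERROR stay filtered out */
	for (s = PERF_EVENT_STATE_ERROR; s <= PERF_EVENT_STATE_ACTIVE; s++)
		printf("state %2d: old=%d new=%d\n", s, old_match(s),
		       new_match(s));
	return 0;
}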
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..18bde979f346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	if (which > PRIO_USER || which < PRIO_PROCESS)
 		return -EINVAL;
 
+	rcu_read_lock();
 	read_lock(&tasklist_lock);
 	switch (which) {
 	case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	}
 out_unlock:
 	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	return retval;
 }
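The RCU read section is entered before tasklist_lock and left after it, so task pointers found during the walk stay valid for the whole lookup (unlock in reverse order of locking). A userspace sketch of that nesting using liburcu as a stand-in for kernel RCU, which is an assumption (build with -lurcu):

#include <pthread.h>
#include <urcu.h>

static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER;

static void lookup_prio(void)
{
	rcu_read_lock();			/* taken first */
	pthread_rwlock_rdlock(&tasklist_lock);
	/* ... find_task_by_vpid()-style lookup would happen here ... */
	pthread_rwlock_unlock(&tasklist_lock);
	/* RCU-protected pointers may still be dereferenced here */
	rcu_read_unlock();			/* released last */
}

int main(void)
{
	rcu_register_thread();
	lookup_prio();
	rcu_unregister_thread();
	return 0;
}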
diff --git a/lib/idr.c b/lib/idr.c
index 1cac726c44bc..0dc782216d4b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
 			/* if already at the top layer, we need to grow */
-			if (!(p = pa[l])) {
+			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
 				return IDR_NEED_TO_GROW;
 			}
+			p = pa[l];
+			BUG_ON(!p);
 
 			/* If we need to go up one layer, continue the
 			 * loop; otherwise, restart from the top.
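The rewritten test decides to grow from the id value itself: a tree with `layers` levels of IDR_BITS-wide radix nodes can only hold ids below 1 << (layers * IDR_BITS), so any id at or above that bound needs another layer before pa[l] can be expected to be non-NULL. A quick illustration of the bound, assuming IDR_BITS = 5 (the 32-bit value in kernels of this era):

#include <stdio.h>

#define IDR_BITS 5	/* assumed; 6 on 64-bit builds of this era */

int main(void)
{
	int layers;

	/* largest id each tree depth can address before it must grow */
	for (layers = 1; layers <= 4; layers++)
		printf("layers=%d -> ids 0..%d\n",
		       layers, (1 << (layers * IDR_BITS)) - 1);
	return 0;
}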
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0db5bbabe4..880bd592d38e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1002,33 +1002,27 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 #define DO_PAGES_STAT_CHUNK_NR 16
 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
-	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
-	int err;
 
-	for (i = 0; i < nr_pages; i += chunk_nr) {
-		if (chunk_nr > nr_pages - i)
-			chunk_nr = nr_pages - i;
+	while (nr_pages) {
+		unsigned long chunk_nr;
 
-		err = copy_from_user(chunk_pages, &pages[i],
-				     chunk_nr * sizeof(*chunk_pages));
-		if (err) {
-			err = -EFAULT;
-			goto out;
-		}
+		chunk_nr = nr_pages;
+		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
+			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+
+		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
+			break;
 
 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
 
-		err = copy_to_user(&status[i], chunk_status,
-				   chunk_nr * sizeof(*chunk_status));
-		if (err) {
-			err = -EFAULT;
-			goto out;
-		}
-	}
-	err = 0;
+		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+			break;
 
-out:
-	return err;
+		pages += chunk_nr;
+		status += chunk_nr;
+		nr_pages -= chunk_nr;
+	}
+	return nr_pages ? -EFAULT : 0;
 }
 
 /*
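The rewritten loop drops the index variable and instead advances the user pointers and the remaining count after each fixed-size chunk, returning -EFAULT only if it stopped early. A self-contained userspace sketch of the same chunking pattern, with memcpy standing in for the user-copy helpers:

#include <stdio.h>
#include <string.h>

#define CHUNK_NR 16

/* stand-in for do_pages_stat_array(): compute a status per element */
static void process_chunk(const int *in, int *out, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		out[i] = in[i] * 2;
}

static void process_all(const int *pages, int *status, size_t nr_pages)
{
	int chunk_in[CHUNK_NR], chunk_out[CHUNK_NR];

	while (nr_pages) {
		size_t chunk_nr = nr_pages;

		if (chunk_nr > CHUNK_NR)
			chunk_nr = CHUNK_NR;

		memcpy(chunk_in, pages, chunk_nr * sizeof(*chunk_in));
		process_chunk(chunk_in, chunk_out, chunk_nr);
		memcpy(status, chunk_out, chunk_nr * sizeof(*status));

		/* advance the cursors instead of indexing with i */
		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
}

int main(void)
{
	int pages[40], status[40];
	size_t i;

	for (i = 0; i < 40; i++)
		pages[i] = (int)i;
	process_all(pages, status, 40);
	printf("status[39] = %d\n", status[39]);	/* prints 78 */
	return 0;
}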
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52481b1c1e5..237050478f28 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	list_for_each_entry(c, &p->children, sibling) {
 		if (c->mm == p->mm)
 			continue;
+		if (mem && !task_in_mem_cgroup(c, mem))
+			continue;
 		if (!oom_kill_task(c))
 			return 0;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec3..ec874218b206 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
 	switch (ret) {
 	case GRO_NORMAL:
 	case GRO_HELD:
-		skb->protocol = eth_type_trans(skb, napi->dev);
+		skb->protocol = eth_type_trans(skb, skb->dev);
 
 		if (ret == GRO_HELD)
 			skb_gro_pull(skb, -ETH_HLEN);
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 0d83edcfc402..2d4d05d92fda 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -63,12 +63,11 @@ int ima_inode_alloc(struct inode *inode)
 	spin_lock(&ima_iint_lock);
 	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
 	spin_unlock(&ima_iint_lock);
+	radix_tree_preload_end();
 out:
 	if (rc < 0)
 		kmem_cache_free(iint_cache, iint);
 
-	radix_tree_preload_end();
-
 	return rc;
 }
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 29465d440043..fde17b090a47 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -272,6 +272,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 	int ret;
 
 	pp->probes[0] = buf = zalloc(MAX_CMDLEN);
+	pp->found = 1;
 	if (!buf)
 		die("Failed to allocate memory by zalloc.");
 	if (pp->offset) {
@@ -294,6 +295,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 error:
 		free(pp->probes[0]);
 		pp->probes[0] = NULL;
+		pp->found = 0;
 	}
 	return ret;
 }
@@ -455,6 +457,7 @@ void show_perf_probe_events(void)
 	struct strlist *rawlist;
 	struct str_node *ent;
 
+	memset(&pp, 0, sizeof(pp));
 	fd = open_kprobe_events(O_RDONLY, 0);
 	rawlist = get_trace_kprobe_event_rawlist(fd);
 	close(fd);