-rw-r--r--  arch/m68knommu/platform/coldfire/pit.c | 1
-rw-r--r--  arch/x86/include/asm/mce.h | 3
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 12
-rw-r--r--  arch/x86/kernel/aperture_64.c | 1
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 26
-rw-r--r--  arch/x86/kernel/dumpstack.c | 14
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  drivers/base/memory.c | 34
-rw-r--r--  drivers/char/agp/amd64-agp.c | 5
-rw-r--r--  drivers/char/agp/intel-agp.c | 6
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 6
-rw-r--r--  drivers/edac/i5000_edac.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/serial/8250_pnp.c | 2
-rw-r--r--  drivers/video/s3c-fb.c | 14
-rw-r--r--  drivers/video/via/accel.c | 5
-rw-r--r--  drivers/video/via/viafbdev.c | 11
-rw-r--r--  drivers/virtio/virtio_balloon.c | 6
-rw-r--r--  fs/ramfs/file-nommu.c | 26
-rw-r--r--  include/linux/kernel.h | 4
-rw-r--r--  include/linux/kfifo.h | 42
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/mm_types.h | 4
-rw-r--r--  include/linux/sched.h | 7
-rw-r--r--  include/linux/string.h | 5
-rw-r--r--  include/linux/tty.h | 4
-rw-r--r--  ipc/shm.c | 3
-rw-r--r--  kernel/futex.c | 27
-rw-r--r--  kernel/kfifo.c | 107
-rw-r--r--  kernel/smp.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 6
-rw-r--r--  kernel/trace/ring_buffer.c | 4
-rw-r--r--  kernel/trace/trace_events_filter.c | 29
-rw-r--r--  lib/string.c | 27
-rw-r--r--  mm/memcontrol.c | 11
-rw-r--r--  mm/nommu.c | 102
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/util.c | 2
-rw-r--r--  mm/vmscan.c | 3
-rw-r--r--  scripts/markup_oops.pl | 2
-rwxr-xr-x  scripts/recordmcount.pl | 2
-rw-r--r--  tools/perf/Makefile | 44
56 files changed, 705 insertions, 443 deletions
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c
index d8720ee34510..aebea19abd78 100644
--- a/arch/m68knommu/platform/coldfire/pit.c
+++ b/arch/m68knommu/platform/coldfire/pit.c
@@ -146,7 +146,6 @@ static struct clocksource pit_clk = {
         .read   = pit_read_clk,
         .shift  = 20,
         .mask   = CLOCKSOURCE_MASK(32),
-        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 /***************************************************************************/
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 858baa061cfc..6c3fdd631ed3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -108,10 +108,11 @@ struct mce_log {
 #define K8_MCE_THRESHOLD_BANK_5    (MCE_THRESHOLD_BASE + 5 * 9)
 #define K8_MCE_THRESHOLD_DRAM_ECC  (MCE_THRESHOLD_BANK_4 + 0)
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
 
 #ifdef __KERNEL__
 
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <asm/atomic.h>
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index bc54fa965af3..40be813fefb1 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -495,5 +495,17 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
         uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
+/*
+ * Get the minimum revision number of the hub chips within the partition.
+ *   1 - initial rev 1.0 silicon
+ *   2 - rev 2.0 production silicon
+ */
+static inline int uv_get_min_hub_revision_id(void)
+{
+        extern int uv_min_hub_revision_id;
+
+        return uv_min_hub_revision_id;
+}
+
 #endif /* CONFIG_X86_64 */
 #endif /* _ASM_X86_UV_UV_HUB_H */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..f147a95fd84a 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,6 +31,7 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
+EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 5f92494dab61..21db3cbea7dc 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -36,6 +36,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 static u64 gru_start_paddr, gru_end_paddr;
+int uv_min_hub_revision_id;
+EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
@@ -55,12 +57,19 @@ static int early_get_nodeid(void)
         mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
         node_id.v = *mmr;
         early_iounmap(mmr, sizeof(*mmr));
+
+        /* Currently, all blades have same revision number */
+        uv_min_hub_revision_id = node_id.s.revision;
+
         return node_id.s.node_id;
 }
 
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
+        int nodeid;
+
         if (!strcmp(oem_id, "SGI")) {
+                nodeid = early_get_nodeid();
                 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
                 if (!strcmp(oem_table_id, "UVL"))
                         uv_system_type = UV_LEGACY_APIC;
@@ -68,7 +77,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                 uv_system_type = UV_X2APIC;
         else if (!strcmp(oem_table_id, "UVH")) {
                 __get_cpu_var(x2apic_extra_bits) =
-                        early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
+                        nodeid << (UV_APIC_PNODE_SHIFT - 1);
                 uv_system_type = UV_NON_UNIQUE_APIC;
                 return 1;
         }
@@ -374,13 +383,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 enum map_type {map_wb, map_uc};
 
-static __init void map_high(char *id, unsigned long base, int shift,
-                            int max_pnode, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int pshift,
+                            int bshift, int max_pnode, enum map_type map_type)
 {
         unsigned long bytes, paddr;
 
-        paddr = base << shift;
-        bytes = (1UL << shift) * (max_pnode + 1);
+        paddr = base << pshift;
+        bytes = (1UL << bshift) * (max_pnode + 1);
         printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
                         paddr + bytes);
         if (map_type == map_uc)
@@ -396,7 +405,7 @@ static __init void map_gru_high(int max_pnode)
 
         gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
         if (gru.s.enable) {
-                map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+                map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
                 gru_start_paddr = ((u64)gru.s.base << shift);
                 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
 
@@ -410,7 +419,7 @@ static __init void map_mmr_high(int max_pnode)
 
         mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
         if (mmr.s.enable)
-                map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+                map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
 }
 
 static __init void map_mmioh_high(int max_pnode)
@@ -420,7 +429,8 @@ static __init void map_mmioh_high(int max_pnode)
 
         mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
         if (mmioh.s.enable)
-                map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
+                map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+                                max_pnode, map_uc);
 }
 
 static __init void map_low_mmrs(void)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c56bc2873030..6d817554780a 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -123,13 +123,15 @@ print_context_stack_bp(struct thread_info *tinfo,
         while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
                 unsigned long addr = *ret_addr;
 
-                if (__kernel_text_address(addr)) {
-                        ops->address(data, addr, 1);
-                        frame = frame->next_frame;
-                        ret_addr = &frame->return_address;
-                        print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
-                }
+                if (!__kernel_text_address(addr))
+                        break;
+
+                ops->address(data, addr, 1);
+                frame = frame->next_frame;
+                ret_addr = &frame->return_address;
+                print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
         }
+
         return (unsigned long)frame;
 }
 EXPORT_SYMBOL_GPL(print_context_stack_bp);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c6ee241c8a98..02c3ee013ccd 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -288,6 +288,8 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
         regs.es = __USER_DS;
         regs.fs = __KERNEL_PERCPU;
         regs.gs = __KERNEL_STACK_CANARY;
+#else
+        regs.ss = __KERNEL_DS;
 #endif
 
         regs.orig_ax = -1;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2b26dd5930c6..36daccb68642 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1151,9 +1151,13 @@ asmlinkage void __init xen_start_kernel(void)
 
         /* keep using Xen gdt for now; no urgent need to change it */
 
+#ifdef CONFIG_X86_32
         pv_info.kernel_rpl = 1;
         if (xen_feature(XENFEAT_supervisor_mode_kernel))
                 pv_info.kernel_rpl = 0;
+#else
+        pv_info.kernel_rpl = 0;
+#endif
 
         /* set the limit of our address space */
         xen_reserve_top();
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index d7d77d4a402c..ae6b6c43cff9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -309,17 +309,19 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
  * Block size attribute stuff
  */
 static ssize_t
-print_block_size(struct class *class, char *buf)
+print_block_size(struct sysdev_class *class,
+                 struct sysdev_class_attribute *class_attr,
+                 char *buf)
 {
-        return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+        return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
 }
 
-static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
 
 static int block_size_init(void)
 {
         return sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                                 &class_attr_block_size_bytes.attr);
+                                 &attr_block_size_bytes.attr);
 }
 
 /*
@@ -330,7 +332,9 @@ static int block_size_init(void)
  */
 #ifdef CONFIG_ARCH_MEMORY_PROBE
 static ssize_t
-memory_probe_store(struct class *class, const char *buf, size_t count)
+memory_probe_store(struct sysdev_class *class,
+                   struct sysdev_class_attribute *class_attr,
+                   const char *buf, size_t count)
 {
         u64 phys_addr;
         int nid;
@@ -346,12 +350,12 @@ memory_probe_store(struct class *class, const char *buf, size_t count)
 
         return count;
 }
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
 
 static int memory_probe_init(void)
 {
         return sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                                 &class_attr_probe.attr);
+                                 &attr_probe.attr);
 }
 #else
 static inline int memory_probe_init(void)
@@ -367,7 +371,9 @@ static inline int memory_probe_init(void)
 
 /* Soft offline a page */
 static ssize_t
-store_soft_offline_page(struct class *class, const char *buf, size_t count)
+store_soft_offline_page(struct sysdev_class *class,
+                        struct sysdev_class_attribute *class_attr,
+                        const char *buf, size_t count)
 {
         int ret;
         u64 pfn;
@@ -384,7 +390,9 @@ store_soft_offline_page(struct class *class, const char *buf, size_t count)
 
 /* Forcibly offline a page, including killing processes. */
 static ssize_t
-store_hard_offline_page(struct class *class, const char *buf, size_t count)
+store_hard_offline_page(struct sysdev_class *class,
+                        struct sysdev_class_attribute *class_attr,
+                        const char *buf, size_t count)
 {
         int ret;
         u64 pfn;
@@ -397,18 +405,18 @@ store_hard_offline_page(struct class *class, const char *buf, size_t count)
         return ret ? ret : count;
 }
 
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static SYSDEV_CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static SYSDEV_CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
 
 static __init int memory_fail_init(void)
 {
         int err;
 
         err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                                &class_attr_soft_offline_page.attr);
+                                &attr_soft_offline_page.attr);
         if (!err)
                 err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
-                                        &class_attr_hard_offline_page.attr);
+                                        &attr_hard_offline_page.attr);
         return err;
 }
 #else
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 5aa7a586a7ff..1afb8968a342 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -725,12 +725,11 @@ static struct pci_driver agp_amd64_pci_driver = {
 int __init agp_amd64_init(void)
 {
         int err = 0;
-        static int done = 0;
 
         if (agp_off)
                 return -EINVAL;
 
-        if (done++)
+        if (gart_iommu_aperture)
                 return agp_bridges_found ? 0 : -ENODEV;
 
         err = pci_register_driver(&agp_amd64_pci_driver);
@@ -771,6 +770,8 @@ int __init agp_amd64_init(void)
 
 static void __exit agp_amd64_cleanup(void)
 {
+        if (gart_iommu_aperture)
+                return;
         if (aperture_resource)
                 release_resource(aperture_resource);
         pci_unregister_driver(&agp_amd64_pci_driver);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 30c36ac2cd00..3999a5f25f38 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2460,10 +2460,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                                 &bridge->mode);
         }
 
-        if (bridge->driver->mask_memory == intel_i965_mask_memory)
+        if (bridge->driver->mask_memory == intel_i965_mask_memory) {
                 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
                         dev_err(&intel_private.pcidev->dev,
                                 "set gfx device dma mask 36bit failed!\n");
+                else
+                        pci_set_consistent_dma_mask(intel_private.pcidev,
+                                                    DMA_BIT_MASK(36));
+        }
 
         pci_set_drvdata(pdev, bridge);
         return agp_add_bridge(bridge);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index bdaef8e94021..64fe0a793efd 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -114,7 +114,7 @@ static struct virtio_device_id id_table[] = {
         { 0 },
 };
 
-static struct virtio_driver virtio_rng = {
+static struct virtio_driver virtio_rng_driver = {
         .driver.name =  KBUILD_MODNAME,
         .driver.owner = THIS_MODULE,
         .id_table =     id_table,
@@ -124,12 +124,12 @@ static struct virtio_driver virtio_rng = {
 
 static int __init init(void)
 {
-        return register_virtio_driver(&virtio_rng);
+        return register_virtio_driver(&virtio_rng_driver);
 }
 
 static void __exit fini(void)
 {
-        unregister_virtio_driver(&virtio_rng);
+        unregister_virtio_driver(&virtio_rng_driver);
 }
 module_init(init);
 module_exit(fini);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 77a9579d7167..adc10a2ac5f6 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -577,7 +577,13 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
         debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
 
         branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
-        channel = branch;
+
+        /*
+         * According with i5000 datasheet, bit 28 has no significance
+         * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD
+         */
+        channel = branch & 2;
+
         bank = NREC_BANK(info->nrecmema);
         rank = NREC_RANK(info->nrecmema);
         rdwr = NREC_RDWR(info->nrecmema);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bbe47812e4b6..e660ac07f3b2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 
+        if (IS_I965G(dev))
+                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+                                                0xf0;
+
         I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
         DRM_DEBUG_DRIVER("Enabled hardware status page\n");
         return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index be631cc3e4dc..46d88965852a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -45,6 +45,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0400);
 
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
 static struct drm_driver driver;
 
 #define INTEL_VGA_DEVICE(id, info) { \
@@ -464,8 +467,11 @@ static struct drm_driver driver = {
         .lastclose = i915_driver_lastclose,
         .preclose = i915_driver_preclose,
         .postclose = i915_driver_postclose,
+
+        /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
         .suspend = i915_suspend,
         .resume = i915_resume,
+
         .device_is_agp = i915_driver_device_is_agp,
         .enable_vblank = i915_enable_vblank,
         .disable_vblank = i915_disable_vblank,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 29dd67626967..2c1669488b5a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -283,6 +283,7 @@ typedef struct drm_i915_private {
         unsigned int lvds_use_ssc:1;
         unsigned int edp_support:1;
         int lvds_ssc_freq;
+        int edp_bpp;
 
         struct notifier_block lid_notifier;
 
@@ -722,6 +723,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
 
 extern void i915_save_display(struct drm_device *dev);
 extern void i915_restore_display(struct drm_device *dev);
@@ -864,6 +866,7 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                       int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
                                 struct drm_gem_object *obj, int id);
 void i915_gem_detach_phys_object(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2748609f05b3..0c67924ca80c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2837,6 +2837,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
         return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        uint32_t old_write_domain, old_read_domains;
+        int ret;
+
+        /* Not valid to be called on unbound objects. */
+        if (obj_priv->gtt_space == NULL)
+                return -EINVAL;
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+
+        /* Wait on any GPU rendering and flushing to occur. */
+        if (obj_priv->active) {
+#if WATCH_BUF
+                DRM_INFO("%s: object %p wait for seqno %08x\n",
+                          __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+                ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+                if (ret != 0)
+                        return ret;
+        }
+
+        old_write_domain = obj->write_domain;
+        old_read_domains = obj->read_domains;
+
+        obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+        i915_gem_object_flush_cpu_write_domain(obj);
+
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
+         */
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+        obj->read_domains |= I915_GEM_DOMAIN_GTT;
+        obj->write_domain = I915_GEM_DOMAIN_GTT;
+        obj_priv->dirty = 1;
+
+        trace_i915_gem_object_change_domain(obj,
+                                            old_read_domains,
+                                            old_write_domain);
+
+        return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -4000,8 +4051,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                                    "back to user (%d)\n",
                                    args->buffer_count, ret);
                 }
-        } else {
-                DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
         }
 
         drm_free_large(exec_list);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7cd8110051b6..89a071a3e6fb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         int ret = IRQ_NONE;
         u32 de_iir, gt_iir, de_ier, pch_iir;
-        u32 new_de_iir, new_gt_iir, new_pch_iir;
         struct drm_i915_master_private *master_priv;
 
         /* disable master interrupt before clearing iir */
@@ -286,51 +285,42 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
         gt_iir = I915_READ(GTIIR);
         pch_iir = I915_READ(SDEIIR);
 
-        for (;;) {
-                if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
-                        break;
-
-                ret = IRQ_HANDLED;
+        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+                goto done;
 
-                /* should clear PCH hotplug event before clear CPU irq */
-                I915_WRITE(SDEIIR, pch_iir);
-                new_pch_iir = I915_READ(SDEIIR);
+        ret = IRQ_HANDLED;
 
-                I915_WRITE(DEIIR, de_iir);
-                new_de_iir = I915_READ(DEIIR);
-                I915_WRITE(GTIIR, gt_iir);
-                new_gt_iir = I915_READ(GTIIR);
-
-                if (dev->primary->master) {
-                        master_priv = dev->primary->master->driver_priv;
-                        if (master_priv->sarea_priv)
-                                master_priv->sarea_priv->last_dispatch =
-                                        READ_BREADCRUMB(dev_priv);
-                }
-
-                if (gt_iir & GT_USER_INTERRUPT) {
-                        u32 seqno = i915_get_gem_seqno(dev);
-                        dev_priv->mm.irq_gem_seqno = seqno;
-                        trace_i915_gem_request_complete(dev, seqno);
-                        DRM_WAKEUP(&dev_priv->irq_queue);
-                        dev_priv->hangcheck_count = 0;
-                        mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-                }
+        if (dev->primary->master) {
+                master_priv = dev->primary->master->driver_priv;
+                if (master_priv->sarea_priv)
+                        master_priv->sarea_priv->last_dispatch =
+                                READ_BREADCRUMB(dev_priv);
+        }
 
-                if (de_iir & DE_GSE)
-                        ironlake_opregion_gse_intr(dev);
+        if (gt_iir & GT_USER_INTERRUPT) {
+                u32 seqno = i915_get_gem_seqno(dev);
+                dev_priv->mm.irq_gem_seqno = seqno;
+                trace_i915_gem_request_complete(dev, seqno);
+                DRM_WAKEUP(&dev_priv->irq_queue);
+                dev_priv->hangcheck_count = 0;
+                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+        }
 
-                /* check event from PCH */
-                if ((de_iir & DE_PCH_EVENT) &&
-                    (pch_iir & SDE_HOTPLUG_MASK)) {
-                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-                }
+        if (de_iir & DE_GSE)
+                ironlake_opregion_gse_intr(dev);
 
-                de_iir = new_de_iir;
-                gt_iir = new_gt_iir;
-                pch_iir = new_pch_iir;
+        /* check event from PCH */
+        if ((de_iir & DE_PCH_EVENT) &&
+            (pch_iir & SDE_HOTPLUG_MASK)) {
+                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
         }
 
+        /* should clear PCH hotplug event before clear CPU irq */
+        I915_WRITE(SDEIIR, pch_iir);
+        I915_WRITE(GTIIR, gt_iir);
+        I915_WRITE(DEIIR, de_iir);
+
+done:
         I915_WRITE(DEIER, de_ier);
         (void)I915_READ(DEIER);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 149d360d64a3..847006c5218e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1815,7 +1815,7 @@
 #define   DSPFW_PLANEB_SHIFT    8
 #define DSPFW2                  0x70038
 #define   DSPFW_CURSORA_MASK    0x00003f00
-#define   DSPFW_CURSORA_SHIFT   16
+#define   DSPFW_CURSORA_SHIFT   8
 #define DSPFW3                  0x7003c
 #define   DSPFW_HPLL_SR_EN      (1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT 24
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f27567747580..15fbc1b5a83e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
 #define SLAVE_ADDR1     0x70
 #define SLAVE_ADDR2     0x72
 
+static int panel_type;
+
 static void *
 find_section(struct bdb_header *bdb, int section_id)
 {
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
         dev_priv->lvds_dither = lvds_options->pixel_dither;
         if (lvds_options->panel_type == 0xff)
                 return;
+        panel_type = lvds_options->panel_type;
 
         lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
         if (!lvds_lfp_data)
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
                 memset(temp_mode, 0, sizeof(*temp_mode));
         }
         kfree(temp_mode);
-        if (temp_downclock < panel_fixed_mode->clock) {
+        if (temp_downclock < panel_fixed_mode->clock &&
+            i915_lvds_downclock) {
                 dev_priv->lvds_downclock_avail = 1;
                 dev_priv->lvds_downclock = temp_downclock;
                 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 }
 
 static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+        struct bdb_edp *edp;
+
+        edp = find_section(bdb, BDB_EDP);
+        if (!edp) {
+                if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+                        DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
+                                       assume 18bpp panel color depth.\n");
+                        dev_priv->edp_bpp = 18;
+                }
+                return;
+        }
+
+        switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+        case EDP_18BPP:
+                dev_priv->edp_bpp = 18;
+                break;
+        case EDP_24BPP:
+                dev_priv->edp_bpp = 24;
+                break;
+        case EDP_30BPP:
+                dev_priv->edp_bpp = 30;
+                break;
+        }
+}
+
+static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
                      struct bdb_header *bdb)
 {
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev)
         parse_sdvo_device_mapping(dev_priv, bdb);
         parse_device_mapping(dev_priv, bdb);
         parse_driver_features(dev_priv, bdb);
+        parse_edp(dev_priv, bdb);
 
         pci_unmap_rom(pdev, bios);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d7f724..4c18514f6f80 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
 #define BDB_SDVO_LVDS_PNP_IDS    24
 #define BDB_SDVO_LVDS_POWER_SEQ  25
 #define BDB_TV_OPTIONS           26
+#define BDB_EDP                  27
 #define BDB_LVDS_OPTIONS         40
 #define BDB_LVDS_LFP_DATA_PTRS   41
 #define BDB_LVDS_LFP_DATA        42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
         u8 custom_vbt_version;
 } __attribute__((packed));
 
+#define EDP_18BPP       0
+#define EDP_24BPP       1
+#define EDP_30BPP       2
+#define EDP_RATE_1_62   0
+#define EDP_RATE_2_7    1
+#define EDP_LANE_1      0
+#define EDP_LANE_2      1
+#define EDP_LANE_4      3
+#define EDP_PREEMPHASIS_NONE    0
+#define EDP_PREEMPHASIS_3_5dB   1
+#define EDP_PREEMPHASIS_6dB     2
+#define EDP_PREEMPHASIS_9_5dB   3
+#define EDP_VSWING_0_4V         0
+#define EDP_VSWING_0_6V         1
+#define EDP_VSWING_0_8V         2
+#define EDP_VSWING_1_2V         3
+
+struct edp_power_seq {
+        u16 t3;
+        u16 t7;
+        u16 t9;
+        u16 t10;
+        u16 t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+        u8 rate:4;
+        u8 lanes:4;
+        u8 preemphasis:4;
+        u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+        struct edp_power_seq power_seqs[16];
+        u32 color_depth;
+        u32 sdrrs_msa_timing_delay;
+        struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
 bool intel_init_bios(struct drm_device *dev);
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 002612fae717..45da78ef4a92 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
     intel_p2_t      p2;
     bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
                       int, int, intel_clock_t *);
-    bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
-                      int, int, intel_clock_t *);
 };
 
 #define I8XX_DOT_MIN            25000
@@ -243,11 +241,11 @@ struct intel_limit {
 #define IRONLAKE_VCO_MIN        1760000
 #define IRONLAKE_VCO_MAX        3510000
 #define IRONLAKE_N_MIN          1
-#define IRONLAKE_N_MAX          5
+#define IRONLAKE_N_MAX          6
 #define IRONLAKE_M_MIN          79
-#define IRONLAKE_M_MAX          118
+#define IRONLAKE_M_MAX          127
 #define IRONLAKE_M1_MIN         12
-#define IRONLAKE_M1_MAX         23
+#define IRONLAKE_M1_MAX         22
 #define IRONLAKE_M2_MIN         5
 #define IRONLAKE_M2_MAX         9
 #define IRONLAKE_P_SDVO_DAC_MIN 5
@@ -274,9 +272,6 @@ static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                     int target, int refclk, intel_clock_t *best_clock);
 static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                            int target, int refclk, intel_clock_t *best_clock);
-static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                         int target, int refclk, intel_clock_t *best_clock);
 
@@ -299,7 +294,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
         .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                  .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -314,7 +308,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
         .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
                  .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -329,7 +322,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
         .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                  .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -347,7 +339,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
         .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                  .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
  /* below parameter and function is for G4X Chipset Family*/
@@ -365,7 +356,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
                  .p2_fast = G4X_P2_SDVO_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -382,7 +372,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
                  .p2_fast = G4X_P2_HDMI_DAC_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -407,7 +396,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
                  .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -432,7 +420,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
                  .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
         },
         .find_pll = intel_g4x_find_best_PLL,
-        .find_reduced_pll = intel_g4x_find_best_PLL,
 };
 
 static const intel_limit_t intel_limits_g4x_display_port = {
@@ -470,7 +457,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
         .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
                  .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_pineview_lvds = {
@@ -486,7 +472,6 @@ static const intel_limit_t intel_limits_pineview_lvds = {
         .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
                  .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
         .find_pll = intel_find_best_PLL,
-        .find_reduced_pll = intel_find_best_reduced_PLL,
 };
 
 static const intel_limit_t intel_limits_ironlake_sdvo = {
@@ -768,46 +753,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
         return (err != target);
 }
 
-
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-                            int target, int refclk, intel_clock_t *best_clock)
-
-{
-        struct drm_device *dev = crtc->dev;
-        intel_clock_t clock;
-        int err = target;
-        bool found = false;
-
-        memcpy(&clock, best_clock, sizeof(intel_clock_t));
-
-        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-                for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
-                        /* m1 is always 0 in Pineview */
-                        if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
-                                break;
-                        for (clock.n = limit->n.min; clock.n <= limit->n.max;
-                             clock.n++) {
-                                int this_err;
-
-                                intel_clock(dev, refclk, &clock);
-
-                                if (!intel_PLL_is_valid(crtc, &clock))
-                                        continue;
-
-                                this_err = abs(clock.dot - target);
-                                if (this_err < err) {
-                                        *best_clock = clock;
-                                        err = this_err;
-                                        found = true;
-                                }
-                        }
-                }
-        }
-
-        return found;
-}
-
 static bool
 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                         int target, int refclk, intel_clock_t *best_clock)
@@ -1262,7 +1207,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                 return ret;
         }
 
-        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+        ret = i915_gem_object_set_to_display_plane(obj);
         if (ret != 0) {
                 i915_gem_object_unpin(obj);
                 mutex_unlock(&dev->struct_mutex);
@@ -2910,10 +2855,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                         return -EINVAL;
         }
 
-        if (is_lvds && limit->find_reduced_pll &&
-            dev_priv->lvds_downclock_avail) {
-                memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
-                has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+        if (is_lvds && dev_priv->lvds_downclock_avail) {
+                has_reduced_clock = limit->find_pll(limit, crtc,
                                                             dev_priv->lvds_downclock,
                                                             refclk,
                                                             &reduced_clock);
@@ -2981,6 +2924,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                         temp |= PIPE_8BPC;
                 else
                         temp |= PIPE_6BPC;
+        } else if (is_edp) {
+                switch (dev_priv->edp_bpp/3) {
+                case 8:
+                        temp |= PIPE_8BPC;
+                        break;
+                case 10:
+                        temp |= PIPE_10BPC;
+                        break;
+                case 6:
+                        temp |= PIPE_6BPC;
+                        break;
+                case 12:
+                        temp |= PIPE_12BPC;
+                        break;
+                }
         } else
                 temp |= PIPE_8BPC;
         I915_WRITE(pipeconf_reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1349d9fd01c4..439506cefc14 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
 
 /* I think this is a fiction */
 static int
-intel_dp_link_required(int pixel_clock)
+intel_dp_link_required(struct drm_device *dev,
+                       struct intel_output *intel_output, int pixel_clock)
 {
-        return pixel_clock * 3;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        if (IS_eDP(intel_output))
+                return (pixel_clock * dev_priv->edp_bpp) / 8;
+        else
+                return pixel_clock * 3;
 }
 
 static int
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
         int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
         int max_lanes = intel_dp_max_lane_count(intel_output);
 
-        if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+        if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+            > max_link_clock * max_lanes)
                 return MODE_CLOCK_HIGH;
 
         if (mode->clock < 10000)
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
         for (clock = 0; clock <= max_clock; clock++) {
                 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
 
-                if (intel_dp_link_required(mode->clock) <= link_avail) {
+                if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+                    <= link_avail) {
                         dp_priv->link_bw = bws[clock];
                         dp_priv->lane_count = lane_count;
                         adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output)
         if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
                 intel_dp_check_link_status(intel_output);
 }
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given DP is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it is assumed that the given
- * DP is present.
- */
-static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
-{
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct child_device_config *p_child;
-        int i, dp_port, ret;
-
-        if (!dev_priv->child_dev_num)
-                return 1;
-
-        dp_port = 0;
-        if (dp_reg == DP_B || dp_reg == PCH_DP_B)
-                dp_port = PORT_IDPB;
-        else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
-                dp_port = PORT_IDPC;
-        else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
-                dp_port = PORT_IDPD;
-
-        ret = 0;
-        for (i = 0; i < dev_priv->child_dev_num; i++) {
-                p_child = dev_priv->child_dev + i;
-                /*
-                 * If the device type is not DP, continue.
-                 */
-                if (p_child->device_type != DEVICE_TYPE_DP &&
-                        p_child->device_type != DEVICE_TYPE_eDP)
-                        continue;
-                /* Find the eDP port */
-                if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
-                        ret = 1;
-                        break;
-                }
-                /* Find the DP port */
-                if (p_child->dvo_port == dp_port) {
-                        ret = 1;
-                        break;
-                }
-        }
-        return ret;
-}
+
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
         struct intel_dp_priv *dp_priv;
         const char *name = NULL;
 
-        if (!dp_is_present_in_vbt(dev, output_reg)) {
-                DRM_DEBUG_KMS("DP is not present. Ignore it\n");
-                return;
-        }
         intel_output = kcalloc(sizeof(struct intel_output) +
                                sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
         if (!intel_output)
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
         else if (output_reg == DP_D || output_reg == PCH_DP_D)
                 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
-        if (IS_eDP(intel_output)) {
-                intel_output->crtc_mask = (1 << 1);
-                intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
-        } else
-                intel_output->crtc_mask = (1 << 0) | (1 << 1);
+        if (IS_eDP(intel_output))
+                intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+
+        intel_output->crtc_mask = (1 << 0) | (1 << 1);
         connector->interlace_allowed = true;
         connector->doublescan_allowed = 0;
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 06431941b233..0e268deed761 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
         .destroy = intel_hdmi_enc_destroy,
 };
 
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given HDMI is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the given
- * HDMI is present.
- */
-static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
-{
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct child_device_config *p_child;
-        int i, hdmi_port, ret;
-
-        if (!dev_priv->child_dev_num)
-                return 1;
-
-        if (hdmi_reg == SDVOB)
-                hdmi_port = DVO_B;
-        else if (hdmi_reg == SDVOC)
-                hdmi_port = DVO_C;
-        else if (hdmi_reg == HDMIB)
-                hdmi_port = DVO_B;
-        else if (hdmi_reg == HDMIC)
-                hdmi_port = DVO_C;
-        else if (hdmi_reg == HDMID)
-                hdmi_port = DVO_D;
-        else
-                return 0;
-
-        ret = 0;
-        for (i = 0; i < dev_priv->child_dev_num; i++) {
-                p_child = dev_priv->child_dev + i;
-                /*
-                 * If the device type is not HDMI, continue.
-                 */
-                if (p_child->device_type != DEVICE_TYPE_HDMI)
-                        continue;
-                /* Find the HDMI port */
-                if (p_child->dvo_port == hdmi_port) {
-                        ret = 1;
-                        break;
-                }
-        }
-        return ret;
-}
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
         struct intel_output *intel_output;
         struct intel_hdmi_priv *hdmi_priv;
 
-        if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
-                DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
-                return;
-        }
         intel_output = kcalloc(sizeof(struct intel_output) +
                                sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
         if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f4b4aa242df1..aa74e59bec61 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -602,6 +602,20 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 /* Some lid devices report incorrect lid status, assume they're connected */
 static const struct dmi_system_id bad_lid_status[] = {
         {
+                .ident = "Compaq nx9020",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                        DMI_MATCH(DMI_BOARD_NAME, "3084"),
+                },
+        },
+        {
+                .ident = "Samsung SX20S",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+                        DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
+                },
+        },
+        {
                 .ident = "Aspire One",
                 .matches = {
                         DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -912,7 +926,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
                 }
         }
         mutex_unlock(&dev->mode_config.mutex);
-        if (temp_downclock < panel_fixed_mode->clock) {
+        if (temp_downclock < panel_fixed_mode->clock &&
+            i915_lvds_downclock) {
                 /* We found the downclock for LVDS. */
                 dev_priv->lvds_downclock_avail = 1;
                 dev_priv->lvds_downclock = temp_downclock;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de5144c8c153..eaacfd0920df 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
462} 462}
463 463
464/** 464/**
465 * Don't check status code from this as it switches the bus back to the 465 * Try to read the response after issuing the DDC switch command. Note
466 * SDVO chips which defeats the purpose of doing a bus switch in the first 466 * that the response read and the DDC switch command must be done in one
467 * place. 467 * I2C transaction. Otherwise, when we try to start another I2C
468 * transaction after issuing the DDC bus switch, it will be switched to
469 * the internal SDVO register.
468 */ 470 */
469static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 471static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
470 u8 target) 472 u8 target)
471{ 473{
472 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); 474 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
475 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
476 struct i2c_msg msgs[] = {
477 {
478 .addr = sdvo_priv->slave_addr >> 1,
479 .flags = 0,
480 .len = 2,
481 .buf = out_buf,
482 },
483 /* the following two are to read the response */
484 {
485 .addr = sdvo_priv->slave_addr >> 1,
486 .flags = 0,
487 .len = 1,
488 .buf = cmd_buf,
489 },
490 {
491 .addr = sdvo_priv->slave_addr >> 1,
492 .flags = I2C_M_RD,
493 .len = 1,
494 .buf = ret_value,
495 },
496 };
497
498 intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
499 &target, 1);
500 /* write the DDC switch command argument */
501 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
502
503 out_buf[0] = SDVO_I2C_OPCODE;
504 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
505 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
506 cmd_buf[1] = 0;
507 ret_value[0] = 0;
508 ret_value[1] = 0;
509
510 ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
511 if (ret != 3) {
512 /* failure in I2C transfer */
513 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
514 return;
515 }
516 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
517 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
518 ret_value[0]);
519 return;
520 }
521 return;
473} 522}
474 523
475static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 524static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1579 edid = drm_get_edid(&intel_output->base, 1628 edid = drm_get_edid(&intel_output->base,
1580 intel_output->ddc_bus); 1629 intel_output->ddc_bus);
1581 1630
1631 /* This is only applied to SDVO cards with multiple outputs */
1632 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
1633 uint8_t saved_ddc, temp_ddc;
1634 saved_ddc = sdvo_priv->ddc_bus;
1635 temp_ddc = sdvo_priv->ddc_bus >> 1;
1636 /*
1637 * Don't use 1 as the argument of the DDC bus switch to get
1638 * the EDID; that bus is used for the SDVO SPD ROM.
1639 */
1640 while (temp_ddc > 1) {
1641 sdvo_priv->ddc_bus = temp_ddc;
1642 edid = drm_get_edid(&intel_output->base,
1643 intel_output->ddc_bus);
1644 if (edid) {
1645 /*
1646 * If the EDID can be read, this is probably the
1647 * correct DDC bus. Record it.
1648 */
1649 sdvo_priv->ddc_bus = temp_ddc;
1650 break;
1651 }
1652 temp_ddc >>= 1;
1653 }
1654 if (edid == NULL)
1655 sdvo_priv->ddc_bus = saved_ddc;
1656 }
1582 /* when there is no edid and no monitor is connected with VGA 1657 /* when there is no edid and no monitor is connected with VGA
1583 * port, try to use the CRT ddc to read the EDID for DVI-connector 1658 * port, try to use the CRT ddc to read the EDID for DVI-connector
1584 */ 1659 */
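The rewritten intel_sdvo_set_control_bus_switch() above folds the opcode write and the status read into a single i2c_transfer() call so nothing else can reach the bus in between. A minimal sketch of the same write-then-read pattern, assuming a hypothetical device with one-byte command and status registers:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

/* Issue a one-byte command and read back a one-byte status in one go. */
static int example_cmd_and_status(struct i2c_adapter *adap, u16 addr, u8 cmd)
{
        u8 out = cmd;
        u8 status = 0;
        struct i2c_msg msgs[] = {
                { .addr = addr, .flags = 0,        .len = 1, .buf = &out },
                { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &status },
        };
        int ret;

        /* One transaction: repeated start between the messages, no stop. */
        ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
        if (ret != ARRAY_SIZE(msgs))
                return ret < 0 ? ret : -EIO;

        return status;  /* the caller interprets the device-specific byte */
}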
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 1d5b9b7b033f..552ec110b741 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1840,8 +1840,6 @@ intel_tv_init(struct drm_device *dev)
1840 drm_connector_attach_property(connector, 1840 drm_connector_attach_property(connector,
1841 dev->mode_config.tv_bottom_margin_property, 1841 dev->mode_config.tv_bottom_margin_property,
1842 tv_priv->margin[TV_MARGIN_BOTTOM]); 1842 tv_priv->margin[TV_MARGIN_BOTTOM]);
1843
1844 dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
1845out: 1843out:
1846 drm_sysfs_connector_add(connector); 1844 drm_sysfs_connector_add(connector);
1847} 1845}
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 36ede02ceacf..b5496a19d967 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -354,6 +354,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
354 { "FUJ02E5", 0 }, 354 { "FUJ02E5", 0 },
355 /* Fujitsu P-series tablet PC device */ 355 /* Fujitsu P-series tablet PC device */
356 { "FUJ02E6", 0 }, 356 { "FUJ02E6", 0 },
357 /* Fujitsu Wacom 2FGT Tablet PC device */
358 { "FUJ02E7", 0 },
357 /* 359 /*
358 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in 360 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
359 * disguise) 361 * disguise)
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index adf9632c6b1f..53cb722c45a0 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -211,21 +211,23 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
211 211
212/** 212/**
213 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. 213 * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
214 * @id: window id.
215 * @sfb: The hardware state. 214 * @sfb: The hardware state.
216 * @pixclock: The pixel clock wanted, in picoseconds. 215 * @pixclock: The pixel clock wanted, in picoseconds.
217 * 216 *
218 * Given the specified pixel clock, work out the necessary divider to get 217 * Given the specified pixel clock, work out the necessary divider to get
219 * close to the output frequency. 218 * close to the output frequency.
220 */ 219 */
221static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk) 220static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
222{ 221{
223 struct s3c_fb_pd_win *win = sfb->pdata->win[id];
224 unsigned long clk = clk_get_rate(sfb->bus_clk); 222 unsigned long clk = clk_get_rate(sfb->bus_clk);
223 unsigned long long tmp;
225 unsigned int result; 224 unsigned int result;
226 225
227 pixclk *= win->win_mode.refresh; 226 tmp = (unsigned long long)clk;
228 result = clk / pixclk; 227 tmp *= pixclk;
228
229 do_div(tmp, 1000000000UL);
230 result = (unsigned int)tmp / 1000;
229 231
230 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", 232 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
231 pixclk, clk, result, clk / result); 233 pixclk, clk, result, clk / result);
@@ -301,7 +303,7 @@ static int s3c_fb_set_par(struct fb_info *info)
301 /* use window 0 as the basis for the lcd output timings */ 303 /* use window 0 as the basis for the lcd output timings */
302 304
303 if (win_no == 0) { 305 if (win_no == 0) {
304 clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock); 306 clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
305 307
306 data = sfb->pdata->vidcon0; 308 data = sfb->pdata->vidcon0;
307 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); 309 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
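The reworked s3c_fb_calc_pixclk() above derives the divider as bus clock (Hz) times pixel period (ps) divided by 10^12, i.e. clk / pixel-rate. A userspace restatement of the arithmetic with assumed numbers, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* clk in Hz, pixclk in picoseconds; mirrors the kernel's do_div() steps. */
static unsigned int calc_pixclk_div(unsigned long clk, unsigned int pixclk)
{
        uint64_t tmp = (uint64_t)clk * pixclk;

        tmp /= 1000000000UL;                    /* first division, as do_div() does */
        return (unsigned int)tmp / 1000;        /* then the remaining factor of 1000 */
}

int main(void)
{
        /* 133 MHz bus clock, 38461 ps pixel period (~26 MHz pixel clock) */
        unsigned int div = calc_pixclk_div(133000000UL, 38461);

        /* prints div=5 -> 26600000 Hz, the closest divided-down frequency */
        printf("div=%u -> %lu Hz\n", div, 133000000UL / div);
        return 0;
}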
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index 9d4f3a49ba4a..d5077dfa9e00 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -137,7 +137,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
137 tmp, dst_pitch); 137 tmp, dst_pitch);
138 return -EINVAL; 138 return -EINVAL;
139 } 139 }
140 tmp = (tmp >> 3) | (dst_pitch << (16 - 3)); 140 tmp = VIA_PITCH_ENABLE | (tmp >> 3) | (dst_pitch << (16 - 3));
141 writel(tmp, engine + 0x38); 141 writel(tmp, engine + 0x38);
142 142
143 if (op == VIA_BITBLT_FILL) 143 if (op == VIA_BITBLT_FILL)
@@ -352,6 +352,9 @@ int viafb_init_engine(struct fb_info *info)
352 viapar->shared->vq_vram_addr = viapar->fbmem_free; 352 viapar->shared->vq_vram_addr = viapar->fbmem_free;
353 viapar->fbmem_used += VQ_SIZE; 353 viapar->fbmem_used += VQ_SIZE;
354 354
355 /* Init 2D engine reg to reset 2D engine */
356 writel(0x0, engine + VIA_REG_KEYCONTROL);
357
355 /* Init AGP and VQ regs */ 358 /* Init AGP and VQ regs */
356 switch (chip_name) { 359 switch (chip_name) {
357 case UNICHROME_K8M890: 360 case UNICHROME_K8M890:
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index d8df17a7d5fc..3028e7ddc3b5 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -177,16 +177,15 @@ static int viafb_set_par(struct fb_info *info)
177 } 177 }
178 178
179 if (vmode_index != VIA_RES_INVALID) { 179 if (vmode_index != VIA_RES_INVALID) {
180 viafb_setmode(vmode_index, info->var.xres, info->var.yres,
181 info->var.bits_per_pixel, vmode_index1,
182 viafb_second_xres, viafb_second_yres, viafb_bpp1);
183
184 viafb_update_fix(info); 180 viafb_update_fix(info);
185 viafb_bpp = info->var.bits_per_pixel; 181 viafb_bpp = info->var.bits_per_pixel;
186 if (info->var.accel_flags & FB_ACCELF_TEXT) 182 if (info->var.accel_flags & FB_ACCELF_TEXT)
187 info->flags &= ~FBINFO_HWACCEL_DISABLED; 183 info->flags &= ~FBINFO_HWACCEL_DISABLED;
188 else 184 else
189 info->flags |= FBINFO_HWACCEL_DISABLED; 185 info->flags |= FBINFO_HWACCEL_DISABLED;
186 viafb_setmode(vmode_index, info->var.xres, info->var.yres,
187 info->var.bits_per_pixel, vmode_index1,
188 viafb_second_xres, viafb_second_yres, viafb_bpp1);
190 } 189 }
191 190
192 return 0; 191 return 0;
@@ -872,7 +871,9 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
872 if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo) 871 if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo)
873 return -ENODEV; 872 return -ENODEV;
874 873
875 if (chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) 874 /* LCD output does not support hw cursors (at least on VN896) */
875 if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) ||
876 viafb_LCD_ON)
876 return -ENODEV; 877 return -ENODEV;
877 878
878 viafb_show_hw_cursor(info, HW_Cursor_OFF); 879 viafb_show_hw_cursor(info, HW_Cursor_OFF);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9dd588042880..505be88c82ae 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -266,7 +266,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
266 266
267static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST }; 267static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
268 268
269static struct virtio_driver virtio_balloon = { 269static struct virtio_driver virtio_balloon_driver = {
270 .feature_table = features, 270 .feature_table = features,
271 .feature_table_size = ARRAY_SIZE(features), 271 .feature_table_size = ARRAY_SIZE(features),
272 .driver.name = KBUILD_MODNAME, 272 .driver.name = KBUILD_MODNAME,
@@ -279,12 +279,12 @@ static struct virtio_driver virtio_balloon = {
279 279
280static int __init init(void) 280static int __init init(void)
281{ 281{
282 return register_virtio_driver(&virtio_balloon); 282 return register_virtio_driver(&virtio_balloon_driver);
283} 283}
284 284
285static void __exit fini(void) 285static void __exit fini(void)
286{ 286{
287 unregister_virtio_driver(&virtio_balloon); 287 unregister_virtio_driver(&virtio_balloon_driver);
288} 288}
289module_init(init); 289module_init(init);
290module_exit(fini); 290module_exit(fini);
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 2efc57173fd7..1739a4aba25f 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -123,30 +123,6 @@ add_error:
123 123
124/*****************************************************************************/ 124/*****************************************************************************/
125/* 125/*
126 * check that file shrinkage doesn't leave any VMAs dangling in midair
127 */
128static int ramfs_nommu_check_mappings(struct inode *inode,
129 size_t newsize, size_t size)
130{
131 struct vm_area_struct *vma;
132 struct prio_tree_iter iter;
133
134 /* search for VMAs that fall within the dead zone */
135 vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
136 newsize >> PAGE_SHIFT,
137 (size + PAGE_SIZE - 1) >> PAGE_SHIFT
138 ) {
139 /* found one - only interested if it's shared out of the page
140 * cache */
141 if (vma->vm_flags & VM_SHARED)
142 return -ETXTBSY; /* not quite true, but near enough */
143 }
144
145 return 0;
146}
147
148/*****************************************************************************/
149/*
150 * 126 *
151 */ 127 */
152static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) 128static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
@@ -164,7 +140,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
164 140
165 /* check that a decrease in size doesn't cut off any shared mappings */ 141 /* check that a decrease in size doesn't cut off any shared mappings */
166 if (newsize < size) { 142 if (newsize < size) {
167 ret = ramfs_nommu_check_mappings(inode, newsize, size); 143 ret = nommu_shrink_inode_mappings(inode, size, newsize);
168 if (ret < 0) 144 if (ret < 0)
169 return ret; 145 return ret;
170 } 146 }
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fc9f5aab5f8..328bca609b9b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -734,6 +734,10 @@ struct sysinfo {
734/* Force a compilation error if condition is constant and true */ 734/* Force a compilation error if condition is constant and true */
735#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) 735#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
736 736
737/* Force a compilation error if a constant expression is not a power of 2 */
738#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
739 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
740
737/* Force a compilation error if condition is true, but also produce a 741/* Force a compilation error if condition is true, but also produce a
738 result (of value 0 and type size_t), so the expression can be used 742 result (of value 0 and type size_t), so the expression can be used
739 e.g. in a structure initializer (or where-ever else comma expressions 743 e.g. in a structure initializer (or where-ever else comma expressions
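The new BUILD_BUG_ON_NOT_POWER_OF_2() rejects zero as well as non-powers-of-two at compile time. A hypothetical use, guarding a mask-based ring buffer size:

#include <linux/kernel.h>

#define EXAMPLE_RING_SIZE 64    /* hypothetical size; must stay a power of 2 */

struct example_ring {
        unsigned char data[EXAMPLE_RING_SIZE];
        unsigned int head, tail;        /* wrapped with & (EXAMPLE_RING_SIZE - 1) */
};

static void example_ring_setup(struct example_ring *ring)
{
        /* Compile-time check: the mask arithmetic above depends on it. */
        BUILD_BUG_ON_NOT_POWER_OF_2(EXAMPLE_RING_SIZE);

        ring->head = 0;
        ring->tail = 0;
}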
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 7c6b32a1421c..6f6c5f300af6 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -67,7 +67,7 @@ struct kfifo {
67/** 67/**
68 * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer 68 * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
69 * @name: name of the declared kfifo datatype 69 * @name: name of the declared kfifo datatype
70 * @size: size of the fifo buffer 70 * @size: size of the fifo buffer. Must be a power of two.
71 * 71 *
72 * Note1: the macro can be used inside struct or union declaration 72 * Note1: the macro can be used inside struct or union declaration
73 * Note2: the macro creates two objects: 73 * Note2: the macro creates two objects:
@@ -91,7 +91,7 @@ union { \
91/** 91/**
92 * DEFINE_KFIFO - macro to define and initialize a kfifo 92 * DEFINE_KFIFO - macro to define and initialize a kfifo
93 * @name: name of the declared kfifo datatype 93 * @name: name of the declared kfifo datatype
94 * @size: size of the fifo buffer 94 * @size: size of the fifo buffer. Must be a power of two.
95 * 95 *
96 * Note1: the macro can be used for global and local kfifo data type variables 96 * Note1: the macro can be used for global and local kfifo data type variables
97 * Note2: the macro creates two objects: 97 * Note2: the macro creates two objects:
@@ -104,15 +104,28 @@ union { \
104 104
105#undef __kfifo_initializer 105#undef __kfifo_initializer
106 106
107extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer, 107extern void kfifo_init(struct kfifo *fifo, void *buffer,
108 unsigned int size); 108 unsigned int size);
109extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, 109extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
110 gfp_t gfp_mask); 110 gfp_t gfp_mask);
111extern void kfifo_free(struct kfifo *fifo); 111extern void kfifo_free(struct kfifo *fifo);
112extern unsigned int kfifo_in(struct kfifo *fifo, 112extern unsigned int kfifo_in(struct kfifo *fifo,
113 const unsigned char *from, unsigned int len); 113 const void *from, unsigned int len);
114extern __must_check unsigned int kfifo_out(struct kfifo *fifo, 114extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
115 unsigned char *to, unsigned int len); 115 void *to, unsigned int len);
116extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
117 void *to, unsigned int len, unsigned offset);
118
119/**
120 * kfifo_initialized - Check if kfifo is initialized.
121 * @fifo: fifo to check
122 * Return %true if FIFO is initialized, otherwise %false.
123 * Assumes the fifo was 0 before.
124 */
125static inline bool kfifo_initialized(struct kfifo *fifo)
126{
127 return fifo->buffer != 0;
128}
116 129
117/** 130/**
118 * kfifo_reset - removes the entire FIFO contents 131 * kfifo_reset - removes the entire FIFO contents
@@ -194,7 +207,7 @@ static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
194 * bytes copied. 207 * bytes copied.
195 */ 208 */
196static inline unsigned int kfifo_in_locked(struct kfifo *fifo, 209static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
197 const unsigned char *from, unsigned int n, spinlock_t *lock) 210 const void *from, unsigned int n, spinlock_t *lock)
198{ 211{
199 unsigned long flags; 212 unsigned long flags;
200 unsigned int ret; 213 unsigned int ret;
@@ -219,7 +232,7 @@ static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
219 * @to buffer and returns the number of copied bytes. 232 * @to buffer and returns the number of copied bytes.
220 */ 233 */
221static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo, 234static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
222 unsigned char *to, unsigned int n, spinlock_t *lock) 235 void *to, unsigned int n, spinlock_t *lock)
223{ 236{
224 unsigned long flags; 237 unsigned long flags;
225 unsigned int ret; 238 unsigned int ret;
@@ -228,13 +241,6 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
228 241
229 ret = kfifo_out(fifo, to, n); 242 ret = kfifo_out(fifo, to, n);
230 243
231 /*
232 * optimization: if the FIFO is empty, set the indices to 0
233 * so we don't wrap the next time
234 */
235 if (kfifo_is_empty(fifo))
236 kfifo_reset(fifo);
237
238 spin_unlock_irqrestore(lock, flags); 244 spin_unlock_irqrestore(lock, flags);
239 245
240 return ret; 246 return ret;
@@ -242,11 +248,11 @@ static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
242 248
243extern void kfifo_skip(struct kfifo *fifo, unsigned int len); 249extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
244 250
245extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo, 251extern __must_check int kfifo_from_user(struct kfifo *fifo,
246 const void __user *from, unsigned int n); 252 const void __user *from, unsigned int n, unsigned *lenout);
247 253
248extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo, 254extern __must_check int kfifo_to_user(struct kfifo *fifo,
249 void __user *to, unsigned int n); 255 void __user *to, unsigned int n, unsigned *lenout);
250 256
251/* 257/*
252 * __kfifo_add_out internal helper function for updating the out offset 258 * __kfifo_add_out internal helper function for updating the out offset
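With the declarations above, a fifo can be allocated with a power-of-two size and later tested with the new kfifo_initialized() helper. A minimal sketch with made-up names, assuming the reworked API as shown in this patch:

#include <linux/kfifo.h>
#include <linux/slab.h>

static struct kfifo example_fifo;

static int example_setup(void)
{
        /* 256 is a power of two, as the kfifo core requires. */
        return kfifo_alloc(&example_fifo, 256, GFP_KERNEL);
}

static void example_push(const void *buf, unsigned int len)
{
        /* Relies on example_fifo being zeroed (static) before setup runs. */
        if (!kfifo_initialized(&example_fifo))
                return;

        kfifo_in(&example_fifo, buf, len);      /* may copy fewer than len bytes */
}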
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2265f28eb47a..60c467bfbabd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1089,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
1089 1089
1090/* nommu.c */ 1090/* nommu.c */
1091extern atomic_long_t mmap_pages_allocated; 1091extern atomic_long_t mmap_pages_allocated;
1092extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1092 1093
1093/* prio_tree.c */ 1094/* prio_tree.c */
1094void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); 1095void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84d020bed083..36f96271306c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -122,7 +122,7 @@ struct vm_region {
122 unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ 122 unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
123 struct file *vm_file; /* the backing file or NULL */ 123 struct file *vm_file; /* the backing file or NULL */
124 124
125 atomic_t vm_usage; /* region usage count */ 125 int vm_usage; /* region usage count (access under nommu_region_sem) */
126 bool vm_icache_flushed : 1; /* true if the icache has been flushed for 126 bool vm_icache_flushed : 1; /* true if the icache has been flushed for
127 * this region */ 127 * this region */
128}; 128};
@@ -205,10 +205,12 @@ struct mm_struct {
205 struct vm_area_struct * mmap; /* list of VMAs */ 205 struct vm_area_struct * mmap; /* list of VMAs */
206 struct rb_root mm_rb; 206 struct rb_root mm_rb;
207 struct vm_area_struct * mmap_cache; /* last find_vma result */ 207 struct vm_area_struct * mmap_cache; /* last find_vma result */
208#ifdef CONFIG_MMU
208 unsigned long (*get_unmapped_area) (struct file *filp, 209 unsigned long (*get_unmapped_area) (struct file *filp,
209 unsigned long addr, unsigned long len, 210 unsigned long addr, unsigned long len,
210 unsigned long pgoff, unsigned long flags); 211 unsigned long pgoff, unsigned long flags);
211 void (*unmap_area) (struct mm_struct *mm, unsigned long addr); 212 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
213#endif
212 unsigned long mmap_base; /* base of mmap area */ 214 unsigned long mmap_base; /* base of mmap area */
213 unsigned long task_size; /* size of task vm space */ 215 unsigned long task_size; /* size of task vm space */
214 unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ 216 unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d4991be9d53..6f7bba93929b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -377,6 +377,8 @@ extern int sysctl_max_map_count;
377 377
378#include <linux/aio.h> 378#include <linux/aio.h>
379 379
380#ifdef CONFIG_MMU
381extern void arch_pick_mmap_layout(struct mm_struct *mm);
380extern unsigned long 382extern unsigned long
381arch_get_unmapped_area(struct file *, unsigned long, unsigned long, 383arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
382 unsigned long, unsigned long); 384 unsigned long, unsigned long);
@@ -386,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
386 unsigned long flags); 388 unsigned long flags);
387extern void arch_unmap_area(struct mm_struct *, unsigned long); 389extern void arch_unmap_area(struct mm_struct *, unsigned long);
388extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); 390extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
391#else
392static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
393#endif
389 394
390#if USE_SPLIT_PTLOCKS 395#if USE_SPLIT_PTLOCKS
391/* 396/*
@@ -2491,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2491 2496
2492#endif /* CONFIG_SMP */ 2497#endif /* CONFIG_SMP */
2493 2498
2494extern void arch_pick_mmap_layout(struct mm_struct *mm);
2495
2496#ifdef CONFIG_TRACING 2499#ifdef CONFIG_TRACING
2497extern void 2500extern void
2498__trace_special(void *__tr, void *__data, 2501__trace_special(void *__tr, void *__data,
diff --git a/include/linux/string.h b/include/linux/string.h
index 651839a2a755..a716ee2a8adb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -72,7 +72,10 @@ static inline __must_check char *strstrip(char *str)
72} 72}
73 73
74#ifndef __HAVE_ARCH_STRSTR 74#ifndef __HAVE_ARCH_STRSTR
75extern char * strstr(const char *,const char *); 75extern char * strstr(const char *, const char *);
76#endif
77#ifndef __HAVE_ARCH_STRNSTR
78extern char * strnstr(const char *, const char *, size_t);
76#endif 79#endif
77#ifndef __HAVE_ARCH_STRLEN 80#ifndef __HAVE_ARCH_STRLEN
78extern __kernel_size_t strlen(const char *); 81extern __kernel_size_t strlen(const char *);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ef3a2947b102..6abfcf5b5887 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -464,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port);
464extern void tty_port_free_xmit_buf(struct tty_port *port); 464extern void tty_port_free_xmit_buf(struct tty_port *port);
465extern void tty_port_put(struct tty_port *port); 465extern void tty_port_put(struct tty_port *port);
466 466
467extern inline struct tty_port *tty_port_get(struct tty_port *port) 467static inline struct tty_port *tty_port_get(struct tty_port *port)
468{ 468{
469 if (port) 469 if (port)
470 kref_get(&port->kref); 470 kref_get(&port->kref);
@@ -486,7 +486,7 @@ extern void tty_port_close(struct tty_port *port,
486 struct tty_struct *tty, struct file *filp); 486 struct tty_struct *tty, struct file *filp);
487extern int tty_port_open(struct tty_port *port, 487extern int tty_port_open(struct tty_port *port,
488 struct tty_struct *tty, struct file *filp); 488 struct tty_struct *tty, struct file *filp);
489extern inline int tty_port_users(struct tty_port *port) 489static inline int tty_port_users(struct tty_port *port)
490{ 490{
491 return port->count + port->blocked_open; 491 return port->count + port->blocked_open;
492} 492}
diff --git a/ipc/shm.c b/ipc/shm.c
index 92fe9236258b..23256b855819 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -298,6 +298,9 @@ static const struct file_operations shm_file_operations = {
298 .mmap = shm_mmap, 298 .mmap = shm_mmap,
299 .fsync = shm_fsync, 299 .fsync = shm_fsync,
300 .release = shm_release, 300 .release = shm_release,
301#ifndef CONFIG_MMU
302 .get_unmapped_area = shm_get_unmapped_area,
303#endif
301}; 304};
302 305
303static const struct file_operations shm_file_operations_huge = { 306static const struct file_operations shm_file_operations_huge = {
diff --git a/kernel/futex.c b/kernel/futex.c
index 8e3c3ffe1b9a..d9b3a2228f9d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -203,8 +203,6 @@ static void drop_futex_key_refs(union futex_key *key)
203 * @uaddr: virtual address of the futex 203 * @uaddr: virtual address of the futex
204 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED 204 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
205 * @key: address where result is stored. 205 * @key: address where result is stored.
206 * @rw: mapping needs to be read/write (values: VERIFY_READ,
207 * VERIFY_WRITE)
208 * 206 *
209 * Returns a negative error code or 0 207 * Returns a negative error code or 0
210 * The key words are stored in *key on success. 208 * The key words are stored in *key on success.
@@ -216,7 +214,7 @@ static void drop_futex_key_refs(union futex_key *key)
216 * lock_page() might sleep, the caller should not hold a spinlock. 214 * lock_page() might sleep, the caller should not hold a spinlock.
217 */ 215 */
218static int 216static int
219get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) 217get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
220{ 218{
221 unsigned long address = (unsigned long)uaddr; 219 unsigned long address = (unsigned long)uaddr;
222 struct mm_struct *mm = current->mm; 220 struct mm_struct *mm = current->mm;
@@ -239,7 +237,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
239 * but access_ok() should be faster than find_vma() 237 * but access_ok() should be faster than find_vma()
240 */ 238 */
241 if (!fshared) { 239 if (!fshared) {
242 if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) 240 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
243 return -EFAULT; 241 return -EFAULT;
244 key->private.mm = mm; 242 key->private.mm = mm;
245 key->private.address = address; 243 key->private.address = address;
@@ -248,7 +246,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
248 } 246 }
249 247
250again: 248again:
251 err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page); 249 err = get_user_pages_fast(address, 1, 1, &page);
252 if (err < 0) 250 if (err < 0)
253 return err; 251 return err;
254 252
@@ -867,7 +865,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
867 if (!bitset) 865 if (!bitset)
868 return -EINVAL; 866 return -EINVAL;
869 867
870 ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ); 868 ret = get_futex_key(uaddr, fshared, &key);
871 if (unlikely(ret != 0)) 869 if (unlikely(ret != 0))
872 goto out; 870 goto out;
873 871
@@ -913,10 +911,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
913 int ret, op_ret; 911 int ret, op_ret;
914 912
915retry: 913retry:
916 ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); 914 ret = get_futex_key(uaddr1, fshared, &key1);
917 if (unlikely(ret != 0)) 915 if (unlikely(ret != 0))
918 goto out; 916 goto out;
919 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); 917 ret = get_futex_key(uaddr2, fshared, &key2);
920 if (unlikely(ret != 0)) 918 if (unlikely(ret != 0))
921 goto out_put_key1; 919 goto out_put_key1;
922 920
@@ -1175,11 +1173,10 @@ retry:
1175 pi_state = NULL; 1173 pi_state = NULL;
1176 } 1174 }
1177 1175
1178 ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); 1176 ret = get_futex_key(uaddr1, fshared, &key1);
1179 if (unlikely(ret != 0)) 1177 if (unlikely(ret != 0))
1180 goto out; 1178 goto out;
1181 ret = get_futex_key(uaddr2, fshared, &key2, 1179 ret = get_futex_key(uaddr2, fshared, &key2);
1182 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1183 if (unlikely(ret != 0)) 1180 if (unlikely(ret != 0))
1184 goto out_put_key1; 1181 goto out_put_key1;
1185 1182
@@ -1738,7 +1735,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
1738 */ 1735 */
1739retry: 1736retry:
1740 q->key = FUTEX_KEY_INIT; 1737 q->key = FUTEX_KEY_INIT;
1741 ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ); 1738 ret = get_futex_key(uaddr, fshared, &q->key);
1742 if (unlikely(ret != 0)) 1739 if (unlikely(ret != 0))
1743 return ret; 1740 return ret;
1744 1741
@@ -1904,7 +1901,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1904 q.requeue_pi_key = NULL; 1901 q.requeue_pi_key = NULL;
1905retry: 1902retry:
1906 q.key = FUTEX_KEY_INIT; 1903 q.key = FUTEX_KEY_INIT;
1907 ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE); 1904 ret = get_futex_key(uaddr, fshared, &q.key);
1908 if (unlikely(ret != 0)) 1905 if (unlikely(ret != 0))
1909 goto out; 1906 goto out;
1910 1907
@@ -2023,7 +2020,7 @@ retry:
2023 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) 2020 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
2024 return -EPERM; 2021 return -EPERM;
2025 2022
2026 ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE); 2023 ret = get_futex_key(uaddr, fshared, &key);
2027 if (unlikely(ret != 0)) 2024 if (unlikely(ret != 0))
2028 goto out; 2025 goto out;
2029 2026
@@ -2215,7 +2212,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2215 rt_waiter.task = NULL; 2212 rt_waiter.task = NULL;
2216 2213
2217 key2 = FUTEX_KEY_INIT; 2214 key2 = FUTEX_KEY_INIT;
2218 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); 2215 ret = get_futex_key(uaddr2, fshared, &key2);
2219 if (unlikely(ret != 0)) 2216 if (unlikely(ret != 0))
2220 goto out; 2217 goto out;
2221 2218
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index e92d519f93b1..32c5c15d750d 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -28,7 +28,7 @@
28#include <linux/log2.h> 28#include <linux/log2.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30 30
31static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer, 31static void _kfifo_init(struct kfifo *fifo, void *buffer,
32 unsigned int size) 32 unsigned int size)
33{ 33{
34 fifo->buffer = buffer; 34 fifo->buffer = buffer;
@@ -41,10 +41,10 @@ static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
41 * kfifo_init - initialize a FIFO using a preallocated buffer 41 * kfifo_init - initialize a FIFO using a preallocated buffer
42 * @fifo: the fifo to assign the buffer 42 * @fifo: the fifo to assign the buffer
43 * @buffer: the preallocated buffer to be used. 43 * @buffer: the preallocated buffer to be used.
44 * @size: the size of the internal buffer, this have to be a power of 2. 44 * @size: the size of the internal buffer, this has to be a power of 2.
45 * 45 *
46 */ 46 */
47void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size) 47void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
48{ 48{
49 /* size must be a power of 2 */ 49 /* size must be a power of 2 */
50 BUG_ON(!is_power_of_2(size)); 50 BUG_ON(!is_power_of_2(size));
@@ -159,8 +159,9 @@ static inline void __kfifo_out_data(struct kfifo *fifo,
159 memcpy(to + l, fifo->buffer, len - l); 159 memcpy(to + l, fifo->buffer, len - l);
160} 160}
161 161
162static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo, 162static inline int __kfifo_from_user_data(struct kfifo *fifo,
163 const void __user *from, unsigned int len, unsigned int off) 163 const void __user *from, unsigned int len, unsigned int off,
164 unsigned *lenout)
164{ 165{
165 unsigned int l; 166 unsigned int l;
166 int ret; 167 int ret;
@@ -177,16 +178,20 @@ static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
177 /* first put the data starting from fifo->in to buffer end */ 178 /* first put the data starting from fifo->in to buffer end */
178 l = min(len, fifo->size - off); 179 l = min(len, fifo->size - off);
179 ret = copy_from_user(fifo->buffer + off, from, l); 180 ret = copy_from_user(fifo->buffer + off, from, l);
180 181 if (unlikely(ret)) {
181 if (unlikely(ret)) 182 *lenout = ret;
182 return ret + len - l; 183 return -EFAULT;
184 }
185 *lenout = l;
183 186
184 /* then put the rest (if any) at the beginning of the buffer */ 187 /* then put the rest (if any) at the beginning of the buffer */
185 return copy_from_user(fifo->buffer, from + l, len - l); 188 ret = copy_from_user(fifo->buffer, from + l, len - l);
189 *lenout += ret ? ret : len - l;
190 return ret ? -EFAULT : 0;
186} 191}
187 192
188static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo, 193static inline int __kfifo_to_user_data(struct kfifo *fifo,
189 void __user *to, unsigned int len, unsigned int off) 194 void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
190{ 195{
191 unsigned int l; 196 unsigned int l;
192 int ret; 197 int ret;
@@ -203,12 +208,21 @@ static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
203 /* first get the data from fifo->out until the end of the buffer */ 208 /* first get the data from fifo->out until the end of the buffer */
204 l = min(len, fifo->size - off); 209 l = min(len, fifo->size - off);
205 ret = copy_to_user(to, fifo->buffer + off, l); 210 ret = copy_to_user(to, fifo->buffer + off, l);
206 211 *lenout = l;
207 if (unlikely(ret)) 212 if (unlikely(ret)) {
208 return ret + len - l; 213 *lenout -= ret;
214 return -EFAULT;
215 }
209 216
210 /* then get the rest (if any) from the beginning of the buffer */ 217 /* then get the rest (if any) from the beginning of the buffer */
211 return copy_to_user(to + l, fifo->buffer, len - l); 218 len -= l;
219 ret = copy_to_user(to + l, fifo->buffer, len);
220 if (unlikely(ret)) {
221 *lenout += len - ret;
222 return -EFAULT;
223 }
224 *lenout += len;
225 return 0;
212} 226}
213 227
214unsigned int __kfifo_in_n(struct kfifo *fifo, 228unsigned int __kfifo_in_n(struct kfifo *fifo,
@@ -235,7 +249,7 @@ EXPORT_SYMBOL(__kfifo_in_n);
235 * Note that with only one concurrent reader and one concurrent 249 * Note that with only one concurrent reader and one concurrent
236 * writer, you don't need extra locking to use these functions. 250 * writer, you don't need extra locking to use these functions.
237 */ 251 */
238unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from, 252unsigned int kfifo_in(struct kfifo *fifo, const void *from,
239 unsigned int len) 253 unsigned int len)
240{ 254{
241 len = min(kfifo_avail(fifo), len); 255 len = min(kfifo_avail(fifo), len);
@@ -277,7 +291,7 @@ EXPORT_SYMBOL(__kfifo_out_n);
277 * Note that with only one concurrent reader and one concurrent 291 * Note that with only one concurrent reader and one concurrent
278 * writer, you don't need extra locking to use these functions. 292 * writer, you don't need extra locking to use these functions.
279 */ 293 */
280unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len) 294unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
281{ 295{
282 len = min(kfifo_len(fifo), len); 296 len = min(kfifo_len(fifo), len);
283 297
@@ -288,6 +302,27 @@ unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
288} 302}
289EXPORT_SYMBOL(kfifo_out); 303EXPORT_SYMBOL(kfifo_out);
290 304
305/**
306 * kfifo_out_peek - copy some data from the FIFO, but do not remove it
307 * @fifo: the fifo to be used.
308 * @to: where the data must be copied.
309 * @len: the size of the destination buffer.
310 * @offset: offset into the fifo
311 *
312 * This function copies at most @len bytes at @offset from the FIFO
313 * into the @to buffer and returns the number of copied bytes.
314 * The data is not removed from the FIFO.
315 */
316unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
317 unsigned offset)
318{
319 len = min(kfifo_len(fifo), len + offset);
320
321 __kfifo_out_data(fifo, to, len, offset);
322 return len;
323}
324EXPORT_SYMBOL(kfifo_out_peek);
325
291unsigned int __kfifo_out_generic(struct kfifo *fifo, 326unsigned int __kfifo_out_generic(struct kfifo *fifo,
292 void *to, unsigned int len, unsigned int recsize, 327 void *to, unsigned int len, unsigned int recsize,
293 unsigned int *total) 328 unsigned int *total)
@@ -299,10 +334,13 @@ EXPORT_SYMBOL(__kfifo_out_generic);
299unsigned int __kfifo_from_user_n(struct kfifo *fifo, 334unsigned int __kfifo_from_user_n(struct kfifo *fifo,
300 const void __user *from, unsigned int len, unsigned int recsize) 335 const void __user *from, unsigned int len, unsigned int recsize)
301{ 336{
337 unsigned total;
338
302 if (kfifo_avail(fifo) < len + recsize) 339 if (kfifo_avail(fifo) < len + recsize)
303 return len + 1; 340 return len + 1;
304 341
305 return __kfifo_from_user_data(fifo, from, len, recsize); 342 __kfifo_from_user_data(fifo, from, len, recsize, &total);
343 return total;
306} 344}
307EXPORT_SYMBOL(__kfifo_from_user_n); 345EXPORT_SYMBOL(__kfifo_from_user_n);
308 346
@@ -313,18 +351,21 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
313 * @len: the length of the data to be added. 351 * @len: the length of the data to be added.
314 * 352 *
315 * This function copies at most @len bytes from the @from into the 353 * This function copies at most @len bytes from the @from into the
316 * FIFO depending and returns the number of copied bytes. 354 * FIFO and returns 0 or -EFAULT.
317 * 355 *
318 * Note that with only one concurrent reader and one concurrent 356 * Note that with only one concurrent reader and one concurrent
319 * writer, you don't need extra locking to use these functions. 357 * writer, you don't need extra locking to use these functions.
320 */ 358 */
321unsigned int kfifo_from_user(struct kfifo *fifo, 359int kfifo_from_user(struct kfifo *fifo,
322 const void __user *from, unsigned int len) 360 const void __user *from, unsigned int len, unsigned *total)
323{ 361{
362 int ret;
324 len = min(kfifo_avail(fifo), len); 363 len = min(kfifo_avail(fifo), len);
325 len -= __kfifo_from_user_data(fifo, from, len, 0); 364 ret = __kfifo_from_user_data(fifo, from, len, 0, total);
365 if (ret)
366 return ret;
326 __kfifo_add_in(fifo, len); 367 __kfifo_add_in(fifo, len);
327 return len; 368 return 0;
328} 369}
329EXPORT_SYMBOL(kfifo_from_user); 370EXPORT_SYMBOL(kfifo_from_user);
330 371
@@ -339,17 +380,17 @@ unsigned int __kfifo_to_user_n(struct kfifo *fifo,
339 void __user *to, unsigned int len, unsigned int reclen, 380 void __user *to, unsigned int len, unsigned int reclen,
340 unsigned int recsize) 381 unsigned int recsize)
341{ 382{
342 unsigned int ret; 383 unsigned int ret, total;
343 384
344 if (kfifo_len(fifo) < reclen + recsize) 385 if (kfifo_len(fifo) < reclen + recsize)
345 return len; 386 return len;
346 387
347 ret = __kfifo_to_user_data(fifo, to, reclen, recsize); 388 ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
348 389
349 if (likely(ret == 0)) 390 if (likely(ret == 0))
350 __kfifo_add_out(fifo, reclen + recsize); 391 __kfifo_add_out(fifo, reclen + recsize);
351 392
352 return ret; 393 return total;
353} 394}
354EXPORT_SYMBOL(__kfifo_to_user_n); 395EXPORT_SYMBOL(__kfifo_to_user_n);
355 396
@@ -358,20 +399,22 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
358 * @fifo: the fifo to be used. 399 * @fifo: the fifo to be used.
359 * @to: where the data must be copied. 400 * @to: where the data must be copied.
360 * @len: the size of the destination buffer. 401 * @len: the size of the destination buffer.
402 * @lenout: pointer to output variable with copied data
361 * 403 *
362 * This function copies at most @len bytes from the FIFO into the 404 * This function copies at most @len bytes from the FIFO into the
363 * @to buffer and returns the number of copied bytes. 405 * @to buffer and returns 0 or -EFAULT.
364 * 406 *
365 * Note that with only one concurrent reader and one concurrent 407 * Note that with only one concurrent reader and one concurrent
366 * writer, you don't need extra locking to use these functions. 408 * writer, you don't need extra locking to use these functions.
367 */ 409 */
368unsigned int kfifo_to_user(struct kfifo *fifo, 410int kfifo_to_user(struct kfifo *fifo,
369 void __user *to, unsigned int len) 411 void __user *to, unsigned int len, unsigned *lenout)
370{ 412{
413 int ret;
371 len = min(kfifo_len(fifo), len); 414 len = min(kfifo_len(fifo), len);
372 len -= __kfifo_to_user_data(fifo, to, len, 0); 415 ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
373 __kfifo_add_out(fifo, len); 416 __kfifo_add_out(fifo, *lenout);
374 return len; 417 return ret;
375} 418}
376EXPORT_SYMBOL(kfifo_to_user); 419EXPORT_SYMBOL(kfifo_to_user);
377 420
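After this rework kfifo_from_user()/kfifo_to_user() return 0 or -EFAULT and report the copied length through the extra pointer. A sketch of a write() handler on top of the new convention, with hypothetical names and a fifo assumed to be set up elsewhere:

#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>

static struct kfifo example_fifo;       /* assumed initialised elsewhere */

static ssize_t example_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        unsigned int copied;
        int ret;

        ret = kfifo_from_user(&example_fifo, buf, count, &copied);
        if (ret)
                return ret;     /* -EFAULT from the failed user copy */

        return copied;          /* may be short if the fifo filled up */
}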
diff --git a/kernel/smp.c b/kernel/smp.c
index de735a6637d0..f10408422444 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -347,7 +347,7 @@ int smp_call_function_any(const struct cpumask *mask,
347 goto call; 347 goto call;
348 348
349 /* Try for same node. */ 349 /* Try for same node. */
350 nodemask = cpumask_of_node(cpu); 350 nodemask = cpumask_of_node(cpu_to_node(cpu));
351 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; 351 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
352 cpu = cpumask_next_and(cpu, nodemask, mask)) { 352 cpu = cpumask_next_and(cpu, nodemask, mask)) {
353 if (cpu_online(cpu)) 353 if (cpu_online(cpu))
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7968762c8167..1e6640f80454 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1690,7 +1690,7 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1690static int ftrace_match(char *str, char *regex, int len, int type) 1690static int ftrace_match(char *str, char *regex, int len, int type)
1691{ 1691{
1692 int matched = 0; 1692 int matched = 0;
1693 char *ptr; 1693 int slen;
1694 1694
1695 switch (type) { 1695 switch (type) {
1696 case MATCH_FULL: 1696 case MATCH_FULL:
@@ -1706,8 +1706,8 @@ static int ftrace_match(char *str, char *regex, int len, int type)
1706 matched = 1; 1706 matched = 1;
1707 break; 1707 break;
1708 case MATCH_END_ONLY: 1708 case MATCH_END_ONLY:
1709 ptr = strstr(str, regex); 1709 slen = strlen(str);
1710 if (ptr && (ptr[len] == 0)) 1710 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1711 matched = 1; 1711 matched = 1;
1712 break; 1712 break;
1713 } 1713 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2326b04c95c4..edefe3b2801b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2869,7 +2869,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2869 * Splice the empty reader page into the list around the head. 2869 * Splice the empty reader page into the list around the head.
2870 */ 2870 */
2871 reader = rb_set_head_page(cpu_buffer); 2871 reader = rb_set_head_page(cpu_buffer);
2872 cpu_buffer->reader_page->list.next = reader->list.next; 2872 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2873 cpu_buffer->reader_page->list.prev = reader->list.prev; 2873 cpu_buffer->reader_page->list.prev = reader->list.prev;
2874 2874
2875 /* 2875 /*
@@ -2906,7 +2906,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2906 * 2906 *
2907 * Now make the new head point back to the reader page. 2907 * Now make the new head point back to the reader page.
2908 */ 2908 */
2909 reader->list.next->prev = &cpu_buffer->reader_page->list; 2909 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2910 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 2910 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2911 2911
2912 /* Finally update the reader page to the new head */ 2912 /* Finally update the reader page to the new head */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 50504cb228de..e42af9aad69f 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -211,8 +211,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
211{ 211{
212 char **addr = (char **)(event + pred->offset); 212 char **addr = (char **)(event + pred->offset);
213 int cmp, match; 213 int cmp, match;
214 int len = strlen(*addr) + 1; /* including tailing '\0' */
214 215
215 cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); 216 cmp = pred->regex.match(*addr, &pred->regex, len);
216 217
217 match = cmp ^ pred->not; 218 match = cmp ^ pred->not;
218 219
@@ -251,7 +252,18 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
251 return 0; 252 return 0;
252} 253}
253 254
254/* Basic regex callbacks */ 255/*
256 * regex_match_foo - Basic regex callbacks
257 *
258 * @str: the string to be searched
259 * @r: the regex structure containing the pattern string
260 * @len: the length of the string to be searched (including '\0')
261 *
262 * Note:
263 * - @str might not be NULL-terminated if it's of type DYN_STRING
264 * or STATIC_STRING
265 */
266
255static int regex_match_full(char *str, struct regex *r, int len) 267static int regex_match_full(char *str, struct regex *r, int len)
256{ 268{
257 if (strncmp(str, r->pattern, len) == 0) 269 if (strncmp(str, r->pattern, len) == 0)
@@ -261,23 +273,24 @@ static int regex_match_full(char *str, struct regex *r, int len)
261 273
262static int regex_match_front(char *str, struct regex *r, int len) 274static int regex_match_front(char *str, struct regex *r, int len)
263{ 275{
264 if (strncmp(str, r->pattern, len) == 0) 276 if (strncmp(str, r->pattern, r->len) == 0)
265 return 1; 277 return 1;
266 return 0; 278 return 0;
267} 279}
268 280
269static int regex_match_middle(char *str, struct regex *r, int len) 281static int regex_match_middle(char *str, struct regex *r, int len)
270{ 282{
271 if (strstr(str, r->pattern)) 283 if (strnstr(str, r->pattern, len))
272 return 1; 284 return 1;
273 return 0; 285 return 0;
274} 286}
275 287
276static int regex_match_end(char *str, struct regex *r, int len) 288static int regex_match_end(char *str, struct regex *r, int len)
277{ 289{
278 char *ptr = strstr(str, r->pattern); 290 int strlen = len - 1;
279 291
280 if (ptr && (ptr[r->len] == 0)) 292 if (strlen >= r->len &&
293 memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
281 return 1; 294 return 1;
282 return 0; 295 return 0;
283} 296}
@@ -781,10 +794,8 @@ static int filter_add_pred(struct filter_parse_state *ps,
781 pred->regex.field_len = field->size; 794 pred->regex.field_len = field->size;
782 } else if (field->filter_type == FILTER_DYN_STRING) 795 } else if (field->filter_type == FILTER_DYN_STRING)
783 fn = filter_pred_strloc; 796 fn = filter_pred_strloc;
784 else { 797 else
785 fn = filter_pred_pchar; 798 fn = filter_pred_pchar;
786 pred->regex.field_len = strlen(pred->regex.pattern);
787 }
788 } else { 799 } else {
789 if (field->is_signed) 800 if (field->is_signed)
790 ret = strict_strtoll(pred->regex.pattern, 0, &val); 801 ret = strict_strtoll(pred->regex.pattern, 0, &val);
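Both the ftrace MATCH_END_ONLY case and regex_match_end() above now test for a suffix with a length check plus memcmp() rather than strstr(). A standalone restatement of that pattern, for NUL-terminated strings:

#include <linux/string.h>

/* Return 1 if str ends with pat, guarding against str being shorter. */
static int ends_with(const char *str, const char *pat)
{
        int slen = strlen(str);
        int plen = strlen(pat);

        return slen >= plen && memcmp(str + slen - plen, pat, plen) == 0;
}

/* ends_with("vmalloc_sync_all", "_all") -> 1, ends_with("all", "_all") -> 0 */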
diff --git a/lib/string.c b/lib/string.c
index 9f75b4ec50b8..a1cdcfcc42d0 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(memscan);
667 */ 667 */
668char *strstr(const char *s1, const char *s2) 668char *strstr(const char *s1, const char *s2)
669{ 669{
670 int l1, l2; 670 size_t l1, l2;
671 671
672 l2 = strlen(s2); 672 l2 = strlen(s2);
673 if (!l2) 673 if (!l2)
@@ -684,6 +684,31 @@ char *strstr(const char *s1, const char *s2)
684EXPORT_SYMBOL(strstr); 684EXPORT_SYMBOL(strstr);
685#endif 685#endif
686 686
687#ifndef __HAVE_ARCH_STRNSTR
688/**
689 * strnstr - Find the first substring in a length-limited string
690 * @s1: The string to be searched
691 * @s2: The string to search for
692 * @len: the maximum number of characters to search
693 */
694char *strnstr(const char *s1, const char *s2, size_t len)
695{
696 size_t l1 = len, l2;
697
698 l2 = strlen(s2);
699 if (!l2)
700 return (char *)s1;
701 while (l1 >= l2) {
702 l1--;
703 if (!memcmp(s1, s2, l2))
704 return (char *)s1;
705 s1++;
706 }
707 return NULL;
708}
709EXPORT_SYMBOL(strnstr);
710#endif
711
687#ifndef __HAVE_ARCH_MEMCHR 712#ifndef __HAVE_ARCH_MEMCHR
688/** 713/**
689 * memchr - Find a character in an area of memory. 714 * memchr - Find a character in an area of memory.
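The new strnstr() only looks at the first @len bytes of the haystack, which is what regex_match_middle() above relies on for fields that may not be NUL-terminated. A small usage sketch:

#include <linux/string.h>

static void strnstr_example(void)
{
        static const char haystack[] = "tracepoint_probe";

        /* "point" starts at offset 5 and fits inside the 12-byte window */
        char *hit = strnstr(haystack, "point", 12);     /* -> &haystack[5] */

        /* "probe" starts at offset 11, past the 8-byte window: no match */
        char *miss = strnstr(haystack, "probe", 8);     /* -> NULL */

        (void)hit;
        (void)miss;
}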
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 488b644e0e8e..954032b80bed 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2586,7 +2586,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2586 if (free_all) 2586 if (free_all)
2587 goto try_to_free; 2587 goto try_to_free;
2588move_account: 2588move_account:
2589 while (mem->res.usage > 0) { 2589 do {
2590 ret = -EBUSY; 2590 ret = -EBUSY;
2591 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 2591 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2592 goto out; 2592 goto out;
@@ -2614,8 +2614,8 @@ move_account:
2614 if (ret == -ENOMEM) 2614 if (ret == -ENOMEM)
2615 goto try_to_free; 2615 goto try_to_free;
2616 cond_resched(); 2616 cond_resched();
2617 } 2617 /* "ret" should also be checked to ensure all lists are empty. */
2618 ret = 0; 2618 } while (mem->res.usage > 0 || ret);
2619out: 2619out:
2620 css_put(&mem->css); 2620 css_put(&mem->css);
2621 return ret; 2621 return ret;
@@ -2648,10 +2648,7 @@ try_to_free:
2648 } 2648 }
2649 lru_add_drain(); 2649 lru_add_drain();
2650 /* try move_account...there may be some *locked* pages. */ 2650 /* try move_account...there may be some *locked* pages. */
2651 if (mem->res.usage) 2651 goto move_account;
2652 goto move_account;
2653 ret = 0;
2654 goto out;
2655} 2652}
2656 2653
2657int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 2654int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
diff --git a/mm/nommu.c b/mm/nommu.c
index 17773862619b..48a2ecfaf059 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -552,11 +552,11 @@ static void free_page_series(unsigned long from, unsigned long to)
552static void __put_nommu_region(struct vm_region *region) 552static void __put_nommu_region(struct vm_region *region)
553 __releases(nommu_region_sem) 553 __releases(nommu_region_sem)
554{ 554{
555 kenter("%p{%d}", region, atomic_read(&region->vm_usage)); 555 kenter("%p{%d}", region, region->vm_usage);
556 556
557 BUG_ON(!nommu_region_tree.rb_node); 557 BUG_ON(!nommu_region_tree.rb_node);
558 558
559 if (atomic_dec_and_test(&region->vm_usage)) { 559 if (--region->vm_usage == 0) {
560 if (region->vm_top > region->vm_start) 560 if (region->vm_top > region->vm_start)
561 delete_nommu_region(region); 561 delete_nommu_region(region);
562 up_write(&nommu_region_sem); 562 up_write(&nommu_region_sem);
@@ -1205,7 +1205,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1205 if (!vma) 1205 if (!vma)
1206 goto error_getting_vma; 1206 goto error_getting_vma;
1207 1207
1208 atomic_set(&region->vm_usage, 1); 1208 region->vm_usage = 1;
1209 region->vm_flags = vm_flags; 1209 region->vm_flags = vm_flags;
1210 region->vm_pgoff = pgoff; 1210 region->vm_pgoff = pgoff;
1211 1211
@@ -1272,7 +1272,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1272 } 1272 }
1273 1273
1274 /* we've found a region we can share */ 1274 /* we've found a region we can share */
1275 atomic_inc(&pregion->vm_usage); 1275 pregion->vm_usage++;
1276 vma->vm_region = pregion; 1276 vma->vm_region = pregion;
1277 start = pregion->vm_start; 1277 start = pregion->vm_start;
1278 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; 1278 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
@@ -1289,7 +1289,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1289 vma->vm_region = NULL; 1289 vma->vm_region = NULL;
1290 vma->vm_start = 0; 1290 vma->vm_start = 0;
1291 vma->vm_end = 0; 1291 vma->vm_end = 0;
1292 atomic_dec(&pregion->vm_usage); 1292 pregion->vm_usage--;
1293 pregion = NULL; 1293 pregion = NULL;
1294 goto error_just_free; 1294 goto error_just_free;
1295 } 1295 }
@@ -1441,10 +1441,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1441 1441
1442 kenter(""); 1442 kenter("");
1443 1443
1444 /* we're only permitted to split anonymous regions that have a single 1444 /* we're only permitted to split anonymous regions (these should have
1445 * owner */ 1445 * only a single usage on the region) */
1446 if (vma->vm_file || 1446 if (vma->vm_file)
1447 atomic_read(&vma->vm_region->vm_usage) != 1)
1448 return -ENOMEM; 1447 return -ENOMEM;
1449 1448
1450 if (mm->map_count >= sysctl_max_map_count) 1449 if (mm->map_count >= sysctl_max_map_count)
@@ -1518,7 +1517,7 @@ static int shrink_vma(struct mm_struct *mm,
1518 1517
1519 /* cut the backing region down to size */ 1518 /* cut the backing region down to size */
1520 region = vma->vm_region; 1519 region = vma->vm_region;
1521 BUG_ON(atomic_read(&region->vm_usage) != 1); 1520 BUG_ON(region->vm_usage != 1);
1522 1521
1523 down_write(&nommu_region_sem); 1522 down_write(&nommu_region_sem);
1524 delete_nommu_region(region); 1523 delete_nommu_region(region);
@@ -1762,27 +1761,6 @@ void unmap_mapping_range(struct address_space *mapping,
1762EXPORT_SYMBOL(unmap_mapping_range); 1761EXPORT_SYMBOL(unmap_mapping_range);
1763 1762
1764/* 1763/*
1765 * ask for an unmapped area at which to create a mapping on a file
1766 */
1767unsigned long get_unmapped_area(struct file *file, unsigned long addr,
1768 unsigned long len, unsigned long pgoff,
1769 unsigned long flags)
1770{
1771 unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
1772 unsigned long, unsigned long);
1773
1774 get_area = current->mm->get_unmapped_area;
1775 if (file && file->f_op && file->f_op->get_unmapped_area)
1776 get_area = file->f_op->get_unmapped_area;
1777
1778 if (!get_area)
1779 return -ENOSYS;
1780
1781 return get_area(file, addr, len, pgoff, flags);
1782}
1783EXPORT_SYMBOL(get_unmapped_area);
1784
1785/*
1786 * Check that a process has enough memory to allocate a new virtual 1764 * Check that a process has enough memory to allocate a new virtual
1787 * mapping. 0 means there is enough memory for the allocation to 1765 * mapping. 0 means there is enough memory for the allocation to
1788 * succeed and -ENOMEM implies there is not. 1766 * succeed and -ENOMEM implies there is not.
@@ -1936,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
1936 mmput(mm); 1914 mmput(mm);
1937 return len; 1915 return len;
1938} 1916}
1917
1918/**
1919 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1920 * @inode: The inode to check
1921 * @size: The current filesize of the inode
1922 * @newsize: The proposed filesize of the inode
1923 *
1924 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1925 * make sure that that any outstanding VMAs aren't broken and then shrink the
1926 * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
1927 * automatically grant mappings that are too large.
1928 */
1929int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1930 size_t newsize)
1931{
1932 struct vm_area_struct *vma;
1933 struct prio_tree_iter iter;
1934 struct vm_region *region;
1935 pgoff_t low, high;
1936 size_t r_size, r_top;
1937
1938 low = newsize >> PAGE_SHIFT;
1939 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1940
1941 down_write(&nommu_region_sem);
1942
1943 /* search for VMAs that fall within the dead zone */
1944 vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
1945 low, high) {
1946 /* found one - only interested if it's shared out of the page
1947 * cache */
1948 if (vma->vm_flags & VM_SHARED) {
1949 up_write(&nommu_region_sem);
1950 return -ETXTBSY; /* not quite true, but near enough */
1951 }
1952 }
1953
1954 /* reduce any regions that overlap the dead zone - if in existence,
1955 * these will be pointed to by VMAs that don't overlap the dead zone
1956 *
1957 * we don't check for any regions that start beyond the EOF as there
1958 * shouldn't be any
1959 */
1960 vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
1961 0, ULONG_MAX) {
1962 if (!(vma->vm_flags & VM_SHARED))
1963 continue;
1964
1965 region = vma->vm_region;
1966 r_size = region->vm_top - region->vm_start;
1967 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1968
1969 if (r_top > newsize) {
1970 region->vm_top -= r_top - newsize;
1971 if (region->vm_end > region->vm_top)
1972 region->vm_end = region->vm_top;
1973 }
1974 }
1975
1976 up_write(&nommu_region_sem);
1977 return 0;
1978}
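
The kernel-doc above says the check is made on behalf of a shrinking truncate, so the caller of nommu_shrink_inode_mappings() would be a filesystem's truncate path. Below is a hedged sketch of such a caller; apart from nommu_shrink_inode_mappings() itself, every name is illustrative and not taken from the patch.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical shrinking-truncate helper for a nommu filesystem. */
static int example_shrink_truncate(struct inode *inode, size_t newsize)
{
	size_t size = i_size_read(inode);
	int ret;

	if (newsize < size) {
		/*
		 * Refuse if a shared VMA would be broken by the shrink;
		 * otherwise the backing vm_regions get trimmed so a later
		 * do_mmap_pgoff() cannot hand out pages beyond the new EOF.
		 */
		ret = nommu_shrink_inode_mappings(inode, size, newsize);
		if (ret < 0)
			return ret;
	}

	/* ...the filesystem would now discard pages beyond newsize... */
	return 0;
}
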
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc5fb59..6ea4966a6334 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3998,7 +3998,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3998 } 3998 }
3999 3999
4000 /* Merge backward if suitable */ 4000 /* Merge backward if suitable */
4001 if (start_pfn < early_node_map[i].end_pfn && 4001 if (start_pfn < early_node_map[i].start_pfn &&
4002 end_pfn >= early_node_map[i].start_pfn) { 4002 end_pfn >= early_node_map[i].start_pfn) {
4003 early_node_map[i].start_pfn = start_pfn; 4003 early_node_map[i].start_pfn = start_pfn;
4004 return; 4004 return;
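
The one-character fix above tightens add_active_range()'s backward-merge test: the old condition fired on any overlap, so a new range lying wholly inside an existing entry would rewrite that entry's start_pfn forward and silently drop PFNs, while the new condition only merges when the new range genuinely extends the front. A standalone illustration with made-up PFN values follows; none of it is kernel code.

#include <stdio.h>
#include <stdbool.h>

struct range { unsigned long start_pfn, end_pfn; };

/* old test: any overlap at all counted as a "backward merge" */
static bool old_merge(const struct range *e, unsigned long s, unsigned long n)
{
	return s < e->end_pfn && n >= e->start_pfn;
}

/* new test: only merge when the new range starts in front of the entry */
static bool new_merge(const struct range *e, unsigned long s, unsigned long n)
{
	return s < e->start_pfn && n >= e->start_pfn;
}

int main(void)
{
	struct range existing = { .start_pfn = 5, .end_pfn = 30 };

	/* nested range: old test says "merge", which would move start_pfn
	 * to 10 and lose PFNs 5-9; the new test correctly declines */
	printf("nested  [10,20): old=%d new=%d\n",
	       old_merge(&existing, 10, 20), new_merge(&existing, 10, 20));

	/* genuine front extension: both tests agree it should merge */
	printf("extends [ 3, 7): old=%d new=%d\n",
	       old_merge(&existing, 3, 7), new_merge(&existing, 3, 7));
	return 0;
}
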
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f927..834db7be240f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -220,7 +220,7 @@ char *strndup_user(const char __user *s, long n)
220} 220}
221EXPORT_SYMBOL(strndup_user); 221EXPORT_SYMBOL(strndup_user);
222 222
223#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT 223#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
224void arch_pick_mmap_layout(struct mm_struct *mm) 224void arch_pick_mmap_layout(struct mm_struct *mm)
225{ 225{
226 mm->mmap_base = TASK_UNMAPPED_BASE; 226 mm->mmap_base = TASK_UNMAPPED_BASE;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 885207a6b6b7..c26986c85ce0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1922,6 +1922,9 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
1922 if (!populated_zone(zone)) 1922 if (!populated_zone(zone))
1923 continue; 1923 continue;
1924 1924
1925 if (zone_is_all_unreclaimable(zone))
1926 continue;
1927
1925 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), 1928 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
1926 0, 0)) 1929 0, 0))
1927 return 1; 1930 return 1;
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl
index 5f0fcb712e29..ce3e40b01e48 100644
--- a/scripts/markup_oops.pl
+++ b/scripts/markup_oops.pl
@@ -154,7 +154,7 @@ while (<STDIN>) {
154 if ($line =~ /RIP: 0010:\[\<([a-z0-9]+)\>\]/) { 154 if ($line =~ /RIP: 0010:\[\<([a-z0-9]+)\>\]/) {
155 $target = $1; 155 $target = $1;
156 } 156 }
157 if ($line =~ /EIP is at ([a-zA-Z0-9\_]+)\+(0x[0-9a-f]+)\/0x[a-f0-9]/) { 157 if ($line =~ /EIP is at ([a-zA-Z0-9\_]+)\+0x([0-9a-f]+)\/0x[a-f0-9]/) {
158 $function = $1; 158 $function = $1;
159 $func_offset = $2; 159 $func_offset = $2;
160 } 160 }
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 92f09fe9639e..ea6f6e3adaea 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -194,7 +194,7 @@ sub check_objcopy
194 } 194 }
195} 195}
196 196
197if ($arch eq "x86") { 197if ($arch =~ /(x86(_64)?)|(i386)/) {
198 if ($bits == 64) { 198 if ($bits == 64) {
199 $arch = "x86_64"; 199 $arch = "x86_64";
200 } else { 200 } else {
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 652a470b5f74..2e7fa3a06806 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -250,7 +250,19 @@ PTHREAD_LIBS = -lpthread
250# explicitly what architecture to check for. Fix this up for yours.. 250# explicitly what architecture to check for. Fix this up for yours..
251SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ 251SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
252 252
253ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null "$(QUIET_STDERR)" && echo y"), y) 253ifeq ($(V), 2)
254 QUIET_STDERR = ">/dev/null"
255else
256 QUIET_STDERR = ">/dev/null 2>&1"
257endif
258
259BITBUCKET = "/dev/null"
260
261ifneq ($(shell sh -c "(echo '\#include <stdio.h>'; echo 'int main(void) { return puts(\"hi\"); }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
262 BITBUCKET = .perf.dev.null
263endif
264
265ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o $(BITBUCKET) "$(QUIET_STDERR)" && echo y"), y)
254 CFLAGS := $(CFLAGS) -fstack-protector-all 266 CFLAGS := $(CFLAGS) -fstack-protector-all
255endif 267endif
256 268
@@ -451,11 +463,6 @@ BUILTIN_OBJS += builtin-kmem.o
451 463
452PERFLIBS = $(LIB_FILE) 464PERFLIBS = $(LIB_FILE)
453 465
454ifeq ($(V), 2)
455 QUIET_STDERR = ">/dev/null"
456else
457 QUIET_STDERR = ">/dev/null 2>&1"
458endif
459# 466#
460# Platform specific tweaks 467# Platform specific tweaks
461# 468#
@@ -483,19 +490,19 @@ ifeq ($(uname_S),Darwin)
483 PTHREAD_LIBS = 490 PTHREAD_LIBS =
484endif 491endif
485 492
486ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 493ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
487ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 494ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
488 msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); 495 msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
489endif 496endif
490 497
491 ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 498 ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
492 BASIC_CFLAGS += -DLIBELF_NO_MMAP 499 BASIC_CFLAGS += -DLIBELF_NO_MMAP
493 endif 500 endif
494else 501else
495 msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); 502 msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
496endif 503endif
497 504
498ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 505ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
499 msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231); 506 msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231);
500 BASIC_CFLAGS += -DNO_LIBDWARF 507 BASIC_CFLAGS += -DNO_LIBDWARF
501else 508else
@@ -509,7 +516,7 @@ PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
509PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` 516PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
510endif 517endif
511 518
512ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y) 519ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o $(BITBUCKET) $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
513 BASIC_CFLAGS += -DNO_LIBPERL 520 BASIC_CFLAGS += -DNO_LIBPERL
514else 521else
515 ALL_LDFLAGS += $(PERL_EMBED_LDOPTS) 522 ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
@@ -519,20 +526,20 @@ endif
519ifdef NO_DEMANGLE 526ifdef NO_DEMANGLE
520 BASIC_CFLAGS += -DNO_DEMANGLE 527 BASIC_CFLAGS += -DNO_DEMANGLE
521else 528else
522 has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y") 529 has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
523 530
524 ifeq ($(has_bfd),y) 531 ifeq ($(has_bfd),y)
525 EXTLIBS += -lbfd 532 EXTLIBS += -lbfd
526 else 533 else
527 has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y") 534 has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
528 ifeq ($(has_bfd_iberty),y) 535 ifeq ($(has_bfd_iberty),y)
529 EXTLIBS += -lbfd -liberty 536 EXTLIBS += -lbfd -liberty
530 else 537 else
531 has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y") 538 has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
532 ifeq ($(has_bfd_iberty_z),y) 539 ifeq ($(has_bfd_iberty_z),y)
533 EXTLIBS += -lbfd -liberty -lz 540 EXTLIBS += -lbfd -liberty -lz
534 else 541 else
535 has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y") 542 has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
536 ifeq ($(has_cplus_demangle),y) 543 ifeq ($(has_cplus_demangle),y)
537 EXTLIBS += -liberty 544 EXTLIBS += -liberty
538 BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE 545 BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
@@ -780,7 +787,7 @@ export TAR INSTALL DESTDIR SHELL_PATH
780 787
781SHELL = $(SHELL_PATH) 788SHELL = $(SHELL_PATH)
782 789
783all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS 790all:: .perf.dev.null shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) PERF-BUILD-OPTIONS
784ifneq (,$X) 791ifneq (,$X)
785 $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';) 792 $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
786endif 793endif
@@ -1107,6 +1114,11 @@ clean:
1107.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS 1114.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
1108.PHONY: .FORCE-PERF-BUILD-OPTIONS 1115.PHONY: .FORCE-PERF-BUILD-OPTIONS
1109 1116
1117.perf.dev.null:
1118 touch .perf.dev.null
1119
1120.INTERMEDIATE: .perf.dev.null
1121
1110### Make sure built-ins do not have dups and listed in perf.c 1122### Make sure built-ins do not have dups and listed in perf.c
1111# 1123#
1112check-builtins:: 1124check-builtins::