-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 14
-rw-r--r--  arch/s390/include/asm/elf.h | 3
-rw-r--r--  arch/s390/mm/pgtable.c | 7
-rw-r--r--  arch/sparc/include/asm/spitfire.h | 2
-rw-r--r--  arch/sparc/include/asm/xor_64.h | 4
-rw-r--r--  arch/sparc/kernel/cpu.c | 12
-rw-r--r--  arch/sparc/kernel/cpumap.c | 2
-rw-r--r--  arch/sparc/kernel/head_64.S | 25
-rw-r--r--  arch/sparc/kernel/process_32.c | 3
-rw-r--r--  arch/sparc/kernel/process_64.c | 3
-rw-r--r--  arch/sparc/kernel/setup_32.c | 2
-rw-r--r--  arch/sparc/kernel/setup_64.c | 18
-rw-r--r--  arch/sparc/mm/init_64.c | 5
-rw-r--r--  arch/x86/kernel/rtc.c | 23
-rw-r--r--  arch/x86/platform/mrst/vrtc.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 88
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 51
-rw-r--r--  drivers/hwmon/coretemp.c | 28
-rw-r--r--  drivers/ide/ide-disk.c | 7
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 13
-rw-r--r--  drivers/media/video/omap3isp/ispccdc.c | 1
-rw-r--r--  drivers/media/video/uvc/uvc_driver.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_video.c | 10
-rw-r--r--  drivers/media/video/uvc/uvcvideo.h | 2
-rw-r--r--  drivers/media/video/v4l2-dev.c | 11
-rw-r--r--  drivers/media/video/v4l2-device.c | 2
-rw-r--r--  drivers/mfd/jz4740-adc.c | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 14
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/pci/probe.c | 14
-rw-r--r--  drivers/s390/cio/cio.c | 8
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 93
-rw-r--r--  fs/btrfs/file.c | 24
-rw-r--r--  include/linux/irqdomain.h | 1
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/irq/irqdomain.c | 6
-rw-r--r--  kernel/posix-cpu-timers.c | 5
-rw-r--r--  kernel/resource.c | 7
-rw-r--r--  kernel/sched.c | 26
-rw-r--r--  kernel/sched_rt.c | 4
-rw-r--r--  net/ceph/ceph_common.c | 1
-rw-r--r--  net/ceph/messenger.c | 1
-rw-r--r--  net/ceph/osd_client.c | 4
-rw-r--r--  net/ceph/osdmap.c | 84
-rw-r--r--  sound/pci/hda/hda_intel.c | 9
-rw-r--r--  sound/soc/codecs/wm8753.c | 4
-rw-r--r--  sound/soc/omap/mcpdm.c | 2
-rw-r--r--  sound/soc/omap/mcpdm.h | 2
-rw-r--r--  sound/soc/pxa/zylonite.c | 8
-rw-r--r--  tools/perf/util/evsel.c | 7
61 files changed, 429 insertions(+), 427 deletions(-)
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 5cc83851ad06..31a7d3a7ce25 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops =
561 .write = u4_pcie_write_config, 561 .write = u4_pcie_write_config,
562}; 562};
563 563
564static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
565{
566 /* Apple's device-tree "hides" the root complex virtual P2P bridge
567 * on U4. However, Linux sees it, causing the PCI <-> OF matching
568 * code to fail to properly match devices below it. This works around
569 * it by setting the node of the bridge to point to the PHB node,
570 * which is not entirely correct but fixes the matching code and
571 * doesn't break anything else. It's also the simplest possible fix.
572 */
573 if (dev->dev.of_node == NULL)
574 dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
575}
576DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
577
564#endif /* CONFIG_PPC64 */ 578#endif /* CONFIG_PPC64 */
565 579
566#ifdef CONFIG_PPC32 580#ifdef CONFIG_PPC32
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 64b61bf72e93..547f1a6a35d4 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -188,7 +188,8 @@ extern char elf_platform[];
188#define SET_PERSONALITY(ex) \ 188#define SET_PERSONALITY(ex) \
189do { \ 189do { \
190 if (personality(current->personality) != PER_LINUX32) \ 190 if (personality(current->personality) != PER_LINUX32) \
191 set_personality(PER_LINUX); \ 191 set_personality(PER_LINUX | \
192 (current->personality & ~PER_MASK)); \
192 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 193 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
193 set_thread_flag(TIF_31BIT); \ 194 set_thread_flag(TIF_31BIT); \
194 else \ 195 else \
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f69ff3c13496..5d56c2b95b14 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -303,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
303 /* Walk the guest addr space page table */ 303 /* Walk the guest addr space page table */
304 table = gmap->table + (((to + off) >> 53) & 0x7ff); 304 table = gmap->table + (((to + off) >> 53) & 0x7ff);
305 if (*table & _REGION_ENTRY_INV) 305 if (*table & _REGION_ENTRY_INV)
306 return 0; 306 goto out;
307 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 307 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
308 table = table + (((to + off) >> 42) & 0x7ff); 308 table = table + (((to + off) >> 42) & 0x7ff);
309 if (*table & _REGION_ENTRY_INV) 309 if (*table & _REGION_ENTRY_INV)
310 return 0; 310 goto out;
311 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 311 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
312 table = table + (((to + off) >> 31) & 0x7ff); 312 table = table + (((to + off) >> 31) & 0x7ff);
313 if (*table & _REGION_ENTRY_INV) 313 if (*table & _REGION_ENTRY_INV)
314 return 0; 314 goto out;
315 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 315 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
316 table = table + (((to + off) >> 20) & 0x7ff); 316 table = table + (((to + off) >> 20) & 0x7ff);
317 317
@@ -319,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
319 flush |= gmap_unlink_segment(gmap, table); 319 flush |= gmap_unlink_segment(gmap, table);
320 *table = _SEGMENT_ENTRY_INV; 320 *table = _SEGMENT_ENTRY_INV;
321 } 321 }
322out:
322 up_read(&gmap->mm->mmap_sem); 323 up_read(&gmap->mm->mmap_sem);
323 if (flush) 324 if (flush)
324 gmap_flush_tlb(gmap); 325 gmap_flush_tlb(gmap);
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 55a17c6efeb8..d06a26601753 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -43,6 +43,8 @@
43#define SUN4V_CHIP_NIAGARA1 0x01 43#define SUN4V_CHIP_NIAGARA1 0x01
44#define SUN4V_CHIP_NIAGARA2 0x02 44#define SUN4V_CHIP_NIAGARA2 0x02
45#define SUN4V_CHIP_NIAGARA3 0x03 45#define SUN4V_CHIP_NIAGARA3 0x03
46#define SUN4V_CHIP_NIAGARA4 0x04
47#define SUN4V_CHIP_NIAGARA5 0x05
46#define SUN4V_CHIP_UNKNOWN 0xff 48#define SUN4V_CHIP_UNKNOWN 0xff
47 49
48#ifndef __ASSEMBLY__ 50#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h
index 9ed6ff679ab7..ee8edc68423e 100644
--- a/arch/sparc/include/asm/xor_64.h
+++ b/arch/sparc/include/asm/xor_64.h
@@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = {
66 ((tlb_type == hypervisor && \ 66 ((tlb_type == hypervisor && \
67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ 67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ 68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \
69 sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \ 69 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \
70 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \
71 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \
70 &xor_block_niagara : \ 72 &xor_block_niagara : \
71 &xor_block_VIS) 73 &xor_block_VIS)
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 9810fd881058..ba9b1cec4e6b 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void)
481 sparc_pmu_type = "niagara3"; 481 sparc_pmu_type = "niagara3";
482 break; 482 break;
483 483
484 case SUN4V_CHIP_NIAGARA4:
485 sparc_cpu_type = "UltraSparc T4 (Niagara4)";
486 sparc_fpu_type = "UltraSparc T4 integrated FPU";
487 sparc_pmu_type = "niagara4";
488 break;
489
490 case SUN4V_CHIP_NIAGARA5:
491 sparc_cpu_type = "UltraSparc T5 (Niagara5)";
492 sparc_fpu_type = "UltraSparc T5 integrated FPU";
493 sparc_pmu_type = "niagara5";
494 break;
495
484 default: 496 default:
485 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 497 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
486 prom_cpu_compatible); 498 prom_cpu_compatible);
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 4197e8d62d4c..9323eafccb93 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
325 case SUN4V_CHIP_NIAGARA1: 325 case SUN4V_CHIP_NIAGARA1:
326 case SUN4V_CHIP_NIAGARA2: 326 case SUN4V_CHIP_NIAGARA2:
327 case SUN4V_CHIP_NIAGARA3: 327 case SUN4V_CHIP_NIAGARA3:
328 case SUN4V_CHIP_NIAGARA4:
329 case SUN4V_CHIP_NIAGARA5:
328 rover_inc_table = niagara_iterate_method; 330 rover_inc_table = niagara_iterate_method;
329 break; 331 break;
330 default: 332 default:
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 0eac1b2fc53d..0d810c2f1d00 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -133,7 +133,7 @@ prom_sun4v_name:
133prom_niagara_prefix: 133prom_niagara_prefix:
134 .asciz "SUNW,UltraSPARC-T" 134 .asciz "SUNW,UltraSPARC-T"
135prom_sparc_prefix: 135prom_sparc_prefix:
136 .asciz "SPARC-T" 136 .asciz "SPARC-"
137 .align 4 137 .align 4
138prom_root_compatible: 138prom_root_compatible:
139 .skip 64 139 .skip 64
@@ -396,7 +396,7 @@ sun4v_chip_type:
396 or %g1, %lo(prom_cpu_compatible), %g1 396 or %g1, %lo(prom_cpu_compatible), %g1
397 sethi %hi(prom_sparc_prefix), %g7 397 sethi %hi(prom_sparc_prefix), %g7
398 or %g7, %lo(prom_sparc_prefix), %g7 398 or %g7, %lo(prom_sparc_prefix), %g7
399 mov 7, %g3 399 mov 6, %g3
40090: ldub [%g7], %g2 40090: ldub [%g7], %g2
401 ldub [%g1], %g4 401 ldub [%g1], %g4
402 cmp %g2, %g4 402 cmp %g2, %g4
@@ -408,10 +408,23 @@ sun4v_chip_type:
408 408
409 sethi %hi(prom_cpu_compatible), %g1 409 sethi %hi(prom_cpu_compatible), %g1
410 or %g1, %lo(prom_cpu_compatible), %g1 410 or %g1, %lo(prom_cpu_compatible), %g1
411 ldub [%g1 + 7], %g2 411 ldub [%g1 + 6], %g2
412 cmp %g2, 'T'
413 be,pt %xcc, 70f
414 cmp %g2, 'M'
415 bne,pn %xcc, 4f
416 nop
417
41870: ldub [%g1 + 7], %g2
412 cmp %g2, '3' 419 cmp %g2, '3'
413 be,pt %xcc, 5f 420 be,pt %xcc, 5f
414 mov SUN4V_CHIP_NIAGARA3, %g4 421 mov SUN4V_CHIP_NIAGARA3, %g4
422 cmp %g2, '4'
423 be,pt %xcc, 5f
424 mov SUN4V_CHIP_NIAGARA4, %g4
425 cmp %g2, '5'
426 be,pt %xcc, 5f
427 mov SUN4V_CHIP_NIAGARA5, %g4
415 ba,pt %xcc, 4f 428 ba,pt %xcc, 4f
416 nop 429 nop
417 430
@@ -545,6 +558,12 @@ niagara_tlb_fixup:
545 cmp %g1, SUN4V_CHIP_NIAGARA3 558 cmp %g1, SUN4V_CHIP_NIAGARA3
546 be,pt %xcc, niagara2_patch 559 be,pt %xcc, niagara2_patch
547 nop 560 nop
561 cmp %g1, SUN4V_CHIP_NIAGARA4
562 be,pt %xcc, niagara2_patch
563 nop
564 cmp %g1, SUN4V_CHIP_NIAGARA5
565 be,pt %xcc, niagara2_patch
566 nop
548 567
549 call generic_patch_copyops 568 call generic_patch_copyops
550 nop 569 nop
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index c8cc461ff75f..f793742eec2b 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -380,8 +380,7 @@ void flush_thread(void)
380#endif 380#endif
381 } 381 }
382 382
383 /* Now, this task is no longer a kernel thread. */ 383 /* This task is no longer a kernel thread. */
384 current->thread.current_ds = USER_DS;
385 if (current->thread.flags & SPARC_FLAG_KTHREAD) { 384 if (current->thread.flags & SPARC_FLAG_KTHREAD) {
386 current->thread.flags &= ~SPARC_FLAG_KTHREAD; 385 current->thread.flags &= ~SPARC_FLAG_KTHREAD;
387 386
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c158a95ec664..d959cd0a4aa4 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -368,9 +368,6 @@ void flush_thread(void)
368 368
369 /* Clear FPU register state. */ 369 /* Clear FPU register state. */
370 t->fpsaved[0] = 0; 370 t->fpsaved[0] = 0;
371
372 if (get_thread_current_ds() != ASI_AIUS)
373 set_fs(USER_DS);
374} 371}
375 372
376/* It's a bit more tricky when 64-bit tasks are involved... */ 373/* It's a bit more tricky when 64-bit tasks are involved... */
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d26e1f6c717a..3e3e2914c70b 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -137,7 +137,7 @@ static void __init process_switch(char c)
137 prom_halt(); 137 prom_halt();
138 break; 138 break;
139 case 'p': 139 case 'p':
140 /* Just ignore, this behavior is now the default. */ 140 prom_early_console.flags &= ~CON_BOOT;
141 break; 141 break;
142 default: 142 default:
143 printk("Unknown boot switch (-%c)\n", c); 143 printk("Unknown boot switch (-%c)\n", c);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 3c5bb784214f..c965595aa7e9 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -106,7 +106,7 @@ static void __init process_switch(char c)
106 prom_halt(); 106 prom_halt();
107 break; 107 break;
108 case 'p': 108 case 'p':
109 /* Just ignore, this behavior is now the default. */ 109 prom_early_console.flags &= ~CON_BOOT;
110 break; 110 break;
111 case 'P': 111 case 'P':
112 /* Force UltraSPARC-III P-Cache on. */ 112 /* Force UltraSPARC-III P-Cache on. */
@@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void)
425 else if (tlb_type == hypervisor) { 425 else if (tlb_type == hypervisor) {
426 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || 426 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
427 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 427 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
428 sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 428 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
429 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
430 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
429 cap |= HWCAP_SPARC_BLKINIT; 431 cap |= HWCAP_SPARC_BLKINIT;
430 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 432 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
431 sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 433 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
434 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
435 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
432 cap |= HWCAP_SPARC_N2; 436 cap |= HWCAP_SPARC_N2;
433 } 437 }
434 438
@@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void)
452 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) 456 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
453 cap |= AV_SPARC_ASI_BLK_INIT; 457 cap |= AV_SPARC_ASI_BLK_INIT;
454 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 458 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
455 sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 459 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
460 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
461 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
456 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | 462 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
457 AV_SPARC_ASI_BLK_INIT | 463 AV_SPARC_ASI_BLK_INIT |
458 AV_SPARC_POPC); 464 AV_SPARC_POPC);
459 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 465 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
466 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
467 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
460 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | 468 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
461 AV_SPARC_FMAF); 469 AV_SPARC_FMAF);
462 } 470 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 581531dbc8b5..8e073d802139 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
511 for (i = 0; i < prom_trans_ents; i++) 511 for (i = 0; i < prom_trans_ents; i++)
512 prom_trans[i].data &= ~0x0003fe0000000000UL; 512 prom_trans[i].data &= ~0x0003fe0000000000UL;
513 } 513 }
514
515 /* Force execute bit on. */
516 for (i = 0; i < prom_trans_ents; i++)
517 prom_trans[i].data |= (tlb_type == hypervisor ?
518 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
514} 519}
515 520
516static void __init hypervisor_tlb_lock(unsigned long vaddr, 521static void __init hypervisor_tlb_lock(unsigned long vaddr,
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 3f2ad2640d85..ccdbc16b8941 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
42{ 42{
43 int real_seconds, real_minutes, cmos_minutes; 43 int real_seconds, real_minutes, cmos_minutes;
44 unsigned char save_control, save_freq_select; 44 unsigned char save_control, save_freq_select;
45 unsigned long flags;
45 int retval = 0; 46 int retval = 0;
46 47
48 spin_lock_irqsave(&rtc_lock, flags);
49
47 /* tell the clock it's being set */ 50 /* tell the clock it's being set */
48 save_control = CMOS_READ(RTC_CONTROL); 51 save_control = CMOS_READ(RTC_CONTROL);
49 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 52 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
93 CMOS_WRITE(save_control, RTC_CONTROL); 96 CMOS_WRITE(save_control, RTC_CONTROL);
94 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 97 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
95 98
99 spin_unlock_irqrestore(&rtc_lock, flags);
100
96 return retval; 101 return retval;
97} 102}
98 103
99unsigned long mach_get_cmos_time(void) 104unsigned long mach_get_cmos_time(void)
100{ 105{
101 unsigned int status, year, mon, day, hour, min, sec, century = 0; 106 unsigned int status, year, mon, day, hour, min, sec, century = 0;
107 unsigned long flags;
108
109 spin_lock_irqsave(&rtc_lock, flags);
102 110
103 /* 111 /*
104 * If UIP is clear, then we have >= 244 microseconds before 112 * If UIP is clear, then we have >= 244 microseconds before
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
125 status = CMOS_READ(RTC_CONTROL); 133 status = CMOS_READ(RTC_CONTROL);
126 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); 134 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
127 135
136 spin_unlock_irqrestore(&rtc_lock, flags);
137
128 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { 138 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
129 sec = bcd2bin(sec); 139 sec = bcd2bin(sec);
130 min = bcd2bin(min); 140 min = bcd2bin(min);
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
169 179
170int update_persistent_clock(struct timespec now) 180int update_persistent_clock(struct timespec now)
171{ 181{
172 unsigned long flags; 182 return x86_platform.set_wallclock(now.tv_sec);
173 int retval;
174
175 spin_lock_irqsave(&rtc_lock, flags);
176 retval = x86_platform.set_wallclock(now.tv_sec);
177 spin_unlock_irqrestore(&rtc_lock, flags);
178
179 return retval;
180} 183}
181 184
182/* not static: needed by APM */ 185/* not static: needed by APM */
183void read_persistent_clock(struct timespec *ts) 186void read_persistent_clock(struct timespec *ts)
184{ 187{
185 unsigned long retval, flags; 188 unsigned long retval;
186 189
187 spin_lock_irqsave(&rtc_lock, flags);
188 retval = x86_platform.get_wallclock(); 190 retval = x86_platform.get_wallclock();
189 spin_unlock_irqrestore(&rtc_lock, flags);
190 191
191 ts->tv_sec = retval; 192 ts->tv_sec = retval;
192 ts->tv_nsec = 0; 193 ts->tv_nsec = 0;
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 73d70d65e76e..6d5dbcdd444a 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
58unsigned long vrtc_get_time(void) 58unsigned long vrtc_get_time(void)
59{ 59{
60 u8 sec, min, hour, mday, mon; 60 u8 sec, min, hour, mday, mon;
61 unsigned long flags;
61 u32 year; 62 u32 year;
62 63
64 spin_lock_irqsave(&rtc_lock, flags);
65
63 while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) 66 while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
64 cpu_relax(); 67 cpu_relax();
65 68
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
70 mon = vrtc_cmos_read(RTC_MONTH); 73 mon = vrtc_cmos_read(RTC_MONTH);
71 year = vrtc_cmos_read(RTC_YEAR); 74 year = vrtc_cmos_read(RTC_YEAR);
72 75
76 spin_unlock_irqrestore(&rtc_lock, flags);
77
73 /* vRTC YEAR reg contains the offset to 1960 */ 78 /* vRTC YEAR reg contains the offset to 1960 */
74 year += 1960; 79 year += 1960;
75 80
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
83int vrtc_set_mmss(unsigned long nowtime) 88int vrtc_set_mmss(unsigned long nowtime)
84{ 89{
85 int real_sec, real_min; 90 int real_sec, real_min;
91 unsigned long flags;
86 int vrtc_min; 92 int vrtc_min;
87 93
94 spin_lock_irqsave(&rtc_lock, flags);
88 vrtc_min = vrtc_cmos_read(RTC_MINUTES); 95 vrtc_min = vrtc_cmos_read(RTC_MINUTES);
89 96
90 real_sec = nowtime % 60; 97 real_sec = nowtime % 60;
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
95 102
96 vrtc_cmos_write(real_sec, RTC_SECONDS); 103 vrtc_cmos_write(real_sec, RTC_SECONDS);
97 vrtc_cmos_write(real_min, RTC_MINUTES); 104 vrtc_cmos_write(real_min, RTC_MINUTES);
105 spin_unlock_irqrestore(&rtc_lock, flags);
106
98 return 0; 107 return 0;
99} 108}
100 109
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82c..f07e4252b708 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
67MODULE_PARM_DESC(i915_enable_rc6, 67MODULE_PARM_DESC(i915_enable_rc6,
68 "Enable power-saving render C-state 6 (default: true)"); 68 "Enable power-saving render C-state 6 (default: true)");
69 69
70unsigned int i915_enable_fbc __read_mostly = 1; 70unsigned int i915_enable_fbc __read_mostly = -1;
71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
72MODULE_PARM_DESC(i915_enable_fbc, 72MODULE_PARM_DESC(i915_enable_fbc,
73 "Enable frame buffer compression for power savings " 73 "Enable frame buffer compression for power savings "
74 "(default: false)"); 74 "(default: -1 (use per-chip default))");
75 75
76unsigned int i915_lvds_downclock __read_mostly = 0; 76unsigned int i915_lvds_downclock __read_mostly = 0;
77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a8554d9039..04411ad2e779 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
1799 struct drm_framebuffer *fb; 1799 struct drm_framebuffer *fb;
1800 struct intel_framebuffer *intel_fb; 1800 struct intel_framebuffer *intel_fb;
1801 struct drm_i915_gem_object *obj; 1801 struct drm_i915_gem_object *obj;
1802 int enable_fbc;
1802 1803
1803 DRM_DEBUG_KMS("\n"); 1804 DRM_DEBUG_KMS("\n");
1804 1805
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
1839 intel_fb = to_intel_framebuffer(fb); 1840 intel_fb = to_intel_framebuffer(fb);
1840 obj = intel_fb->obj; 1841 obj = intel_fb->obj;
1841 1842
1842 if (!i915_enable_fbc) { 1843 enable_fbc = i915_enable_fbc;
1843 DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); 1844 if (enable_fbc < 0) {
1845 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1846 enable_fbc = 1;
1847 if (INTEL_INFO(dev)->gen <= 5)
1848 enable_fbc = 0;
1849 }
1850 if (!enable_fbc) {
1851 DRM_DEBUG_KMS("fbc disabled per module param\n");
1844 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 1852 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1845 goto out_disable; 1853 goto out_disable;
1846 } 1854 }
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4687 bpc = 6; /* min is 18bpp */ 4695 bpc = 6; /* min is 18bpp */
4688 break; 4696 break;
4689 case 24: 4697 case 24:
4690 bpc = min((unsigned int)8, display_bpc); 4698 bpc = 8;
4691 break; 4699 break;
4692 case 30: 4700 case 30:
4693 bpc = min((unsigned int)10, display_bpc); 4701 bpc = 10;
4694 break; 4702 break;
4695 case 48: 4703 case 48:
4696 bpc = min((unsigned int)12, display_bpc); 4704 bpc = 12;
4697 break; 4705 break;
4698 default: 4706 default:
4699 DRM_DEBUG("unsupported depth, assuming 24 bits\n"); 4707 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4701 break; 4709 break;
4702 } 4710 }
4703 4711
4712 display_bpc = min(display_bpc, bpc);
4713
4704 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4714 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4705 bpc, display_bpc); 4715 bpc, display_bpc);
4706 4716
4707 *pipe_bpp = bpc * 3; 4717 *pipe_bpp = display_bpc * 3;
4708 4718
4709 return display_bpc != bpc; 4719 return display_bpc != bpc;
4710} 4720}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0b2ee9d39980..fe1099d8817e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
337 struct drm_connector *connector, 337 struct drm_connector *connector,
338 struct intel_load_detect_pipe *old); 338 struct intel_load_detect_pipe *old);
339 339
340extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
341extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
342extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
343extern void intelfb_restore(void); 340extern void intelfb_restore(void);
344extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 341extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
345 u16 blue, int regno); 342 u16 blue, int regno);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d8936..6348c499616f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo {
92 */ 92 */
93 uint16_t attached_output; 93 uint16_t attached_output;
94 94
95 /*
96 * Hotplug activation bits for this device
97 */
98 uint8_t hotplug_active[2];
99
95 /** 100 /**
96 * This is used to select the color range of RBG outputs in HDMI mode. 101 * This is used to select the color range of RBG outputs in HDMI mode.
97 * It is only valid when using TMDS encoding and 8 bit per color mode. 102 * It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1208 return true; 1213 return true;
1209} 1214}
1210 1215
1211/* No use! */ 1216static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
1212#if 0
1213struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1214{
1215 struct drm_connector *connector = NULL;
1216 struct intel_sdvo *iout = NULL;
1217 struct intel_sdvo *sdvo;
1218
1219 /* find the sdvo connector */
1220 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1221 iout = to_intel_sdvo(connector);
1222
1223 if (iout->type != INTEL_OUTPUT_SDVO)
1224 continue;
1225
1226 sdvo = iout->dev_priv;
1227
1228 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1229 return connector;
1230
1231 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1232 return connector;
1233
1234 }
1235
1236 return NULL;
1237}
1238
1239int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1240{ 1217{
1241 u8 response[2]; 1218 u8 response[2];
1242 u8 status;
1243 struct intel_sdvo *intel_sdvo;
1244 DRM_DEBUG_KMS("\n");
1245
1246 if (!connector)
1247 return 0;
1248
1249 intel_sdvo = to_intel_sdvo(connector);
1250 1219
1251 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1220 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1252 &response, 2) && response[0]; 1221 &response, 2) && response[0];
1253} 1222}
1254 1223
1255void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) 1224static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1256{ 1225{
1257 u8 response[2]; 1226 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1258 u8 status;
1259 struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
1260
1261 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1262 intel_sdvo_read_response(intel_sdvo, &response, 2);
1263
1264 if (on) {
1265 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1266 status = intel_sdvo_read_response(intel_sdvo, &response, 2);
1267
1268 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1269 } else {
1270 response[0] = 0;
1271 response[1] = 0;
1272 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1273 }
1274 1227
1275 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1228 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
1276 intel_sdvo_read_response(intel_sdvo, &response, 2);
1277} 1229}
1278#endif
1279 1230
1280static bool 1231static bool
1281intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) 1232intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2045{ 1996{
2046 struct drm_encoder *encoder = &intel_sdvo->base.base; 1997 struct drm_encoder *encoder = &intel_sdvo->base.base;
2047 struct drm_connector *connector; 1998 struct drm_connector *connector;
1999 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2048 struct intel_connector *intel_connector; 2000 struct intel_connector *intel_connector;
2049 struct intel_sdvo_connector *intel_sdvo_connector; 2001 struct intel_sdvo_connector *intel_sdvo_connector;
2050 2002
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2062 2014
2063 intel_connector = &intel_sdvo_connector->base; 2015 intel_connector = &intel_sdvo_connector->base;
2064 connector = &intel_connector->base; 2016 connector = &intel_connector->base;
2065 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2017 if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
2018 connector->polled = DRM_CONNECTOR_POLL_HPD;
2019 intel_sdvo->hotplug_active[0] |= 1 << device;
2020 /* Some SDVO devices have one-shot hotplug interrupts.
2021 * Ensure that they get re-enabled when an interrupt happens.
2022 */
2023 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2024 intel_sdvo_enable_hotplug(intel_encoder);
2025 }
2026 else
2027 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2066 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2028 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2067 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2029 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2068 2030
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2569 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2531 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2570 goto err; 2532 goto err;
2571 2533
2534 /* Set up hotplug command - note paranoia about contents of reply.
2535 * We assume that the hardware is in a sane state, and only touch
2536 * the bits we think we understand.
2537 */
2538 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
2539 &intel_sdvo->hotplug_active, 2);
2540 intel_sdvo->hotplug_active[0] &= ~0x3;
2541
2572 if (intel_sdvo_output_setup(intel_sdvo, 2542 if (intel_sdvo_output_setup(intel_sdvo,
2573 intel_sdvo->caps.output_flags) != true) { 2543 intel_sdvo->caps.output_flags) != true) {
2574 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2544 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7ad43c6b1db7..4da23889fea6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
115 u8 msg[20]; 115 u8 msg[20];
116 int msg_bytes = send_bytes + 4; 116 int msg_bytes = send_bytes + 4;
117 u8 ack; 117 u8 ack;
118 unsigned retry;
118 119
119 if (send_bytes > 16) 120 if (send_bytes > 16)
120 return -1; 121 return -1;
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
125 msg[3] = (msg_bytes << 4) | (send_bytes - 1); 126 msg[3] = (msg_bytes << 4) | (send_bytes - 1);
126 memcpy(&msg[4], send, send_bytes); 127 memcpy(&msg[4], send, send_bytes);
127 128
128 while (1) { 129 for (retry = 0; retry < 4; retry++) {
129 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 130 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
130 msg, msg_bytes, NULL, 0, delay, &ack); 131 msg, msg_bytes, NULL, 0, delay, &ack);
131 if (ret < 0) 132 if (ret < 0)
132 return ret; 133 return ret;
133 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 134 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
134 break; 135 return send_bytes;
135 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 136 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
136 udelay(400); 137 udelay(400);
137 else 138 else
138 return -EIO; 139 return -EIO;
139 } 140 }
140 141
141 return send_bytes; 142 return -EIO;
142} 143}
143 144
144static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, 145static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
149 int msg_bytes = 4; 150 int msg_bytes = 4;
150 u8 ack; 151 u8 ack;
151 int ret; 152 int ret;
153 unsigned retry;
152 154
153 msg[0] = address; 155 msg[0] = address;
154 msg[1] = address >> 8; 156 msg[1] = address >> 8;
155 msg[2] = AUX_NATIVE_READ << 4; 157 msg[2] = AUX_NATIVE_READ << 4;
156 msg[3] = (msg_bytes << 4) | (recv_bytes - 1); 158 msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
157 159
158 while (1) { 160 for (retry = 0; retry < 4; retry++) {
159 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 161 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
160 msg, msg_bytes, recv, recv_bytes, delay, &ack); 162 msg, msg_bytes, recv, recv_bytes, delay, &ack);
161 if (ret == 0)
162 return -EPROTO;
163 if (ret < 0) 163 if (ret < 0)
164 return ret; 164 return ret;
165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
166 return ret; 166 return ret;
167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
168 udelay(400); 168 udelay(400);
169 else if (ret == 0)
170 return -EPROTO;
169 else 171 else
170 return -EIO; 172 return -EIO;
171 } 173 }
174
175 return -EIO;
172} 176}
173 177
174static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, 178static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8a746712b5b..c4ffa14fb2f4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1590 return backend_map; 1590 return backend_map;
1591} 1591}
1592 1592
1593static void evergreen_program_channel_remap(struct radeon_device *rdev)
1594{
1595 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1596
1597 tmp = RREG32(MC_SHARED_CHMAP);
1598 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1599 case 0:
1600 case 1:
1601 case 2:
1602 case 3:
1603 default:
1604 /* default mapping */
1605 mc_shared_chremap = 0x00fac688;
1606 break;
1607 }
1608
1609 switch (rdev->family) {
1610 case CHIP_HEMLOCK:
1611 case CHIP_CYPRESS:
1612 case CHIP_BARTS:
1613 tcp_chan_steer_lo = 0x54763210;
1614 tcp_chan_steer_hi = 0x0000ba98;
1615 break;
1616 case CHIP_JUNIPER:
1617 case CHIP_REDWOOD:
1618 case CHIP_CEDAR:
1619 case CHIP_PALM:
1620 case CHIP_SUMO:
1621 case CHIP_SUMO2:
1622 case CHIP_TURKS:
1623 case CHIP_CAICOS:
1624 default:
1625 tcp_chan_steer_lo = 0x76543210;
1626 tcp_chan_steer_hi = 0x0000ba98;
1627 break;
1628 }
1629
1630 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1631 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1632 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1633}
1634
1635static void evergreen_gpu_init(struct radeon_device *rdev) 1593static void evergreen_gpu_init(struct radeon_device *rdev)
1636{ 1594{
1637 u32 cc_rb_backend_disable = 0; 1595 u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2078 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2036 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2079 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2037 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2080 2038
2081 evergreen_program_channel_remap(rdev);
2082
2083 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 2039 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2084 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 2040 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
2085 2041
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 99fbd793c08c..8c79ca97753d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
569 return backend_map; 569 return backend_map;
570} 570}
571 571
572static void cayman_program_channel_remap(struct radeon_device *rdev)
573{
574 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
575
576 tmp = RREG32(MC_SHARED_CHMAP);
577 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
578 case 0:
579 case 1:
580 case 2:
581 case 3:
582 default:
583 /* default mapping */
584 mc_shared_chremap = 0x00fac688;
585 break;
586 }
587
588 switch (rdev->family) {
589 case CHIP_CAYMAN:
590 default:
591 //tcp_chan_steer_lo = 0x54763210
592 tcp_chan_steer_lo = 0x76543210;
593 tcp_chan_steer_hi = 0x0000ba98;
594 break;
595 }
596
597 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
598 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
599 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
600}
601
602static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, 572static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
603 u32 disable_mask_per_se, 573 u32 disable_mask_per_se,
604 u32 max_disable_mask_per_se, 574 u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
842 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 812 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
843 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 813 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
844 814
845 cayman_program_channel_remap(rdev);
846
847 /* primary versions */ 815 /* primary versions */
848 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 816 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
849 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 817 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c4b8741dbf58..bce63fd329d4 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
69 int saved_dpms = connector->dpms; 69 int saved_dpms = connector->dpms;
70 70
71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && 71 /* Only turn off the display it it's physically disconnected */
72 radeon_dp_needs_link_train(radeon_connector)) 72 if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
74 else
75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
74 else if (radeon_dp_needs_link_train(radeon_connector))
75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
76 connector->dpms = saved_dpms; 76 connector->dpms = saved_dpms;
77 } 77 }
78} 78}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e9..fde25c0d65a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
208 int xorigin = 0, yorigin = 0; 208 int xorigin = 0, yorigin = 0;
209 int w = radeon_crtc->cursor_width; 209 int w = radeon_crtc->cursor_width;
210 210
211 if (x < 0)
212 xorigin = -x + 1;
213 if (y < 0)
214 yorigin = -y + 1;
215 if (xorigin >= CURSOR_WIDTH)
216 xorigin = CURSOR_WIDTH - 1;
217 if (yorigin >= CURSOR_HEIGHT)
218 yorigin = CURSOR_HEIGHT - 1;
219
220 if (ASIC_IS_AVIVO(rdev)) { 211 if (ASIC_IS_AVIVO(rdev)) {
221 int i = 0;
222 struct drm_crtc *crtc_p;
223
224 /* avivo cursor are offset into the total surface */ 212 /* avivo cursor are offset into the total surface */
225 x += crtc->x; 213 x += crtc->x;
226 y += crtc->y; 214 y += crtc->y;
227 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 215 }
216 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
217
218 if (x < 0) {
219 xorigin = min(-x, CURSOR_WIDTH - 1);
220 x = 0;
221 }
222 if (y < 0) {
223 yorigin = min(-y, CURSOR_HEIGHT - 1);
224 y = 0;
225 }
226
227 if (ASIC_IS_AVIVO(rdev)) {
228 int i = 0;
229 struct drm_crtc *crtc_p;
228 230
229 /* avivo cursor image can't end on 128 pixel boundary or 231 /* avivo cursor image can't end on 128 pixel boundary or
230 * go past the end of the frame if both crtcs are enabled 232 * go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
253 255
254 radeon_lock_cursor(crtc, true); 256 radeon_lock_cursor(crtc, true);
255 if (ASIC_IS_DCE4(rdev)) { 257 if (ASIC_IS_DCE4(rdev)) {
256 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, 258 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
257 ((xorigin ? 0 : x) << 16) |
258 (yorigin ? 0 : y));
259 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 259 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
260 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, 260 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
261 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 261 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
262 } else if (ASIC_IS_AVIVO(rdev)) { 262 } else if (ASIC_IS_AVIVO(rdev)) {
263 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, 263 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
264 ((xorigin ? 0 : x) << 16) |
265 (yorigin ? 0 : y));
266 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 264 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
267 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, 265 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
268 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 266 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
276 | yorigin)); 274 | yorigin));
277 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, 275 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
278 (RADEON_CUR_LOCK 276 (RADEON_CUR_LOCK
279 | ((xorigin ? 0 : x) << 16) 277 | (x << 16)
280 | (yorigin ? 0 : y))); 278 | y));
281 /* offset is from DISP(2)_BASE_ADDRESS */ 279 /* offset is from DISP(2)_BASE_ADDRESS */
282 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + 280 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
283 (yorigin * 256))); 281 (yorigin * 256)));
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d440..b13c2eedc321 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
536 return backend_map; 536 return backend_map;
537} 537}
538 538
539static void rv770_program_channel_remap(struct radeon_device *rdev)
540{
541 u32 tcp_chan_steer, mc_shared_chremap, tmp;
542 bool force_no_swizzle;
543
544 switch (rdev->family) {
545 case CHIP_RV770:
546 case CHIP_RV730:
547 force_no_swizzle = false;
548 break;
549 case CHIP_RV710:
550 case CHIP_RV740:
551 default:
552 force_no_swizzle = true;
553 break;
554 }
555
556 tmp = RREG32(MC_SHARED_CHMAP);
557 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
558 case 0:
559 case 1:
560 default:
561 /* default mapping */
562 mc_shared_chremap = 0x00fac688;
563 break;
564 case 2:
565 case 3:
566 if (force_no_swizzle)
567 mc_shared_chremap = 0x00fac688;
568 else
569 mc_shared_chremap = 0x00bbc298;
570 break;
571 }
572
573 if (rdev->family == CHIP_RV740)
574 tcp_chan_steer = 0x00ef2a60;
575 else
576 tcp_chan_steer = 0x00fac688;
577
578 /* RV770 CE has special chremap setup */
579 if (rdev->pdev->device == 0x944e) {
580 tcp_chan_steer = 0x00b08b08;
581 mc_shared_chremap = 0x00b08b08;
582 }
583
584 WREG32(TCP_CHAN_STEER, tcp_chan_steer);
585 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
586}
587
588static void rv770_gpu_init(struct radeon_device *rdev) 539static void rv770_gpu_init(struct radeon_device *rdev)
589{ 540{
590 int i, j, num_qd_pipes; 541 int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
785 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 736 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
786 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 737 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
787 738
788 rv770_program_channel_remap(rdev);
789
790 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 739 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
791 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 740 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
792 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 741 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 44b23917d4cc..932383786642 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -377,9 +377,9 @@ exit_free:
377} 377}
378 378
379 379
380static int __devinit chk_ucode_version(struct platform_device *pdev) 380static int __cpuinit chk_ucode_version(unsigned int cpu)
381{ 381{
382 struct cpuinfo_x86 *c = &cpu_data(pdev->id); 382 struct cpuinfo_x86 *c = &cpu_data(cpu);
383 int err; 383 int err;
384 u32 edx; 384 u32 edx;
385 385
@@ -390,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
390 */ 390 */
391 if (c->x86_model == 0xe && c->x86_mask < 0xc) { 391 if (c->x86_model == 0xe && c->x86_mask < 0xc) {
392 /* check for microcode update */ 392 /* check for microcode update */
393 err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, 393 err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
394 &edx, 1); 394 &edx, 1);
395 if (err) { 395 if (err) {
396 dev_err(&pdev->dev, 396 pr_err("Cannot determine microcode revision of "
397 "Cannot determine microcode revision of " 397 "CPU#%u (%d)!\n", cpu, err);
398 "CPU#%u (%d)!\n", pdev->id, err);
399 return -ENODEV; 398 return -ENODEV;
400 } else if (edx < 0x39) { 399 } else if (edx < 0x39) {
401 dev_err(&pdev->dev, 400 pr_err("Errata AE18 not fixed, update BIOS or "
402 "Errata AE18 not fixed, update BIOS or " 401 "microcode of the CPU!\n");
403 "microcode of the CPU!\n");
404 return -ENODEV; 402 return -ENODEV;
405 } 403 }
406 } 404 }
@@ -508,6 +506,7 @@ static int create_core_data(struct platform_device *pdev,
508 506
509 return 0; 507 return 0;
510exit_free: 508exit_free:
509 pdata->core_data[attr_no] = NULL;
511 kfree(tdata); 510 kfree(tdata);
512 return err; 511 return err;
513} 512}
@@ -544,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
544 struct platform_data *pdata; 543 struct platform_data *pdata;
545 int err; 544 int err;
546 545
547 /* Check the microcode version of the CPU */
548 err = chk_ucode_version(pdev);
549 if (err)
550 return err;
551
552 /* Initialize the per-package data structures */ 546 /* Initialize the per-package data structures */
553 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); 547 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
554 if (!pdata) 548 if (!pdata)
@@ -630,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
630 } 624 }
631 625
632 pdev_entry->pdev = pdev; 626 pdev_entry->pdev = pdev;
633 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); 627 pdev_entry->phys_proc_id = pdev->id;
634 628
635 list_add_tail(&pdev_entry->list, &pdev_list); 629 list_add_tail(&pdev_entry->list, &pdev_list);
636 mutex_unlock(&pdev_list_mutex); 630 mutex_unlock(&pdev_list_mutex);
@@ -691,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
691 return; 685 return;
692 686
693 if (!pdev) { 687 if (!pdev) {
688 /* Check the microcode version of the CPU */
689 if (chk_ucode_version(cpu))
690 return;
691
694 /* 692 /*
695 * Alright, we have DTS support. 693 * Alright, we have DTS support.
696 * We are bringing the _first_ core in this pkg 694 * We are bringing the _first_ core in this pkg
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 274798068a54..16f69be820c7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
435 if (!(rq->cmd_flags & REQ_FLUSH)) 435 if (!(rq->cmd_flags & REQ_FLUSH))
436 return BLKPREP_OK; 436 return BLKPREP_OK;
437 437
438 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 438 if (rq->special) {
439 cmd = rq->special;
440 memset(cmd, 0, sizeof(*cmd));
441 } else {
442 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
443 }
439 444
440 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 445 /* FIXME: map struct ide_taskfile on rq->cmd[] */
441 BUG_ON(cmd == NULL); 446 BUG_ON(cmd == NULL);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index b5ef36222440..b3a5ecdb33ac 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
2194 "'%s' Display already enabled\n", 2194 "'%s' Display already enabled\n",
2195 def_display->name); 2195 def_display->name);
2196 } 2196 }
2197 /* set the update mode */
2198 if (def_display->caps &
2199 OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2200 if (dssdrv->enable_te)
2201 dssdrv->enable_te(def_display, 0);
2202 if (dssdrv->set_update_mode)
2203 dssdrv->set_update_mode(def_display,
2204 OMAP_DSS_UPDATE_MANUAL);
2205 } else {
2206 if (dssdrv->set_update_mode)
2207 dssdrv->set_update_mode(def_display,
2208 OMAP_DSS_UPDATE_AUTO);
2209 }
2210 } 2197 }
2211 } 2198 }
2212 2199
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 9d3459de04b2..80796eb0c53e 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -31,6 +31,7 @@
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/slab.h>
34#include <media/v4l2-event.h> 35#include <media/v4l2-event.h>
35 36
36#include "isp.h" 37#include "isp.h"
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index d29f9c2d0854..e4100b1f68df 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
1961 1961
1962 list_for_each_entry(stream, &dev->streams, list) { 1962 list_for_each_entry(stream, &dev->streams, list) {
1963 if (stream->intf == intf) 1963 if (stream->intf == intf)
1964 return uvc_video_resume(stream); 1964 return uvc_video_resume(stream, reset);
1965 } 1965 }
1966 1966
1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " 1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index 48fea373c25a..29e239911d0e 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
49 if (remote == NULL) 49 if (remote == NULL)
50 return -EINVAL; 50 return -EINVAL;
51 51
52 source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) 52 source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
53 ? (remote->vdev ? &remote->vdev->entity : NULL) 53 ? (remote->vdev ? &remote->vdev->entity : NULL)
54 : &remote->subdev.entity; 54 : &remote->subdev.entity;
55 if (source == NULL) 55 if (source == NULL)
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 8244167c8915..ffd1158628b6 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
1104 * buffers, making sure userspace applications are notified of the problem 1104 * buffers, making sure userspace applications are notified of the problem
1105 * instead of waiting forever. 1105 * instead of waiting forever.
1106 */ 1106 */
1107int uvc_video_resume(struct uvc_streaming *stream) 1107int uvc_video_resume(struct uvc_streaming *stream, int reset)
1108{ 1108{
1109 int ret; 1109 int ret;
1110 1110
1111 /* If the bus has been reset on resume, set the alternate setting to 0.
1112 * This should be the default value, but some devices crash or otherwise
1113 * misbehave if they don't receive a SET_INTERFACE request before any
1114 * other video control request.
1115 */
1116 if (reset)
1117 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1118
1111 stream->frozen = 0; 1119 stream->frozen = 0;
1112 1120
1113 ret = uvc_commit_video(stream, &stream->ctrl); 1121 ret = uvc_commit_video(stream, &stream->ctrl);
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index df32a43ca86a..cbdd49bf8b67 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
638/* Video */ 638/* Video */
639extern int uvc_video_init(struct uvc_streaming *stream); 639extern int uvc_video_init(struct uvc_streaming *stream);
640extern int uvc_video_suspend(struct uvc_streaming *stream); 640extern int uvc_video_suspend(struct uvc_streaming *stream);
641extern int uvc_video_resume(struct uvc_streaming *stream); 641extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
642extern int uvc_video_enable(struct uvc_streaming *stream, int enable); 642extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
643extern int uvc_probe_video(struct uvc_streaming *stream, 643extern int uvc_probe_video(struct uvc_streaming *stream,
644 struct uvc_streaming_control *probe); 644 struct uvc_streaming_control *probe);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 06f14008b346..d72156517726 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
173 media_device_unregister_entity(&vdev->entity); 173 media_device_unregister_entity(&vdev->entity);
174#endif 174#endif
175 175
176 /* Do not call v4l2_device_put if there is no release callback set.
177 * Drivers that have no v4l2_device release callback might free the
178 * v4l2_dev instance in the video_device release callback below, so we
179 * must perform this check here.
180 *
181 * TODO: In the long run all drivers that use v4l2_device should use the
182 * v4l2_device release callback. This check will then be unnecessary.
183 */
184 if (v4l2_dev->release == NULL)
185 v4l2_dev = NULL;
186
176 /* Release video_device and perform other 187 /* Release video_device and perform other
177 cleanups as needed. */ 188 cleanups as needed. */
178 vdev->release(vdev); 189 vdev->release(vdev);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index c72856c41434..e6a2c3b302d4 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38 mutex_init(&v4l2_dev->ioctl_lock); 38 mutex_init(&v4l2_dev->ioctl_lock);
39 v4l2_prio_init(&v4l2_dev->prio); 39 v4l2_prio_init(&v4l2_dev->prio);
40 kref_init(&v4l2_dev->ref); 40 kref_init(&v4l2_dev->ref);
41 get_device(dev);
41 v4l2_dev->dev = dev; 42 v4l2_dev->dev = dev;
42 if (dev == NULL) { 43 if (dev == NULL) {
43 /* If dev == NULL, then name must be filled in by the caller */ 44 /* If dev == NULL, then name must be filled in by the caller */
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
93 94
94 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) 95 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
95 dev_set_drvdata(v4l2_dev->dev, NULL); 96 dev_set_drvdata(v4l2_dev->dev, NULL);
97 put_device(v4l2_dev->dev);
96 v4l2_dev->dev = NULL; 98 v4l2_dev->dev = NULL;
97} 99}
98EXPORT_SYMBOL_GPL(v4l2_device_disconnect); 100EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 21131c7b0f1e..563654c9b19e 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
273 ct->regs.ack = JZ_REG_ADC_STATUS; 273 ct->regs.ack = JZ_REG_ADC_STATUS;
274 ct->chip.irq_mask = irq_gc_mask_set_bit; 274 ct->chip.irq_mask = irq_gc_mask_set_bit;
275 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 275 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
276 ct->chip.irq_ack = irq_gc_ack; 276 ct->chip.irq_ack = irq_gc_ack_set_bit;
277 277
278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); 278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
279 279
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index b928bc14e97b..8b51cd62d067 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
375 * both have been read. So the value read will always be correct. 375 * both have been read. So the value read will always be correct.
376 * Set BOOT bit to refresh factory tuning values. 376 * Set BOOT bit to refresh factory tuning values.
377 */ 377 */
378 lis3->read(lis3, CTRL_REG2, &reg); 378 if (lis3->pdata) {
379 if (lis3->whoami == WAI_12B) 379 lis3->read(lis3, CTRL_REG2, &reg);
380 reg |= CTRL2_BDU | CTRL2_BOOT; 380 if (lis3->whoami == WAI_12B)
381 else 381 reg |= CTRL2_BDU | CTRL2_BOOT;
382 reg |= CTRL2_BOOT_8B; 382 else
383 lis3->write(lis3, CTRL_REG2, reg); 383 reg |= CTRL2_BOOT_8B;
384 lis3->write(lis3, CTRL_REG2, reg);
385 }
384 386
385 /* LIS3 power on delay is quite long */ 387 /* LIS3 power on delay is quite long */
386 msleep(lis3->pwron_delay / lis3lv02d_get_odr()); 388 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e84fd4a4312..e9651f0a8817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79 79
80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; 80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
81 81
82/* 82/*
83 * The default CLS is used if arch didn't set CLS explicitly and not 83 * The default CLS is used if arch didn't set CLS explicitly and not
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)
3568 pci_hotplug_io_size = memparse(str + 9, &str); 3568 pci_hotplug_io_size = memparse(str + 9, &str);
3569 } else if (!strncmp(str, "hpmemsize=", 10)) { 3569 } else if (!strncmp(str, "hpmemsize=", 10)) {
3570 pci_hotplug_mem_size = memparse(str + 10, &str); 3570 pci_hotplug_mem_size = memparse(str + 10, &str);
3571 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3572 pcie_bus_config = PCIE_BUS_TUNE_OFF;
3571 } else if (!strncmp(str, "pcie_bus_safe", 13)) { 3573 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3572 pcie_bus_config = PCIE_BUS_SAFE; 3574 pcie_bus_config = PCIE_BUS_SAFE;
3573 } else if (!strncmp(str, "pcie_bus_perf", 13)) { 3575 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3574 pcie_bus_config = PCIE_BUS_PERFORMANCE; 3576 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3577 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3578 pcie_bus_config = PCIE_BUS_PEER2PEER;
3575 } else { 3579 } else {
3576 printk(KERN_ERR "PCI: Unknown option `%s'\n", 3580 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3577 str); 3581 str);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f3f94a5c068f..6ab6bd3df4b2 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1458 */ 1458 */
1459void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) 1459void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1460{ 1460{
1461 u8 smpss = mpss; 1461 u8 smpss;
1462 1462
1463 if (!pci_is_pcie(bus->self)) 1463 if (!pci_is_pcie(bus->self))
1464 return; 1464 return;
1465 1465
1466 if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1467 return;
1468
 1469 /* FIXME - Peer-to-peer DMA is possible, though the endpoint would need
 1470 * to be aware of the MPS of the destination. To work around this,
 1471 * simply force the MPS of the entire system to the smallest possible value.
1472 */
1473 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1474 smpss = 0;
1475
1466 if (pcie_bus_config == PCIE_BUS_SAFE) { 1476 if (pcie_bus_config == PCIE_BUS_SAFE) {
1477 smpss = mpss;
1478
1467 pcie_find_smpss(bus->self, &smpss); 1479 pcie_find_smpss(bus->self, &smpss);
1468 pci_walk_bus(bus, pcie_find_smpss, &smpss); 1480 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1469 } 1481 }
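Read as a whole, the hunk is a three-way policy switch: PCIE_BUS_TUNE_OFF leaves whatever the firmware programmed, PCIE_BUS_PEER2PEER clamps the whole hierarchy to MPS code 0 (128 bytes, the value every device supports), and PCIE_BUS_SAFE starts from the bus's own MPSS and lowers it while walking the bus. The standalone sketch below restates that decision with the walk stubbed out; choose_smpss and find_smallest_mpss are invented names, and only the payload encoding (128 << code) is taken from the PCIe spec.

    #include <stdio.h>

    enum bus_config { BUS_TUNE_OFF, BUS_SAFE, BUS_PERFORMANCE, BUS_PEER2PEER };

    /* PCIe encodes the maximum payload size as 128 << code, so code 0 is 128 bytes. */
    static int mps_bytes(unsigned char code)
    {
            return 128 << code;
    }

    /* Stub for walking the bus: pretend one device only supports code 1 (256 bytes). */
    static void find_smallest_mpss(unsigned char *smpss)
    {
            unsigned char device_mpss = 1;

            if (device_mpss < *smpss)
                    *smpss = device_mpss;
    }

    /* Returns -1 when the settings should be left untouched. */
    static int choose_smpss(enum bus_config cfg, unsigned char mpss, unsigned char *out)
    {
            unsigned char smpss;

            if (cfg == BUS_TUNE_OFF)
                    return -1;

            if (cfg == BUS_PEER2PEER) {
                    *out = 0;               /* force 128 bytes everywhere */
                    return 0;
            }

            smpss = mpss;                   /* BUS_SAFE: start from the root's MPSS */
            find_smallest_mpss(&smpss);
            *out = smpss;
            return 0;
    }

    int main(void)
    {
            unsigned char code;

            if (choose_smpss(BUS_SAFE, 2, &code) == 0)
                    printf("SAFE      -> %d bytes\n", mps_bytes(code));
            if (choose_smpss(BUS_PEER2PEER, 2, &code) == 0)
                    printf("PEER2PEER -> %d bytes\n", mps_bytes(code));
            return 0;
    }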
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index cbde448f9947..eb3140ee821e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;
654static int console_subchannel_in_use; 654static int console_subchannel_in_use;
655 655
656/* 656/*
657 * Use tpi to get a pending interrupt, call the interrupt handler and 657 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
658 * return a pointer to the subchannel structure. 658 * Return non-zero if an interrupt was processed, zero otherwise.
659 */ 659 */
660static int cio_tpi(void) 660static int cio_tpi(void)
661{ 661{
@@ -667,6 +667,10 @@ static int cio_tpi(void)
667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
668 if (tpi(NULL) != 1) 668 if (tpi(NULL) != 1)
669 return 0; 669 return 0;
670 if (tpi_info->adapter_IO) {
671 do_adapter_IO(tpi_info->isc);
672 return 1;
673 }
670 irb = (struct irb *)&S390_lowcore.irb; 674 irb = (struct irb *)&S390_lowcore.irb;
671 /* Store interrupt response block to lowcore. */ 675 /* Store interrupt response block to lowcore. */
672 if (tsch(tpi_info->schid, irb) != 0) 676 if (tsch(tpi_info->schid, irb) != 0)
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 1d23f3831866..6a80749391db 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -50,6 +50,8 @@
50#define PCH_RX_THOLD 7 50#define PCH_RX_THOLD 7
51#define PCH_RX_THOLD_MAX 15 51#define PCH_RX_THOLD_MAX 15
52 52
53#define PCH_TX_THOLD 2
54
53#define PCH_MAX_BAUDRATE 5000000 55#define PCH_MAX_BAUDRATE 5000000
54#define PCH_MAX_FIFO_DEPTH 16 56#define PCH_MAX_FIFO_DEPTH 16
55 57
@@ -58,6 +60,7 @@
58#define PCH_SLEEP_TIME 10 60#define PCH_SLEEP_TIME 10
59 61
60#define SSN_LOW 0x02U 62#define SSN_LOW 0x02U
63#define SSN_HIGH 0x03U
61#define SSN_NO_CONTROL 0x00U 64#define SSN_NO_CONTROL 0x00U
62#define PCH_MAX_CS 0xFF 65#define PCH_MAX_CS 0xFF
63#define PCI_DEVICE_ID_GE_SPI 0x8816 66#define PCI_DEVICE_ID_GE_SPI 0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
316 319
317 /* if transfer complete interrupt */ 320 /* if transfer complete interrupt */
318 if (reg_spsr_val & SPSR_FI_BIT) { 321 if (reg_spsr_val & SPSR_FI_BIT) {
319 if (tx_index < bpw_len) 322 if ((tx_index == bpw_len) && (rx_index == tx_index)) {
323 /* disable interrupts */
324 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
325
326 /* transfer is completed;
327 inform pch_spi_process_messages */
328 data->transfer_complete = true;
329 data->transfer_active = false;
330 wake_up(&data->wait);
331 } else {
320 dev_err(&data->master->dev, 332 dev_err(&data->master->dev,
321 "%s : Transfer is not completed", __func__); 333 "%s : Transfer is not completed", __func__);
322 /* disable interrupts */ 334 }
323 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
324
325 /* transfer is completed;inform pch_spi_process_messages */
326 data->transfer_complete = true;
327 data->transfer_active = false;
328 wake_up(&data->wait);
329 } 335 }
330} 336}
331 337
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
348 "%s returning due to suspend\n", __func__); 354 "%s returning due to suspend\n", __func__);
349 return IRQ_NONE; 355 return IRQ_NONE;
350 } 356 }
351 if (data->use_dma)
352 return IRQ_NONE;
353 357
354 io_remap_addr = data->io_remap_addr; 358 io_remap_addr = data->io_remap_addr;
355 spsr = io_remap_addr + PCH_SPSR; 359 spsr = io_remap_addr + PCH_SPSR;
356 360
357 reg_spsr_val = ioread32(spsr); 361 reg_spsr_val = ioread32(spsr);
358 362
359 if (reg_spsr_val & SPSR_ORF_BIT) 363 if (reg_spsr_val & SPSR_ORF_BIT) {
360 dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); 364 dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
365 if (data->current_msg->complete != 0) {
366 data->transfer_complete = true;
367 data->current_msg->status = -EIO;
368 data->current_msg->complete(data->current_msg->context);
369 data->bcurrent_msg_processing = false;
370 data->current_msg = NULL;
371 data->cur_trans = NULL;
372 }
373 }
374
375 if (data->use_dma)
376 return IRQ_NONE;
361 377
362 /* Check if the interrupt is for SPI device */ 378 /* Check if the interrupt is for SPI device */
363 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { 379 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
756 772
757 wait_event_interruptible(data->wait, data->transfer_complete); 773 wait_event_interruptible(data->wait, data->transfer_complete);
758 774
759 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
760 dev_dbg(&data->master->dev,
761 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
762
763 /* clear all interrupts */ 775 /* clear all interrupts */
764 pch_spi_writereg(data->master, PCH_SPSR, 776 pch_spi_writereg(data->master, PCH_SPSR,
765 pch_spi_readreg(data->master, PCH_SPSR)); 777 pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
815 } 827 }
816} 828}
817 829
818static void pch_spi_start_transfer(struct pch_spi_data *data) 830static int pch_spi_start_transfer(struct pch_spi_data *data)
819{ 831{
820 struct pch_spi_dma_ctrl *dma; 832 struct pch_spi_dma_ctrl *dma;
821 unsigned long flags; 833 unsigned long flags;
834 int rtn;
822 835
823 dma = &data->dma; 836 dma = &data->dma;
824 837
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
833 initiating the transfer. */ 846 initiating the transfer. */
834 dev_dbg(&data->master->dev, 847 dev_dbg(&data->master->dev,
835 "%s:waiting for transfer to get over\n", __func__); 848 "%s:waiting for transfer to get over\n", __func__);
836 wait_event_interruptible(data->wait, data->transfer_complete); 849 rtn = wait_event_interruptible_timeout(data->wait,
850 data->transfer_complete,
851 msecs_to_jiffies(2 * HZ));
837 852
838 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, 853 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
839 DMA_FROM_DEVICE); 854 DMA_FROM_DEVICE);
855
856 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
857 DMA_FROM_DEVICE);
858 memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
859
840 async_tx_ack(dma->desc_rx); 860 async_tx_ack(dma->desc_rx);
841 async_tx_ack(dma->desc_tx); 861 async_tx_ack(dma->desc_tx);
842 kfree(dma->sg_tx_p); 862 kfree(dma->sg_tx_p);
843 kfree(dma->sg_rx_p); 863 kfree(dma->sg_rx_p);
844 864
845 spin_lock_irqsave(&data->lock, flags); 865 spin_lock_irqsave(&data->lock, flags);
846 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
847 dev_dbg(&data->master->dev,
848 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
849 866
850 /* clear fifo threshold, disable interrupts, disable SPI transfer */ 867 /* clear fifo threshold, disable interrupts, disable SPI transfer */
851 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, 868 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
858 pch_spi_clear_fifo(data->master); 875 pch_spi_clear_fifo(data->master);
859 876
860 spin_unlock_irqrestore(&data->lock, flags); 877 spin_unlock_irqrestore(&data->lock, flags);
878
879 return rtn;
861} 880}
862 881
863static void pch_dma_rx_complete(void *arg) 882static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1023 /* set receive fifo threshold and transmit fifo threshold */ 1042 /* set receive fifo threshold and transmit fifo threshold */
1024 pch_spi_setclr_reg(data->master, PCH_SPCR, 1043 pch_spi_setclr_reg(data->master, PCH_SPCR,
1025 ((size - 1) << SPCR_RFIC_FIELD) | 1044 ((size - 1) << SPCR_RFIC_FIELD) |
1026 ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << 1045 (PCH_TX_THOLD << SPCR_TFIC_FIELD),
1027 SPCR_TFIC_FIELD),
1028 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); 1046 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
1029 1047
1030 spin_unlock_irqrestore(&data->lock, flags); 1048 spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1035 /* offset, length setting */ 1053 /* offset, length setting */
1036 sg = dma->sg_rx_p; 1054 sg = dma->sg_rx_p;
1037 for (i = 0; i < num; i++, sg++) { 1055 for (i = 0; i < num; i++, sg++) {
1038 if (i == 0) { 1056 if (i == (num - 2)) {
1039 sg->offset = 0; 1057 sg->offset = size * i;
1058 sg->offset = sg->offset * (*bpw / 8);
1040 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, 1059 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
1041 sg->offset); 1060 sg->offset);
1042 sg_dma_len(sg) = rem; 1061 sg_dma_len(sg) = rem;
1062 } else if (i == (num - 1)) {
1063 sg->offset = size * (i - 1) + rem;
1064 sg->offset = sg->offset * (*bpw / 8);
1065 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1066 sg->offset);
1067 sg_dma_len(sg) = size;
1043 } else { 1068 } else {
1044 sg->offset = rem + size * (i - 1); 1069 sg->offset = size * i;
1045 sg->offset = sg->offset * (*bpw / 8); 1070 sg->offset = sg->offset * (*bpw / 8);
1046 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, 1071 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1047 sg->offset); 1072 sg->offset);
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1065 dma->desc_rx = desc_rx; 1090 dma->desc_rx = desc_rx;
1066 1091
1067 /* TX */ 1092 /* TX */
1093 if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
1094 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1095 size = PCH_DMA_TRANS_SIZE;
1096 rem = 16;
1097 } else {
1098 num = 1;
1099 size = data->bpw_len;
1100 rem = data->bpw_len;
1101 }
1102
1068 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); 1103 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
1069 sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ 1104 sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
1070 /* offset, length setting */ 1105 /* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1162 if (data->use_dma) 1197 if (data->use_dma)
1163 pch_spi_request_dma(data, 1198 pch_spi_request_dma(data,
1164 data->current_msg->spi->bits_per_word); 1199 data->current_msg->spi->bits_per_word);
1200 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
1165 do { 1201 do {
1166 /* If we are already processing a message get the next 1202 /* If we are already processing a message get the next
1167 transfer structure from the message otherwise retrieve 1203 transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1184 1220
1185 if (data->use_dma) { 1221 if (data->use_dma) {
1186 pch_spi_handle_dma(data, &bpw); 1222 pch_spi_handle_dma(data, &bpw);
1187 pch_spi_start_transfer(data); 1223 if (!pch_spi_start_transfer(data))
1224 goto out;
1188 pch_spi_copy_rx_data_for_dma(data, bpw); 1225 pch_spi_copy_rx_data_for_dma(data, bpw);
1189 } else { 1226 } else {
1190 pch_spi_set_tx(data, &bpw); 1227 pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1222 1259
1223 } while (data->cur_trans != NULL); 1260 } while (data->cur_trans != NULL);
1224 1261
1262out:
1263 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
1225 if (data->use_dma) 1264 if (data->use_dma)
1226 pch_spi_release_dma(data); 1265 pch_spi_release_dma(data);
1227} 1266}
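Most of the churn in the receive path above is offset arithmetic: the transfer of bpw_len words is split into num chunks of size words with a leftover rem, the leftover is now carried by the next-to-last scatterlist entry, and every offset is computed in words and then scaled by the word size (*bpw / 8) in bytes. The standalone sketch below prints the layout that loop produces for made-up numbers; dump_layout is an invented helper and the lengths are left in the same units the driver hands to sg_set_page().

    #include <stdio.h>

    /* Reproduce the offset/length pattern of the rx scatterlist loop: the
     * remainder goes into entry num-2, the final entry starts right after it,
     * and offsets are word counts scaled to bytes by bpw/8. Assumes num >= 2. */
    static void dump_layout(int num, int size, int rem, int bpw)
    {
            int bytes = bpw / 8;
            int i, off, len;

            for (i = 0; i < num; i++) {
                    if (i == num - 2) {
                            off = size * i;
                            len = rem;
                    } else if (i == num - 1) {
                            off = size * (i - 1) + rem;
                            len = size;
                    } else {
                            off = size * i;
                            len = size;
                    }
                    printf("sg[%d]: offset=%d bytes, len=%d\n", i, off * bytes, len);
            }
    }

    int main(void)
    {
            /* e.g. three chunks of 4 words plus a 2-word leftover, 16-bit words;
             * all values are illustrative only */
            dump_layout(4, 4, 2, 16);
            return 0;
    }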
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a381cd22f518..e4e57d59edb7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1036,11 +1036,13 @@ out:
1036 * on error we return an unlocked page and the error value 1036 * on error we return an unlocked page and the error value
1037 * on success we return a locked page and 0 1037 * on success we return a locked page and 0
1038 */ 1038 */
1039static int prepare_uptodate_page(struct page *page, u64 pos) 1039static int prepare_uptodate_page(struct page *page, u64 pos,
1040 bool force_uptodate)
1040{ 1041{
1041 int ret = 0; 1042 int ret = 0;
1042 1043
1043 if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { 1044 if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
1045 !PageUptodate(page)) {
1044 ret = btrfs_readpage(NULL, page); 1046 ret = btrfs_readpage(NULL, page);
1045 if (ret) 1047 if (ret)
1046 return ret; 1048 return ret;
@@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
1061static noinline int prepare_pages(struct btrfs_root *root, struct file *file, 1063static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
1062 struct page **pages, size_t num_pages, 1064 struct page **pages, size_t num_pages,
1063 loff_t pos, unsigned long first_index, 1065 loff_t pos, unsigned long first_index,
1064 size_t write_bytes) 1066 size_t write_bytes, bool force_uptodate)
1065{ 1067{
1066 struct extent_state *cached_state = NULL; 1068 struct extent_state *cached_state = NULL;
1067 int i; 1069 int i;
@@ -1086,10 +1088,11 @@ again:
1086 } 1088 }
1087 1089
1088 if (i == 0) 1090 if (i == 0)
1089 err = prepare_uptodate_page(pages[i], pos); 1091 err = prepare_uptodate_page(pages[i], pos,
1092 force_uptodate);
1090 if (i == num_pages - 1) 1093 if (i == num_pages - 1)
1091 err = prepare_uptodate_page(pages[i], 1094 err = prepare_uptodate_page(pages[i],
1092 pos + write_bytes); 1095 pos + write_bytes, false);
1093 if (err) { 1096 if (err) {
1094 page_cache_release(pages[i]); 1097 page_cache_release(pages[i]);
1095 faili = i - 1; 1098 faili = i - 1;
@@ -1158,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1158 size_t num_written = 0; 1161 size_t num_written = 0;
1159 int nrptrs; 1162 int nrptrs;
1160 int ret = 0; 1163 int ret = 0;
1164 bool force_page_uptodate = false;
1161 1165
1162 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / 1166 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1163 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / 1167 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
@@ -1200,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1200 * contents of pages from loop to loop 1204 * contents of pages from loop to loop
1201 */ 1205 */
1202 ret = prepare_pages(root, file, pages, num_pages, 1206 ret = prepare_pages(root, file, pages, num_pages,
1203 pos, first_index, write_bytes); 1207 pos, first_index, write_bytes,
1208 force_page_uptodate);
1204 if (ret) { 1209 if (ret) {
1205 btrfs_delalloc_release_space(inode, 1210 btrfs_delalloc_release_space(inode,
1206 num_pages << PAGE_CACHE_SHIFT); 1211 num_pages << PAGE_CACHE_SHIFT);
@@ -1217,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1217 if (copied < write_bytes) 1222 if (copied < write_bytes)
1218 nrptrs = 1; 1223 nrptrs = 1;
1219 1224
1220 if (copied == 0) 1225 if (copied == 0) {
1226 force_page_uptodate = true;
1221 dirty_pages = 0; 1227 dirty_pages = 0;
1222 else 1228 } else {
1229 force_page_uptodate = false;
1223 dirty_pages = (copied + offset + 1230 dirty_pages = (copied + offset +
1224 PAGE_CACHE_SIZE - 1) >> 1231 PAGE_CACHE_SIZE - 1) >>
1225 PAGE_CACHE_SHIFT; 1232 PAGE_CACHE_SHIFT;
1233 }
1226 1234
1227 /* 1235 /*
1228 * If we had a short copy we need to release the excess delaloc 1236 * If we had a short copy we need to release the excess delaloc
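The new force_page_uptodate flag closes a corner case in the buffered write loop: when copy_from_user() makes no progress at all (copied == 0), the loop retries with a single page, and that first page must then be read in full before it is written to, even if the write position happens to be page aligned. The standalone sketch below restates that decision; need_full_read is an invented name and PAGE_SIZE stands in for PAGE_CACHE_SIZE.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Mirror the prepare_uptodate_page() test above: an unaligned start always
     * needs a read-in, and so does a retry after a copy that made no progress. */
    static bool need_full_read(unsigned long long pos, bool page_uptodate,
                               bool force_uptodate)
    {
            if (page_uptodate)
                    return false;
            return (pos & (PAGE_SIZE - 1)) || force_uptodate;
    }

    int main(void)
    {
            bool force = false;
            unsigned long copied = 0;       /* pretend the user copy faulted entirely */

            printf("first attempt: %d\n", need_full_read(8192, false, force));

            force = (copied == 0);          /* no progress, so force a read next time */
            printf("retry        : %d\n", need_full_read(8192, false, force));
            return 0;
    }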
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index e807ad687a07..3ad553e8eae2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain);
80#endif /* CONFIG_IRQ_DOMAIN */ 80#endif /* CONFIG_IRQ_DOMAIN */
81 81
82#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) 82#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
83extern struct irq_domain_ops irq_domain_simple_ops;
83extern void irq_domain_add_simple(struct device_node *controller, int irq_base); 84extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
84extern void irq_domain_generate_simple(const struct of_device_id *match, 85extern void irq_domain_generate_simple(const struct of_device_id *match,
85 u64 phys_base, unsigned int irq_start); 86 u64 phys_base, unsigned int irq_start);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8c230cbcbb48..9fc01226055b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -621,8 +621,9 @@ struct pci_driver {
621extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); 621extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
622 622
623enum pcie_bus_config_types { 623enum pcie_bus_config_types {
624 PCIE_BUS_PERFORMANCE, 624 PCIE_BUS_TUNE_OFF,
625 PCIE_BUS_SAFE, 625 PCIE_BUS_SAFE,
626 PCIE_BUS_PERFORMANCE,
626 PCIE_BUS_PEER2PEER, 627 PCIE_BUS_PEER2PEER,
627}; 628};
628 629
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0f..41d0237fd449 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {}
1956 1956
1957extern unsigned long long 1957extern unsigned long long
1958task_sched_runtime(struct task_struct *task); 1958task_sched_runtime(struct task_struct *task);
1959extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1960 1959
1961/* sched_exec is called by processes performing an exec */ 1960/* sched_exec is called by processes performing an exec */
1962#ifdef CONFIG_SMP 1961#ifdef CONFIG_SMP
diff --git a/init/main.c b/init/main.c
index 23702bbdbc1d..03b408dff825 100644
--- a/init/main.c
+++ b/init/main.c
@@ -730,8 +730,8 @@ static void __init do_basic_setup(void)
730 driver_init(); 730 driver_init();
731 init_irq_proc(); 731 init_irq_proc();
732 do_ctors(); 732 do_ctors();
733 do_initcalls();
734 usermodehelper_enable(); 733 usermodehelper_enable();
734 do_initcalls();
735} 735}
736 736
737static void __init do_pre_smp_initcalls(void) 737static void __init do_pre_smp_initcalls(void)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index d5828da3fd38..b57a3776de44 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
29 */ 29 */
30 for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { 30 for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
31 d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); 31 d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
32 if (d || d->domain) { 32 if (!d) {
 33 WARN(1, "error: assigning domain to non-existent irq_desc");
34 return;
35 }
36 if (d->domain) {
33 /* things are broken; just report, don't clean up */ 37 /* things are broken; just report, don't clean up */
34 WARN(1, "error: irq_desc already assigned to a domain"); 38 WARN(1, "error: irq_desc already assigned to a domain");
35 return; 39 return;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b581e7..c8008dd58ef2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
250 do { 250 do {
251 times->utime = cputime_add(times->utime, t->utime); 251 times->utime = cputime_add(times->utime, t->utime);
252 times->stime = cputime_add(times->stime, t->stime); 252 times->stime = cputime_add(times->stime, t->stime);
253 times->sum_exec_runtime += t->se.sum_exec_runtime; 253 times->sum_exec_runtime += task_sched_runtime(t);
254 } while_each_thread(tsk, t); 254 } while_each_thread(tsk, t);
255out: 255out:
256 rcu_read_unlock(); 256 rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
312 cpu->cpu = cputime.utime; 312 cpu->cpu = cputime.utime;
313 break; 313 break;
314 case CPUCLOCK_SCHED: 314 case CPUCLOCK_SCHED:
315 cpu->sched = thread_group_sched_runtime(p); 315 thread_group_cputime(p, &cputime);
316 cpu->sched = cputime.sum_exec_runtime;
316 break; 317 break;
317 } 318 }
318 return 0; 319 return 0;
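The switch from t->se.sum_exec_runtime to task_sched_runtime(t) means each thread contributes not only the runtime the scheduler has already accounted but also, for a thread that is on a CPU right now, the slice it has used since the last accounting update; CPUCLOCK_SCHED then simply reuses that group total instead of the removed helper. The toy model below illustrates the difference; toy_thread, toy_sched_runtime and group_runtime are invented names and the numbers are arbitrary.

    #include <stdio.h>

    /* Toy per-thread accounting: 'accounted' is what has already been folded
     * into sum_exec_runtime, 'pending' is what a currently running thread has
     * used since the last update. */
    struct toy_thread {
            unsigned long long accounted;
            unsigned long long pending;     /* non-zero only while running */
    };

    /* Counterpart of task_sched_runtime(): accounted time plus the delta that
     * has not been accounted yet. */
    static unsigned long long toy_sched_runtime(const struct toy_thread *t)
    {
            return t->accounted + t->pending;
    }

    static unsigned long long group_runtime(const struct toy_thread *threads, int n)
    {
            unsigned long long sum = 0;
            int i;

            for (i = 0; i < n; i++)
                    sum += toy_sched_runtime(&threads[i]);
            return sum;
    }

    int main(void)
    {
            struct toy_thread group[2] = {
                    { .accounted = 1000, .pending = 0 },    /* sleeping */
                    { .accounted = 2000, .pending = 150 },  /* running right now */
            };

            /* Prints 3150: the running thread's unaccounted 150 units are included. */
            printf("group runtime = %llu\n", group_runtime(group, 2));
            return 0;
    }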
diff --git a/kernel/resource.c b/kernel/resource.c
index 3b3cedc52592..c8dc249da5ce 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
419 else 419 else
420 tmp.end = root->end; 420 tmp.end = root->end;
421 421
422 if (tmp.end < tmp.start)
423 goto next;
424
422 resource_clip(&tmp, constraint->min, constraint->max); 425 resource_clip(&tmp, constraint->min, constraint->max);
423 arch_remove_reservations(&tmp); 426 arch_remove_reservations(&tmp);
424 427
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
436 return 0; 439 return 0;
437 } 440 }
438 } 441 }
439 if (!this) 442
443next: if (!this || this->end == root->end)
440 break; 444 break;
445
441 if (this != old) 446 if (this != old)
442 tmp.start = this->end + 1; 447 tmp.start = this->end + 1;
443 this = this->sibling; 448 this = this->sibling;
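The added check rejects a candidate window whose end lies before its start before any clipping is attempted; such an empty window can appear when the gap is derived from neighbouring resources, and clipping it first would only hide the problem. The standalone sketch below shows the same guard on an inclusive [start, end] range; struct window, window_usable and clip are invented names, not kernel interfaces.

    #include <stdbool.h>
    #include <stdio.h>

    struct window {
            unsigned long start;
            unsigned long end;      /* inclusive, like struct resource */
    };

    /* An inclusive window is only usable if end >= start; skip it otherwise,
     * which is what the new goto next does above. */
    static bool window_usable(const struct window *w)
    {
            return w->end >= w->start;
    }

    static void clip(struct window *w, unsigned long min, unsigned long max)
    {
            if (w->start < min)
                    w->start = min;
            if (w->end > max)
                    w->end = max;
    }

    int main(void)
    {
            struct window gap = { .start = 0x5000, .end = 0x4fff };  /* empty gap */

            if (!window_usable(&gap)) {
                    printf("skip empty window before clipping\n");
                    return 0;
            }
            clip(&gap, 0x1000, 0xffff);
            printf("candidate: %#lx-%#lx\n", gap.start, gap.end);
            return 0;
    }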
diff --git a/kernel/sched.c b/kernel/sched.c
index ec5f472bc5b9..b50b0f0c9aa9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
3725} 3725}
3726 3726
3727/* 3727/*
3728 * Return sum_exec_runtime for the thread group.
3729 * In case the task is currently running, return the sum plus current's
3730 * pending runtime that have not been accounted yet.
3731 *
3732 * Note that the thread group might have other running tasks as well,
3733 * so the return value not includes other pending runtime that other
3734 * running tasks might have.
3735 */
3736unsigned long long thread_group_sched_runtime(struct task_struct *p)
3737{
3738 struct task_cputime totals;
3739 unsigned long flags;
3740 struct rq *rq;
3741 u64 ns;
3742
3743 rq = task_rq_lock(p, &flags);
3744 thread_group_cputime(p, &totals);
3745 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
3746 task_rq_unlock(rq, p, &flags);
3747
3748 return ns;
3749}
3750
3751/*
3752 * Account user cpu time to a process. 3728 * Account user cpu time to a process.
3753 * @p: the process that the cpu time gets accounted to 3729 * @p: the process that the cpu time gets accounted to
3754 * @cputime: the cpu time spent in user space since the last update 3730 * @cputime: the cpu time spent in user space since the last update
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
4372 blk_schedule_flush_plug(tsk); 4348 blk_schedule_flush_plug(tsk);
4373} 4349}
4374 4350
4375asmlinkage void schedule(void) 4351asmlinkage void __sched schedule(void)
4376{ 4352{
4377 struct task_struct *tsk = current; 4353 struct task_struct *tsk = current;
4378 4354
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97540f0c9e47..af1177858be3 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1050 */ 1050 */
1051 if (curr && unlikely(rt_task(curr)) && 1051 if (curr && unlikely(rt_task(curr)) &&
1052 (curr->rt.nr_cpus_allowed < 2 || 1052 (curr->rt.nr_cpus_allowed < 2 ||
1053 curr->prio < p->prio) && 1053 curr->prio <= p->prio) &&
1054 (p->rt.nr_cpus_allowed > 1)) { 1054 (p->rt.nr_cpus_allowed > 1)) {
1055 int target = find_lowest_rq(p); 1055 int target = find_lowest_rq(p);
1056 1056
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
1581 p->rt.nr_cpus_allowed > 1 && 1581 p->rt.nr_cpus_allowed > 1 &&
1582 rt_task(rq->curr) && 1582 rt_task(rq->curr) &&
1583 (rq->curr->rt.nr_cpus_allowed < 2 || 1583 (rq->curr->rt.nr_cpus_allowed < 2 ||
1584 rq->curr->prio < p->prio)) 1584 rq->curr->prio <= p->prio))
1585 push_rt_tasks(rq); 1585 push_rt_tasks(rq);
1586} 1586}
1587 1587
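Kernel priority values are inverted (a numerically lower prio means a higher priority), so changing < to <= makes the waking RT task look for another CPU even when it merely ties with the task already running there, instead of preempting it. The standalone sketch below isolates that comparison; push_elsewhere is an invented name and the rt_task()/curr checks of the real code are left out.

    #include <stdbool.h>
    #include <stdio.h>

    /* Lower prio value = higher priority. Decide whether a waking RT task
     * should be steered to another CPU rather than preempting the current task. */
    static bool push_elsewhere(int curr_prio, int curr_cpus_allowed,
                               int waking_prio, int waking_cpus_allowed)
    {
            return (curr_cpus_allowed < 2 || curr_prio <= waking_prio) &&
                   waking_cpus_allowed > 1;
    }

    int main(void)
    {
            /* Equal priorities: with <= the waking task is pushed elsewhere (1). */
            printf("%d\n", push_elsewhere(10, 4, 10, 4));
            /* Current task has a higher prio number (lower priority): preempt it (0). */
            printf("%d\n", push_elsewhere(20, 4, 10, 4));
            return 0;
    }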
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 132963abc266..2883ea01e680 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt)
232 ceph_crypto_key_destroy(opt->key); 232 ceph_crypto_key_destroy(opt->key);
233 kfree(opt->key); 233 kfree(opt->key);
234 } 234 }
235 kfree(opt->mon_addr);
235 kfree(opt); 236 kfree(opt);
236} 237}
237EXPORT_SYMBOL(ceph_destroy_options); 238EXPORT_SYMBOL(ceph_destroy_options);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index c340e2e0765b..9918e9eb276e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2307 m->front_max = front_len; 2307 m->front_max = front_len;
2308 m->front_is_vmalloc = false; 2308 m->front_is_vmalloc = false;
2309 m->more_to_follow = false; 2309 m->more_to_follow = false;
2310 m->ack_stamp = 0;
2310 m->pool = NULL; 2311 m->pool = NULL;
2311 2312
2312 /* middle */ 2313 /* middle */
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 16836a7df7a6..88ad8a2501b5 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
217 INIT_LIST_HEAD(&req->r_unsafe_item); 217 INIT_LIST_HEAD(&req->r_unsafe_item);
218 INIT_LIST_HEAD(&req->r_linger_item); 218 INIT_LIST_HEAD(&req->r_linger_item);
219 INIT_LIST_HEAD(&req->r_linger_osd); 219 INIT_LIST_HEAD(&req->r_linger_osd);
220 INIT_LIST_HEAD(&req->r_req_lru_item);
220 req->r_flags = flags; 221 req->r_flags = flags;
221 222
222 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); 223 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -816,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc,
816{ 817{
817 req->r_tid = ++osdc->last_tid; 818 req->r_tid = ++osdc->last_tid;
818 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 819 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
819 INIT_LIST_HEAD(&req->r_req_lru_item);
820
821 dout("__register_request %p tid %lld\n", req, req->r_tid); 820 dout("__register_request %p tid %lld\n", req, req->r_tid);
822 __insert_request(osdc, req); 821 __insert_request(osdc, req);
823 ceph_osdc_get_request(req); 822 ceph_osdc_get_request(req);
824 osdc->num_requests++; 823 osdc->num_requests++;
825
826 if (osdc->num_requests == 1) { 824 if (osdc->num_requests == 1) {
827 dout(" first request, scheduling timeout\n"); 825 dout(" first request, scheduling timeout\n");
828 __schedule_osd_timeout(osdc); 826 __schedule_osd_timeout(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index e97c3588c3ec..fd863fe76934 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
339 struct ceph_pg_mapping *pg = NULL; 339 struct ceph_pg_mapping *pg = NULL;
340 int c; 340 int c;
341 341
342 dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
342 while (*p) { 343 while (*p) {
343 parent = *p; 344 parent = *p;
344 pg = rb_entry(parent, struct ceph_pg_mapping, node); 345 pg = rb_entry(parent, struct ceph_pg_mapping, node);
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
366 while (n) { 367 while (n) {
367 pg = rb_entry(n, struct ceph_pg_mapping, node); 368 pg = rb_entry(n, struct ceph_pg_mapping, node);
368 c = pgid_cmp(pgid, pg->pgid); 369 c = pgid_cmp(pgid, pg->pgid);
369 if (c < 0) 370 if (c < 0) {
370 n = n->rb_left; 371 n = n->rb_left;
371 else if (c > 0) 372 } else if (c > 0) {
372 n = n->rb_right; 373 n = n->rb_right;
373 else 374 } else {
375 dout("__lookup_pg_mapping %llx got %p\n",
376 *(u64 *)&pgid, pg);
374 return pg; 377 return pg;
378 }
375 } 379 }
376 return NULL; 380 return NULL;
377} 381}
378 382
383static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
384{
385 struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
386
387 if (pg) {
388 dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
389 rb_erase(&pg->node, root);
390 kfree(pg);
391 return 0;
392 }
393 dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
394 return -ENOENT;
395}
396
379/* 397/*
380 * rbtree of pg pool info 398 * rbtree of pg pool info
381 */ 399 */
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
711 void *start = *p; 729 void *start = *p;
712 int err = -EINVAL; 730 int err = -EINVAL;
713 u16 version; 731 u16 version;
714 struct rb_node *rbp;
715 732
716 ceph_decode_16_safe(p, end, version, bad); 733 ceph_decode_16_safe(p, end, version, bad);
717 if (version > CEPH_OSDMAP_INC_VERSION) { 734 if (version > CEPH_OSDMAP_INC_VERSION) {
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
861 } 878 }
862 879
863 /* new_pg_temp */ 880 /* new_pg_temp */
864 rbp = rb_first(&map->pg_temp);
865 ceph_decode_32_safe(p, end, len, bad); 881 ceph_decode_32_safe(p, end, len, bad);
866 while (len--) { 882 while (len--) {
867 struct ceph_pg_mapping *pg; 883 struct ceph_pg_mapping *pg;
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
872 ceph_decode_copy(p, &pgid, sizeof(pgid)); 888 ceph_decode_copy(p, &pgid, sizeof(pgid));
873 pglen = ceph_decode_32(p); 889 pglen = ceph_decode_32(p);
874 890
875 /* remove any? */
876 while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
877 node)->pgid, pgid) <= 0) {
878 struct ceph_pg_mapping *cur =
879 rb_entry(rbp, struct ceph_pg_mapping, node);
880
881 rbp = rb_next(rbp);
882 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
883 rb_erase(&cur->node, &map->pg_temp);
884 kfree(cur);
885 }
886
887 if (pglen) { 891 if (pglen) {
888 /* insert */ 892 /* insert */
889 ceph_decode_need(p, end, pglen*sizeof(u32), bad); 893 ceph_decode_need(p, end, pglen*sizeof(u32), bad);
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
903 } 907 }
904 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, 908 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
905 pglen); 909 pglen);
910 } else {
911 /* remove */
912 __remove_pg_mapping(&map->pg_temp, pgid);
906 } 913 }
907 } 914 }
908 while (rbp) {
909 struct ceph_pg_mapping *cur =
910 rb_entry(rbp, struct ceph_pg_mapping, node);
911
912 rbp = rb_next(rbp);
913 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
914 rb_erase(&cur->node, &map->pg_temp);
915 kfree(cur);
916 }
917 915
918 /* ignore the rest */ 916 /* ignore the rest */
919 *p = end; 917 *p = end;
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1046 struct ceph_pg_mapping *pg; 1044 struct ceph_pg_mapping *pg;
1047 struct ceph_pg_pool_info *pool; 1045 struct ceph_pg_pool_info *pool;
1048 int ruleno; 1046 int ruleno;
1049 unsigned poolid, ps, pps; 1047 unsigned poolid, ps, pps, t;
1050 int preferred; 1048 int preferred;
1051 1049
1050 poolid = le32_to_cpu(pgid.pool);
1051 ps = le16_to_cpu(pgid.ps);
1052 preferred = (s16)le16_to_cpu(pgid.preferred);
1053
1054 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
1055 if (!pool)
1056 return NULL;
1057
1052 /* pg_temp? */ 1058 /* pg_temp? */
1059 if (preferred >= 0)
1060 t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
1061 pool->lpgp_num_mask);
1062 else
1063 t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
1064 pool->pgp_num_mask);
1065 pgid.ps = cpu_to_le16(t);
1053 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 1066 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
1054 if (pg) { 1067 if (pg) {
1055 *num = pg->len; 1068 *num = pg->len;
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1057 } 1070 }
1058 1071
1059 /* crush */ 1072 /* crush */
1060 poolid = le32_to_cpu(pgid.pool);
1061 ps = le16_to_cpu(pgid.ps);
1062 preferred = (s16)le16_to_cpu(pgid.preferred);
1063
1064 /* don't forcefeed bad device ids to crush */
1065 if (preferred >= osdmap->max_osd ||
1066 preferred >= osdmap->crush->max_devices)
1067 preferred = -1;
1068
1069 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
1070 if (!pool)
1071 return NULL;
1072 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, 1073 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
1073 pool->v.type, pool->v.size); 1074 pool->v.type, pool->v.size);
1074 if (ruleno < 0) { 1075 if (ruleno < 0) {
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1078 return NULL; 1079 return NULL;
1079 } 1080 }
1080 1081
1082 /* don't forcefeed bad device ids to crush */
1083 if (preferred >= osdmap->max_osd ||
1084 preferred >= osdmap->crush->max_devices)
1085 preferred = -1;
1086
1081 if (preferred >= 0) 1087 if (preferred >= 0)
1082 pps = ceph_stable_mod(ps, 1088 pps = ceph_stable_mod(ps,
1083 le32_to_cpu(pool->v.lpgp_num), 1089 le32_to_cpu(pool->v.lpgp_num),
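The key point of the calc_pg_raw() change is that the pg_temp rbtree is keyed on the folded placement-group value, so the lookup has to fold pgid.ps with ceph_stable_mod() the same way the insert path does, otherwise remapped PGs are never found. The hunk only shows the call sites; the sketch below uses a paraphrased body for the helper (treat it as an assumption, not the canonical definition) to show how the folding behaves near the pg_num boundary.

    #include <stdio.h>

    /* Paraphrased stable-mod helper: fold x into [0, b) using a power-of-two
     * mask, falling back to the next smaller mask when the masked value would
     * land at or beyond b. The exact kernel definition is not part of this diff. */
    static int stable_mod(int x, int b, int bmask)
    {
            if ((x & bmask) < b)
                    return x & bmask;
            return x & (bmask >> 1);
    }

    int main(void)
    {
            int pg_num = 12;        /* pool with 12 placement groups */
            int mask = 15;          /* next power of two, minus one */
            int ps;

            /* Insert and lookup must agree on this folded value, which is what
             * the hunk enforces by recomputing pgid.ps before the rbtree lookup. */
            for (ps = 10; ps <= 14; ps++)
                    printf("ps=%d -> %d\n", ps, stable_mod(ps, pg_num, mask));
            return 0;
    }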
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index be6982289c0d..e9a2a8795d1b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip,
1924} 1924}
1925 1925
1926static unsigned int azx_get_position(struct azx *chip, 1926static unsigned int azx_get_position(struct azx *chip,
1927 struct azx_dev *azx_dev) 1927 struct azx_dev *azx_dev,
1928 bool with_check)
1928{ 1929{
1929 unsigned int pos; 1930 unsigned int pos;
1930 int stream = azx_dev->substream->stream; 1931 int stream = azx_dev->substream->stream;
@@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip,
1940 default: 1941 default:
1941 /* use the position buffer */ 1942 /* use the position buffer */
1942 pos = le32_to_cpu(*azx_dev->posbuf); 1943 pos = le32_to_cpu(*azx_dev->posbuf);
1943 if (chip->position_fix[stream] == POS_FIX_AUTO) { 1944 if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
1944 if (!pos || pos == (u32)-1) { 1945 if (!pos || pos == (u32)-1) {
1945 printk(KERN_WARNING 1946 printk(KERN_WARNING
1946 "hda-intel: Invalid position buffer, " 1947 "hda-intel: Invalid position buffer, "
@@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
1964 struct azx *chip = apcm->chip; 1965 struct azx *chip = apcm->chip;
1965 struct azx_dev *azx_dev = get_azx_dev(substream); 1966 struct azx_dev *azx_dev = get_azx_dev(substream);
1966 return bytes_to_frames(substream->runtime, 1967 return bytes_to_frames(substream->runtime,
1967 azx_get_position(chip, azx_dev)); 1968 azx_get_position(chip, azx_dev, false));
1968} 1969}
1969 1970
1970/* 1971/*
@@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
1987 return -1; /* bogus (too early) interrupt */ 1988 return -1; /* bogus (too early) interrupt */
1988 1989
1989 stream = azx_dev->substream->stream; 1990 stream = azx_dev->substream->stream;
1990 pos = azx_get_position(chip, azx_dev); 1991 pos = azx_get_position(chip, azx_dev, true);
1991 1992
1992 if (WARN_ONCE(!azx_dev->period_bytes, 1993 if (WARN_ONCE(!azx_dev->period_bytes,
1993 "hda-intel: zero azx_dev->period_bytes")) 1994 "hda-intel: zero azx_dev->period_bytes"))
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index ffa2ffe5ec11..aa091a0d8187 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
1454 /* set the update bits */ 1454 /* set the update bits */
1455 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1455 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
1456 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1456 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
1457 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1457 snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100);
1458 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1458 snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100);
1459 snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); 1459 snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100);
1460 snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); 1460 snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100);
1461 snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); 1461 snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c
index 928f03707451..50e59194ad81 100644
--- a/sound/soc/omap/mcpdm.c
+++ b/sound/soc/omap/mcpdm.c
@@ -449,7 +449,7 @@ exit:
449 return ret; 449 return ret;
450} 450}
451 451
452int __devexit omap_mcpdm_remove(struct platform_device *pdev) 452int omap_mcpdm_remove(struct platform_device *pdev)
453{ 453{
454 struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); 454 struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
455 455
diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h
index df3e16fb51f3..20c20a8649fe 100644
--- a/sound/soc/omap/mcpdm.h
+++ b/sound/soc/omap/mcpdm.h
@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void);
150extern void omap_mcpdm_free(void); 150extern void omap_mcpdm_free(void);
151extern int omap_mcpdm_set_offset(int offset1, int offset2); 151extern int omap_mcpdm_set_offset(int offset1, int offset2);
152int __devinit omap_mcpdm_probe(struct platform_device *pdev); 152int __devinit omap_mcpdm_probe(struct platform_device *pdev);
153int __devexit omap_mcpdm_remove(struct platform_device *pdev); 153int omap_mcpdm_remove(struct platform_device *pdev);
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b6445757fc54..2b8350b52232 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card)
196 if (clk_pout) { 196 if (clk_pout) {
197 pout = clk_get(NULL, "CLK_POUT"); 197 pout = clk_get(NULL, "CLK_POUT");
198 if (IS_ERR(pout)) { 198 if (IS_ERR(pout)) {
199 dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n", 199 dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
200 PTR_ERR(pout)); 200 PTR_ERR(pout));
201 return PTR_ERR(pout); 201 return PTR_ERR(pout);
202 } 202 }
203 203
204 ret = clk_enable(pout); 204 ret = clk_enable(pout);
205 if (ret != 0) { 205 if (ret != 0) {
206 dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", 206 dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
207 ret); 207 ret);
208 clk_put(pout); 208 clk_put(pout);
209 return ret; 209 return ret;
210 } 210 }
211 211
212 dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n", 212 dev_dbg(card->dev, "MCLK enabled at %luHz\n",
213 clk_get_rate(pout)); 213 clk_get_rate(pout));
214 } 214 }
215 215
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card)
241 if (clk_pout) { 241 if (clk_pout) {
242 ret = clk_enable(pout); 242 ret = clk_enable(pout);
243 if (ret != 0) 243 if (ret != 0)
244 dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", 244 dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
245 ret); 245 ret);
246 } 246 }
247 247
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c5748c52318f..e389815078d3 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -449,6 +449,8 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
449 } 449 }
450 450
451 if (type & PERF_SAMPLE_RAW) { 451 if (type & PERF_SAMPLE_RAW) {
452 const u64 *pdata;
453
452 u.val64 = *array; 454 u.val64 = *array;
453 if (WARN_ONCE(swapped, 455 if (WARN_ONCE(swapped,
454 "Endianness of raw data not corrected!\n")) { 456 "Endianness of raw data not corrected!\n")) {
@@ -462,11 +464,12 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
462 return -EFAULT; 464 return -EFAULT;
463 465
464 data->raw_size = u.val32[0]; 466 data->raw_size = u.val32[0];
467 pdata = (void *) array + sizeof(u32);
465 468
466 if (sample_overlap(event, &u.val32[1], data->raw_size)) 469 if (sample_overlap(event, pdata, data->raw_size))
467 return -EFAULT; 470 return -EFAULT;
468 471
469 data->raw_data = &u.val32[1]; 472 data->raw_data = (void *) pdata;
470 } 473 }
471 474
472 return 0; 475 return 0;
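The apparent point of the raw-sample fix is where the payload pointer comes from: a PERF_SAMPLE_RAW field is a u32 size immediately followed by the payload inside the stream of u64 words, and the pointer has to be derived from the record buffer itself (array) rather than from the local 8-byte copy in the union, or raw_data ends up pointing at stack memory. Below is a standalone sketch of that layout and extraction, with raw_payload as an invented helper and a little-endian example record.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A PERF_SAMPLE_RAW-style field: u32 size, then the payload, packed into a
     * stream of u64 words. Return the size and a pointer into the record itself. */
    static const void *raw_payload(const uint64_t *array, uint32_t *size)
    {
            uint32_t sz;

            memcpy(&sz, array, sizeof(sz));                 /* first half of the u64 */
            *size = sz;
            return (const uint8_t *)array + sizeof(uint32_t);   /* payload follows */
    }

    int main(void)
    {
            uint64_t record[2];
            /* size = 4 (little-endian), then the 4 payload bytes "perf" */
            uint8_t bytes[16] = { 4, 0, 0, 0, 'p', 'e', 'r', 'f' };
            uint32_t size;
            const char *data;

            memcpy(record, bytes, sizeof(bytes));
            data = raw_payload(record, &size);
            printf("size=%u data=%.4s\n", size, data);
            return 0;
    }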