author     David Woodhouse <dwmw2@infradead.org>  2007-08-02 17:36:28 -0400
committer  David Woodhouse <dwmw2@infradead.org>  2007-08-02 17:36:28 -0400
commit     f2d40cd92c5604a868b22c44a7858206ae4fcf35 (patch)
tree       40f3e49465ede8982f6b95b7ba51b9a9ebf22ff6
parent     3ca135e16a393e5c5cf6490ce751e43c59011dde (diff)
parent     7a883eaf62f4b943ebec738ce3b0796c67ef5d32 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
-rw-r--r--  Documentation/Changes | 1
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/frv/mb93090-mb00/pci-vdk.c | 3
-rw-r--r--  arch/i386/boot/edd.c | 2
-rw-r--r--  arch/i386/boot/video-vesa.c | 2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 1
-rw-r--r--  arch/ia64/kernel/iosapic.c | 19
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 17
-rw-r--r--  arch/ia64/kernel/mca.c | 17
-rw-r--r--  arch/ia64/kernel/setup.c | 1
-rw-r--r--  arch/ia64/kernel/smp.c | 8
-rw-r--r--  arch/ia64/kernel/time.c | 16
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 2
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 1
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c | 7
-rw-r--r--  drivers/ata/ata_piix.c | 74
-rw-r--r--  drivers/ata/libata-core.c | 1
-rw-r--r--  drivers/ata/libata-sff.c | 4
-rw-r--r--  drivers/ata/pata_cmd64x.c | 8
-rw-r--r--  drivers/ata/pata_sis.c | 20
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 5
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 138
-rw-r--r--  drivers/ide/arm/icside.c | 3
-rw-r--r--  drivers/ide/ide-tape.c | 2
-rw-r--r--  drivers/ide/pci/alim15x3.c | 2
-rw-r--r--  drivers/ide/pci/cmd64x.c | 4
-rw-r--r--  drivers/ide/pci/cs5520.c | 2
-rw-r--r--  drivers/ide/pci/cs5535.c | 42
-rw-r--r--  drivers/ide/pci/it8213.c | 33
-rw-r--r--  drivers/ide/pci/jmicron.c | 21
-rw-r--r--  drivers/ide/pci/piix.c | 17
-rw-r--r--  drivers/ide/pci/scc_pata.c | 61
-rw-r--r--  drivers/ide/pci/sis5513.c | 1
-rw-r--r--  drivers/ide/pci/slc90e66.c | 15
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/pci/pci.c | 7
-rw-r--r--  drivers/scsi/ide-scsi.c | 10
-rw-r--r--  include/asm-avr32/bug.h | 2
-rw-r--r--  include/asm-frv/mb86943a.h | 3
-rw-r--r--  include/asm-parisc/bug.h | 2
-rw-r--r--  include/asm-s390/bug.h | 2
-rw-r--r--  include/asm-sh/bug.h | 2
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/sched.h | 24
-rw-r--r--  include/linux/topology.h | 1
-rw-r--r--  include/net/netlabel.h | 2
-rw-r--r--  kernel/irq/resend.c | 9
-rw-r--r--  kernel/sched.c | 193
-rw-r--r--  kernel/sched_debug.c | 22
-rw-r--r--  kernel/sched_fair.c | 21
-rw-r--r--  kernel/sched_rt.c | 14
-rw-r--r--  kernel/sched_stats.h | 2
-rw-r--r--  net/netlabel/netlabel_user.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 5
-rw-r--r--  security/selinux/hooks.c | 3
-rw-r--r--  security/selinux/netlabel.c | 16
56 files changed, 516 insertions(+), 384 deletions(-)
diff --git a/Documentation/Changes b/Documentation/Changes
index 73a8617f1861..cb2b141b1c3e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -45,6 +45,7 @@ o nfs-utils 1.0.5 # showmount --version
 o procps 3.2.0 # ps --version
 o oprofile 0.9 # oprofiled --version
 o udev 081 # udevinfo -V
+o grub 0.93 # grub --version
 
 Kernel compilation
 ==================
diff --git a/MAINTAINERS b/MAINTAINERS
index 1e15a0edc313..e65e96a14bec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3754,7 +3754,7 @@ L: linux-usb-devel@lists.sourceforge.net
 W: http://www.linux-usb.org/gadget
 S: Maintained
 
-USB HID/HIDBP DRIVERS
+USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 P: Jiri Kosina
 M: jkosina@suse.cz
 L: linux-usb-devel@lists.sourceforge.net
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index 0b581e3cf7c7..6d51f133fb23 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -400,7 +400,8 @@ int __init pcibios_init(void)
 __reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000;
 mb();
 
-*(volatile unsigned long *)(__region_CS2+0x01300014) == 1;
+/* enable PCI arbitration */
+__reg_MB86943_pci_arbiter = MB86943_PCIARB_EN;
 
 ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
 ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
diff --git a/arch/i386/boot/edd.c b/arch/i386/boot/edd.c
index 77d92daf7923..658834d9f92a 100644
--- a/arch/i386/boot/edd.c
+++ b/arch/i386/boot/edd.c
@@ -127,7 +127,7 @@ static int get_edd_info(u8 devno, struct edd_info *ei)
 ax = 0x4800;
 dx = devno;
 asm("pushfl; int $0x13; popfl"
-: "+a" (ax), "+d" (dx)
+: "+a" (ax), "+d" (dx), "=m" (ei->params)
 : "S" (&ei->params)
 : "ebx", "ecx", "edi");
 
diff --git a/arch/i386/boot/video-vesa.c b/arch/i386/boot/video-vesa.c
index e6aa9eb8d93a..f1bc71e948cf 100644
--- a/arch/i386/boot/video-vesa.c
+++ b/arch/i386/boot/video-vesa.c
@@ -268,7 +268,7 @@ void vesa_store_edid(void)
 dx = 0; /* EDID block number */
 di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
 asm(INT10
-: "+a" (ax), "+b" (bx), "+d" (dx)
+: "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
 : "c" (cx), "D" (di)
 : "esi");
 #endif /* CONFIG_FIRMWARE_EDID */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index af10462d44d4..a3405b3c1eef 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -34,7 +34,6 @@
 #include <linux/uio.h>
 #include <linux/nfs_fs.h>
 #include <linux/quota.h>
-#include <linux/syscalls.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/cache.h>
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 91e6dc1e7baf..cfe4654838f4 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -142,7 +142,7 @@ struct iosapic_rte_info {
 static struct iosapic_intr_info {
 struct list_head rtes; /* RTEs using this vector (empty =>
 * not an IOSAPIC interrupt) */
-int count; /* # of RTEs that shares this vector */
+int count; /* # of registered RTEs */
 u32 low32; /* current value of low word of
 * Redirection table entry */
 unsigned int dest; /* destination CPU physical ID */
@@ -313,7 +313,7 @@ mask_irq (unsigned int irq)
 int rte_index;
 struct iosapic_rte_info *rte;
 
-if (list_empty(&iosapic_intr_info[irq].rtes))
+if (!iosapic_intr_info[irq].count)
 return; /* not an IOSAPIC interrupt! */
 
 /* set only the mask bit */
@@ -331,7 +331,7 @@ unmask_irq (unsigned int irq)
 int rte_index;
 struct iosapic_rte_info *rte;
 
-if (list_empty(&iosapic_intr_info[irq].rtes))
+if (!iosapic_intr_info[irq].count)
 return; /* not an IOSAPIC interrupt! */
 
 low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
@@ -363,7 +363,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 dest = cpu_physical_id(first_cpu(mask));
 
-if (list_empty(&iosapic_intr_info[irq].rtes))
+if (!iosapic_intr_info[irq].count)
 return; /* not an IOSAPIC interrupt */
 
 set_irq_affinity_info(irq, dest, redir);
@@ -542,7 +542,7 @@ iosapic_reassign_vector (int irq)
 {
 int new_irq;
 
-if (!list_empty(&iosapic_intr_info[irq].rtes)) {
+if (iosapic_intr_info[irq].count) {
 new_irq = create_irq();
 if (new_irq < 0)
 panic("%s: out of interrupt vectors!\n", __FUNCTION__);
@@ -560,7 +560,7 @@ iosapic_reassign_vector (int irq)
 }
 }
 
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
+static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 {
 int i;
 struct iosapic_rte_info *rte;
@@ -677,7 +677,7 @@ get_target_cpu (unsigned int gsi, int irq)
 * In case of vector shared by multiple RTEs, all RTEs that
 * share the vector need to use the same destination CPU.
 */
-if (!list_empty(&iosapic_intr_info[irq].rtes))
+if (iosapic_intr_info[irq].count)
 return iosapic_intr_info[irq].dest;
 
 /*
@@ -794,8 +794,9 @@ iosapic_register_intr (unsigned int gsi,
 err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
 polarity, trigger);
 if (err < 0) {
+spin_unlock(&irq_desc[irq].lock);
 irq = err;
-goto unlock_all;
+goto unlock_iosapic_lock;
 }
 
 /*
@@ -811,7 +812,7 @@ iosapic_register_intr (unsigned int gsi,
 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 cpu_logical_id(dest), dest, irq_to_vector(irq));
-unlock_all:
+
 spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
 spin_unlock_irqrestore(&iosapic_lock, flags);
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 9386b955eed1..c47c8acc96e3 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -101,15 +101,6 @@ int check_irq_used(int irq)
 return -1;
 }
 
-static void reserve_irq(unsigned int irq)
-{
-unsigned long flags;
-
-spin_lock_irqsave(&vector_lock, flags);
-irq_status[irq] = IRQ_RSVD;
-spin_unlock_irqrestore(&vector_lock, flags);
-}
-
 static inline int find_unassigned_irq(void)
 {
 int irq;
@@ -302,10 +293,14 @@ static cpumask_t vector_allocation_domain(int cpu)
 
 void destroy_and_reserve_irq(unsigned int irq)
 {
+unsigned long flags;
+
 dynamic_irq_cleanup(irq);
 
-clear_irq_vector(irq);
-reserve_irq(irq);
+spin_lock_irqsave(&vector_lock, flags);
+__clear_irq_vector(irq);
+irq_status[irq] = IRQ_RSVD;
+spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int __reassign_irq_vector(int irq, int cpu)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 4b5daa3cc0fe..ff28620cb992 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1750,8 +1750,17 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 strncpy(p->comm, type, sizeof(p->comm)-1);
 }
 
-/* Do per-CPU MCA-related initialization. */
+/* Caller prevents this from being called after init */
+static void * __init_refok mca_bootmem(void)
+{
+void *p;
 
+p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
+KERNEL_STACK_SIZE);
+return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+}
+
+/* Do per-CPU MCA-related initialization. */
 void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
@@ -1763,11 +1772,7 @@ ia64_mca_cpu_init(void *cpu_data)
 int cpu;
 
 first_time = 0;
-mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
-* NR_CPUS + KERNEL_STACK_SIZE);
-mca_data = (void *)(((unsigned long)mca_data +
-KERNEL_STACK_SIZE - 1) &
-(-KERNEL_STACK_SIZE));
+mca_data = mca_bootmem();
 for (cpu = 0; cpu < NR_CPUS; cpu++) {
 format_mca_init_stack(mca_data,
 offsetof(struct ia64_mca_cpu, mca_stack),
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 7cecd2964200..cd9a37a552c3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,7 +60,6 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
-#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0982882bfb80..4e446aa5f4ac 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -346,7 +346,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 }
 
 /*
- * Run a function on another CPU
+ * Run a function on a specific CPU
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> Currently unused.
@@ -366,9 +366,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 int me = get_cpu(); /* prevent preemption and reschedule on another processor */
 
 if (cpuid == me) {
-printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
+local_irq_disable();
+func(info);
+local_irq_enable();
 put_cpu();
-return -EBUSY;
+return 0;
 }
 
 data.func = func;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 6c0e9e2e1b82..98cfc90cab1d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -240,7 +240,21 @@ ia64_init_itm (void)
 if (!nojitter)
 itc_jitter_data.itc_jitter = 1;
 #endif
-}
+} else
+/*
+* ITC is drifty and we have not synchronized the ITCs in smpboot.c.
+* ITC values may fluctuate significantly between processors.
+* Clock should not be used for hrtimers. Mark itc as only
+* useful for boot and testing.
+*
+* Note that jitter compensation is off! There is no point of
+* synchronizing ITCs since they may be large differentials
+* that change over time.
+*
+* The only way to fix this would be to repeatedly sync the
+* ITCs. Until that time we have to avoid ITC.
+*/
+clocksource_itc.rating = 50;
 
 /* Setup the CPU local timer tick */
 ia64_cpu_local_tick();
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 787ed642dd49..4594770e685a 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -391,7 +391,7 @@ void sn_bus_free_sysdata(void)
 * hubdev_init_node() - Creates the HUB data structure and link them to it's
 * own NODE specific data area.
 */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 {
 struct hubdev_info *hubdev_info;
 int size;
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 684b1c984a44..1f38a3a68390 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -25,7 +25,6 @@
 #include <linux/interrupt.h>
 #include <linux/acpi.h>
 #include <linux/compiler.h>
-#include <linux/sched.h>
 #include <linux/root_dev.h>
 #include <linux/nodemask.h>
 #include <linux/pm.h>
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index 19e25d2b64fc..cf67fc562054 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -23,16 +23,14 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static void __iomem *sn2_mc;
-
 static cycle_t read_sn2(void)
 {
-return (cycle_t)readq(sn2_mc);
+return (cycle_t)readq(RTC_COUNTER_ADDR);
 }
 
 static struct clocksource clocksource_sn2 = {
 .name = "sn2_rtc",
-.rating = 300,
+.rating = 450,
 .read = read_sn2,
 .mask = (1LL << 55) - 1,
 .mult = 0,
@@ -58,7 +56,6 @@ ia64_sn_udelay (unsigned long usecs)
 
 void __init sn_timer_init(void)
 {
-sn2_mc = RTC_COUNTER_ADDR;
 clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
 clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
 clocksource_sn2.shift);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ad070861bb53..a78832ea81fa 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -890,37 +890,46 @@ static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 }
 
 #ifdef CONFIG_PM
-static struct dmi_system_id piix_broken_suspend_dmi_table[] = {
+static int piix_broken_suspend(void)
 {
-.ident = "TECRA M5",
-.matches = {
-DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
-},
-},
-{
-.ident = "Satellite U200",
-.matches = {
-DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+static struct dmi_system_id sysids[] = {
+{
+.ident = "TECRA M5",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+},
 },
-},
-{
-.ident = "Satellite U205",
-.matches = {
-DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+{
+.ident = "Satellite U205",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+},
 },
-},
-{
-.ident = "Portege M500",
-.matches = {
-DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+{
+.ident = "Portege M500",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+},
 },
-},
-{ }
-};
+{ }
+};
+static const char *oemstrs[] = {
+"Tecra M3,",
+};
+int i;
+
+if (dmi_check_system(sysids))
+return 1;
+
+for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
+if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
+return 1;
+
+return 0;
+}
 
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
@@ -937,8 +946,7 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 * cycles and power trying to do something to the sleeping
 * beauty.
 */
-if (dmi_check_system(piix_broken_suspend_dmi_table) &&
-mesg.event == PM_EVENT_SUSPEND) {
+if (piix_broken_suspend() && mesg.event == PM_EVENT_SUSPEND) {
 pci_save_state(pdev);
 
 /* mark its power state as "unknown", since we don't
@@ -973,10 +981,10 @@ static int piix_pci_device_resume(struct pci_dev *pdev)
 pci_restore_state(pdev);
 
 /* PCI device wasn't disabled during suspend. Use
-* __pci_reenable_device() to avoid affecting the
-* enable count.
+* pci_reenable_device() to avoid affecting the enable
+* count.
 */
-rc = __pci_reenable_device(pdev);
+rc = pci_reenable_device(pdev);
 if (rc)
 dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
 "device after resume (%d)\n", rc);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6001aae0b884..60e78bef469f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3788,6 +3788,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
+{ "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
 
 /* Devices with NCQ limits */
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 6c289c7b1322..1cce2198baaf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -573,6 +573,10 @@ int ata_pci_init_bmdma(struct ata_host *host)
 struct pci_dev *pdev = to_pci_dev(gdev);
 int i, rc;
 
+/* No BAR4 allocation: No DMA */
+if (pci_resource_start(pdev, 4) == 0)
+return 0;
+
 /* TODO: If we get no DMA mask we should fall back to PIO */
 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 if (rc)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index dc443e7dc37c..e34b632487d7 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.2.3"
+#define DRV_VERSION "0.2.4"
 
 /*
 * CMD64x specific registers definition.
@@ -397,7 +397,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 .flags = ATA_FLAG_SLAVE_POSS,
 .pio_mask = 0x1f,
 .mwdma_mask = 0x07,
-.udma_mask = ATA_UDMA1,
+.udma_mask = ATA_UDMA2,
 .port_ops = &cmd64x_port_ops
 },
 { /* CMD 646 rev 1 */
@@ -412,7 +412,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 .flags = ATA_FLAG_SLAVE_POSS,
 .pio_mask = 0x1f,
 .mwdma_mask = 0x07,
-.udma_mask = ATA_UDMA2,
+.udma_mask = ATA_UDMA4,
 .port_ops = &cmd648_port_ops
 },
 { /* CMD 649 */
@@ -420,7 +420,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 .flags = ATA_FLAG_SLAVE_POSS,
 .pio_mask = 0x1f,
 .mwdma_mask = 0x07,
-.udma_mask = ATA_UDMA3,
+.udma_mask = ATA_UDMA5,
 .port_ops = &cmd648_port_ops
 }
 };
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 9a829a7cbc60..66bd0e83ac07 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -2,6 +2,7 @@
 * pata_sis.c - SiS ATA driver
 *
 * (C) 2005 Red Hat <alan@redhat.com>
+* (C) 2007 Bartlomiej Zolnierkiewicz
 *
 * Based upon linux/drivers/ide/pci/sis5513.c
 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
@@ -35,7 +36,7 @@
 #include "sis.h"
 
 #define DRV_NAME "pata_sis"
-#define DRV_VERSION "0.5.1"
+#define DRV_VERSION "0.5.2"
 
 struct sis_chipset {
 u16 device; /* PCI host ID */
@@ -237,7 +238,7 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
-* sis_100_set_pioode - Initialize host controller PATA PIO timings
+* sis_100_set_piomode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Device we are configuring for.
 *
@@ -262,7 +263,7 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
-* sis_133_set_pioode - Initialize host controller PATA PIO timings
+* sis_133_set_piomode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Device we are configuring for.
 *
@@ -334,7 +335,7 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 int drive_pci = sis_old_port_base(adev);
 u16 timing;
 
-const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
 const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
 
 pci_read_config_word(pdev, drive_pci, &timing);
@@ -342,15 +343,15 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 if (adev->dma_mode < XFER_UDMA_0) {
 /* bits 3-0 hold recovery timing bits 8-10 active timing and
 the higer bits are dependant on the device */
-timing &= ~ 0x870F;
+timing &= ~0x870F;
 timing |= mwdma_bits[speed];
-pci_write_config_word(pdev, drive_pci, timing);
 } else {
 /* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
 speed = adev->dma_mode - XFER_UDMA_0;
 timing &= ~0x6000;
 timing |= udma_bits[speed];
 }
+pci_write_config_word(pdev, drive_pci, timing);
 }
 
 /**
@@ -373,7 +374,7 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 int drive_pci = sis_old_port_base(adev);
 u16 timing;
 
-const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
 const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
 
 pci_read_config_word(pdev, drive_pci, &timing);
@@ -432,8 +433,7 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 * @adev: Device to program
 *
 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
-* Handles early SiS 961 bridges. Supports MWDMA as well unlike
-* the old ide/pci driver.
+* Handles early SiS 961 bridges.
 *
 * LOCKING:
 * None (inherited from caller).
@@ -467,8 +467,6 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a
 * @adev: Device to program
 *
 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
-* Handles early SiS 961 bridges. Supports MWDMA as well unlike
-* the old ide/pci driver.
 *
 * LOCKING:
 * None (inherited from caller).
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b2baeaeba9be..0a1f2b52a12f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -743,7 +743,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 hid->quirks = quirks;
 
 if (!(usbhid = kzalloc(sizeof(struct usbhid_device), GFP_KERNEL)))
-goto fail;
+goto fail_no_usbhid;
 
 hid->driver_data = usbhid;
 usbhid->hid = hid;
@@ -878,6 +878,8 @@ fail:
 usb_free_urb(usbhid->urbout);
 usb_free_urb(usbhid->urbctrl);
 hid_free_buffers(dev, hid);
+kfree(usbhid);
+fail_no_usbhid:
 hid_free_device(hid);
 
 return NULL;
@@ -913,6 +915,7 @@ static void hid_disconnect(struct usb_interface *intf)
 usb_free_urb(usbhid->urbout);
 
 hid_free_buffers(hid_to_usb_dev(hid), hid);
+kfree(usbhid);
 hid_free_device(hid);
 }
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 775b9f3b8ce3..6b21a214f419 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -61,7 +61,9 @@
 #define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
-#define USB_DEVICE_ID_APPLE_IR 0x8240
+
+#define USB_VENDOR_ID_ASUS 0x0b05
+#define USB_DEVICE_ID_ASUS_LCM 0x1726
 
 #define USB_VENDOR_ID_ATEN 0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -198,6 +200,70 @@
 
 #define USB_VENDOR_ID_LOGITECH 0x046d
 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
+#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110
+#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
+#define USB_DEVICE_ID_LOGITECH_HARMONY_3 0xc112
+#define USB_DEVICE_ID_LOGITECH_HARMONY_4 0xc113
+#define USB_DEVICE_ID_LOGITECH_HARMONY_5 0xc114
+#define USB_DEVICE_ID_LOGITECH_HARMONY_6 0xc115
+#define USB_DEVICE_ID_LOGITECH_HARMONY_7 0xc116
+#define USB_DEVICE_ID_LOGITECH_HARMONY_8 0xc117
+#define USB_DEVICE_ID_LOGITECH_HARMONY_9 0xc118
+#define USB_DEVICE_ID_LOGITECH_HARMONY_10 0xc119
+#define USB_DEVICE_ID_LOGITECH_HARMONY_11 0xc11a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_12 0xc11b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_13 0xc11c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_14 0xc11d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_15 0xc11e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_16 0xc11f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_17 0xc120
+#define USB_DEVICE_ID_LOGITECH_HARMONY_18 0xc121
+#define USB_DEVICE_ID_LOGITECH_HARMONY_19 0xc122
+#define USB_DEVICE_ID_LOGITECH_HARMONY_20 0xc123
+#define USB_DEVICE_ID_LOGITECH_HARMONY_21 0xc124
+#define USB_DEVICE_ID_LOGITECH_HARMONY_22 0xc125
+#define USB_DEVICE_ID_LOGITECH_HARMONY_23 0xc126
+#define USB_DEVICE_ID_LOGITECH_HARMONY_24 0xc127
+#define USB_DEVICE_ID_LOGITECH_HARMONY_25 0xc128
+#define USB_DEVICE_ID_LOGITECH_HARMONY_26 0xc129
+#define USB_DEVICE_ID_LOGITECH_HARMONY_27 0xc12a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_28 0xc12b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_29 0xc12c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_30 0xc12d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_31 0xc12e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_32 0xc12f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_33 0xc130
+#define USB_DEVICE_ID_LOGITECH_HARMONY_34 0xc131
+#define USB_DEVICE_ID_LOGITECH_HARMONY_35 0xc132
+#define USB_DEVICE_ID_LOGITECH_HARMONY_36 0xc133
+#define USB_DEVICE_ID_LOGITECH_HARMONY_37 0xc134
+#define USB_DEVICE_ID_LOGITECH_HARMONY_38 0xc135
+#define USB_DEVICE_ID_LOGITECH_HARMONY_39 0xc136
+#define USB_DEVICE_ID_LOGITECH_HARMONY_40 0xc137
+#define USB_DEVICE_ID_LOGITECH_HARMONY_41 0xc138
+#define USB_DEVICE_ID_LOGITECH_HARMONY_42 0xc139
+#define USB_DEVICE_ID_LOGITECH_HARMONY_43 0xc13a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_44 0xc13b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_45 0xc13c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_46 0xc13d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_47 0xc13e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_48 0xc13f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_49 0xc140
+#define USB_DEVICE_ID_LOGITECH_HARMONY_50 0xc141
+#define USB_DEVICE_ID_LOGITECH_HARMONY_51 0xc142
+#define USB_DEVICE_ID_LOGITECH_HARMONY_52 0xc143
+#define USB_DEVICE_ID_LOGITECH_HARMONY_53 0xc144
+#define USB_DEVICE_ID_LOGITECH_HARMONY_54 0xc145
+#define USB_DEVICE_ID_LOGITECH_HARMONY_55 0xc146
+#define USB_DEVICE_ID_LOGITECH_HARMONY_56 0xc147
+#define USB_DEVICE_ID_LOGITECH_HARMONY_57 0xc148
+#define USB_DEVICE_ID_LOGITECH_HARMONY_58 0xc149
+#define USB_DEVICE_ID_LOGITECH_HARMONY_59 0xc14a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_60 0xc14b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_61 0xc14c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_62 0xc14d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_63 0xc14e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_64 0xc14f
 #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
 #define USB_DEVICE_ID_LOGITECH_KBD 0xc311
 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c
@@ -221,6 +287,9 @@
 #define USB_DEVICE_ID_NCR_FIRST 0x0300
 #define USB_DEVICE_ID_NCR_LAST 0x03ff
 
+#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
+#define USB_DEVICE_ID_N_S_HARMONY 0xc359
+
 #define USB_VENDOR_ID_NEC 0x073e
 #define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
 
@@ -315,7 +384,7 @@ static const struct hid_blacklist {
 { USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24, HID_QUIRK_IGNORE },
 { USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE },
 { USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE },
-{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IR, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, HID_QUIRK_IGNORE},
 { USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
 { USB_VENDOR_ID_CIDC, 0x0103, HID_QUIRK_IGNORE },
 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
@@ -463,6 +532,71 @@ static const struct hid_blacklist {
 
 { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS },
 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD, HID_QUIRK_RESET_LEDS },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_2, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_3, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_4, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_5, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_6, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_7, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_8, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_9, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_10, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_11, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_12, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_13, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_14, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_15, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_16, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_17, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_18, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_19, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_20, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_21, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_22, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_23, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_24, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_25, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_26, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_27, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_28, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_29, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_30, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_31, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_32, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_33, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_34, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_35, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_36, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_37, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_38, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_39, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_40, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_41, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_42, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_43, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_44, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_45, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_46, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_47, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_48, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_49, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_50, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_51, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_52, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_53, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_54, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_55, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_56, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_57, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_58, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_59, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_60, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_61, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_62, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_63, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_64, HID_QUIRK_IGNORE },
+{ USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY, HID_QUIRK_IGNORE },
 
 { 0, 0 }
 };
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index c89b5f4b2d04..8a9b98fcb66d 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -693,13 +693,12 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id)
 if (ret)
 goto out;
 
-state = kmalloc(sizeof(struct icside_state), GFP_KERNEL);
+state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
 if (!state) {
 ret = -ENOMEM;
 goto release;
 }
 
-memset(state, 0, sizeof(state));
 state->type = ICS_TYPE_NOTYPE;
 state->dev = &ec->dev;
 
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index e82bfa5e0ab8..1fa57947bca0 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -640,7 +640,7 @@ typedef enum {
 } idetape_chrdev_direction_t;
 
 struct idetape_bh {
-unsigned short b_size;
+u32 b_size;
 atomic_t b_count;
 struct idetape_bh *b_reqnext;
 char *b_data;
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 5511c86733dc..025689de50e9 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -593,7 +593,7 @@ static struct dmi_system_id cable_dmi_table[] = {
 .ident = "HP Pavilion N5430",
 .matches = {
 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
+DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
 },
 },
 { }
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 19633c5aba15..0e3b5de26e69 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -475,11 +475,11 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
 switch (rev) {
 case 0x07:
 case 0x05:
-printk("%s: UltraDMA capable", name);
+printk("%s: UltraDMA capable\n", name);
 break;
 case 0x03:
 default:
-printk("%s: MultiWord DMA force limited", name);
+printk("%s: MultiWord DMA force limited\n", name);
 break;
 case 0x01:
 printk("%s: MultiWord DMA limited, "
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index bccedf9b8b28..b89e81656875 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -133,7 +133,7 @@ static void cs5520_tune_drive(ide_drive_t *drive, u8 pio)
 static int cs5520_config_drive_xfer_rate(ide_drive_t *drive)
 {
 /* Tune the drive for PIO modes up to PIO 4 */
-cs5520_tune_drive(drive, 4);
+cs5520_tune_drive(drive, 255);
 
 /* Then tell the core to use DMA operations */
 return 0;
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index ce44e38390aa..082ca7da2cbc 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -2,6 +2,7 @@
 * linux/drivers/ide/pci/cs5535.c
 *
 * Copyright (C) 2004-2005 Advanced Micro Devices, Inc.
+* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
 *
 * History:
 * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com>
@@ -83,14 +84,17 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
 
 /* Set the PIO timings */
 if ((speed & XFER_MODE) == XFER_PIO) {
-u8 pioa;
-u8 piob;
-u8 cmd;
+ide_drive_t *pair = &drive->hwif->drives[drive->dn ^ 1];
+u8 cmd, pioa;
 
-pioa = speed - XFER_PIO_0;
-piob = ide_get_best_pio_mode(&(drive->hwif->drives[!unit]),
-255, 4);
-cmd = pioa < piob ? pioa : piob;
+cmd = pioa = speed - XFER_PIO_0;
+
+if (pair->present) {
+u8 piob = ide_get_best_pio_mode(pair, 255, 4);
+
+if (piob < cmd)
+cmd = piob;
+}
 
 /* Write the speed of the current drive */
 reg = (cs5535_pio_cmd_timings[cmd] << 16) |
@@ -116,7 +120,7 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
 
 reg &= 0x80000000UL; /* Preserve the PIO format bit */
 
-if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_7)
+if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_4)
 reg |= cs5535_udma_timings[speed - XFER_UDMA_0];
 else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
 reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0];
@@ -151,32 +155,22 @@ static int cs5535_set_drive(ide_drive_t *drive, u8 speed)
 *
 * A callback from the upper layers for PIO-only tuning.
 */
-static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed)
+static void cs5535_tuneproc(ide_drive_t *drive, u8 pio)
 {
-u8 modes[] = { XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3,
-XFER_PIO_4 };
-
-/* cs5535 max pio is pio 4, best_pio will check the blacklist.
-i think we don't need to rate_filter the incoming xferspeed
-since we know we're only going to choose pio */
-xferspeed = ide_get_best_pio_mode(drive, xferspeed, 4);
-ide_config_drive_speed(drive, modes[xferspeed]);
-cs5535_set_speed(drive, xferspeed);
+pio = ide_get_best_pio_mode(drive, pio, 4);
+ide_config_drive_speed(drive, XFER_PIO_0 + pio);
+cs5535_set_speed(drive, XFER_PIO_0 + pio);
 }
 
 static int cs5535_dma_check(ide_drive_t *drive)
 {
-u8 speed;
-
 drive->init_speed = 0;
 
 if (ide_tune_dma(drive))
 return 0;
 
-if (ide_use_fast_pio(drive)) {
-speed = ide_get_best_pio_mode(drive, 255, 4);
-cs5535_set_drive(drive, speed);
-}
+if (ide_use_fast_pio(drive))
+cs5535_tuneproc(drive, 255);
 
 return -1;
 }
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index 95dbed7e6022..70b3245dbf62 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -21,7 +21,7 @@
 * it8213_dma_2_pio - return the PIO mode matching DMA
 * @xfer_rate: transfer speed
 *
-* Returns the nearest equivalent PIO timing for the PIO or DMA
+* Returns the nearest equivalent PIO timing for the DMA
 * mode requested by the controller.
 */
 
@@ -35,34 +35,28 @@ static u8 it8213_dma_2_pio (u8 xfer_rate) {
 case XFER_UDMA_1:
 case XFER_UDMA_0:
 case XFER_MW_DMA_2:
-case XFER_PIO_4:
 return 4;
 case XFER_MW_DMA_1:
-case XFER_PIO_3:
 return 3;
 case XFER_SW_DMA_2:
-case XFER_PIO_2:
 return 2;
 case XFER_MW_DMA_0:
 case XFER_SW_DMA_1:
 case XFER_SW_DMA_0:
-case XFER_PIO_1:
-case XFER_PIO_0:
-case XFER_PIO_SLOW:
 default:
 return 0;
 }
 }
 
 /*
-* it8213_tuneproc - tune a drive
+* it8213_tune_pio - tune a drive
 * @drive: drive to tune
 * @pio: desired PIO mode
 *
 * Set the interface PIO mode.
 */
 
-static void it8213_tuneproc (ide_drive_t *drive, u8 pio)
+static void it8213_tune_pio(ide_drive_t *drive, const u8 pio)
 {
 ide_hwif_t *hwif = HWIF(drive);
 struct pci_dev *dev = hwif->pci_dev;
@@ -82,8 +76,6 @@ static void it8213_tuneproc (ide_drive_t *drive, u8 pio)
 { 2, 1 },
 { 2, 3 }, };
 
-pio = ide_get_best_pio_mode(drive, pio, 4);
-
 spin_lock_irqsave(&tune_lock, flags);
 pci_read_config_word(dev, master_port, &master_data);
 
@@ -113,6 +105,13 @@ static void it8213_tuneproc (ide_drive_t *drive, u8 pio)
 spin_unlock_irqrestore(&tune_lock, flags);
 }
 
+static void it8213_tuneproc(ide_drive_t *drive, u8 pio)
+{
+pio = ide_get_best_pio_mode(drive, pio, 4);
+it8213_tune_pio(drive, pio);
+ide_config_drive_speed(drive, XFER_PIO_0 + pio);
+}
+
 /**
 * it8213_tune_chipset - set controller timings
 * @drive: Drive to set up
@@ -193,7 +192,12 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 if (reg55 & w_flag)
 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
 }
-it8213_tuneproc(drive, it8213_dma_2_pio(speed));
+
+if (speed > XFER_PIO_4)
+it8213_tune_pio(drive, it8213_dma_2_pio(speed));
+else
+it8213_tune_pio(drive, speed - XFER_PIO_0);
+
 return ide_config_drive_speed(drive, speed);
 }
 
@@ -209,13 +213,10 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 
 static int it8213_config_drive_for_dma (ide_drive_t *drive)
 {
-u8 pio;
-
 if (ide_tune_dma(drive))
 return 0;
 
-pio = ide_get_best_pio_mode(drive, 255, 4);
-it8213_tune_chipset(drive, XFER_PIO_0 + pio);
+it8213_tuneproc(drive, 255);
 
 return -1;
 }
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index d7ce9dd8de16..65a0ff352b98 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -83,23 +83,10 @@ static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
 return ATA_CBL_PATA80;
 }
 
-static void jmicron_tuneproc (ide_drive_t *drive, byte mode_wanted)
+static void jmicron_tuneproc(ide_drive_t *drive, u8 pio)
 {
-return;
-}
-
-/**
-* config_jmicron_chipset_for_pio - set drive timings
-* @drive: drive to tune
-* @speed we want
-*
-*/
-
-static void config_jmicron_chipset_for_pio (ide_drive_t *drive, byte set_speed)
-{
-u8 speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5);
-if (set_speed)
-(void) ide_config_drive_speed(drive, speed);
+pio = ide_get_best_pio_mode(drive, pio, 5);
+ide_config_drive_speed(drive, XFER_PIO_0 + pio);
 }
 
 /**
@@ -132,7 +119,7 @@ static int jmicron_config_drive_for_dma (ide_drive_t *drive)
 if (ide_tune_dma(drive))
 return 0;
 
-config_jmicron_chipset_for_pio(drive, 1);
+jmicron_tuneproc(drive, 255);
 
 return -1;
 }
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 4f69cd067e5e..5cfa9378bbb8 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/piix.c Version 0.50 Jun 10, 2007
+ * linux/drivers/ide/pci/piix.c Version 0.51 Jul 6, 2007
 *
 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
@@ -109,7 +109,7 @@ static int no_piix_dma;
 * piix_dma_2_pio - return the PIO mode matching DMA
 * @xfer_rate: transfer speed
 *
-* Returns the nearest equivalent PIO timing for the PIO or DMA
+* Returns the nearest equivalent PIO timing for the DMA
 * mode requested by the controller.
 */
 
@@ -123,20 +123,14 @@ static u8 piix_dma_2_pio (u8 xfer_rate) {
 case XFER_UDMA_1:
 case XFER_UDMA_0:
 case XFER_MW_DMA_2:
-case XFER_PIO_4:
 return 4;
 case XFER_MW_DMA_1:
-case XFER_PIO_3:
 return 3;
 case XFER_SW_DMA_2:
-case XFER_PIO_2:
 return 2;
 case XFER_MW_DMA_0:
 case XFER_SW_DMA_1:
 case XFER_SW_DMA_0:
-case XFER_PIO_1:
-case XFER_PIO_0:
-case XFER_PIO_SLOW:
 default:
 return 0;
 }
@@ -269,6 +263,7 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 case XFER_PIO_4:
 case XFER_PIO_3:
 case XFER_PIO_2:
+case XFER_PIO_1:
 case XFER_PIO_0: break;
 default: return -1;
 }
@@ -299,7 +294,11 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
 }
 
-piix_tune_pio(drive, piix_dma_2_pio(speed));
+if (speed > XFER_PIO_4)
+piix_tune_pio(drive, piix_dma_2_pio(speed));
+else
+piix_tune_pio(drive, speed - XFER_PIO_0);
+
 return ide_config_drive_speed(drive, speed);
 }
 
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index bf19ddfa6cda..eeb0a6d434aa 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -190,7 +190,7 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
190} 190}
191 191
192/** 192/**
193 * scc_tuneproc - tune a drive PIO mode 193 * scc_tune_pio - tune a drive PIO mode
194 * @drive: drive to tune 194 * @drive: drive to tune
195 * @mode_wanted: the target operating mode 195 * @mode_wanted: the target operating mode
196 * 196 *
@@ -198,7 +198,7 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
198 * controller. 198 * controller.
199 */ 199 */
200 200
201static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted) 201static void scc_tune_pio(ide_drive_t *drive, const u8 pio)
202{ 202{
203 ide_hwif_t *hwif = HWIF(drive); 203 ide_hwif_t *hwif = HWIF(drive);
204 struct scc_ports *ports = ide_get_hwifdata(hwif); 204 struct scc_ports *ports = ide_get_hwifdata(hwif);
@@ -207,41 +207,25 @@ static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted)
207 unsigned long piosht_port = ctl_base + 0x000; 207 unsigned long piosht_port = ctl_base + 0x000;
208 unsigned long pioct_port = ctl_base + 0x004; 208 unsigned long pioct_port = ctl_base + 0x004;
209 unsigned long reg; 209 unsigned long reg;
210 unsigned char speed = XFER_PIO_0;
211 int offset; 210 int offset;
212 211
213 mode_wanted = ide_get_best_pio_mode(drive, mode_wanted, 4);
214 switch (mode_wanted) {
215 case 4:
216 speed = XFER_PIO_4;
217 break;
218 case 3:
219 speed = XFER_PIO_3;
220 break;
221 case 2:
222 speed = XFER_PIO_2;
223 break;
224 case 1:
225 speed = XFER_PIO_1;
226 break;
227 case 0:
228 default:
229 speed = XFER_PIO_0;
230 break;
231 }
232
233 reg = in_be32((void __iomem *)cckctrl_port); 212 reg = in_be32((void __iomem *)cckctrl_port);
234 if (reg & CCKCTRL_ATACLKOEN) { 213 if (reg & CCKCTRL_ATACLKOEN) {
235 offset = 1; /* 133MHz */ 214 offset = 1; /* 133MHz */
236 } else { 215 } else {
237 offset = 0; /* 100MHz */ 216 offset = 0; /* 100MHz */
238 } 217 }
239 reg = JCHSTtbl[offset][mode_wanted] << 16 | JCHHTtbl[offset][mode_wanted]; 218 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
240 out_be32((void __iomem *)piosht_port, reg); 219 out_be32((void __iomem *)piosht_port, reg);
241 reg = JCHCTtbl[offset][mode_wanted]; 220 reg = JCHCTtbl[offset][pio];
242 out_be32((void __iomem *)pioct_port, reg); 221 out_be32((void __iomem *)pioct_port, reg);
222}
243 223
244 ide_config_drive_speed(drive, speed); 224static void scc_tuneproc(ide_drive_t *drive, u8 pio)
225{
226 pio = ide_get_best_pio_mode(drive, pio, 4);
227 scc_tune_pio(drive, pio);
228 ide_config_drive_speed(drive, XFER_PIO_0 + pio);
245} 229}
246 230
247/** 231/**
@@ -280,26 +264,21 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed)
280 264
281 switch (speed) { 265 switch (speed) {
282 case XFER_UDMA_6: 266 case XFER_UDMA_6:
283 idx = 6;
284 break;
285 case XFER_UDMA_5: 267 case XFER_UDMA_5:
286 idx = 5;
287 break;
288 case XFER_UDMA_4: 268 case XFER_UDMA_4:
289 idx = 4;
290 break;
291 case XFER_UDMA_3: 269 case XFER_UDMA_3:
292 idx = 3;
293 break;
294 case XFER_UDMA_2: 270 case XFER_UDMA_2:
295 idx = 2;
296 break;
297 case XFER_UDMA_1: 271 case XFER_UDMA_1:
298 idx = 1;
299 break;
300 case XFER_UDMA_0: 272 case XFER_UDMA_0:
301 idx = 0; 273 idx = speed - XFER_UDMA_0;
302 break; 274 break;
275 case XFER_PIO_4:
276 case XFER_PIO_3:
277 case XFER_PIO_2:
278 case XFER_PIO_1:
279 case XFER_PIO_0:
280 scc_tune_pio(drive, speed - XFER_PIO_0);
281 return ide_config_drive_speed(drive, speed);
303 default: 282 default:
304 return 1; 283 return 1;
305 } 284 }
@@ -329,7 +308,7 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed)
329 * required. 308 * required.
330 * If the drive isn't suitable for DMA or we hit other problems 309 * If the drive isn't suitable for DMA or we hit other problems
331 * then we will drop down to PIO and set up PIO appropriately. 310 * then we will drop down to PIO and set up PIO appropriately.
332 * (return 1) 311 * (return -1)
333 */ 312 */
334 313
335static int scc_config_drive_for_dma(ide_drive_t *drive) 314static int scc_config_drive_for_dma(ide_drive_t *drive)
@@ -338,7 +317,7 @@ static int scc_config_drive_for_dma(ide_drive_t *drive)
338 return 0; 317 return 0;
339 318
340 if (ide_use_fast_pio(drive)) 319 if (ide_use_fast_pio(drive))
341 scc_tuneproc(drive, 4); 320 scc_tuneproc(drive, 255);
342 321
343 return -1; 322 return -1;
344} 323}
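
The scc_pata.c hunk above also collapses the per-mode idx assignments into a single subtraction: because the XFER_UDMA_* identifiers are numbered consecutively, idx = speed - XFER_UDMA_0 yields the UDMA level directly. A standalone sketch of the same trick; the two enum values below are illustrative stand-ins, not taken from the kernel headers:

#include <stdio.h>

/* Stand-ins for the kernel's contiguous XFER_UDMA_* identifiers. */
enum { XFER_UDMA_0 = 0x40, XFER_UDMA_6 = 0x46 };

int main(void)
{
        int speed;

        for (speed = XFER_UDMA_0; speed <= XFER_UDMA_6; speed++) {
                int idx = speed - XFER_UDMA_0;  /* replaces the old per-case switch */

                printf("speed 0x%02x -> timing table index %d\n", speed, idx);
        }
        return 0;
}
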
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 63fbb79e8178..26f24802d3e8 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -801,6 +801,7 @@ struct sis_laptop {
801static const struct sis_laptop sis_laptop[] = { 801static const struct sis_laptop sis_laptop[] = {
802 /* devid, subvendor, subdev */ 802 /* devid, subvendor, subdev */
803 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ 803 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
804 { 0x5513, 0x1734, 0x105f }, /* FSC Amilo A1630 */
804 /* end marker */ 805 /* end marker */
805 { 0, } 806 { 0, }
806}; 807};
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 8e655f2db5cb..628b0664f576 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/slc90e66.c Version 0.14 February 8, 2007 2 * linux/drivers/ide/pci/slc90e66.c Version 0.15 Jul 6, 2007
3 * 3 *
4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> 5 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
@@ -29,20 +29,14 @@ static u8 slc90e66_dma_2_pio (u8 xfer_rate) {
29 case XFER_UDMA_1: 29 case XFER_UDMA_1:
30 case XFER_UDMA_0: 30 case XFER_UDMA_0:
31 case XFER_MW_DMA_2: 31 case XFER_MW_DMA_2:
32 case XFER_PIO_4:
33 return 4; 32 return 4;
34 case XFER_MW_DMA_1: 33 case XFER_MW_DMA_1:
35 case XFER_PIO_3:
36 return 3; 34 return 3;
37 case XFER_SW_DMA_2: 35 case XFER_SW_DMA_2:
38 case XFER_PIO_2:
39 return 2; 36 return 2;
40 case XFER_MW_DMA_0: 37 case XFER_MW_DMA_0:
41 case XFER_SW_DMA_1: 38 case XFER_SW_DMA_1:
42 case XFER_SW_DMA_0: 39 case XFER_SW_DMA_0:
43 case XFER_PIO_1:
44 case XFER_PIO_0:
45 case XFER_PIO_SLOW:
46 default: 40 default:
47 return 0; 41 return 0;
48 } 42 }
@@ -136,6 +130,7 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
136 case XFER_PIO_4: 130 case XFER_PIO_4:
137 case XFER_PIO_3: 131 case XFER_PIO_3:
138 case XFER_PIO_2: 132 case XFER_PIO_2:
133 case XFER_PIO_1:
139 case XFER_PIO_0: break; 134 case XFER_PIO_0: break;
140 default: return -1; 135 default: return -1;
141 } 136 }
@@ -156,7 +151,11 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
156 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); 151 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
157 } 152 }
158 153
159 slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed)); 154 if (speed > XFER_PIO_4)
155 slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed));
156 else
157 slc90e66_tune_pio(drive, speed - XFER_PIO_0);
158
160 return ide_config_drive_speed(drive, speed); 159 return ide_config_drive_speed(drive, speed);
161} 160}
162 161
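
The it8213.c, piix.c and slc90e66.c hunks above all apply the same fix: the *_dma_2_pio() helpers no longer list PIO modes, so the tune_chipset() routines translate only DMA speeds through them and pass PIO speeds straight through as speed - XFER_PIO_0. A minimal userspace sketch of that selection logic; the enum values are illustrative stand-ins for the kernel's XFER_* constants, and tune_pio() is reduced to a printf:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's XFER_* transfer-mode identifiers. */
enum {
        XFER_PIO_0    = 0x08,
        XFER_PIO_4    = 0x0c,
        XFER_MW_DMA_2 = 0x22,
        XFER_UDMA_5   = 0x45,
};

/* DMA modes map to the nearest PIO timing; the PIO cases are gone from the table. */
static unsigned char dma_2_pio(unsigned char speed)
{
        switch (speed) {
        case XFER_UDMA_5:
        case XFER_MW_DMA_2:
                return 4;
        default:
                return 0;
        }
}

static void tune_pio(unsigned char pio)
{
        printf("program PIO mode %u\n", pio);
}

static void tune_chipset(unsigned char speed)
{
        if (speed > XFER_PIO_4)                 /* a DMA mode: translate it */
                tune_pio(dma_2_pio(speed));
        else                                    /* already PIO: use it directly */
                tune_pio(speed - XFER_PIO_0);
}

int main(void)
{
        tune_chipset(XFER_PIO_0 + 3);   /* prints "program PIO mode 3" */
        tune_chipset(XFER_UDMA_5);      /* prints "program PIO mode 4" */
        return 0;
}

With this split, removing the PIO entries from the DMA lookup table cannot silently change how plain PIO requests are programmed.
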
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8e58ea3d95c0..004bc2487270 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -310,7 +310,7 @@ static int pci_default_resume(struct pci_dev *pci_dev)
310 /* restore the PCI config space */ 310 /* restore the PCI config space */
311 pci_restore_state(pci_dev); 311 pci_restore_state(pci_dev);
312 /* if the device was enabled before suspend, reenable */ 312 /* if the device was enabled before suspend, reenable */
313 retval = __pci_reenable_device(pci_dev); 313 retval = pci_reenable_device(pci_dev);
314 /* if the device was busmaster before the suspend, make it busmaster again */ 314 /* if the device was busmaster before the suspend, make it busmaster again */
315 if (pci_dev->is_busmaster) 315 if (pci_dev->is_busmaster)
316 pci_set_master(pci_dev); 316 pci_set_master(pci_dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1ee9cd9c86e2..37c00f6fd801 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -695,14 +695,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
695} 695}
696 696
697/** 697/**
698 * __pci_reenable_device - Resume abandoned device 698 * pci_reenable_device - Resume abandoned device
699 * @dev: PCI device to be resumed 699 * @dev: PCI device to be resumed
700 * 700 *
701 * Note this function is a backend of pci_default_resume and is not supposed 701 * Note this function is a backend of pci_default_resume and is not supposed
702 * to be called by normal code, write proper resume handler and use it instead. 702 * to be called by normal code, write proper resume handler and use it instead.
703 */ 703 */
704int 704int pci_reenable_device(struct pci_dev *dev)
705__pci_reenable_device(struct pci_dev *dev)
706{ 705{
707 if (atomic_read(&dev->enable_cnt)) 706 if (atomic_read(&dev->enable_cnt))
708 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); 707 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
@@ -1604,7 +1603,7 @@ early_param("pci", pci_setup);
1604device_initcall(pci_init); 1603device_initcall(pci_init);
1605 1604
1606EXPORT_SYMBOL_GPL(pci_restore_bars); 1605EXPORT_SYMBOL_GPL(pci_restore_bars);
1607EXPORT_SYMBOL(__pci_reenable_device); 1606EXPORT_SYMBOL(pci_reenable_device);
1608EXPORT_SYMBOL(pci_enable_device_bars); 1607EXPORT_SYMBOL(pci_enable_device_bars);
1609EXPORT_SYMBOL(pci_enable_device); 1608EXPORT_SYMBOL(pci_enable_device);
1610EXPORT_SYMBOL(pcim_enable_device); 1609EXPORT_SYMBOL(pcim_enable_device);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index bb90df8bdce4..1cc01acc2808 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -328,17 +328,15 @@ static int idescsi_check_condition(ide_drive_t *drive, struct request *failed_co
328 u8 *buf; 328 u8 *buf;
329 329
330 /* stuff a sense request in front of our current request */ 330 /* stuff a sense request in front of our current request */
331 pc = kmalloc (sizeof (idescsi_pc_t), GFP_ATOMIC); 331 pc = kzalloc(sizeof(idescsi_pc_t), GFP_ATOMIC);
332 rq = kmalloc (sizeof (struct request), GFP_ATOMIC); 332 rq = kmalloc(sizeof(struct request), GFP_ATOMIC);
333 buf = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC); 333 buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
334 if (pc == NULL || rq == NULL || buf == NULL) { 334 if (!pc || !rq || !buf) {
335 kfree(buf); 335 kfree(buf);
336 kfree(rq); 336 kfree(rq);
337 kfree(pc); 337 kfree(pc);
338 return -ENOMEM; 338 return -ENOMEM;
339 } 339 }
340 memset (pc, 0, sizeof (idescsi_pc_t));
341 memset (buf, 0, SCSI_SENSE_BUFFERSIZE);
342 ide_init_drive_cmd(rq); 340 ide_init_drive_cmd(rq);
343 rq->special = (char *) pc; 341 rq->special = (char *) pc;
344 pc->rq = rq; 342 pc->rq = rq;
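
The ide-scsi.c hunk swaps kmalloc() plus memset() for kzalloc(), which returns already-zeroed memory, and shortens the explicit NULL comparisons to !ptr. A userspace analogue of the same cleanup, with calloc() standing in for kzalloc() purely for illustration:

#include <stdlib.h>
#include <string.h>

struct pkt_cmd { int resid; unsigned char sense[96]; };

/* Before: allocate, then clear the buffer by hand. */
static struct pkt_cmd *alloc_pc_old(void)
{
        struct pkt_cmd *pc = malloc(sizeof(*pc));

        if (pc == NULL)
                return NULL;
        memset(pc, 0, sizeof(*pc));
        return pc;
}

/* After: one zeroing allocation, mirroring the kzalloc() change. */
static struct pkt_cmd *alloc_pc_new(void)
{
        return calloc(1, sizeof(struct pkt_cmd));
}

int main(void)
{
        struct pkt_cmd *a = alloc_pc_old();
        struct pkt_cmd *b = alloc_pc_new();

        free(a);
        free(b);
        return 0;
}
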
diff --git a/include/asm-avr32/bug.h b/include/asm-avr32/bug.h
index afdcd79a2966..331d45bab18f 100644
--- a/include/asm-avr32/bug.h
+++ b/include/asm-avr32/bug.h
@@ -57,7 +57,7 @@
57 57
58#define WARN_ON(condition) \ 58#define WARN_ON(condition) \
59 ({ \ 59 ({ \
60 typeof(condition) __ret_warn_on = (condition); \ 60 int __ret_warn_on = !!(condition); \
61 if (unlikely(__ret_warn_on)) \ 61 if (unlikely(__ret_warn_on)) \
62 _BUG_OR_WARN(BUGFLAG_WARNING); \ 62 _BUG_OR_WARN(BUGFLAG_WARNING); \
63 unlikely(__ret_warn_on); \ 63 unlikely(__ret_warn_on); \
diff --git a/include/asm-frv/mb86943a.h b/include/asm-frv/mb86943a.h
index b89fd0b56bb3..e87ef924bfb4 100644
--- a/include/asm-frv/mb86943a.h
+++ b/include/asm-frv/mb86943a.h
@@ -36,4 +36,7 @@
36#define __reg_MB86943_pci_sl_io_base *(volatile uint32_t *) (__region_CS1 + 0x70) 36#define __reg_MB86943_pci_sl_io_base *(volatile uint32_t *) (__region_CS1 + 0x70)
37#define __reg_MB86943_pci_sl_mem_base *(volatile uint32_t *) (__region_CS1 + 0x78) 37#define __reg_MB86943_pci_sl_mem_base *(volatile uint32_t *) (__region_CS1 + 0x78)
38 38
39#define __reg_MB86943_pci_arbiter *(volatile uint32_t *) (__region_CS2 + 0x01300014)
40#define MB86943_PCIARB_EN 0x00000001
41
39#endif /* _ASM_MB86943A_H */ 42#endif /* _ASM_MB86943A_H */
diff --git a/include/asm-parisc/bug.h b/include/asm-parisc/bug.h
index 83ba510ed5d8..8cfc553fc837 100644
--- a/include/asm-parisc/bug.h
+++ b/include/asm-parisc/bug.h
@@ -74,7 +74,7 @@
74 74
75 75
76#define WARN_ON(x) ({ \ 76#define WARN_ON(x) ({ \
77 typeof(x) __ret_warn_on = (x); \ 77 int __ret_warn_on = !!(x); \
78 if (__builtin_constant_p(__ret_warn_on)) { \ 78 if (__builtin_constant_p(__ret_warn_on)) { \
79 if (__ret_warn_on) \ 79 if (__ret_warn_on) \
80 __WARN(); \ 80 __WARN(); \
diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h
index 838684dc6d35..384e3621e341 100644
--- a/include/asm-s390/bug.h
+++ b/include/asm-s390/bug.h
@@ -50,7 +50,7 @@
50#define BUG() __EMIT_BUG(0) 50#define BUG() __EMIT_BUG(0)
51 51
52#define WARN_ON(x) ({ \ 52#define WARN_ON(x) ({ \
53 typeof(x) __ret_warn_on = (x); \ 53 int __ret_warn_on = !!(x); \
54 if (__builtin_constant_p(__ret_warn_on)) { \ 54 if (__builtin_constant_p(__ret_warn_on)) { \
55 if (__ret_warn_on) \ 55 if (__ret_warn_on) \
56 __EMIT_BUG(BUGFLAG_WARNING); \ 56 __EMIT_BUG(BUGFLAG_WARNING); \
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 46f925c815ac..a78d482e8b2f 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -61,7 +61,7 @@ do { \
61} while (0) 61} while (0)
62 62
63#define WARN_ON(x) ({ \ 63#define WARN_ON(x) ({ \
64 typeof(x) __ret_warn_on = (x); \ 64 int __ret_warn_on = !!(x); \
65 if (__builtin_constant_p(__ret_warn_on)) { \ 65 if (__builtin_constant_p(__ret_warn_on)) { \
66 if (__ret_warn_on) \ 66 if (__ret_warn_on) \
67 __WARN(); \ 67 __WARN(); \
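
The avr32, parisc, s390 and sh bug.h hunks above all make WARN_ON() store int __ret_warn_on = !!(condition) instead of a typeof(condition) copy. The double negation folds any scalar expression (a pointer test, a 64-bit mask, a bitfield) down to 0 or 1, so the macro's temporary and its result are always a plain int. A hedged userspace sketch of the idiom, with the warning reduced to a printf rather than the kernel's __WARN() machinery:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's WARN_ON(): the real macro reports
 * through __WARN()/_BUG_OR_WARN(); this one just prints. */
#define WARN_ON(condition) ({                                           \
        int __ret_warn_on = !!(condition);                              \
        if (__ret_warn_on)                                              \
                printf("warning: %s at %s:%d\n", #condition,            \
                       __FILE__, __LINE__);                             \
        __ret_warn_on;                                                  \
})

int main(void)
{
        uint64_t big = 1ULL << 40;      /* would not fit in an int without !! */
        void *ptr = &big;

        if (WARN_ON(big & (1ULL << 40)))
                printf("condition was true, normalized to 1\n");
        if (WARN_ON(ptr == NULL))
                printf("never reached\n");
        return 0;
}
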
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d8f8a3a96644..e7d8d4e19a53 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -534,7 +534,7 @@ static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val
534 534
535int __must_check pci_enable_device(struct pci_dev *dev); 535int __must_check pci_enable_device(struct pci_dev *dev);
536int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask); 536int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
537int __must_check __pci_reenable_device(struct pci_dev *); 537int __must_check pci_reenable_device(struct pci_dev *);
538int __must_check pcim_enable_device(struct pci_dev *pdev); 538int __must_check pcim_enable_device(struct pci_dev *pdev);
539void pcim_pin_device(struct pci_dev *pdev); 539void pcim_pin_device(struct pci_dev *pdev);
540 540
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2e490271acf6..17249fae5014 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -734,7 +734,6 @@ struct sched_domain {
734 unsigned long max_interval; /* Maximum balance interval ms */ 734 unsigned long max_interval; /* Maximum balance interval ms */
735 unsigned int busy_factor; /* less balancing by factor if busy */ 735 unsigned int busy_factor; /* less balancing by factor if busy */
736 unsigned int imbalance_pct; /* No balance until over watermark */ 736 unsigned int imbalance_pct; /* No balance until over watermark */
737 unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
738 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ 737 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
739 unsigned int busy_idx; 738 unsigned int busy_idx;
740 unsigned int idle_idx; 739 unsigned int idle_idx;
@@ -875,7 +874,7 @@ struct sched_class {
875 874
876 void (*set_curr_task) (struct rq *rq); 875 void (*set_curr_task) (struct rq *rq);
877 void (*task_tick) (struct rq *rq, struct task_struct *p); 876 void (*task_tick) (struct rq *rq, struct task_struct *p);
878 void (*task_new) (struct rq *rq, struct task_struct *p); 877 void (*task_new) (struct rq *rq, struct task_struct *p, u64 now);
879}; 878};
880 879
881struct load_weight { 880struct load_weight {
@@ -905,23 +904,28 @@ struct sched_entity {
905 struct rb_node run_node; 904 struct rb_node run_node;
906 unsigned int on_rq; 905 unsigned int on_rq;
907 906
907 u64 exec_start;
908 u64 sum_exec_runtime;
908 u64 wait_start_fair; 909 u64 wait_start_fair;
910 u64 sleep_start_fair;
911
912#ifdef CONFIG_SCHEDSTATS
909 u64 wait_start; 913 u64 wait_start;
910 u64 exec_start; 914 u64 wait_max;
915 s64 sum_wait_runtime;
916
911 u64 sleep_start; 917 u64 sleep_start;
912 u64 sleep_start_fair;
913 u64 block_start;
914 u64 sleep_max; 918 u64 sleep_max;
919 s64 sum_sleep_runtime;
920
921 u64 block_start;
915 u64 block_max; 922 u64 block_max;
916 u64 exec_max; 923 u64 exec_max;
917 u64 wait_max;
918 u64 last_ran;
919 924
920 u64 sum_exec_runtime;
921 s64 sum_wait_runtime;
922 s64 sum_sleep_runtime;
923 unsigned long wait_runtime_overruns; 925 unsigned long wait_runtime_overruns;
924 unsigned long wait_runtime_underruns; 926 unsigned long wait_runtime_underruns;
927#endif
928
925#ifdef CONFIG_FAIR_GROUP_SCHED 929#ifdef CONFIG_FAIR_GROUP_SCHED
926 struct sched_entity *parent; 930 struct sched_entity *parent;
927 /* rq on which this entity is (to be) queued: */ 931 /* rq on which this entity is (to be) queued: */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index d0890a7e5bab..525d437b1253 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -185,7 +185,6 @@
185 .max_interval = 64*num_online_cpus(), \ 185 .max_interval = 64*num_online_cpus(), \
186 .busy_factor = 128, \ 186 .busy_factor = 128, \
187 .imbalance_pct = 133, \ 187 .imbalance_pct = 133, \
188 .cache_hot_time = (10*1000000), \
189 .cache_nice_tries = 1, \ 188 .cache_nice_tries = 1, \
190 .busy_idx = 3, \ 189 .busy_idx = 3, \
191 .idle_idx = 3, \ 190 .idle_idx = 3, \
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index ffbc7f28335a..2e5b2f6f9fa0 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -132,6 +132,8 @@ struct netlbl_lsm_secattr_catmap {
132#define NETLBL_SECATTR_CACHE 0x00000002 132#define NETLBL_SECATTR_CACHE 0x00000002
133#define NETLBL_SECATTR_MLS_LVL 0x00000004 133#define NETLBL_SECATTR_MLS_LVL 0x00000004
134#define NETLBL_SECATTR_MLS_CAT 0x00000008 134#define NETLBL_SECATTR_MLS_CAT 0x00000008
135#define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \
136 NETLBL_SECATTR_MLS_CAT)
135struct netlbl_lsm_secattr { 137struct netlbl_lsm_secattr {
136 u32 flags; 138 u32 flags;
137 139
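
NETLBL_SECATTR_CACHEABLE, added just above, is the OR of the two MLS flags; the selinux/netlabel.c hunks near the end of this diff use it together with NETLBL_SECATTR_CACHE to decide whether a received security attribute should be added to the NetLabel cache. A small sketch of that flag test, with the flag values copied from the header shown above:

#include <stdio.h>
#include <stdint.h>

#define NETLBL_SECATTR_CACHE    0x00000002
#define NETLBL_SECATTR_MLS_LVL  0x00000004
#define NETLBL_SECATTR_MLS_CAT  0x00000008
#define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \
                                  NETLBL_SECATTR_MLS_CAT)

/* Cache only attributes that carry MLS data and a cache slot. */
static int should_cache(uint32_t flags)
{
        return (flags & NETLBL_SECATTR_CACHEABLE) &&
               (flags & NETLBL_SECATTR_CACHE);
}

int main(void)
{
        printf("%d\n", should_cache(NETLBL_SECATTR_MLS_LVL |
                                    NETLBL_SECATTR_CACHE));     /* 1 */
        printf("%d\n", should_cache(NETLBL_SECATTR_MLS_LVL));   /* 0 */
        return 0;
}
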
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 5bfeaed7e487..c38272746887 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -62,6 +62,15 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
62 */ 62 */
63 desc->chip->enable(irq); 63 desc->chip->enable(irq);
64 64
65 /*
66 * Temporary hack to figure out more about the problem, which
67 * is causing the ancient network cards to die.
68 */
69 if (desc->handle_irq != handle_edge_irq) {
70 WARN_ON_ONCE(1);
71 return;
72 }
73
65 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 74 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
66 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 75 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
67 76
diff --git a/kernel/sched.c b/kernel/sched.c
index 238a76957e86..72bb9483d949 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -637,7 +637,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
637 637
638#define WMULT_SHIFT 32 638#define WMULT_SHIFT 32
639 639
640static inline unsigned long 640static unsigned long
641calc_delta_mine(unsigned long delta_exec, unsigned long weight, 641calc_delta_mine(unsigned long delta_exec, unsigned long weight,
642 struct load_weight *lw) 642 struct load_weight *lw)
643{ 643{
@@ -657,7 +657,7 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
657 tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT; 657 tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
658 } 658 }
659 659
660 return (unsigned long)min(tmp, (u64)sysctl_sched_runtime_limit); 660 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
661} 661}
662 662
663static inline unsigned long 663static inline unsigned long
@@ -678,46 +678,6 @@ static void update_load_sub(struct load_weight *lw, unsigned long dec)
678 lw->inv_weight = 0; 678 lw->inv_weight = 0;
679} 679}
680 680
681static void __update_curr_load(struct rq *rq, struct load_stat *ls)
682{
683 if (rq->curr != rq->idle && ls->load.weight) {
684 ls->delta_exec += ls->delta_stat;
685 ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
686 ls->delta_stat = 0;
687 }
688}
689
690/*
691 * Update delta_exec, delta_fair fields for rq.
692 *
693 * delta_fair clock advances at a rate inversely proportional to
694 * total load (rq->ls.load.weight) on the runqueue, while
695 * delta_exec advances at the same rate as wall-clock (provided
696 * cpu is not idle).
697 *
698 * delta_exec / delta_fair is a measure of the (smoothened) load on this
699 * runqueue over any given interval. This (smoothened) load is used
700 * during load balance.
701 *
702 * This function is called /before/ updating rq->ls.load
703 * and when switching tasks.
704 */
705static void update_curr_load(struct rq *rq, u64 now)
706{
707 struct load_stat *ls = &rq->ls;
708 u64 start;
709
710 start = ls->load_update_start;
711 ls->load_update_start = now;
712 ls->delta_stat += now - start;
713 /*
714 * Stagger updates to ls->delta_fair. Very frequent updates
715 * can be expensive.
716 */
717 if (ls->delta_stat >= sysctl_sched_stat_granularity)
718 __update_curr_load(rq, ls);
719}
720
721/* 681/*
722 * To aid in avoiding the subversion of "niceness" due to uneven distribution 682 * To aid in avoiding the subversion of "niceness" due to uneven distribution
723 * of tasks with abnormal "nice" values across CPUs the contribution that 683 * of tasks with abnormal "nice" values across CPUs the contribution that
@@ -727,19 +687,6 @@ static void update_curr_load(struct rq *rq, u64 now)
727 * slice expiry etc. 687 * slice expiry etc.
728 */ 688 */
729 689
730/*
731 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
732 * If static_prio_timeslice() is ever changed to break this assumption then
733 * this code will need modification
734 */
735#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
736#define load_weight(lp) \
737 (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
738#define PRIO_TO_LOAD_WEIGHT(prio) \
739 load_weight(static_prio_timeslice(prio))
740#define RTPRIO_TO_LOAD_WEIGHT(rp) \
741 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))
742
743#define WEIGHT_IDLEPRIO 2 690#define WEIGHT_IDLEPRIO 2
744#define WMULT_IDLEPRIO (1 << 31) 691#define WMULT_IDLEPRIO (1 << 31)
745 692
@@ -781,32 +728,6 @@ static const u32 prio_to_wmult[40] = {
781/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 728/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
782}; 729};
783 730
784static inline void
785inc_load(struct rq *rq, const struct task_struct *p, u64 now)
786{
787 update_curr_load(rq, now);
788 update_load_add(&rq->ls.load, p->se.load.weight);
789}
790
791static inline void
792dec_load(struct rq *rq, const struct task_struct *p, u64 now)
793{
794 update_curr_load(rq, now);
795 update_load_sub(&rq->ls.load, p->se.load.weight);
796}
797
798static inline void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
799{
800 rq->nr_running++;
801 inc_load(rq, p, now);
802}
803
804static inline void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
805{
806 rq->nr_running--;
807 dec_load(rq, p, now);
808}
809
810static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); 731static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
811 732
812/* 733/*
@@ -837,6 +758,72 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
837 758
838#define sched_class_highest (&rt_sched_class) 759#define sched_class_highest (&rt_sched_class)
839 760
761static void __update_curr_load(struct rq *rq, struct load_stat *ls)
762{
763 if (rq->curr != rq->idle && ls->load.weight) {
764 ls->delta_exec += ls->delta_stat;
765 ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
766 ls->delta_stat = 0;
767 }
768}
769
770/*
771 * Update delta_exec, delta_fair fields for rq.
772 *
773 * delta_fair clock advances at a rate inversely proportional to
774 * total load (rq->ls.load.weight) on the runqueue, while
775 * delta_exec advances at the same rate as wall-clock (provided
776 * cpu is not idle).
777 *
778 * delta_exec / delta_fair is a measure of the (smoothened) load on this
779 * runqueue over any given interval. This (smoothened) load is used
780 * during load balance.
781 *
782 * This function is called /before/ updating rq->ls.load
783 * and when switching tasks.
784 */
785static void update_curr_load(struct rq *rq, u64 now)
786{
787 struct load_stat *ls = &rq->ls;
788 u64 start;
789
790 start = ls->load_update_start;
791 ls->load_update_start = now;
792 ls->delta_stat += now - start;
793 /*
794 * Stagger updates to ls->delta_fair. Very frequent updates
795 * can be expensive.
796 */
797 if (ls->delta_stat >= sysctl_sched_stat_granularity)
798 __update_curr_load(rq, ls);
799}
800
801static inline void
802inc_load(struct rq *rq, const struct task_struct *p, u64 now)
803{
804 update_curr_load(rq, now);
805 update_load_add(&rq->ls.load, p->se.load.weight);
806}
807
808static inline void
809dec_load(struct rq *rq, const struct task_struct *p, u64 now)
810{
811 update_curr_load(rq, now);
812 update_load_sub(&rq->ls.load, p->se.load.weight);
813}
814
815static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
816{
817 rq->nr_running++;
818 inc_load(rq, p, now);
819}
820
821static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
822{
823 rq->nr_running--;
824 dec_load(rq, p, now);
825}
826
840static void set_load_weight(struct task_struct *p) 827static void set_load_weight(struct task_struct *p)
841{ 828{
842 task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime; 829 task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
@@ -996,18 +983,21 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
996 u64 clock_offset, fair_clock_offset; 983 u64 clock_offset, fair_clock_offset;
997 984
998 clock_offset = old_rq->clock - new_rq->clock; 985 clock_offset = old_rq->clock - new_rq->clock;
999 fair_clock_offset = old_rq->cfs.fair_clock - 986 fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
1000 new_rq->cfs.fair_clock; 987
1001 if (p->se.wait_start)
1002 p->se.wait_start -= clock_offset;
1003 if (p->se.wait_start_fair) 988 if (p->se.wait_start_fair)
1004 p->se.wait_start_fair -= fair_clock_offset; 989 p->se.wait_start_fair -= fair_clock_offset;
990 if (p->se.sleep_start_fair)
991 p->se.sleep_start_fair -= fair_clock_offset;
992
993#ifdef CONFIG_SCHEDSTATS
994 if (p->se.wait_start)
995 p->se.wait_start -= clock_offset;
1005 if (p->se.sleep_start) 996 if (p->se.sleep_start)
1006 p->se.sleep_start -= clock_offset; 997 p->se.sleep_start -= clock_offset;
1007 if (p->se.block_start) 998 if (p->se.block_start)
1008 p->se.block_start -= clock_offset; 999 p->se.block_start -= clock_offset;
1009 if (p->se.sleep_start_fair) 1000#endif
1010 p->se.sleep_start_fair -= fair_clock_offset;
1011 1001
1012 __set_task_cpu(p, new_cpu); 1002 __set_task_cpu(p, new_cpu);
1013} 1003}
@@ -1568,17 +1558,19 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
1568static void __sched_fork(struct task_struct *p) 1558static void __sched_fork(struct task_struct *p)
1569{ 1559{
1570 p->se.wait_start_fair = 0; 1560 p->se.wait_start_fair = 0;
1571 p->se.wait_start = 0;
1572 p->se.exec_start = 0; 1561 p->se.exec_start = 0;
1573 p->se.sum_exec_runtime = 0; 1562 p->se.sum_exec_runtime = 0;
1574 p->se.delta_exec = 0; 1563 p->se.delta_exec = 0;
1575 p->se.delta_fair_run = 0; 1564 p->se.delta_fair_run = 0;
1576 p->se.delta_fair_sleep = 0; 1565 p->se.delta_fair_sleep = 0;
1577 p->se.wait_runtime = 0; 1566 p->se.wait_runtime = 0;
1567 p->se.sleep_start_fair = 0;
1568
1569#ifdef CONFIG_SCHEDSTATS
1570 p->se.wait_start = 0;
1578 p->se.sum_wait_runtime = 0; 1571 p->se.sum_wait_runtime = 0;
1579 p->se.sum_sleep_runtime = 0; 1572 p->se.sum_sleep_runtime = 0;
1580 p->se.sleep_start = 0; 1573 p->se.sleep_start = 0;
1581 p->se.sleep_start_fair = 0;
1582 p->se.block_start = 0; 1574 p->se.block_start = 0;
1583 p->se.sleep_max = 0; 1575 p->se.sleep_max = 0;
1584 p->se.block_max = 0; 1576 p->se.block_max = 0;
@@ -1586,6 +1578,7 @@ static void __sched_fork(struct task_struct *p)
1586 p->se.wait_max = 0; 1578 p->se.wait_max = 0;
1587 p->se.wait_runtime_overruns = 0; 1579 p->se.wait_runtime_overruns = 0;
1588 p->se.wait_runtime_underruns = 0; 1580 p->se.wait_runtime_underruns = 0;
1581#endif
1589 1582
1590 INIT_LIST_HEAD(&p->run_list); 1583 INIT_LIST_HEAD(&p->run_list);
1591 p->se.on_rq = 0; 1584 p->se.on_rq = 0;
@@ -1654,22 +1647,27 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1654 unsigned long flags; 1647 unsigned long flags;
1655 struct rq *rq; 1648 struct rq *rq;
1656 int this_cpu; 1649 int this_cpu;
1650 u64 now;
1657 1651
1658 rq = task_rq_lock(p, &flags); 1652 rq = task_rq_lock(p, &flags);
1659 BUG_ON(p->state != TASK_RUNNING); 1653 BUG_ON(p->state != TASK_RUNNING);
1660 this_cpu = smp_processor_id(); /* parent's CPU */ 1654 this_cpu = smp_processor_id(); /* parent's CPU */
1655 now = rq_clock(rq);
1661 1656
1662 p->prio = effective_prio(p); 1657 p->prio = effective_prio(p);
1663 1658
1664 if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) || 1659 if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
1665 task_cpu(p) != this_cpu || !current->se.on_rq) { 1660 (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
1661 !current->se.on_rq) {
1662
1666 activate_task(rq, p, 0); 1663 activate_task(rq, p, 0);
1667 } else { 1664 } else {
1668 /* 1665 /*
1669 * Let the scheduling class do new task startup 1666 * Let the scheduling class do new task startup
1670 * management (if any): 1667 * management (if any):
1671 */ 1668 */
1672 p->sched_class->task_new(rq, p); 1669 p->sched_class->task_new(rq, p, now);
1670 inc_nr_running(p, rq, now);
1673 } 1671 }
1674 check_preempt_curr(rq, p); 1672 check_preempt_curr(rq, p);
1675 task_rq_unlock(rq, &flags); 1673 task_rq_unlock(rq, &flags);
@@ -2908,8 +2906,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
2908 schedstat_inc(sd, alb_cnt); 2906 schedstat_inc(sd, alb_cnt);
2909 2907
2910 if (move_tasks(target_rq, target_cpu, busiest_rq, 1, 2908 if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
2911 RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE, 2909 ULONG_MAX, sd, CPU_IDLE, NULL))
2912 NULL))
2913 schedstat_inc(sd, alb_pushed); 2910 schedstat_inc(sd, alb_pushed);
2914 else 2911 else
2915 schedstat_inc(sd, alb_failed); 2912 schedstat_inc(sd, alb_failed);
@@ -5269,8 +5266,6 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
5269 sizeof(int), 0644, proc_dointvec_minmax); 5266 sizeof(int), 0644, proc_dointvec_minmax);
5270 set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct, 5267 set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct,
5271 sizeof(int), 0644, proc_dointvec_minmax); 5268 sizeof(int), 0644, proc_dointvec_minmax);
5272 set_table_entry(&table[9], 10, "cache_hot_time", &sd->cache_hot_time,
5273 sizeof(long long), 0644, proc_doulongvec_minmax);
5274 set_table_entry(&table[10], 11, "cache_nice_tries", 5269 set_table_entry(&table[10], 11, "cache_nice_tries",
5275 &sd->cache_nice_tries, 5270 &sd->cache_nice_tries,
5276 sizeof(int), 0644, proc_dointvec_minmax); 5271 sizeof(int), 0644, proc_dointvec_minmax);
@@ -6590,12 +6585,14 @@ void normalize_rt_tasks(void)
6590 do_each_thread(g, p) { 6585 do_each_thread(g, p) {
6591 p->se.fair_key = 0; 6586 p->se.fair_key = 0;
6592 p->se.wait_runtime = 0; 6587 p->se.wait_runtime = 0;
6588 p->se.exec_start = 0;
6593 p->se.wait_start_fair = 0; 6589 p->se.wait_start_fair = 0;
6590 p->se.sleep_start_fair = 0;
6591#ifdef CONFIG_SCHEDSTATS
6594 p->se.wait_start = 0; 6592 p->se.wait_start = 0;
6595 p->se.exec_start = 0;
6596 p->se.sleep_start = 0; 6593 p->se.sleep_start = 0;
6597 p->se.sleep_start_fair = 0;
6598 p->se.block_start = 0; 6594 p->se.block_start = 0;
6595#endif
6599 task_rq(p)->cfs.fair_clock = 0; 6596 task_rq(p)->cfs.fair_clock = 0;
6600 task_rq(p)->clock = 0; 6597 task_rq(p)->clock = 0;
6601 6598
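
The update_curr_load() block moved within kernel/sched.c keeps its comment describing delta_fair as a clock that advances inversely proportionally to the runqueue load, so that delta_exec / delta_fair acts as a smoothed load estimate. A toy illustration of that relationship, assuming the fair clock is scaled by nominal_weight / total_weight; the exact scaling is done by calc_delta_fair(), which this diff does not show:

#include <stdio.h>

#define NICE_0_LOAD 1024ULL     /* assumed nominal weight of one nice-0 task */

int main(void)
{
        unsigned long long delta_exec = 10000000ULL;            /* 10 ms of wall clock, in ns */
        unsigned long long weights[] = { 1024, 2048, 4096 };    /* 1, 2 and 4 nice-0 tasks */

        for (int i = 0; i < 3; i++) {
                /* the fair clock advances more slowly as the runqueue gets busier */
                unsigned long long delta_fair = delta_exec * NICE_0_LOAD / weights[i];

                printf("load %llu: delta_exec=%lluns delta_fair=%lluns load estimate=%llu\n",
                       weights[i], delta_exec, delta_fair, delta_exec / delta_fair);
        }
        return 0;
}
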
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 0eca442b7792..1c61e5315ad2 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -44,11 +44,16 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now)
44 (long long)p->se.wait_runtime, 44 (long long)p->se.wait_runtime,
45 (long long)(p->nvcsw + p->nivcsw), 45 (long long)(p->nvcsw + p->nivcsw),
46 p->prio, 46 p->prio,
47#ifdef CONFIG_SCHEDSTATS
47 (long long)p->se.sum_exec_runtime, 48 (long long)p->se.sum_exec_runtime,
48 (long long)p->se.sum_wait_runtime, 49 (long long)p->se.sum_wait_runtime,
49 (long long)p->se.sum_sleep_runtime, 50 (long long)p->se.sum_sleep_runtime,
50 (long long)p->se.wait_runtime_overruns, 51 (long long)p->se.wait_runtime_overruns,
51 (long long)p->se.wait_runtime_underruns); 52 (long long)p->se.wait_runtime_underruns
53#else
54 0LL, 0LL, 0LL, 0LL, 0LL
55#endif
56 );
52} 57}
53 58
54static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now) 59static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now)
@@ -171,7 +176,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
171 u64 now = ktime_to_ns(ktime_get()); 176 u64 now = ktime_to_ns(ktime_get());
172 int cpu; 177 int cpu;
173 178
174 SEQ_printf(m, "Sched Debug Version: v0.05, %s %.*s\n", 179 SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n",
175 init_utsname()->release, 180 init_utsname()->release,
176 (int)strcspn(init_utsname()->version, " "), 181 (int)strcspn(init_utsname()->version, " "),
177 init_utsname()->version); 182 init_utsname()->version);
@@ -235,21 +240,24 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
235#define P(F) \ 240#define P(F) \
236 SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F) 241 SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
237 242
238 P(se.wait_start); 243 P(se.wait_runtime);
239 P(se.wait_start_fair); 244 P(se.wait_start_fair);
240 P(se.exec_start); 245 P(se.exec_start);
241 P(se.sleep_start);
242 P(se.sleep_start_fair); 246 P(se.sleep_start_fair);
247 P(se.sum_exec_runtime);
248
249#ifdef CONFIG_SCHEDSTATS
250 P(se.wait_start);
251 P(se.sleep_start);
243 P(se.block_start); 252 P(se.block_start);
244 P(se.sleep_max); 253 P(se.sleep_max);
245 P(se.block_max); 254 P(se.block_max);
246 P(se.exec_max); 255 P(se.exec_max);
247 P(se.wait_max); 256 P(se.wait_max);
248 P(se.wait_runtime);
249 P(se.wait_runtime_overruns); 257 P(se.wait_runtime_overruns);
250 P(se.wait_runtime_underruns); 258 P(se.wait_runtime_underruns);
251 P(se.sum_wait_runtime); 259 P(se.sum_wait_runtime);
252 P(se.sum_exec_runtime); 260#endif
253 SEQ_printf(m, "%-25s:%20Ld\n", 261 SEQ_printf(m, "%-25s:%20Ld\n",
254 "nr_switches", (long long)(p->nvcsw + p->nivcsw)); 262 "nr_switches", (long long)(p->nvcsw + p->nivcsw));
255 P(se.load.weight); 263 P(se.load.weight);
@@ -269,7 +277,9 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
269 277
270void proc_sched_set_task(struct task_struct *p) 278void proc_sched_set_task(struct task_struct *p)
271{ 279{
280#ifdef CONFIG_SCHEDSTATS
272 p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0; 281 p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
273 p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0; 282 p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
283#endif
274 p->se.sum_exec_runtime = 0; 284 p->se.sum_exec_runtime = 0;
275} 285}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6971db0a7160..6f579ff5a9bc 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -292,10 +292,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
292 return; 292 return;
293 293
294 delta_exec = curr->delta_exec; 294 delta_exec = curr->delta_exec;
295#ifdef CONFIG_SCHEDSTATS 295 schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
296 if (unlikely(delta_exec > curr->exec_max))
297 curr->exec_max = delta_exec;
298#endif
299 296
300 curr->sum_exec_runtime += delta_exec; 297 curr->sum_exec_runtime += delta_exec;
301 cfs_rq->exec_clock += delta_exec; 298 cfs_rq->exec_clock += delta_exec;
@@ -352,7 +349,7 @@ static inline void
352update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now) 349update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
353{ 350{
354 se->wait_start_fair = cfs_rq->fair_clock; 351 se->wait_start_fair = cfs_rq->fair_clock;
355 se->wait_start = now; 352 schedstat_set(se->wait_start, now);
356} 353}
357 354
358/* 355/*
@@ -425,13 +422,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
425{ 422{
426 unsigned long delta_fair = se->delta_fair_run; 423 unsigned long delta_fair = se->delta_fair_run;
427 424
428#ifdef CONFIG_SCHEDSTATS 425 schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
429 {
430 s64 delta_wait = now - se->wait_start;
431 if (unlikely(delta_wait > se->wait_max))
432 se->wait_max = delta_wait;
433 }
434#endif
435 426
436 if (unlikely(se->load.weight != NICE_0_LOAD)) 427 if (unlikely(se->load.weight != NICE_0_LOAD))
437 delta_fair = calc_weighted(delta_fair, se->load.weight, 428 delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -456,7 +447,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
456 } 447 }
457 448
458 se->wait_start_fair = 0; 449 se->wait_start_fair = 0;
459 se->wait_start = 0; 450 schedstat_set(se->wait_start, 0);
460} 451}
461 452
462static inline void 453static inline void
@@ -1041,11 +1032,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
1041 * monopolize the CPU. Note: the parent runqueue is locked, 1032 * monopolize the CPU. Note: the parent runqueue is locked,
1042 * the child is not running yet. 1033 * the child is not running yet.
1043 */ 1034 */
1044static void task_new_fair(struct rq *rq, struct task_struct *p) 1035static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
1045{ 1036{
1046 struct cfs_rq *cfs_rq = task_cfs_rq(p); 1037 struct cfs_rq *cfs_rq = task_cfs_rq(p);
1047 struct sched_entity *se = &p->se; 1038 struct sched_entity *se = &p->se;
1048 u64 now = rq_clock(rq);
1049 1039
1050 sched_info_queued(p); 1040 sched_info_queued(p);
1051 1041
@@ -1072,7 +1062,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
1072 p->se.wait_runtime = -(sysctl_sched_granularity / 2); 1062 p->se.wait_runtime = -(sysctl_sched_granularity / 2);
1073 1063
1074 __enqueue_entity(cfs_rq, se); 1064 __enqueue_entity(cfs_rq, se);
1075 inc_nr_running(p, rq, now);
1076} 1065}
1077 1066
1078#ifdef CONFIG_FAIR_GROUP_SCHED 1067#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1192a2741b99..002fcf8d3f64 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -18,8 +18,8 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
18 delta_exec = now - curr->se.exec_start; 18 delta_exec = now - curr->se.exec_start;
19 if (unlikely((s64)delta_exec < 0)) 19 if (unlikely((s64)delta_exec < 0))
20 delta_exec = 0; 20 delta_exec = 0;
21 if (unlikely(delta_exec > curr->se.exec_max)) 21
22 curr->se.exec_max = delta_exec; 22 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
23 23
24 curr->se.sum_exec_runtime += delta_exec; 24 curr->se.sum_exec_runtime += delta_exec;
25 curr->se.exec_start = now; 25 curr->se.exec_start = now;
@@ -229,15 +229,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
229 requeue_task_rt(rq, p); 229 requeue_task_rt(rq, p);
230} 230}
231 231
232/*
233 * No parent/child timeslice management necessary for RT tasks,
234 * just activate them:
235 */
236static void task_new_rt(struct rq *rq, struct task_struct *p)
237{
238 activate_task(rq, p, 1);
239}
240
241static struct sched_class rt_sched_class __read_mostly = { 232static struct sched_class rt_sched_class __read_mostly = {
242 .enqueue_task = enqueue_task_rt, 233 .enqueue_task = enqueue_task_rt,
243 .dequeue_task = dequeue_task_rt, 234 .dequeue_task = dequeue_task_rt,
@@ -251,5 +242,4 @@ static struct sched_class rt_sched_class __read_mostly = {
251 .load_balance = load_balance_rt, 242 .load_balance = load_balance_rt,
252 243
253 .task_tick = task_tick_rt, 244 .task_tick = task_tick_rt,
254 .task_new = task_new_rt,
255}; 245};
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index c63c38f6fa6e..c20a94dda61e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -116,6 +116,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
116} 116}
117# define schedstat_inc(rq, field) do { (rq)->field++; } while (0) 117# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
118# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) 118# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
119# define schedstat_set(var, val) do { var = (val); } while (0)
119#else /* !CONFIG_SCHEDSTATS */ 120#else /* !CONFIG_SCHEDSTATS */
120static inline void 121static inline void
121rq_sched_info_arrive(struct rq *rq, unsigned long long delta) 122rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -125,6 +126,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
125{} 126{}
126# define schedstat_inc(rq, field) do { } while (0) 127# define schedstat_inc(rq, field) do { } while (0)
127# define schedstat_add(rq, field, amt) do { } while (0) 128# define schedstat_add(rq, field, amt) do { } while (0)
129# define schedstat_set(var, val) do { } while (0)
128#endif 130#endif
129 131
130#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 132#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
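
The new schedstat_set() follows the existing schedstat_inc()/schedstat_add() pattern: with CONFIG_SCHEDSTATS it performs the assignment, without it the statement disappears at compile time, which is what lets the sched_entity statistics fields and their updates elsewhere in this merge sit inside #ifdef CONFIG_SCHEDSTATS. A compressed userspace sketch of the pattern; the struct and field names are illustrative, only the macro is copied from the hunk above:

#include <stdio.h>

/* Comment this out to see the stats field and its update compile away. */
#define CONFIG_SCHEDSTATS 1

#ifdef CONFIG_SCHEDSTATS
# define schedstat_set(var, val)        do { var = (val); } while (0)
#else
# define schedstat_set(var, val)        do { } while (0)
#endif

struct entity_stats {
#ifdef CONFIG_SCHEDSTATS
        unsigned long long exec_max;            /* only exists when stats are on */
#endif
        unsigned long long sum_exec_runtime;
};

static void account(struct entity_stats *st, unsigned long long delta)
{
        /* Mirrors update_curr_rt(): the max tracking vanishes without stats. */
        schedstat_set(st->exec_max,
                      delta > st->exec_max ? delta : st->exec_max);
        st->sum_exec_runtime += delta;
}

int main(void)
{
        struct entity_stats st = { 0 };

        account(&st, 100);
        account(&st, 250);
        printf("sum=%llu\n", st.sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
        printf("exec_max=%llu\n", st.exec_max);
#endif
        return 0;
}
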
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 89dcc485653b..85a96a3fddaa 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -113,8 +113,10 @@ struct audit_buffer *netlbl_audit_start_common(int type,
113 if (audit_info->secid != 0 && 113 if (audit_info->secid != 0 &&
114 security_secid_to_secctx(audit_info->secid, 114 security_secid_to_secctx(audit_info->secid,
115 &secctx, 115 &secctx,
116 &secctx_len) == 0) 116 &secctx_len) == 0) {
117 audit_log_format(audit_buf, " subj=%s", secctx); 117 audit_log_format(audit_buf, " subj=%s", secctx);
118 security_release_secctx(secctx, secctx_len);
119 }
118 120
119 return audit_buf; 121 return audit_buf;
120} 122}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 95a47304336d..e5a3be03aa0d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2195,9 +2195,10 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
2195 } 2195 }
2196 2196
2197 if (sid != 0 && 2197 if (sid != 0 &&
2198 security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) 2198 security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) {
2199 audit_log_format(audit_buf, " subj=%s", secctx); 2199 audit_log_format(audit_buf, " subj=%s", secctx);
2200 else 2200 security_release_secctx(secctx, secctx_len);
2201 } else
2201 audit_log_task_context(audit_buf); 2202 audit_log_task_context(audit_buf);
2202 2203
2203 if (xp) { 2204 if (xp) {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 0fac6829c63a..6237933f7d82 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4658,8 +4658,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
4658 4658
4659static void selinux_release_secctx(char *secdata, u32 seclen) 4659static void selinux_release_secctx(char *secdata, u32 seclen)
4660{ 4660{
4661 if (secdata) 4661 kfree(secdata);
4662 kfree(secdata);
4663} 4662}
4664 4663
4665#ifdef CONFIG_KEYS 4664#ifdef CONFIG_KEYS
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 051b14c88e2d..d243ddc723a5 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -162,9 +162,13 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid)
162 162
163 netlbl_secattr_init(&secattr); 163 netlbl_secattr_init(&secattr);
164 rc = netlbl_skbuff_getattr(skb, &secattr); 164 rc = netlbl_skbuff_getattr(skb, &secattr);
165 if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) 165 if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) {
166 rc = security_netlbl_secattr_to_sid(&secattr, base_sid, sid); 166 rc = security_netlbl_secattr_to_sid(&secattr, base_sid, sid);
167 else 167 if (rc == 0 &&
168 (secattr.flags & NETLBL_SECATTR_CACHEABLE) &&
169 (secattr.flags & NETLBL_SECATTR_CACHE))
170 netlbl_cache_add(skb, &secattr);
171 } else
168 *sid = SECSID_NULL; 172 *sid = SECSID_NULL;
169 netlbl_secattr_destroy(&secattr); 173 netlbl_secattr_destroy(&secattr);
170 174
@@ -307,11 +311,15 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
307 311
308 netlbl_secattr_init(&secattr); 312 netlbl_secattr_init(&secattr);
309 rc = netlbl_skbuff_getattr(skb, &secattr); 313 rc = netlbl_skbuff_getattr(skb, &secattr);
310 if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) 314 if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) {
311 rc = security_netlbl_secattr_to_sid(&secattr, 315 rc = security_netlbl_secattr_to_sid(&secattr,
312 SECINITSID_NETMSG, 316 SECINITSID_NETMSG,
313 &nlbl_sid); 317 &nlbl_sid);
314 else 318 if (rc == 0 &&
319 (secattr.flags & NETLBL_SECATTR_CACHEABLE) &&
320 (secattr.flags & NETLBL_SECATTR_CACHE))
321 netlbl_cache_add(skb, &secattr);
322 } else
315 nlbl_sid = SECINITSID_UNLABELED; 323 nlbl_sid = SECINITSID_UNLABELED;
316 netlbl_secattr_destroy(&secattr); 324 netlbl_secattr_destroy(&secattr);
317 if (rc != 0) 325 if (rc != 0)