109 files changed, 854 insertions, 612 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 73a8617f1861..cb2b141b1c3e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -45,6 +45,7 @@ o nfs-utils 1.0.5 # showmount --version
 o procps 3.2.0 # ps --version
 o oprofile 0.9 # oprofiled --version
 o udev 081 # udevinfo -V
+o grub 0.93 # grub --version
 
 Kernel compilation
 ==================
diff --git a/MAINTAINERS b/MAINTAINERS
index 1e15a0edc313..e65e96a14bec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3754,7 +3754,7 @@ L: linux-usb-devel@lists.sourceforge.net
 W: http://www.linux-usb.org/gadget
 S: Maintained
 
-USB HID/HIDBP DRIVERS
+USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 P: Jiri Kosina
 M: jkosina@suse.cz
 L: linux-usb-devel@lists.sourceforge.net
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index 0b581e3cf7c7..6d51f133fb23 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -400,7 +400,8 @@ int __init pcibios_init(void)
 	__reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000;
 	mb();
 
-	*(volatile unsigned long *)(__region_CS2+0x01300014) == 1;
+	/* enable PCI arbitration */
+	__reg_MB86943_pci_arbiter = MB86943_PCIARB_EN;
 
 	ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
 	ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
diff --git a/arch/i386/boot/edd.c b/arch/i386/boot/edd.c
index 77d92daf7923..658834d9f92a 100644
--- a/arch/i386/boot/edd.c
+++ b/arch/i386/boot/edd.c
@@ -127,7 +127,7 @@ static int get_edd_info(u8 devno, struct edd_info *ei)
 	ax = 0x4800;
 	dx = devno;
 	asm("pushfl; int $0x13; popfl"
-	    : "+a" (ax), "+d" (dx)
+	    : "+a" (ax), "+d" (dx), "=m" (ei->params)
 	    : "S" (&ei->params)
 	    : "ebx", "ecx", "edi");
 
diff --git a/arch/i386/boot/video-vesa.c b/arch/i386/boot/video-vesa.c
index e6aa9eb8d93a..f1bc71e948cf 100644
--- a/arch/i386/boot/video-vesa.c
+++ b/arch/i386/boot/video-vesa.c
@@ -268,7 +268,7 @@ void vesa_store_edid(void)
 	dx = 0; /* EDID block number */
 	di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
 	asm(INT10
-	    : "+a" (ax), "+b" (bx), "+d" (dx)
+	    : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
 	    : "c" (cx), "D" (di)
 	    : "esi");
 #endif /* CONFIG_FIRMWARE_EDID */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index af10462d44d4..a3405b3c1eef 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -34,7 +34,6 @@
 #include <linux/uio.h>
 #include <linux/nfs_fs.h>
 #include <linux/quota.h>
-#include <linux/syscalls.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/cache.h>
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 91e6dc1e7baf..cfe4654838f4 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -142,7 +142,7 @@ struct iosapic_rte_info {
 static struct iosapic_intr_info {
 	struct list_head rtes;	/* RTEs using this vector (empty =>
 				 * not an IOSAPIC interrupt) */
-	int count;		/* # of RTEs that shares this vector */
+	int count;		/* # of registered RTEs */
 	u32 low32;		/* current value of low word of
 				 * Redirection table entry */
 	unsigned int dest;	/* destination CPU physical ID */
@@ -313,7 +313,7 @@ mask_irq (unsigned int irq)
 	int rte_index;
 	struct iosapic_rte_info *rte;
 
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (!iosapic_intr_info[irq].count)
 		return; /* not an IOSAPIC interrupt! */
 
 	/* set only the mask bit */
@@ -331,7 +331,7 @@ unmask_irq (unsigned int irq)
 	int rte_index;
 	struct iosapic_rte_info *rte;
 
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (!iosapic_intr_info[irq].count)
 		return; /* not an IOSAPIC interrupt! */
 
 	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
@@ -363,7 +363,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 	dest = cpu_physical_id(first_cpu(mask));
 
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (!iosapic_intr_info[irq].count)
 		return; /* not an IOSAPIC interrupt */
 
 	set_irq_affinity_info(irq, dest, redir);
@@ -542,7 +542,7 @@ iosapic_reassign_vector (int irq)
 {
 	int new_irq;
 
-	if (!list_empty(&iosapic_intr_info[irq].rtes)) {
+	if (iosapic_intr_info[irq].count) {
 		new_irq = create_irq();
 		if (new_irq < 0)
 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
@@ -560,7 +560,7 @@ iosapic_reassign_vector (int irq)
 	}
 }
 
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
+static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 {
 	int i;
 	struct iosapic_rte_info *rte;
@@ -677,7 +677,7 @@ get_target_cpu (unsigned int gsi, int irq)
 	 * In case of vector shared by multiple RTEs, all RTEs that
 	 * share the vector need to use the same destination CPU.
 	 */
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (iosapic_intr_info[irq].count)
 		return iosapic_intr_info[irq].dest;
 
 	/*
@@ -794,8 +794,9 @@ iosapic_register_intr (unsigned int gsi,
 	err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
 			    polarity, trigger);
 	if (err < 0) {
+		spin_unlock(&irq_desc[irq].lock);
 		irq = err;
-		goto unlock_all;
+		goto unlock_iosapic_lock;
 	}
 
 	/*
@@ -811,7 +812,7 @@ iosapic_register_intr (unsigned int gsi,
 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
-unlock_all:
+
 	spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 9386b955eed1..c47c8acc96e3 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -101,15 +101,6 @@ int check_irq_used(int irq)
 	return -1;
 }
 
-static void reserve_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	irq_status[irq] = IRQ_RSVD;
-	spin_unlock_irqrestore(&vector_lock, flags);
-}
-
 static inline int find_unassigned_irq(void)
 {
 	int irq;
@@ -302,10 +293,14 @@ static cpumask_t vector_allocation_domain(int cpu)
 
 void destroy_and_reserve_irq(unsigned int irq)
 {
+	unsigned long flags;
+
 	dynamic_irq_cleanup(irq);
 
-	clear_irq_vector(irq);
-	reserve_irq(irq);
+	spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq);
+	irq_status[irq] = IRQ_RSVD;
+	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int __reassign_irq_vector(int irq, int cpu)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 4b5daa3cc0fe..ff28620cb992 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1750,8 +1750,17 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	strncpy(p->comm, type, sizeof(p->comm)-1);
 }
 
-/* Do per-CPU MCA-related initialization. */
+/* Caller prevents this from being called after init */
+static void * __init_refok mca_bootmem(void)
+{
+	void *p;
 
+	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
+			  KERNEL_STACK_SIZE);
+	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+}
+
+/* Do per-CPU MCA-related initialization. */
 void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
@@ -1763,11 +1772,7 @@ ia64_mca_cpu_init(void *cpu_data)
 	int cpu;
 
 		first_time = 0;
-		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
-					 * NR_CPUS + KERNEL_STACK_SIZE);
-		mca_data = (void *)(((unsigned long)mca_data +
-				     KERNEL_STACK_SIZE - 1) &
-				     (-KERNEL_STACK_SIZE));
+		mca_data = mca_bootmem();
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			format_mca_init_stack(mca_data,
 				offsetof(struct ia64_mca_cpu, mca_stack),
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 7cecd2964200..cd9a37a552c3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,7 +60,6 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
-#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0982882bfb80..4e446aa5f4ac 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -346,7 +346,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 }
 
 /*
- * Run a function on another CPU
+ * Run a function on a specific CPU
  *  <func>	The function to run. This must be fast and non-blocking.
  *  <info>	An arbitrary pointer to pass to the function.
  *  <nonatomic>	Currently unused.
@@ -366,9 +366,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
 
 	if (cpuid == me) {
-		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
+		local_irq_disable();
+		func(info);
+		local_irq_enable();
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 
 	data.func = func;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 6c0e9e2e1b82..98cfc90cab1d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -240,7 +240,21 @@ ia64_init_itm (void)
 		if (!nojitter)
 			itc_jitter_data.itc_jitter = 1;
 #endif
-	}
+	} else
+		/*
+		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
+		 * ITC values may fluctuate significantly between processors.
+		 * Clock should not be used for hrtimers. Mark itc as only
+		 * useful for boot and testing.
+		 *
+		 * Note that jitter compensation is off! There is no point of
+		 * synchronizing ITCs since they may be large differentials
+		 * that change over time.
+		 *
+		 * The only way to fix this would be to repeatedly sync the
+		 * ITCs. Until that time we have to avoid ITC.
+		 */
+		clocksource_itc.rating = 50;
 
 	/* Setup the CPU local timer tick */
 	ia64_cpu_local_tick();
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 787ed642dd49..4594770e685a 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -391,7 +391,7 @@ void sn_bus_free_sysdata(void)
  * hubdev_init_node() - Creates the HUB data structure and link them to it's
  *	own NODE specific data area.
  */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 {
 	struct hubdev_info *hubdev_info;
 	int size;
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 684b1c984a44..1f38a3a68390 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -25,7 +25,6 @@
 #include <linux/interrupt.h>
 #include <linux/acpi.h>
 #include <linux/compiler.h>
-#include <linux/sched.h>
 #include <linux/root_dev.h>
 #include <linux/nodemask.h>
 #include <linux/pm.h>
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index 19e25d2b64fc..cf67fc562054 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -23,16 +23,14 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static void __iomem *sn2_mc;
-
 static cycle_t read_sn2(void)
 {
-	return (cycle_t)readq(sn2_mc);
+	return (cycle_t)readq(RTC_COUNTER_ADDR);
 }
 
 static struct clocksource clocksource_sn2 = {
 	.name = "sn2_rtc",
-	.rating = 300,
+	.rating = 450,
 	.read = read_sn2,
 	.mask = (1LL << 55) - 1,
 	.mult = 0,
@@ -58,7 +56,6 @@ ia64_sn_udelay (unsigned long usecs)
 
 void __init sn_timer_init(void)
 {
-	sn2_mc = RTC_COUNTER_ADDR;
 	clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
 	clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
 					clocksource_sn2.shift);
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index eac38388f5fd..88d2cefd01be 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1,7 +1,6 @@
-/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $
- * arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
+/* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
  *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
  * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -129,7 +128,7 @@ trap_low:
 	RESTORE_ALL
 #endif
 
-#ifdef CONFIG_BLK_DEV_FD
+#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
 	.text
 	.align 4
 	.globl floppy_hardint
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 75b2240ad0f9..b76dc03fc318 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -351,34 +351,14 @@ void handler_irq(int irq, struct pt_regs * regs)
 	set_irq_regs(old_regs);
 }
 
-#ifdef CONFIG_BLK_DEV_FD
-extern void floppy_interrupt(int irq, void *dev_id);
-
-void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	int cpu = smp_processor_id();
-
-	old_regs = set_irq_regs(regs);
-	disable_pil_irq(irq);
-	irq_enter();
-	kstat_cpu(cpu).irqs[irq]++;
-	floppy_interrupt(irq, dev_id);
-	irq_exit();
-	enable_pil_irq(irq);
-	set_irq_regs(old_regs);
-	// XXX Eek, it's totally changed with preempt_count() and such
-	// if (softirq_pending(cpu))
-	//	do_softirq();
-}
-#endif
+#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
 
 /* Fast IRQs on the Sparc can only have one routine attached to them,
  * thus no sharing possible.
  */
-int request_fast_irq(unsigned int irq,
-		     irq_handler_t handler,
-		     unsigned long irqflags, const char *devname)
+static int request_fast_irq(unsigned int irq,
+			    void (*handler)(void),
+			    unsigned long irqflags, const char *devname)
 {
 	struct irqaction *action;
 	unsigned long flags;
@@ -457,7 +437,6 @@ int request_fast_irq(unsigned int irq,
 	 */
 	flush_cache_all();
 
-	action->handler = handler;
 	action->flags = irqflags;
 	cpus_clear(action->mask);
 	action->name = devname;
@@ -475,6 +454,61 @@ out:
 	return ret;
 }
 
+/* These variables are used to access state from the assembler
+ * interrupt handler, floppy_hardint, so we cannot put these in
+ * the floppy driver image because that would not work in the
+ * modular case.
+ */
+volatile unsigned char *fdc_status;
+EXPORT_SYMBOL(fdc_status);
+
+char *pdma_vaddr;
+EXPORT_SYMBOL(pdma_vaddr);
+
+unsigned long pdma_size;
+EXPORT_SYMBOL(pdma_size);
+
+volatile int doing_pdma;
+EXPORT_SYMBOL(doing_pdma);
+
+char *pdma_base;
+EXPORT_SYMBOL(pdma_base);
+
+unsigned long pdma_areasize;
+EXPORT_SYMBOL(pdma_areasize);
+
+extern void floppy_hardint(void);
+
+static irqreturn_t (*floppy_irq_handler)(int irq, void *dev_id);
+
+void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	int cpu = smp_processor_id();
+
+	old_regs = set_irq_regs(regs);
+	disable_pil_irq(irq);
+	irq_enter();
+	kstat_cpu(cpu).irqs[irq]++;
+	floppy_irq_handler(irq, dev_id);
+	irq_exit();
+	enable_pil_irq(irq);
+	set_irq_regs(old_regs);
+	// XXX Eek, it's totally changed with preempt_count() and such
+	// if (softirq_pending(cpu))
+	//	do_softirq();
+}
+
+int sparc_floppy_request_irq(int irq, unsigned long flags,
+			     irqreturn_t (*irq_handler)(int irq, void *))
+{
+	floppy_irq_handler = irq_handler;
+	return request_fast_irq(irq, floppy_hardint, flags, "floppy");
+}
+EXPORT_SYMBOL(sparc_floppy_request_irq);
+
+#endif
+
 int request_irq(unsigned int irq,
 		irq_handler_t handler,
 		unsigned long irqflags, const char * devname, void *dev_id)
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 7b4abde43028..ef647acc479e 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -143,7 +143,6 @@ EXPORT_SYMBOL(mstk48t02_regs);
 EXPORT_SYMBOL(set_auxio);
 EXPORT_SYMBOL(get_auxio);
 #endif
-EXPORT_SYMBOL(request_fast_irq);
 EXPORT_SYMBOL(io_remap_pfn_range);
 /* P3: iounit_xxx may be needed, sun4d users */
 /* EXPORT_SYMBOL(iounit_map_dma_init); */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ad070861bb53..a78832ea81fa 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -890,37 +890,46 @@ static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 }
 
 #ifdef CONFIG_PM
-static struct dmi_system_id piix_broken_suspend_dmi_table[] = {
-	{
-		.ident = "TECRA M5",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
-		},
-	},
-	{
-		.ident = "Satellite U200",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
-		},
-	},
-	{
-		.ident = "Satellite U205",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
-		},
-	},
-	{
-		.ident = "Portege M500",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
-		},
-	},
-	{ }
-};
+static int piix_broken_suspend(void)
+{
+	static struct dmi_system_id sysids[] = {
+		{
+			.ident = "TECRA M5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+			},
+		},
+		{
+			.ident = "Satellite U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+			},
+		},
+		{
+			.ident = "Portege M500",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+			},
+		},
+		{ }
+	};
+	static const char *oemstrs[] = {
+		"Tecra M3,",
+	};
+	int i;
+
+	if (dmi_check_system(sysids))
+		return 1;
+
+	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
+		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
+			return 1;
+
+	return 0;
+}
 
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
@@ -937,8 +946,7 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 	 * cycles and power trying to do something to the sleeping
 	 * beauty.
 	 */
-	if (dmi_check_system(piix_broken_suspend_dmi_table) &&
-	    mesg.event == PM_EVENT_SUSPEND) {
+	if (piix_broken_suspend() && mesg.event == PM_EVENT_SUSPEND) {
 		pci_save_state(pdev);
 
 		/* mark its power state as "unknown", since we don't
@@ -973,10 +981,10 @@ static int piix_pci_device_resume(struct pci_dev *pdev)
 		pci_restore_state(pdev);
 
 		/* PCI device wasn't disabled during suspend. Use
-		 * __pci_reenable_device() to avoid affecting the
-		 * enable count.
+		 * pci_reenable_device() to avoid affecting the enable
+		 * count.
 		 */
-		rc = __pci_reenable_device(pdev);
+		rc = pci_reenable_device(pdev);
 		if (rc)
 			dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
 				   "device after resume (%d)\n", rc);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6001aae0b884..60e78bef469f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3788,6 +3788,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
 	{ "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
 	{ "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
+	{ "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
 
 	/* Devices with NCQ limits */
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 6c289c7b1322..1cce2198baaf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -573,6 +573,10 @@ int ata_pci_init_bmdma(struct ata_host *host)
 	struct pci_dev *pdev = to_pci_dev(gdev);
 	int i, rc;
 
+	/* No BAR4 allocation: No DMA */
+	if (pci_resource_start(pdev, 4) == 0)
+		return 0;
+
 	/* TODO: If we get no DMA mask we should fall back to PIO */
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index dc443e7dc37c..e34b632487d7 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.2.3"
+#define DRV_VERSION "0.2.4"
 
 /*
  * CMD64x specific registers definition.
@@ -397,7 +397,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = ATA_UDMA1,
+		.udma_mask = ATA_UDMA2,
 		.port_ops = &cmd64x_port_ops
 	},
 	{ /* CMD 646 rev 1 */
@@ -412,7 +412,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = ATA_UDMA2,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &cmd648_port_ops
 	},
 	{ /* CMD 649 */
@@ -420,7 +420,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = ATA_UDMA3,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &cmd648_port_ops
 	}
 };
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 9a829a7cbc60..66bd0e83ac07 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -2,6 +2,7 @@
  *    pata_sis.c - SiS ATA driver
  *
  *	(C) 2005 Red Hat <alan@redhat.com>
+ *	(C) 2007 Bartlomiej Zolnierkiewicz
  *
  *	Based upon linux/drivers/ide/pci/sis5513.c
  *	Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
@@ -35,7 +36,7 @@
 #include "sis.h"
 
 #define DRV_NAME "pata_sis"
-#define DRV_VERSION "0.5.1"
+#define DRV_VERSION "0.5.2"
 
 struct sis_chipset {
 	u16 device; /* PCI host ID */
@@ -237,7 +238,7 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- *	sis_100_set_pioode - Initialize host controller PATA PIO timings
+ *	sis_100_set_piomode - Initialize host controller PATA PIO timings
  *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring for.
 *
@@ -262,7 +263,7 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- *	sis_133_set_pioode - Initialize host controller PATA PIO timings
+ *	sis_133_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device we are configuring for.
 *
@@ -334,7 +335,7 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	int drive_pci = sis_old_port_base(adev);
 	u16 timing;
 
-	const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+	const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
 	const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
 
 	pci_read_config_word(pdev, drive_pci, &timing);
@@ -342,15 +343,15 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	if (adev->dma_mode < XFER_UDMA_0) {
 		/* bits 3-0 hold recovery timing bits 8-10 active timing and
 		   the higer bits are dependant on the device */
-		timing &= ~ 0x870F;
+		timing &= ~0x870F;
 		timing |= mwdma_bits[speed];
-		pci_write_config_word(pdev, drive_pci, timing);
 	} else {
 		/* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
 		speed = adev->dma_mode - XFER_UDMA_0;
 		timing &= ~0x6000;
 		timing |= udma_bits[speed];
 	}
+	pci_write_config_word(pdev, drive_pci, timing);
 }
 
 /**
@@ -373,7 +374,7 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	int drive_pci = sis_old_port_base(adev);
 	u16 timing;
 
-	const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+	const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
 	const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
 
 	pci_read_config_word(pdev, drive_pci, &timing);
@@ -432,8 +433,7 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 *	@adev: Device to program
 *
 *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
- *	Handles early SiS 961 bridges. Supports MWDMA as well unlike
- *	the old ide/pci driver.
+ *	Handles early SiS 961 bridges.
 *
 *	LOCKING:
 *	None (inherited from caller).
@@ -467,8 +467,6 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a
 *	@adev: Device to program
 *
 *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
- *	Handles early SiS 961 bridges. Supports MWDMA as well unlike
- *	the old ide/pci driver.
 *
 *	LOCKING:
 *	None (inherited from caller).
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index db703758db98..7e427b4c74b5 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -907,6 +907,8 @@ static void bus_reset_tasklet(unsigned long data)
 	int self_id_count, i, j, reg;
 	int generation, new_generation;
 	unsigned long flags;
+	void *free_rom = NULL;
+	dma_addr_t free_rom_bus = 0;
 
 	reg = reg_read(ohci, OHCI1394_NodeID);
 	if (!(reg & OHCI1394_NodeID_idValid)) {
@@ -970,8 +972,8 @@ static void bus_reset_tasklet(unsigned long data)
 	 */
 
 	if (ohci->next_config_rom != NULL) {
-		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-				  ohci->config_rom, ohci->config_rom_bus);
+		free_rom = ohci->config_rom;
+		free_rom_bus = ohci->config_rom_bus;
 		ohci->config_rom = ohci->next_config_rom;
 		ohci->config_rom_bus = ohci->next_config_rom_bus;
 		ohci->next_config_rom = NULL;
@@ -990,6 +992,10 @@ static void bus_reset_tasklet(unsigned long data)
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
+	if (free_rom)
+		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				  free_rom, free_rom_bus);
+
 	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
 				 self_id_count, ohci->self_id_buffer);
 }
@@ -1186,7 +1192,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
 {
 	struct fw_ohci *ohci;
 	unsigned long flags;
-	int retval = 0;
+	int retval = -EBUSY;
 	__be32 *next_config_rom;
 	dma_addr_t next_config_rom_bus;
 
@@ -1240,10 +1246,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
 
 		reg_write(ohci, OHCI1394_ConfigROMmap,
 			  ohci->next_config_rom_bus);
-	} else {
-		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-				  next_config_rom, next_config_rom_bus);
-		retval = -EBUSY;
+		retval = 0;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1257,6 +1260,9 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
 	 */
 	if (retval == 0)
 		fw_core_initiate_bus_reset(&ohci->card, 1);
+	else
+		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				  next_config_rom, next_config_rom_bus);
 
 	return retval;
 }
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 3e4a369d0057..ba816ef6def1 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -984,6 +984,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	struct fw_unit *unit = sd->unit;
 	struct fw_device *device = fw_device(unit->device.parent);
 	struct sbp2_command_orb *orb;
+	unsigned max_payload;
 
 	/*
 	 * Bidirectional commands are not yet implemented, and unknown
@@ -1017,8 +1018,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * specifies the max payload size as 2 ^ (max_payload + 2), so
 	 * if we set this to max_speed + 7, we get the right value.
 	 */
+	max_payload = min(device->max_speed + 7,
+			  device->card->max_receive - 1);
 	orb->request.misc =
-		COMMAND_ORB_MAX_PAYLOAD(device->max_speed + 7) |
+		COMMAND_ORB_MAX_PAYLOAD(max_payload) |
 		COMMAND_ORB_SPEED(device->max_speed) |
 		COMMAND_ORB_NOTIFY;
 
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 3ce8e2fbe15f..3e1cb12e43cd 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -734,7 +734,7 @@ fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 }
 EXPORT_SYMBOL(fw_core_handle_response);
 
-const struct fw_address_region topology_map_region =
+static const struct fw_address_region topology_map_region =
 	{ .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
 
 static void
@@ -772,7 +772,7 @@ static struct fw_address_handler topology_map = {
 	.address_callback = handle_topology_map,
 };
 
-const struct fw_address_region registers_region =
+static const struct fw_address_region registers_region =
 	{ .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
 
 static void
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 5ceaccd10564..fa7967b57408 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -231,7 +231,7 @@ struct fw_card {
 	unsigned long reset_jiffies;
 
 	unsigned long long guid;
-	int max_receive;
+	unsigned max_receive;
 	int link_speed;
 	int config_rom_generation;
 
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b2baeaeba9be..0a1f2b52a12f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -743,7 +743,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 	hid->quirks = quirks;
 
 	if (!(usbhid = kzalloc(sizeof(struct usbhid_device), GFP_KERNEL)))
-		goto fail;
+		goto fail_no_usbhid;
 
 	hid->driver_data = usbhid;
 	usbhid->hid = hid;
@@ -878,6 +878,8 @@ fail:
 	usb_free_urb(usbhid->urbout);
 	usb_free_urb(usbhid->urbctrl);
 	hid_free_buffers(dev, hid);
+	kfree(usbhid);
+fail_no_usbhid:
 	hid_free_device(hid);
 
 	return NULL;
@@ -913,6 +915,7 @@ static void hid_disconnect(struct usb_interface *intf)
 	usb_free_urb(usbhid->urbout);
 
 	hid_free_buffers(hid_to_usb_dev(hid), hid);
+	kfree(usbhid);
 	hid_free_device(hid);
 }
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 775b9f3b8ce3..6b21a214f419 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -61,7 +61,9 @@
 #define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
-#define USB_DEVICE_ID_APPLE_IR 0x8240
+
+#define USB_VENDOR_ID_ASUS 0x0b05
+#define USB_DEVICE_ID_ASUS_LCM 0x1726
 
 #define USB_VENDOR_ID_ATEN 0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -198,6 +200,70 @@
 
 #define USB_VENDOR_ID_LOGITECH 0x046d
 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
+#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110
+#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
+#define USB_DEVICE_ID_LOGITECH_HARMONY_3 0xc112
+#define USB_DEVICE_ID_LOGITECH_HARMONY_4 0xc113
+#define USB_DEVICE_ID_LOGITECH_HARMONY_5 0xc114
+#define USB_DEVICE_ID_LOGITECH_HARMONY_6 0xc115
+#define USB_DEVICE_ID_LOGITECH_HARMONY_7 0xc116
+#define USB_DEVICE_ID_LOGITECH_HARMONY_8 0xc117
+#define USB_DEVICE_ID_LOGITECH_HARMONY_9 0xc118
+#define USB_DEVICE_ID_LOGITECH_HARMONY_10 0xc119
+#define USB_DEVICE_ID_LOGITECH_HARMONY_11 0xc11a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_12 0xc11b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_13 0xc11c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_14 0xc11d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_15 0xc11e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_16 0xc11f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_17 0xc120
+#define USB_DEVICE_ID_LOGITECH_HARMONY_18 0xc121
+#define USB_DEVICE_ID_LOGITECH_HARMONY_19 0xc122
+#define USB_DEVICE_ID_LOGITECH_HARMONY_20 0xc123
+#define USB_DEVICE_ID_LOGITECH_HARMONY_21 0xc124
+#define USB_DEVICE_ID_LOGITECH_HARMONY_22 0xc125
+#define USB_DEVICE_ID_LOGITECH_HARMONY_23 0xc126
+#define USB_DEVICE_ID_LOGITECH_HARMONY_24 0xc127
+#define USB_DEVICE_ID_LOGITECH_HARMONY_25 0xc128
+#define USB_DEVICE_ID_LOGITECH_HARMONY_26 0xc129
+#define USB_DEVICE_ID_LOGITECH_HARMONY_27 0xc12a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_28 0xc12b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_29 0xc12c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_30 0xc12d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_31 0xc12e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_32 0xc12f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_33 0xc130
+#define USB_DEVICE_ID_LOGITECH_HARMONY_34 0xc131
+#define USB_DEVICE_ID_LOGITECH_HARMONY_35 0xc132
+#define USB_DEVICE_ID_LOGITECH_HARMONY_36 0xc133
+#define USB_DEVICE_ID_LOGITECH_HARMONY_37 0xc134
+#define USB_DEVICE_ID_LOGITECH_HARMONY_38 0xc135
+#define USB_DEVICE_ID_LOGITECH_HARMONY_39 0xc136
+#define USB_DEVICE_ID_LOGITECH_HARMONY_40 0xc137
+#define USB_DEVICE_ID_LOGITECH_HARMONY_41 0xc138
+#define USB_DEVICE_ID_LOGITECH_HARMONY_42 0xc139
+#define USB_DEVICE_ID_LOGITECH_HARMONY_43 0xc13a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_44 0xc13b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_45 0xc13c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_46 0xc13d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_47 0xc13e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_48 0xc13f
+#define USB_DEVICE_ID_LOGITECH_HARMONY_49 0xc140
+#define USB_DEVICE_ID_LOGITECH_HARMONY_50 0xc141
+#define USB_DEVICE_ID_LOGITECH_HARMONY_51 0xc142
+#define USB_DEVICE_ID_LOGITECH_HARMONY_52 0xc143
+#define USB_DEVICE_ID_LOGITECH_HARMONY_53 0xc144
+#define USB_DEVICE_ID_LOGITECH_HARMONY_54 0xc145
+#define USB_DEVICE_ID_LOGITECH_HARMONY_55 0xc146
+#define USB_DEVICE_ID_LOGITECH_HARMONY_56 0xc147
+#define USB_DEVICE_ID_LOGITECH_HARMONY_57 0xc148
+#define USB_DEVICE_ID_LOGITECH_HARMONY_58 0xc149
+#define USB_DEVICE_ID_LOGITECH_HARMONY_59 0xc14a
+#define USB_DEVICE_ID_LOGITECH_HARMONY_60 0xc14b
+#define USB_DEVICE_ID_LOGITECH_HARMONY_61 0xc14c
+#define USB_DEVICE_ID_LOGITECH_HARMONY_62 0xc14d
+#define USB_DEVICE_ID_LOGITECH_HARMONY_63 0xc14e
+#define USB_DEVICE_ID_LOGITECH_HARMONY_64 0xc14f
 #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
 #define USB_DEVICE_ID_LOGITECH_KBD 0xc311
 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c
@@ -221,6 +287,9 @@
 #define USB_DEVICE_ID_NCR_FIRST 0x0300
 #define USB_DEVICE_ID_NCR_LAST 0x03ff
 
+#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
+#define USB_DEVICE_ID_N_S_HARMONY 0xc359
+
 #define USB_VENDOR_ID_NEC 0x073e
 #define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
 
@@ -315,7 +384,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE },
-	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IR, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, HID_QUIRK_IGNORE},
 	{ USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_CIDC, 0x0103, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
@@ -463,6 +532,71 @@ static const struct hid_blacklist {
 
 	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS },
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD, HID_QUIRK_RESET_LEDS },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_2, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_3, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_4, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_5, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_6, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_7, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_8, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_9, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_10, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_11, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_12, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_13, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_14, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_15, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_16, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_17, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_18, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_19, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_20, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_21, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_22, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_23, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_24, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_25, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_26, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_27, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_28, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_29, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_30, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_31, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_32, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_33, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_34, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_35, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_36, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_37, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_38, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_39, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_40, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_41, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_42, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_43, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_44, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_45, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_46, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_47, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_48, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_49, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_50, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_51, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_52, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_53, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_54, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_55, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_56, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_57, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_58, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_59, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_60, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_61, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_62, HID_QUIRK_IGNORE },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_63, HID_QUIRK_IGNORE },
598 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_64, HID_QUIRK_IGNORE }, | ||
599 | { USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY, HID_QUIRK_IGNORE }, | ||
466 | 600 | ||
467 | { 0, 0 } | 601 | { 0, 0 } |
468 | }; | 602 | }; |
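The hunk above grows the usbhid blacklist with the Logitech Harmony entries (plus the National Semiconductor Harmony dongle), all tagged HID_QUIRK_IGNORE so the generic HID driver leaves those devices alone. As a rough illustration of how such a table is consumed — not the kernel's actual lookup code, and with invented type names, flag value, and vendor/product IDs — a zero-terminated vendor/product scan looks like this:

    #include <stdio.h>

    #define QUIRK_IGNORE 0x01   /* invented flag value for the demo */

    struct quirk_entry {
        unsigned short vendor;
        unsigned short product;
        unsigned int   quirks;
    };

    /* a zero vendor/product pair terminates the table, like the { 0, 0 } above */
    static const struct quirk_entry quirk_table[] = {
        { 0x1234, 0x0001, QUIRK_IGNORE },
        { 0x1234, 0x0002, QUIRK_IGNORE },
        { 0, 0, 0 }
    };

    static unsigned int lookup_quirks(unsigned short vendor, unsigned short product)
    {
        const struct quirk_entry *e;

        for (e = quirk_table; e->vendor || e->product; e++)
            if (e->vendor == vendor && e->product == product)
                return e->quirks;
        return 0;   /* unknown device: no quirks */
    }

    int main(void)
    {
        printf("quirks for 1234:0002 -> %#x\n", lookup_quirks(0x1234, 0x0002));
        printf("quirks for ffff:ffff -> %#x\n", lookup_quirks(0xffff, 0xffff));
        return 0;
    }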
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index c89b5f4b2d04..8a9b98fcb66d 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c | |||
@@ -693,13 +693,12 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
693 | if (ret) | 693 | if (ret) |
694 | goto out; | 694 | goto out; |
695 | 695 | ||
696 | state = kmalloc(sizeof(struct icside_state), GFP_KERNEL); | 696 | state = kzalloc(sizeof(struct icside_state), GFP_KERNEL); |
697 | if (!state) { | 697 | if (!state) { |
698 | ret = -ENOMEM; | 698 | ret = -ENOMEM; |
699 | goto release; | 699 | goto release; |
700 | } | 700 | } |
701 | 701 | ||
702 | memset(state, 0, sizeof(state)); | ||
703 | state->type = ICS_TYPE_NOTYPE; | 702 | state->type = ICS_TYPE_NOTYPE; |
704 | state->dev = &ec->dev; | 703 | state->dev = &ec->dev; |
705 | 704 | ||
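The icside change above is the usual kmalloc()+memset() to kzalloc() conversion, and it also quietly fixes the removed memset(), which cleared only sizeof(state) — the size of the pointer — rather than sizeof(*state). A hedged userspace analogue of the same before/after, using malloc()/calloc() since the kernel allocators are not available here:

    #include <stdlib.h>
    #include <string.h>

    struct state_demo {      /* stand-in struct; the real icside_state differs */
        int  type;
        void *dev;
    };

    int main(void)
    {
        /* old pattern: allocate, then clear -- easy to pass the wrong size */
        struct state_demo *a = malloc(sizeof(*a));
        if (!a)
            return 1;
        memset(a, 0, sizeof(*a));          /* sizeof(*a), not sizeof(a) */

        /* new pattern: one call that returns zeroed memory */
        struct state_demo *b = calloc(1, sizeof(*b));
        if (!b) {
            free(a);
            return 1;
        }

        free(b);
        free(a);
        return 0;
    }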
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index e82bfa5e0ab8..1fa57947bca0 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -640,7 +640,7 @@ typedef enum { | |||
640 | } idetape_chrdev_direction_t; | 640 | } idetape_chrdev_direction_t; |
641 | 641 | ||
642 | struct idetape_bh { | 642 | struct idetape_bh { |
643 | unsigned short b_size; | 643 | u32 b_size; |
644 | atomic_t b_count; | 644 | atomic_t b_count; |
645 | struct idetape_bh *b_reqnext; | 645 | struct idetape_bh *b_reqnext; |
646 | char *b_data; | 646 | char *b_data; |
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index 5511c86733dc..025689de50e9 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c | |||
@@ -593,7 +593,7 @@ static struct dmi_system_id cable_dmi_table[] = { | |||
593 | .ident = "HP Pavilion N5430", | 593 | .ident = "HP Pavilion N5430", |
594 | .matches = { | 594 | .matches = { |
595 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | 595 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), |
596 | DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"), | 596 | DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), |
597 | }, | 597 | }, |
598 | }, | 598 | }, |
599 | { } | 599 | { } |
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index 19633c5aba15..0e3b5de26e69 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c | |||
@@ -475,11 +475,11 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha | |||
475 | switch (rev) { | 475 | switch (rev) { |
476 | case 0x07: | 476 | case 0x07: |
477 | case 0x05: | 477 | case 0x05: |
478 | printk("%s: UltraDMA capable", name); | 478 | printk("%s: UltraDMA capable\n", name); |
479 | break; | 479 | break; |
480 | case 0x03: | 480 | case 0x03: |
481 | default: | 481 | default: |
482 | printk("%s: MultiWord DMA force limited", name); | 482 | printk("%s: MultiWord DMA force limited\n", name); |
483 | break; | 483 | break; |
484 | case 0x01: | 484 | case 0x01: |
485 | printk("%s: MultiWord DMA limited, " | 485 | printk("%s: MultiWord DMA limited, " |
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c index bccedf9b8b28..b89e81656875 100644 --- a/drivers/ide/pci/cs5520.c +++ b/drivers/ide/pci/cs5520.c | |||
@@ -133,7 +133,7 @@ static void cs5520_tune_drive(ide_drive_t *drive, u8 pio) | |||
133 | static int cs5520_config_drive_xfer_rate(ide_drive_t *drive) | 133 | static int cs5520_config_drive_xfer_rate(ide_drive_t *drive) |
134 | { | 134 | { |
135 | /* Tune the drive for PIO modes up to PIO 4 */ | 135 | /* Tune the drive for PIO modes up to PIO 4 */ |
136 | cs5520_tune_drive(drive, 4); | 136 | cs5520_tune_drive(drive, 255); |
137 | 137 | ||
138 | /* Then tell the core to use DMA operations */ | 138 | /* Then tell the core to use DMA operations */ |
139 | return 0; | 139 | return 0; |
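In the cs5520 hunk the PIO argument changes from 4 to 255. Judging from the other hunks in this series (for example ide_get_best_pio_mode(drive, 255, 4) in the cs5535 and it8213 changes), 255 is the conventional "auto" request: pick the best mode the drive reports rather than forcing PIO 4 outright. A tiny sketch of that clamp-or-auto selection, with an invented helper name and plain integers standing in for the driver's data:

    #include <stdio.h>

    /* 255 means "auto": take the best mode both drive and host support;
     * anything else is a specific request, clamped to what is possible */
    static unsigned char pick_pio(unsigned char wanted,
                                  unsigned char drive_max,
                                  unsigned char host_max)
    {
        unsigned char best = drive_max < host_max ? drive_max : host_max;

        if (wanted == 255)
            return best;
        return wanted < best ? wanted : best;
    }

    int main(void)
    {
        printf("auto  -> PIO %u\n", pick_pio(255, 3, 4));  /* -> 3 */
        printf("ask 4 -> PIO %u\n", pick_pio(4, 3, 4));    /* clamped to 3 */
        return 0;
    }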
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c index ce44e38390aa..082ca7da2cbc 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/pci/cs5535.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * linux/drivers/ide/pci/cs5535.c | 2 | * linux/drivers/ide/pci/cs5535.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2005 Advanced Micro Devices, Inc. | 4 | * Copyright (C) 2004-2005 Advanced Micro Devices, Inc. |
5 | * Copyright (C) 2007 Bartlomiej Zolnierkiewicz | ||
5 | * | 6 | * |
6 | * History: | 7 | * History: |
7 | * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com> | 8 | * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com> |
@@ -83,14 +84,17 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed) | |||
83 | 84 | ||
84 | /* Set the PIO timings */ | 85 | /* Set the PIO timings */ |
85 | if ((speed & XFER_MODE) == XFER_PIO) { | 86 | if ((speed & XFER_MODE) == XFER_PIO) { |
86 | u8 pioa; | 87 | ide_drive_t *pair = &drive->hwif->drives[drive->dn ^ 1]; |
87 | u8 piob; | 88 | u8 cmd, pioa; |
88 | u8 cmd; | ||
89 | 89 | ||
90 | pioa = speed - XFER_PIO_0; | 90 | cmd = pioa = speed - XFER_PIO_0; |
91 | piob = ide_get_best_pio_mode(&(drive->hwif->drives[!unit]), | 91 | |
92 | 255, 4); | 92 | if (pair->present) { |
93 | cmd = pioa < piob ? pioa : piob; | 93 | u8 piob = ide_get_best_pio_mode(pair, 255, 4); |
94 | |||
95 | if (piob < cmd) | ||
96 | cmd = piob; | ||
97 | } | ||
94 | 98 | ||
95 | /* Write the speed of the current drive */ | 99 | /* Write the speed of the current drive */ |
96 | reg = (cs5535_pio_cmd_timings[cmd] << 16) | | 100 | reg = (cs5535_pio_cmd_timings[cmd] << 16) | |
@@ -116,7 +120,7 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed) | |||
116 | 120 | ||
117 | reg &= 0x80000000UL; /* Preserve the PIO format bit */ | 121 | reg &= 0x80000000UL; /* Preserve the PIO format bit */ |
118 | 122 | ||
119 | if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_7) | 123 | if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_4) |
120 | reg |= cs5535_udma_timings[speed - XFER_UDMA_0]; | 124 | reg |= cs5535_udma_timings[speed - XFER_UDMA_0]; |
121 | else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) | 125 | else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) |
122 | reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0]; | 126 | reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0]; |
@@ -151,32 +155,22 @@ static int cs5535_set_drive(ide_drive_t *drive, u8 speed) | |||
151 | * | 155 | * |
152 | * A callback from the upper layers for PIO-only tuning. | 156 | * A callback from the upper layers for PIO-only tuning. |
153 | */ | 157 | */ |
154 | static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed) | 158 | static void cs5535_tuneproc(ide_drive_t *drive, u8 pio) |
155 | { | 159 | { |
156 | u8 modes[] = { XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3, | 160 | pio = ide_get_best_pio_mode(drive, pio, 4); |
157 | XFER_PIO_4 }; | 161 | ide_config_drive_speed(drive, XFER_PIO_0 + pio); |
158 | 162 | cs5535_set_speed(drive, XFER_PIO_0 + pio); | |
159 | /* cs5535 max pio is pio 4, best_pio will check the blacklist. | ||
160 | i think we don't need to rate_filter the incoming xferspeed | ||
161 | since we know we're only going to choose pio */ | ||
162 | xferspeed = ide_get_best_pio_mode(drive, xferspeed, 4); | ||
163 | ide_config_drive_speed(drive, modes[xferspeed]); | ||
164 | cs5535_set_speed(drive, xferspeed); | ||
165 | } | 163 | } |
166 | 164 | ||
167 | static int cs5535_dma_check(ide_drive_t *drive) | 165 | static int cs5535_dma_check(ide_drive_t *drive) |
168 | { | 166 | { |
169 | u8 speed; | ||
170 | |||
171 | drive->init_speed = 0; | 167 | drive->init_speed = 0; |
172 | 168 | ||
173 | if (ide_tune_dma(drive)) | 169 | if (ide_tune_dma(drive)) |
174 | return 0; | 170 | return 0; |
175 | 171 | ||
176 | if (ide_use_fast_pio(drive)) { | 172 | if (ide_use_fast_pio(drive)) |
177 | speed = ide_get_best_pio_mode(drive, 255, 4); | 173 | cs5535_tuneproc(drive, 255); |
178 | cs5535_set_drive(drive, speed); | ||
179 | } | ||
180 | 174 | ||
181 | return -1; | 175 | return -1; |
182 | } | 176 | } |
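The cs5535 rework above computes the shared command timing from the pair drive only when that drive is actually present, instead of always consulting hwif->drives[!unit]. Reduced to its core, the selection is a guarded minimum; the sketch below uses invented names and plain ints rather than the driver's structures:

    #include <stdio.h>

    /* pick the command-register timing shared by the two drives on a channel;
     * pio values are mode numbers 0..4, pair_present says whether the other
     * drive exists -- names and layout invented, not the driver's structures */
    static int shared_cmd_timing(int this_pio, int pair_present, int pair_pio)
    {
        int cmd = this_pio;

        if (pair_present && pair_pio < cmd)
            cmd = pair_pio;    /* both drives share the slower command timing */
        return cmd;
    }

    int main(void)
    {
        printf("pair at PIO 2 present -> use %d\n", shared_cmd_timing(4, 1, 2));
        printf("no pair drive         -> use %d\n", shared_cmd_timing(4, 0, 2));
        return 0;
    }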
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c index 95dbed7e6022..70b3245dbf62 100644 --- a/drivers/ide/pci/it8213.c +++ b/drivers/ide/pci/it8213.c | |||
@@ -21,7 +21,7 @@ | |||
21 | * it8213_dma_2_pio - return the PIO mode matching DMA | 21 | * it8213_dma_2_pio - return the PIO mode matching DMA |
22 | * @xfer_rate: transfer speed | 22 | * @xfer_rate: transfer speed |
23 | * | 23 | * |
24 | * Returns the nearest equivalent PIO timing for the PIO or DMA | 24 | * Returns the nearest equivalent PIO timing for the DMA |
25 | * mode requested by the controller. | 25 | * mode requested by the controller. |
26 | */ | 26 | */ |
27 | 27 | ||
@@ -35,34 +35,28 @@ static u8 it8213_dma_2_pio (u8 xfer_rate) { | |||
35 | case XFER_UDMA_1: | 35 | case XFER_UDMA_1: |
36 | case XFER_UDMA_0: | 36 | case XFER_UDMA_0: |
37 | case XFER_MW_DMA_2: | 37 | case XFER_MW_DMA_2: |
38 | case XFER_PIO_4: | ||
39 | return 4; | 38 | return 4; |
40 | case XFER_MW_DMA_1: | 39 | case XFER_MW_DMA_1: |
41 | case XFER_PIO_3: | ||
42 | return 3; | 40 | return 3; |
43 | case XFER_SW_DMA_2: | 41 | case XFER_SW_DMA_2: |
44 | case XFER_PIO_2: | ||
45 | return 2; | 42 | return 2; |
46 | case XFER_MW_DMA_0: | 43 | case XFER_MW_DMA_0: |
47 | case XFER_SW_DMA_1: | 44 | case XFER_SW_DMA_1: |
48 | case XFER_SW_DMA_0: | 45 | case XFER_SW_DMA_0: |
49 | case XFER_PIO_1: | ||
50 | case XFER_PIO_0: | ||
51 | case XFER_PIO_SLOW: | ||
52 | default: | 46 | default: |
53 | return 0; | 47 | return 0; |
54 | } | 48 | } |
55 | } | 49 | } |
56 | 50 | ||
57 | /* | 51 | /* |
58 | * it8213_tuneproc - tune a drive | 52 | * it8213_tune_pio - tune a drive |
59 | * @drive: drive to tune | 53 | * @drive: drive to tune |
60 | * @pio: desired PIO mode | 54 | * @pio: desired PIO mode |
61 | * | 55 | * |
62 | * Set the interface PIO mode. | 56 | * Set the interface PIO mode. |
63 | */ | 57 | */ |
64 | 58 | ||
65 | static void it8213_tuneproc (ide_drive_t *drive, u8 pio) | 59 | static void it8213_tune_pio(ide_drive_t *drive, const u8 pio) |
66 | { | 60 | { |
67 | ide_hwif_t *hwif = HWIF(drive); | 61 | ide_hwif_t *hwif = HWIF(drive); |
68 | struct pci_dev *dev = hwif->pci_dev; | 62 | struct pci_dev *dev = hwif->pci_dev; |
@@ -82,8 +76,6 @@ static void it8213_tuneproc (ide_drive_t *drive, u8 pio) | |||
82 | { 2, 1 }, | 76 | { 2, 1 }, |
83 | { 2, 3 }, }; | 77 | { 2, 3 }, }; |
84 | 78 | ||
85 | pio = ide_get_best_pio_mode(drive, pio, 4); | ||
86 | |||
87 | spin_lock_irqsave(&tune_lock, flags); | 79 | spin_lock_irqsave(&tune_lock, flags); |
88 | pci_read_config_word(dev, master_port, &master_data); | 80 | pci_read_config_word(dev, master_port, &master_data); |
89 | 81 | ||
@@ -113,6 +105,13 @@ static void it8213_tuneproc (ide_drive_t *drive, u8 pio) | |||
113 | spin_unlock_irqrestore(&tune_lock, flags); | 105 | spin_unlock_irqrestore(&tune_lock, flags); |
114 | } | 106 | } |
115 | 107 | ||
108 | static void it8213_tuneproc(ide_drive_t *drive, u8 pio) | ||
109 | { | ||
110 | pio = ide_get_best_pio_mode(drive, pio, 4); | ||
111 | it8213_tune_pio(drive, pio); | ||
112 | ide_config_drive_speed(drive, XFER_PIO_0 + pio); | ||
113 | } | ||
114 | |||
116 | /** | 115 | /** |
117 | * it8213_tune_chipset - set controller timings | 116 | * it8213_tune_chipset - set controller timings |
118 | * @drive: Drive to set up | 117 | * @drive: Drive to set up |
@@ -193,7 +192,12 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
193 | if (reg55 & w_flag) | 192 | if (reg55 & w_flag) |
194 | pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); | 193 | pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); |
195 | } | 194 | } |
196 | it8213_tuneproc(drive, it8213_dma_2_pio(speed)); | 195 | |
196 | if (speed > XFER_PIO_4) | ||
197 | it8213_tune_pio(drive, it8213_dma_2_pio(speed)); | ||
198 | else | ||
199 | it8213_tune_pio(drive, speed - XFER_PIO_0); | ||
200 | |||
197 | return ide_config_drive_speed(drive, speed); | 201 | return ide_config_drive_speed(drive, speed); |
198 | } | 202 | } |
199 | 203 | ||
@@ -209,13 +213,10 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
209 | 213 | ||
210 | static int it8213_config_drive_for_dma (ide_drive_t *drive) | 214 | static int it8213_config_drive_for_dma (ide_drive_t *drive) |
211 | { | 215 | { |
212 | u8 pio; | ||
213 | |||
214 | if (ide_tune_dma(drive)) | 216 | if (ide_tune_dma(drive)) |
215 | return 0; | 217 | return 0; |
216 | 218 | ||
217 | pio = ide_get_best_pio_mode(drive, 255, 4); | 219 | it8213_tuneproc(drive, 255); |
218 | it8213_tune_chipset(drive, XFER_PIO_0 + pio); | ||
219 | 220 | ||
220 | return -1; | 221 | return -1; |
221 | } | 222 | } |
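The it8213 changes split the PIO register programming into it8213_tune_pio(), drop the PIO cases from it8213_dma_2_pio(), and have it8213_tune_chipset() pass speed - XFER_PIO_0 directly when the requested speed is already a PIO mode. The same restructuring shows up again in the piix.c and slc90e66.c hunks further down. A simplified, self-contained model of that dispatch — the constants here are invented and do not match the kernel's XFER_* values, and the DMA-to-PIO mapping is grossly reduced:

    #include <stdio.h>

    /* illustrative transfer-mode constants; the real XFER_* values differ */
    enum { PIO_0 = 0x08, PIO_4 = 0x0c, MW_DMA_2 = 0x22, UDMA_5 = 0x45 };

    static int dma_2_pio(int speed)   /* nearest PIO timing for a DMA mode (toy) */
    {
        if (speed >= MW_DMA_2)
            return 4;
        return 0;
    }

    static void tune(int speed)
    {
        int pio;

        if (speed > PIO_4)            /* DMA mode: derive the matching PIO */
            pio = dma_2_pio(speed);
        else                          /* PIO mode: use it directly */
            pio = speed - PIO_0;
        printf("program PIO %d for speed %#x\n", pio, speed);
    }

    int main(void)
    {
        tune(PIO_0 + 3);
        tune(UDMA_5);
        return 0;
    }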
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c index d7ce9dd8de16..65a0ff352b98 100644 --- a/drivers/ide/pci/jmicron.c +++ b/drivers/ide/pci/jmicron.c | |||
@@ -83,23 +83,10 @@ static u8 __devinit ata66_jmicron(ide_hwif_t *hwif) | |||
83 | return ATA_CBL_PATA80; | 83 | return ATA_CBL_PATA80; |
84 | } | 84 | } |
85 | 85 | ||
86 | static void jmicron_tuneproc (ide_drive_t *drive, byte mode_wanted) | 86 | static void jmicron_tuneproc(ide_drive_t *drive, u8 pio) |
87 | { | 87 | { |
88 | return; | 88 | pio = ide_get_best_pio_mode(drive, pio, 5); |
89 | } | 89 | ide_config_drive_speed(drive, XFER_PIO_0 + pio); |
90 | |||
91 | /** | ||
92 | * config_jmicron_chipset_for_pio - set drive timings | ||
93 | * @drive: drive to tune | ||
94 | * @speed we want | ||
95 | * | ||
96 | */ | ||
97 | |||
98 | static void config_jmicron_chipset_for_pio (ide_drive_t *drive, byte set_speed) | ||
99 | { | ||
100 | u8 speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5); | ||
101 | if (set_speed) | ||
102 | (void) ide_config_drive_speed(drive, speed); | ||
103 | } | 90 | } |
104 | 91 | ||
105 | /** | 92 | /** |
@@ -132,7 +119,7 @@ static int jmicron_config_drive_for_dma (ide_drive_t *drive) | |||
132 | if (ide_tune_dma(drive)) | 119 | if (ide_tune_dma(drive)) |
133 | return 0; | 120 | return 0; |
134 | 121 | ||
135 | config_jmicron_chipset_for_pio(drive, 1); | 122 | jmicron_tuneproc(drive, 255); |
136 | 123 | ||
137 | return -1; | 124 | return -1; |
138 | } | 125 | } |
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c index 4f69cd067e5e..5cfa9378bbb8 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/pci/piix.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/piix.c Version 0.50 Jun 10, 2007 | 2 | * linux/drivers/ide/pci/piix.c Version 0.51 Jul 6, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer | 4 | * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer |
5 | * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> | 5 | * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> |
@@ -109,7 +109,7 @@ static int no_piix_dma; | |||
109 | * piix_dma_2_pio - return the PIO mode matching DMA | 109 | * piix_dma_2_pio - return the PIO mode matching DMA |
110 | * @xfer_rate: transfer speed | 110 | * @xfer_rate: transfer speed |
111 | * | 111 | * |
112 | * Returns the nearest equivalent PIO timing for the PIO or DMA | 112 | * Returns the nearest equivalent PIO timing for the DMA |
113 | * mode requested by the controller. | 113 | * mode requested by the controller. |
114 | */ | 114 | */ |
115 | 115 | ||
@@ -123,20 +123,14 @@ static u8 piix_dma_2_pio (u8 xfer_rate) { | |||
123 | case XFER_UDMA_1: | 123 | case XFER_UDMA_1: |
124 | case XFER_UDMA_0: | 124 | case XFER_UDMA_0: |
125 | case XFER_MW_DMA_2: | 125 | case XFER_MW_DMA_2: |
126 | case XFER_PIO_4: | ||
127 | return 4; | 126 | return 4; |
128 | case XFER_MW_DMA_1: | 127 | case XFER_MW_DMA_1: |
129 | case XFER_PIO_3: | ||
130 | return 3; | 128 | return 3; |
131 | case XFER_SW_DMA_2: | 129 | case XFER_SW_DMA_2: |
132 | case XFER_PIO_2: | ||
133 | return 2; | 130 | return 2; |
134 | case XFER_MW_DMA_0: | 131 | case XFER_MW_DMA_0: |
135 | case XFER_SW_DMA_1: | 132 | case XFER_SW_DMA_1: |
136 | case XFER_SW_DMA_0: | 133 | case XFER_SW_DMA_0: |
137 | case XFER_PIO_1: | ||
138 | case XFER_PIO_0: | ||
139 | case XFER_PIO_SLOW: | ||
140 | default: | 134 | default: |
141 | return 0; | 135 | return 0; |
142 | } | 136 | } |
@@ -269,6 +263,7 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
269 | case XFER_PIO_4: | 263 | case XFER_PIO_4: |
270 | case XFER_PIO_3: | 264 | case XFER_PIO_3: |
271 | case XFER_PIO_2: | 265 | case XFER_PIO_2: |
266 | case XFER_PIO_1: | ||
272 | case XFER_PIO_0: break; | 267 | case XFER_PIO_0: break; |
273 | default: return -1; | 268 | default: return -1; |
274 | } | 269 | } |
@@ -299,7 +294,11 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
299 | pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); | 294 | pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); |
300 | } | 295 | } |
301 | 296 | ||
302 | piix_tune_pio(drive, piix_dma_2_pio(speed)); | 297 | if (speed > XFER_PIO_4) |
298 | piix_tune_pio(drive, piix_dma_2_pio(speed)); | ||
299 | else | ||
300 | piix_tune_pio(drive, speed - XFER_PIO_0); | ||
301 | |||
303 | return ide_config_drive_speed(drive, speed); | 302 | return ide_config_drive_speed(drive, speed); |
304 | } | 303 | } |
305 | 304 | ||
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index bf19ddfa6cda..eeb0a6d434aa 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c | |||
@@ -190,7 +190,7 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count) | |||
190 | } | 190 | } |
191 | 191 | ||
192 | /** | 192 | /** |
193 | * scc_tuneproc - tune a drive PIO mode | 193 | * scc_tune_pio - tune a drive PIO mode |
194 | * @drive: drive to tune | 194 | * @drive: drive to tune |
195 | * @mode_wanted: the target operating mode | 195 | * @mode_wanted: the target operating mode |
196 | * | 196 | * |
@@ -198,7 +198,7 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count) | |||
198 | * controller. | 198 | * controller. |
199 | */ | 199 | */ |
200 | 200 | ||
201 | static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted) | 201 | static void scc_tune_pio(ide_drive_t *drive, const u8 pio) |
202 | { | 202 | { |
203 | ide_hwif_t *hwif = HWIF(drive); | 203 | ide_hwif_t *hwif = HWIF(drive); |
204 | struct scc_ports *ports = ide_get_hwifdata(hwif); | 204 | struct scc_ports *ports = ide_get_hwifdata(hwif); |
@@ -207,41 +207,25 @@ static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted) | |||
207 | unsigned long piosht_port = ctl_base + 0x000; | 207 | unsigned long piosht_port = ctl_base + 0x000; |
208 | unsigned long pioct_port = ctl_base + 0x004; | 208 | unsigned long pioct_port = ctl_base + 0x004; |
209 | unsigned long reg; | 209 | unsigned long reg; |
210 | unsigned char speed = XFER_PIO_0; | ||
211 | int offset; | 210 | int offset; |
212 | 211 | ||
213 | mode_wanted = ide_get_best_pio_mode(drive, mode_wanted, 4); | ||
214 | switch (mode_wanted) { | ||
215 | case 4: | ||
216 | speed = XFER_PIO_4; | ||
217 | break; | ||
218 | case 3: | ||
219 | speed = XFER_PIO_3; | ||
220 | break; | ||
221 | case 2: | ||
222 | speed = XFER_PIO_2; | ||
223 | break; | ||
224 | case 1: | ||
225 | speed = XFER_PIO_1; | ||
226 | break; | ||
227 | case 0: | ||
228 | default: | ||
229 | speed = XFER_PIO_0; | ||
230 | break; | ||
231 | } | ||
232 | |||
233 | reg = in_be32((void __iomem *)cckctrl_port); | 212 | reg = in_be32((void __iomem *)cckctrl_port); |
234 | if (reg & CCKCTRL_ATACLKOEN) { | 213 | if (reg & CCKCTRL_ATACLKOEN) { |
235 | offset = 1; /* 133MHz */ | 214 | offset = 1; /* 133MHz */ |
236 | } else { | 215 | } else { |
237 | offset = 0; /* 100MHz */ | 216 | offset = 0; /* 100MHz */ |
238 | } | 217 | } |
239 | reg = JCHSTtbl[offset][mode_wanted] << 16 | JCHHTtbl[offset][mode_wanted]; | 218 | reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio]; |
240 | out_be32((void __iomem *)piosht_port, reg); | 219 | out_be32((void __iomem *)piosht_port, reg); |
241 | reg = JCHCTtbl[offset][mode_wanted]; | 220 | reg = JCHCTtbl[offset][pio]; |
242 | out_be32((void __iomem *)pioct_port, reg); | 221 | out_be32((void __iomem *)pioct_port, reg); |
222 | } | ||
243 | 223 | ||
244 | ide_config_drive_speed(drive, speed); | 224 | static void scc_tuneproc(ide_drive_t *drive, u8 pio) |
225 | { | ||
226 | pio = ide_get_best_pio_mode(drive, pio, 4); | ||
227 | scc_tune_pio(drive, pio); | ||
228 | ide_config_drive_speed(drive, XFER_PIO_0 + pio); | ||
245 | } | 229 | } |
246 | 230 | ||
247 | /** | 231 | /** |
@@ -280,26 +264,21 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed) | |||
280 | 264 | ||
281 | switch (speed) { | 265 | switch (speed) { |
282 | case XFER_UDMA_6: | 266 | case XFER_UDMA_6: |
283 | idx = 6; | ||
284 | break; | ||
285 | case XFER_UDMA_5: | 267 | case XFER_UDMA_5: |
286 | idx = 5; | ||
287 | break; | ||
288 | case XFER_UDMA_4: | 268 | case XFER_UDMA_4: |
289 | idx = 4; | ||
290 | break; | ||
291 | case XFER_UDMA_3: | 269 | case XFER_UDMA_3: |
292 | idx = 3; | ||
293 | break; | ||
294 | case XFER_UDMA_2: | 270 | case XFER_UDMA_2: |
295 | idx = 2; | ||
296 | break; | ||
297 | case XFER_UDMA_1: | 271 | case XFER_UDMA_1: |
298 | idx = 1; | ||
299 | break; | ||
300 | case XFER_UDMA_0: | 272 | case XFER_UDMA_0: |
301 | idx = 0; | 273 | idx = speed - XFER_UDMA_0; |
302 | break; | 274 | break; |
275 | case XFER_PIO_4: | ||
276 | case XFER_PIO_3: | ||
277 | case XFER_PIO_2: | ||
278 | case XFER_PIO_1: | ||
279 | case XFER_PIO_0: | ||
280 | scc_tune_pio(drive, speed - XFER_PIO_0); | ||
281 | return ide_config_drive_speed(drive, speed); | ||
303 | default: | 282 | default: |
304 | return 1; | 283 | return 1; |
305 | } | 284 | } |
@@ -329,7 +308,7 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed) | |||
329 | * required. | 308 | * required. |
330 | * If the drive isn't suitable for DMA or we hit other problems | 309 | * If the drive isn't suitable for DMA or we hit other problems |
331 | * then we will drop down to PIO and set up PIO appropriately. | 310 | * then we will drop down to PIO and set up PIO appropriately. |
332 | * (return 1) | 311 | * (return -1) |
333 | */ | 312 | */ |
334 | 313 | ||
335 | static int scc_config_drive_for_dma(ide_drive_t *drive) | 314 | static int scc_config_drive_for_dma(ide_drive_t *drive) |
@@ -338,7 +317,7 @@ static int scc_config_drive_for_dma(ide_drive_t *drive) | |||
338 | return 0; | 317 | return 0; |
339 | 318 | ||
340 | if (ide_use_fast_pio(drive)) | 319 | if (ide_use_fast_pio(drive)) |
341 | scc_tuneproc(drive, 4); | 320 | scc_tuneproc(drive, 255); |
342 | 321 | ||
343 | return -1; | 322 | return -1; |
344 | } | 323 | } |
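The scc_pata hunk collapses seven identical UDMA cases into idx = speed - XFER_UDMA_0, which works because the UDMA mode constants are consecutive, and it routes the PIO cases to the new scc_tune_pio() helper instead of returning failure. The subtraction-as-index idea in isolation (constants and names invented; the listed rates are just the nominal UDMA mode speeds):

    #include <stdio.h>

    /* consecutive mode constants make table indexing a subtraction
     * (values invented; the kernel's XFER_UDMA_* differ) */
    enum { UDMA_0 = 0x40, UDMA_6 = 0x46 };

    static const char *udma_name[] = {
        "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66", "UDMA/100", "UDMA/133"
    };

    int main(void)
    {
        for (int speed = UDMA_0; speed <= UDMA_6; speed++)
            printf("speed %#x -> table index %d (%s)\n",
                   speed, speed - UDMA_0, udma_name[speed - UDMA_0]);
        return 0;
    }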
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 63fbb79e8178..26f24802d3e8 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c | |||
@@ -801,6 +801,7 @@ struct sis_laptop { | |||
801 | static const struct sis_laptop sis_laptop[] = { | 801 | static const struct sis_laptop sis_laptop[] = { |
802 | /* devid, subvendor, subdev */ | 802 | /* devid, subvendor, subdev */ |
803 | { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ | 803 | { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ |
804 | { 0x5513, 0x1734, 0x105f }, /* FSC Amilo A1630 */ | ||
804 | /* end marker */ | 805 | /* end marker */ |
805 | { 0, } | 806 | { 0, } |
806 | }; | 807 | }; |
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c index 8e655f2db5cb..628b0664f576 100644 --- a/drivers/ide/pci/slc90e66.c +++ b/drivers/ide/pci/slc90e66.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/slc90e66.c Version 0.14 February 8, 2007 | 2 | * linux/drivers/ide/pci/slc90e66.c Version 0.15 Jul 6, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> | 4 | * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> |
5 | * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> | 5 | * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> |
@@ -29,20 +29,14 @@ static u8 slc90e66_dma_2_pio (u8 xfer_rate) { | |||
29 | case XFER_UDMA_1: | 29 | case XFER_UDMA_1: |
30 | case XFER_UDMA_0: | 30 | case XFER_UDMA_0: |
31 | case XFER_MW_DMA_2: | 31 | case XFER_MW_DMA_2: |
32 | case XFER_PIO_4: | ||
33 | return 4; | 32 | return 4; |
34 | case XFER_MW_DMA_1: | 33 | case XFER_MW_DMA_1: |
35 | case XFER_PIO_3: | ||
36 | return 3; | 34 | return 3; |
37 | case XFER_SW_DMA_2: | 35 | case XFER_SW_DMA_2: |
38 | case XFER_PIO_2: | ||
39 | return 2; | 36 | return 2; |
40 | case XFER_MW_DMA_0: | 37 | case XFER_MW_DMA_0: |
41 | case XFER_SW_DMA_1: | 38 | case XFER_SW_DMA_1: |
42 | case XFER_SW_DMA_0: | 39 | case XFER_SW_DMA_0: |
43 | case XFER_PIO_1: | ||
44 | case XFER_PIO_0: | ||
45 | case XFER_PIO_SLOW: | ||
46 | default: | 40 | default: |
47 | return 0; | 41 | return 0; |
48 | } | 42 | } |
@@ -136,6 +130,7 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
136 | case XFER_PIO_4: | 130 | case XFER_PIO_4: |
137 | case XFER_PIO_3: | 131 | case XFER_PIO_3: |
138 | case XFER_PIO_2: | 132 | case XFER_PIO_2: |
133 | case XFER_PIO_1: | ||
139 | case XFER_PIO_0: break; | 134 | case XFER_PIO_0: break; |
140 | default: return -1; | 135 | default: return -1; |
141 | } | 136 | } |
@@ -156,7 +151,11 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) | |||
156 | pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); | 151 | pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); |
157 | } | 152 | } |
158 | 153 | ||
159 | slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed)); | 154 | if (speed > XFER_PIO_4) |
155 | slc90e66_tune_pio(drive, slc90e66_dma_2_pio(speed)); | ||
156 | else | ||
157 | slc90e66_tune_pio(drive, speed - XFER_PIO_0); | ||
158 | |||
160 | return ide_config_drive_speed(drive, speed); | 159 | return ide_config_drive_speed(drive, speed); |
161 | } | 160 | } |
162 | 161 | ||
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig index 8012b3b0ce75..545663ef820b 100644 --- a/drivers/ieee1394/Kconfig +++ b/drivers/ieee1394/Kconfig | |||
@@ -97,7 +97,7 @@ config IEEE1394_SBP2 | |||
97 | 97 | ||
98 | config IEEE1394_SBP2_PHYS_DMA | 98 | config IEEE1394_SBP2_PHYS_DMA |
99 | bool "Enable replacement for physical DMA in SBP2" | 99 | bool "Enable replacement for physical DMA in SBP2" |
100 | depends on IEEE1394 && IEEE1394_SBP2 && EXPERIMENTAL && (X86_32 || PPC_32) | 100 | depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL |
101 | help | 101 | help |
102 | This builds sbp2 for use with non-OHCI host adapters which do not | 102 | This builds sbp2 for use with non-OHCI host adapters which do not |
103 | support physical DMA or for when ohci1394 is run with phys_dma=0. | 103 | support physical DMA or for when ohci1394 is run with phys_dma=0. |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index e882cb951b47..47dbe8f17e82 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -773,11 +773,6 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud) | |||
773 | SBP2_ERR("failed to register lower 4GB address range"); | 773 | SBP2_ERR("failed to register lower 4GB address range"); |
774 | goto failed_alloc; | 774 | goto failed_alloc; |
775 | } | 775 | } |
776 | #else | ||
777 | if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) { | ||
778 | SBP2_ERR("failed to set 4GB DMA mask"); | ||
779 | goto failed_alloc; | ||
780 | } | ||
781 | #endif | 776 | #endif |
782 | } | 777 | } |
783 | 778 | ||
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c index 512e999177f7..b2a5672df6e0 100644 --- a/drivers/mtd/nand/at91_nand.c +++ b/drivers/mtd/nand/at91_nand.c | |||
@@ -128,7 +128,10 @@ static int __init at91_nand_probe(struct platform_device *pdev) | |||
128 | nand_chip->IO_ADDR_R = host->io_base; | 128 | nand_chip->IO_ADDR_R = host->io_base; |
129 | nand_chip->IO_ADDR_W = host->io_base; | 129 | nand_chip->IO_ADDR_W = host->io_base; |
130 | nand_chip->cmd_ctrl = at91_nand_cmd_ctrl; | 130 | nand_chip->cmd_ctrl = at91_nand_cmd_ctrl; |
131 | nand_chip->dev_ready = at91_nand_device_ready; | 131 | |
132 | if (host->board->rdy_pin) | ||
133 | nand_chip->dev_ready = at91_nand_device_ready; | ||
134 | |||
132 | nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ | 135 | nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ |
133 | nand_chip->chip_delay = 20; /* 20us command delay time */ | 136 | nand_chip->chip_delay = 20; /* 20us command delay time */ |
134 | 137 | ||
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c index 1daf8231aaef..0146cdc48039 100644 --- a/drivers/mtd/nand/edb7312.c +++ b/drivers/mtd/nand/edb7312.c | |||
@@ -74,7 +74,7 @@ static struct mtd_partition partition_info[] = { | |||
74 | /* | 74 | /* |
75 | * hardware specific access to control-lines | 75 | * hardware specific access to control-lines |
76 | * | 76 | * |
77 | * NAND_NCE: bit 0 -> bit 7 | 77 | * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1) |
78 | * NAND_CLE: bit 1 -> bit 4 | 78 | * NAND_CLE: bit 1 -> bit 4 |
79 | * NAND_ALE: bit 2 -> bit 5 | 79 | * NAND_ALE: bit 2 -> bit 5 |
80 | */ | 80 | */ |
@@ -83,12 +83,12 @@ static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) | |||
83 | struct nand_chip *chip = mtd->priv; | 83 | struct nand_chip *chip = mtd->priv; |
84 | 84 | ||
85 | if (ctrl & NAND_CTRL_CHANGE) { | 85 | if (ctrl & NAND_CTRL_CHANGE) { |
86 | unsigned char bits; | 86 | unsigned char bits = 0x80; |
87 | 87 | ||
88 | bits = (ctrl & (NAND_CLE | NAND_ALE)) << 3; | 88 | bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3; |
89 | bits = (ctrl & NAND_NCE) << 7; | 89 | bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40; |
90 | 90 | ||
91 | clps_writeb((clps_readb(ep7312_pxdr) & 0xB0) | 0x10, | 91 | clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits, |
92 | ep7312_pxdr); | 92 | ep7312_pxdr); |
93 | } | 93 | } |
94 | if (cmd != NAND_CMD_NONE) | 94 | if (cmd != NAND_CMD_NONE) |
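The edb7312 hunk remaps the NAND control lines onto the GPIO byte: CLE and ALE move to bits 4 and 5 (shift left by 3), nCE becomes bit 6 driven active-low, and bit 7 is held high, which is what the updated "bit 0 -> bit 6 (bit 7 = 1)" comment describes. Note that the old code's second plain assignment to bits discarded the CLE/ALE value computed on the line before; the new |= keeps both. The packing on its own, with flag values invented for the demo:

    #include <stdio.h>

    /* logical control flags as the caller sees them (values invented) */
    #define CTRL_NCE 0x01
    #define CTRL_CLE 0x02
    #define CTRL_ALE 0x04

    /* pack the flags into the GPIO layout described above:
     * CLE -> bit 4, ALE -> bit 5, nCE -> bit 6 (active low), bit 7 forced high */
    static unsigned char pack_nand_ctrl(unsigned int ctrl)
    {
        unsigned char bits = 0x80;

        bits |= (ctrl & (CTRL_CLE | CTRL_ALE)) << 3;
        bits |= (ctrl & CTRL_NCE) ? 0x00 : 0x40;
        return bits;
    }

    int main(void)
    {
        printf("%#x\n", pack_nand_ctrl(CTRL_NCE | CTRL_CLE)); /* selected, CLE: 0x90 */
        printf("%#x\n", pack_nand_ctrl(0));                   /* deselected: 0xc0 */
        return 0;
    }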
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 7e68203fe1ba..24ac6778b1a8 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -24,6 +24,7 @@ | |||
24 | * if we have HW ecc support. | 24 | * if we have HW ecc support. |
25 | * The AG-AND chips have nice features for speed improvement, | 25 | * The AG-AND chips have nice features for speed improvement, |
26 | * which are not supported yet. Read / program 4 pages in one go. | 26 | * which are not supported yet. Read / program 4 pages in one go. |
27 | * BBT table is not serialized, has to be fixed | ||
27 | * | 28 | * |
28 | * This program is free software; you can redistribute it and/or modify | 29 | * This program is free software; you can redistribute it and/or modify |
29 | * it under the terms of the GNU General Public License version 2 as | 30 | * it under the terms of the GNU General Public License version 2 as |
@@ -360,6 +361,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
360 | /* We write two bytes, so we dont have to mess with 16 bit | 361 | /* We write two bytes, so we dont have to mess with 16 bit |
361 | * access | 362 | * access |
362 | */ | 363 | */ |
364 | nand_get_device(chip, mtd, FL_WRITING); | ||
363 | ofs += mtd->oobsize; | 365 | ofs += mtd->oobsize; |
364 | chip->ops.len = chip->ops.ooblen = 2; | 366 | chip->ops.len = chip->ops.ooblen = 2; |
365 | chip->ops.datbuf = NULL; | 367 | chip->ops.datbuf = NULL; |
@@ -367,9 +369,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
367 | chip->ops.ooboffs = chip->badblockpos & ~0x01; | 369 | chip->ops.ooboffs = chip->badblockpos & ~0x01; |
368 | 370 | ||
369 | ret = nand_do_write_oob(mtd, ofs, &chip->ops); | 371 | ret = nand_do_write_oob(mtd, ofs, &chip->ops); |
372 | nand_release_device(mtd); | ||
370 | } | 373 | } |
371 | if (!ret) | 374 | if (!ret) |
372 | mtd->ecc_stats.badblocks++; | 375 | mtd->ecc_stats.badblocks++; |
376 | |||
373 | return ret; | 377 | return ret; |
374 | } | 378 | } |
375 | 379 | ||
@@ -768,7 +772,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
768 | uint8_t *p = buf; | 772 | uint8_t *p = buf; |
769 | uint8_t *ecc_calc = chip->buffers->ecccalc; | 773 | uint8_t *ecc_calc = chip->buffers->ecccalc; |
770 | uint8_t *ecc_code = chip->buffers->ecccode; | 774 | uint8_t *ecc_code = chip->buffers->ecccode; |
771 | int *eccpos = chip->ecc.layout->eccpos; | 775 | uint32_t *eccpos = chip->ecc.layout->eccpos; |
772 | 776 | ||
773 | chip->ecc.read_page_raw(mtd, chip, buf); | 777 | chip->ecc.read_page_raw(mtd, chip, buf); |
774 | 778 | ||
@@ -810,7 +814,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
810 | uint8_t *p = buf; | 814 | uint8_t *p = buf; |
811 | uint8_t *ecc_calc = chip->buffers->ecccalc; | 815 | uint8_t *ecc_calc = chip->buffers->ecccalc; |
812 | uint8_t *ecc_code = chip->buffers->ecccode; | 816 | uint8_t *ecc_code = chip->buffers->ecccode; |
813 | int *eccpos = chip->ecc.layout->eccpos; | 817 | uint32_t *eccpos = chip->ecc.layout->eccpos; |
814 | 818 | ||
815 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { | 819 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { |
816 | chip->ecc.hwctl(mtd, NAND_ECC_READ); | 820 | chip->ecc.hwctl(mtd, NAND_ECC_READ); |
@@ -1416,7 +1420,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1416 | int eccsteps = chip->ecc.steps; | 1420 | int eccsteps = chip->ecc.steps; |
1417 | uint8_t *ecc_calc = chip->buffers->ecccalc; | 1421 | uint8_t *ecc_calc = chip->buffers->ecccalc; |
1418 | const uint8_t *p = buf; | 1422 | const uint8_t *p = buf; |
1419 | int *eccpos = chip->ecc.layout->eccpos; | 1423 | uint32_t *eccpos = chip->ecc.layout->eccpos; |
1420 | 1424 | ||
1421 | /* Software ecc calculation */ | 1425 | /* Software ecc calculation */ |
1422 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) | 1426 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) |
@@ -1442,7 +1446,7 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1442 | int eccsteps = chip->ecc.steps; | 1446 | int eccsteps = chip->ecc.steps; |
1443 | uint8_t *ecc_calc = chip->buffers->ecccalc; | 1447 | uint8_t *ecc_calc = chip->buffers->ecccalc; |
1444 | const uint8_t *p = buf; | 1448 | const uint8_t *p = buf; |
1445 | int *eccpos = chip->ecc.layout->eccpos; | 1449 | uint32_t *eccpos = chip->ecc.layout->eccpos; |
1446 | 1450 | ||
1447 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { | 1451 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { |
1448 | chip->ecc.hwctl(mtd, NAND_ECC_WRITE); | 1452 | chip->ecc.hwctl(mtd, NAND_ECC_WRITE); |
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index d4b1ba8f23ef..006c03aacb55 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c | |||
@@ -779,6 +779,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
779 | else { | 779 | else { |
780 | if (!mtd->erasesize) { | 780 | if (!mtd->erasesize) { |
781 | printk(KERN_WARNING PREFIX "please provide block_size"); | 781 | printk(KERN_WARNING PREFIX "please provide block_size"); |
782 | kfree(part); | ||
782 | return; | 783 | return; |
783 | } | 784 | } |
784 | else | 785 | else |
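The rfd_ftl hunk adds a kfree(part) before an early return, plugging a leak on the "no erasesize" error path. The general shape of the fix, in a hedged userspace form with invented names:

    #include <stdlib.h>
    #include <stdio.h>

    struct partition_demo { int block_size; };   /* stand-in for the real struct */

    static struct partition_demo *add_partition(int erasesize)
    {
        struct partition_demo *part = calloc(1, sizeof(*part));

        if (!part)
            return NULL;

        if (!erasesize) {
            fprintf(stderr, "please provide block_size\n");
            free(part);          /* the fix: release what was allocated above */
            return NULL;
        }

        part->block_size = erasesize;
        return part;
    }

    int main(void)
    {
        struct partition_demo *p = add_partition(0);    /* error path, no leak */
        free(add_partition(4096));                      /* success path */
        return p == NULL ? 0 : 1;
    }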
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 8e58ea3d95c0..004bc2487270 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -310,7 +310,7 @@ static int pci_default_resume(struct pci_dev *pci_dev) | |||
310 | /* restore the PCI config space */ | 310 | /* restore the PCI config space */ |
311 | pci_restore_state(pci_dev); | 311 | pci_restore_state(pci_dev); |
312 | /* if the device was enabled before suspend, reenable */ | 312 | /* if the device was enabled before suspend, reenable */ |
313 | retval = __pci_reenable_device(pci_dev); | 313 | retval = pci_reenable_device(pci_dev); |
314 | /* if the device was busmaster before the suspend, make it busmaster again */ | 314 | /* if the device was busmaster before the suspend, make it busmaster again */ |
315 | if (pci_dev->is_busmaster) | 315 | if (pci_dev->is_busmaster) |
316 | pci_set_master(pci_dev); | 316 | pci_set_master(pci_dev); |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1ee9cd9c86e2..37c00f6fd801 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -695,14 +695,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) | |||
695 | } | 695 | } |
696 | 696 | ||
697 | /** | 697 | /** |
698 | * __pci_reenable_device - Resume abandoned device | 698 | * pci_reenable_device - Resume abandoned device |
699 | * @dev: PCI device to be resumed | 699 | * @dev: PCI device to be resumed |
700 | * | 700 | * |
701 | * Note this function is a backend of pci_default_resume and is not supposed | 701 | * Note this function is a backend of pci_default_resume and is not supposed |
702 | * to be called by normal code, write proper resume handler and use it instead. | 702 | * to be called by normal code, write proper resume handler and use it instead. |
703 | */ | 703 | */ |
704 | int | 704 | int pci_reenable_device(struct pci_dev *dev) |
705 | __pci_reenable_device(struct pci_dev *dev) | ||
706 | { | 705 | { |
707 | if (atomic_read(&dev->enable_cnt)) | 706 | if (atomic_read(&dev->enable_cnt)) |
708 | return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); | 707 | return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); |
@@ -1604,7 +1603,7 @@ early_param("pci", pci_setup); | |||
1604 | device_initcall(pci_init); | 1603 | device_initcall(pci_init); |
1605 | 1604 | ||
1606 | EXPORT_SYMBOL_GPL(pci_restore_bars); | 1605 | EXPORT_SYMBOL_GPL(pci_restore_bars); |
1607 | EXPORT_SYMBOL(__pci_reenable_device); | 1606 | EXPORT_SYMBOL(pci_reenable_device); |
1608 | EXPORT_SYMBOL(pci_enable_device_bars); | 1607 | EXPORT_SYMBOL(pci_enable_device_bars); |
1609 | EXPORT_SYMBOL(pci_enable_device); | 1608 | EXPORT_SYMBOL(pci_enable_device); |
1610 | EXPORT_SYMBOL(pcim_enable_device); | 1609 | EXPORT_SYMBOL(pcim_enable_device); |
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index bb90df8bdce4..1cc01acc2808 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c | |||
@@ -328,17 +328,15 @@ static int idescsi_check_condition(ide_drive_t *drive, struct request *failed_co | |||
328 | u8 *buf; | 328 | u8 *buf; |
329 | 329 | ||
330 | /* stuff a sense request in front of our current request */ | 330 | /* stuff a sense request in front of our current request */ |
331 | pc = kmalloc (sizeof (idescsi_pc_t), GFP_ATOMIC); | 331 | pc = kzalloc(sizeof(idescsi_pc_t), GFP_ATOMIC); |
332 | rq = kmalloc (sizeof (struct request), GFP_ATOMIC); | 332 | rq = kmalloc(sizeof(struct request), GFP_ATOMIC); |
333 | buf = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC); | 333 | buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC); |
334 | if (pc == NULL || rq == NULL || buf == NULL) { | 334 | if (!pc || !rq || !buf) { |
335 | kfree(buf); | 335 | kfree(buf); |
336 | kfree(rq); | 336 | kfree(rq); |
337 | kfree(pc); | 337 | kfree(pc); |
338 | return -ENOMEM; | 338 | return -ENOMEM; |
339 | } | 339 | } |
340 | memset (pc, 0, sizeof (idescsi_pc_t)); | ||
341 | memset (buf, 0, SCSI_SENSE_BUFFERSIZE); | ||
342 | ide_init_drive_cmd(rq); | 340 | ide_init_drive_cmd(rq); |
343 | rq->special = (char *) pc; | 341 | rq->special = (char *) pc; |
344 | pc->rq = rq; | 342 | pc->rq = rq; |
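The ide-scsi hunk converts two kmalloc()+memset() pairs to kzalloc() and collapses the NULL checks; freeing all three pointers on failure is safe because kfree(NULL) is a no-op, so whichever allocations did succeed are released and the rest are ignored. A userspace analogue — free(NULL) is likewise a no-op in standard C:

    #include <stdlib.h>

    int main(void)
    {
        char *pc  = calloc(1, 64);     /* zeroed, like kzalloc() */
        char *rq  = malloc(128);       /* left uninitialised, like kmalloc() */
        char *buf = calloc(1, 96);

        if (!pc || !rq || !buf) {
            /* free() on a NULL pointer does nothing, so no per-pointer checks */
            free(buf);
            free(rq);
            free(pc);
            return 1;
        }

        free(buf);
        free(rq);
        free(pc);
        return 0;
    }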
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 2f5a5ac1b271..301313002f6b 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -2514,7 +2514,7 @@ static int __init serial8250_console_setup(struct console *co, char *options) | |||
2514 | return uart_set_options(port, co, baud, parity, bits, flow); | 2514 | return uart_set_options(port, co, baud, parity, bits, flow); |
2515 | } | 2515 | } |
2516 | 2516 | ||
2517 | static int __init serial8250_console_early_setup(void) | 2517 | static int serial8250_console_early_setup(void) |
2518 | { | 2518 | { |
2519 | return serial8250_find_port_for_earlycon(); | 2519 | return serial8250_find_port_for_earlycon(); |
2520 | } | 2520 | } |
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c index 150cad5c2eba..4d4c9f01be8d 100644 --- a/drivers/serial/8250_early.c +++ b/drivers/serial/8250_early.c | |||
@@ -227,7 +227,7 @@ int __init setup_early_serial8250_console(char *cmdline) | |||
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | int __init serial8250_find_port_for_earlycon(void) | 230 | int serial8250_find_port_for_earlycon(void) |
231 | { | 231 | { |
232 | struct early_serial8250_device *device = &early_device; | 232 | struct early_serial8250_device *device = &early_device; |
233 | struct uart_port *port = &device->port; | 233 | struct uart_port *port = &device->port; |
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c index 87c747123538..ee9046db9c7d 100644 --- a/drivers/video/cg6.c +++ b/drivers/video/cg6.c | |||
@@ -677,6 +677,7 @@ static int __devinit cg6_probe(struct of_device *op, const struct of_device_id * | |||
677 | struct fb_info *info; | 677 | struct fb_info *info; |
678 | struct cg6_par *par; | 678 | struct cg6_par *par; |
679 | int linebytes, err; | 679 | int linebytes, err; |
680 | int dblbuf; | ||
680 | 681 | ||
681 | info = framebuffer_alloc(sizeof(struct cg6_par), &op->dev); | 682 | info = framebuffer_alloc(sizeof(struct cg6_par), &op->dev); |
682 | 683 | ||
@@ -698,7 +699,9 @@ static int __devinit cg6_probe(struct of_device *op, const struct of_device_id * | |||
698 | linebytes = of_getintprop_default(dp, "linebytes", | 699 | linebytes = of_getintprop_default(dp, "linebytes", |
699 | info->var.xres); | 700 | info->var.xres); |
700 | par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); | 701 | par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); |
701 | if (of_find_property(dp, "dblbuf", NULL)) | 702 | |
703 | dblbuf = of_getintprop_default(dp, "dblbuf", 0); | ||
704 | if (dblbuf) | ||
702 | par->fbsize *= 4; | 705 | par->fbsize *= 4; |
703 | 706 | ||
704 | par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET, | 707 | par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET, |
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index 143c5530caf3..504643f2e98b 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c | |||
@@ -84,7 +84,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
84 | set_freezable(); | 84 | set_freezable(); |
85 | for (;;) { | 85 | for (;;) { |
86 | allow_signal(SIGHUP); | 86 | allow_signal(SIGHUP); |
87 | 87 | again: | |
88 | if (!jffs2_thread_should_wake(c)) { | 88 | if (!jffs2_thread_should_wake(c)) { |
89 | set_current_state (TASK_INTERRUPTIBLE); | 89 | set_current_state (TASK_INTERRUPTIBLE); |
90 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); | 90 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); |
@@ -95,9 +95,6 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
95 | schedule(); | 95 | schedule(); |
96 | } | 96 | } |
97 | 97 | ||
98 | if (try_to_freeze()) | ||
99 | continue; | ||
100 | |||
101 | /* This thread is purely an optimisation. But if it runs when | 98 | /* This thread is purely an optimisation. But if it runs when |
102 | other things could be running, it actually makes things a | 99 | other things could be running, it actually makes things a |
103 | lot worse. Use yield() and put it at the back of the runqueue | 100 | lot worse. Use yield() and put it at the back of the runqueue |
@@ -112,6 +109,9 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
112 | siginfo_t info; | 109 | siginfo_t info; |
113 | unsigned long signr; | 110 | unsigned long signr; |
114 | 111 | ||
112 | if (try_to_freeze()) | ||
113 | goto again; | ||
114 | |||
115 | signr = dequeue_signal_lock(current, ¤t->blocked, &info); | 115 | signr = dequeue_signal_lock(current, ¤t->blocked, &info); |
116 | 116 | ||
117 | switch(signr) { | 117 | switch(signr) { |
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index 25126a062cae..bc5509fe577b 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h | |||
@@ -139,6 +139,11 @@ static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_nod | |||
139 | #define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE) | 139 | #define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE) |
140 | #define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0) | 140 | #define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0) |
141 | 141 | ||
142 | /* Dirent nodes should be REF_PRISTINE only if they are not a deletion | ||
143 | dirent. Deletion dirents should be REF_NORMAL so that GC gets to | ||
144 | throw them away when appropriate */ | ||
145 | #define dirent_node_state(rd) ( (je32_to_cpu((rd)->ino)?REF_PRISTINE:REF_NORMAL) ) | ||
146 | |||
142 | /* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates | 147 | /* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates |
143 | it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get | 148 | it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get |
144 | copied. If you need to do anything different to GC inode-less nodes, then | 149 | copied. If you need to do anything different to GC inode-less nodes, then |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 7b363786c2d2..b5baa356fed2 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -104,7 +104,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
104 | 104 | ||
105 | if (crc != tn->data_crc) { | 105 | if (crc != tn->data_crc) { |
106 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", | 106 | JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", |
107 | ofs, tn->data_crc, crc); | 107 | ref_offset(ref), tn->data_crc, crc); |
108 | return 1; | 108 | return 1; |
109 | } | 109 | } |
110 | 110 | ||
@@ -613,7 +613,7 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r | |||
613 | jeb->unchecked_size -= len; | 613 | jeb->unchecked_size -= len; |
614 | c->used_size += len; | 614 | c->used_size += len; |
615 | c->unchecked_size -= len; | 615 | c->unchecked_size -= len; |
616 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | 616 | ref->flash_offset = ref_offset(ref) | dirent_node_state(rd); |
617 | spin_unlock(&c->erase_completion_lock); | 617 | spin_unlock(&c->erase_completion_lock); |
618 | } | 618 | } |
619 | 619 | ||
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 2a1c976c7924..6c75cd433342 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -1049,7 +1049,8 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
1049 | return -ENOMEM; | 1049 | return -ENOMEM; |
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic); | 1052 | fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd), |
1053 | PAD(je32_to_cpu(rd->totlen)), ic); | ||
1053 | 1054 | ||
1054 | fd->next = NULL; | 1055 | fd->next = NULL; |
1055 | fd->version = je32_to_cpu(rd->version); | 1056 | fd->version = je32_to_cpu(rd->version); |
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index c9fe0ab3a329..bc6185933664 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
@@ -173,6 +173,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
173 | flash_ofs |= REF_NORMAL; | 173 | flash_ofs |= REF_NORMAL; |
174 | } | 174 | } |
175 | fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); | 175 | fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); |
176 | if (IS_ERR(fn->raw)) { | ||
177 | void *hold_err = fn->raw; | ||
178 | /* Release the full_dnode which is now useless, and return */ | ||
179 | jffs2_free_full_dnode(fn); | ||
180 | return ERR_PTR(PTR_ERR(hold_err)); | ||
181 | } | ||
176 | fn->ofs = je32_to_cpu(ri->offset); | 182 | fn->ofs = je32_to_cpu(ri->offset); |
177 | fn->size = je32_to_cpu(ri->dsize); | 183 | fn->size = je32_to_cpu(ri->dsize); |
178 | fn->frags = 0; | 184 | fn->frags = 0; |
@@ -290,7 +296,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
290 | return ERR_PTR(ret?ret:-EIO); | 296 | return ERR_PTR(ret?ret:-EIO); |
291 | } | 297 | } |
292 | /* Mark the space used */ | 298 | /* Mark the space used */ |
293 | fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | REF_PRISTINE, PAD(sizeof(*rd)+namelen), f->inocache); | 299 | fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd), |
300 | PAD(sizeof(*rd)+namelen), f->inocache); | ||
301 | if (IS_ERR(fd->raw)) { | ||
302 | void *hold_err = fd->raw; | ||
303 | /* Release the full_dirent which is now useless, and return */ | ||
304 | jffs2_free_full_dirent(fd); | ||
305 | return ERR_PTR(PTR_ERR(hold_err)); | ||
306 | } | ||
294 | 307 | ||
295 | if (retried) { | 308 | if (retried) { |
296 | jffs2_dbg_acct_sanity_check(c,NULL); | 309 | jffs2_dbg_acct_sanity_check(c,NULL); |
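The two write.c hunks add error handling for jffs2_add_physical_node_ref() using the kernel's error-pointer convention: an errno is encoded into the pointer with ERR_PTR(), detected with IS_ERR(), and extracted with PTR_ERR(), so the freshly allocated node can be freed before the error is propagated. (The second hunk also switches the new dirent to dirent_node_state(rd), matching the readinode.c and scan.c changes above, so deletion dirents stay REF_NORMAL and remain visible to GC.) A self-contained sketch of the error-pointer convention — the helpers below are toy reimplementations for illustration, not the kernel headers:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* toy versions of the kernel's error-pointer helpers */
    static void *ERR_PTR(intptr_t err)     { return (void *)err; }
    static intptr_t PTR_ERR(const void *p) { return (intptr_t)p; }
    static int IS_ERR(const void *p)       { return (uintptr_t)p >= (uintptr_t)-4095; }

    /* pretend space reservation that can fail; a real pointer means success */
    static int slot;
    static void *reserve_space(int fail)
    {
        return fail ? ERR_PTR(-ENOSPC) : (void *)&slot;
    }

    int main(void)
    {
        void *raw = reserve_space(1);

        if (IS_ERR(raw)) {
            /* as in the hunks: release the half-built object, then hand the
             * encoded error back to the caller */
            printf("reservation failed: %ld\n", (long)PTR_ERR(raw));
            return 1;
        }
        return 0;
    }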
diff --git a/include/asm-avr32/bug.h b/include/asm-avr32/bug.h index afdcd79a2966..331d45bab18f 100644 --- a/include/asm-avr32/bug.h +++ b/include/asm-avr32/bug.h | |||
@@ -57,7 +57,7 @@ | |||
57 | 57 | ||
58 | #define WARN_ON(condition) \ | 58 | #define WARN_ON(condition) \ |
59 | ({ \ | 59 | ({ \ |
60 | typeof(condition) __ret_warn_on = (condition); \ | 60 | int __ret_warn_on = !!(condition); \ |
61 | if (unlikely(__ret_warn_on)) \ | 61 | if (unlikely(__ret_warn_on)) \ |
62 | _BUG_OR_WARN(BUGFLAG_WARNING); \ | 62 | _BUG_OR_WARN(BUGFLAG_WARNING); \ |
63 | unlikely(__ret_warn_on); \ | 63 | unlikely(__ret_warn_on); \ |
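The avr32 hunk above — and the matching parisc, s390, and sh hunks that follow — stop declaring the temporary with typeof(condition) and instead store int __ret_warn_on = !!(condition). The double negation folds any scalar expression down to 0 or 1 in a plain int, which also works when the condition is a bitfield (a local cannot be declared with typeof() of a bitfield), while WARN_ON() keeps returning the truth value. A small standalone illustration of the idiom; the macro below is a simplified stand-in, not the kernel's, and uses the same GNU statement-expression extension as the originals:

    #include <stdio.h>

    struct flags_demo {
        unsigned int busy : 1;   /* a bitfield operand, the awkward case for typeof */
    };

    /* simplified WARN_ON(): evaluate once, normalise with !!, return the result */
    #define WARN_ON_DEMO(condition) ({                                      \
        int __ret_warn_on = !!(condition);                                  \
        if (__ret_warn_on)                                                  \
            fprintf(stderr, "warning at %s:%d\n", __FILE__, __LINE__);      \
        __ret_warn_on;                                                      \
    })

    int main(void)
    {
        struct flags_demo f = { .busy = 1 };

        if (WARN_ON_DEMO(f.busy))
            puts("condition was true");
        return 0;
    }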
diff --git a/include/asm-frv/mb86943a.h b/include/asm-frv/mb86943a.h index b89fd0b56bb3..e87ef924bfb4 100644 --- a/include/asm-frv/mb86943a.h +++ b/include/asm-frv/mb86943a.h | |||
@@ -36,4 +36,7 @@ | |||
36 | #define __reg_MB86943_pci_sl_io_base *(volatile uint32_t *) (__region_CS1 + 0x70) | 36 | #define __reg_MB86943_pci_sl_io_base *(volatile uint32_t *) (__region_CS1 + 0x70) |
37 | #define __reg_MB86943_pci_sl_mem_base *(volatile uint32_t *) (__region_CS1 + 0x78) | 37 | #define __reg_MB86943_pci_sl_mem_base *(volatile uint32_t *) (__region_CS1 + 0x78) |
38 | 38 | ||
39 | #define __reg_MB86943_pci_arbiter *(volatile uint32_t *) (__region_CS2 + 0x01300014) | ||
40 | #define MB86943_PCIARB_EN 0x00000001 | ||
41 | |||
39 | #endif /* _ASM_MB86943A_H */ | 42 | #endif /* _ASM_MB86943A_H */ |
diff --git a/include/asm-parisc/bug.h b/include/asm-parisc/bug.h index 83ba510ed5d8..8cfc553fc837 100644 --- a/include/asm-parisc/bug.h +++ b/include/asm-parisc/bug.h | |||
@@ -74,7 +74,7 @@ | |||
74 | 74 | ||
75 | 75 | ||
76 | #define WARN_ON(x) ({ \ | 76 | #define WARN_ON(x) ({ \ |
77 | typeof(x) __ret_warn_on = (x); \ | 77 | int __ret_warn_on = !!(x); \ |
78 | if (__builtin_constant_p(__ret_warn_on)) { \ | 78 | if (__builtin_constant_p(__ret_warn_on)) { \ |
79 | if (__ret_warn_on) \ | 79 | if (__ret_warn_on) \ |
80 | __WARN(); \ | 80 | __WARN(); \ |
diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h index 838684dc6d35..384e3621e341 100644 --- a/include/asm-s390/bug.h +++ b/include/asm-s390/bug.h | |||
@@ -50,7 +50,7 @@ | |||
50 | #define BUG() __EMIT_BUG(0) | 50 | #define BUG() __EMIT_BUG(0) |
51 | 51 | ||
52 | #define WARN_ON(x) ({ \ | 52 | #define WARN_ON(x) ({ \ |
53 | typeof(x) __ret_warn_on = (x); \ | 53 | int __ret_warn_on = !!(x); \ |
54 | if (__builtin_constant_p(__ret_warn_on)) { \ | 54 | if (__builtin_constant_p(__ret_warn_on)) { \ |
55 | if (__ret_warn_on) \ | 55 | if (__ret_warn_on) \ |
56 | __EMIT_BUG(BUGFLAG_WARNING); \ | 56 | __EMIT_BUG(BUGFLAG_WARNING); \ |
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h index 46f925c815ac..a78d482e8b2f 100644 --- a/include/asm-sh/bug.h +++ b/include/asm-sh/bug.h | |||
@@ -61,7 +61,7 @@ do { \ | |||
61 | } while (0) | 61 | } while (0) |
62 | 62 | ||
63 | #define WARN_ON(x) ({ \ | 63 | #define WARN_ON(x) ({ \ |
64 | typeof(x) __ret_warn_on = (x); \ | 64 | int __ret_warn_on = !!(x); \ |
65 | if (__builtin_constant_p(__ret_warn_on)) { \ | 65 | if (__builtin_constant_p(__ret_warn_on)) { \ |
66 | if (__ret_warn_on) \ | 66 | if (__ret_warn_on) \ |
67 | __WARN(); \ | 67 | __WARN(); \ |
diff --git a/include/asm-sparc/fcntl.h b/include/asm-sparc/fcntl.h index 5db60b5ae7b0..7bbdfc77accd 100644 --- a/include/asm-sparc/fcntl.h +++ b/include/asm-sparc/fcntl.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define O_LARGEFILE 0x40000 | 16 | #define O_LARGEFILE 0x40000 |
17 | #define O_DIRECT 0x100000 /* direct disk access hint */ | 17 | #define O_DIRECT 0x100000 /* direct disk access hint */ |
18 | #define O_NOATIME 0x200000 | 18 | #define O_NOATIME 0x200000 |
19 | #define O_CLOEXEC 0x400000 | ||
19 | 20 | ||
20 | #define F_GETOWN 5 /* for sockets. */ | 21 | #define F_GETOWN 5 /* for sockets. */ |
21 | #define F_SETOWN 6 /* for sockets. */ | 22 | #define F_SETOWN 6 /* for sockets. */ |
@@ -31,6 +32,5 @@ | |||
31 | #define __ARCH_FLOCK_PAD short __unused; | 32 | #define __ARCH_FLOCK_PAD short __unused; |
32 | #define __ARCH_FLOCK64_PAD short __unused; | 33 | #define __ARCH_FLOCK64_PAD short __unused; |
33 | 34 | ||
34 | #include <asm-generic/fcntl.h> | ||
35 | 35 | ||
36 | #endif | 36 | #endif |
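For reference, the new flag is meant to be passed straight to open(); a minimal userspace sketch (hypothetical file name):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* O_CLOEXEC marks the descriptor close-on-exec atomically at open
	 * time, avoiding the racy open() + fcntl(FD_CLOEXEC) sequence. */
	int fd = open("/etc/hostname", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;
	close(fd);
	return 0;
}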
diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h index 28ce2b9c3da8..acd06d8ff70a 100644 --- a/include/asm-sparc/floppy.h +++ b/include/asm-sparc/floppy.h | |||
@@ -48,7 +48,7 @@ struct sun_flpy_controller { | |||
48 | 48 | ||
49 | /* You'll only ever find one controller on a SparcStation anyways. */ | 49 | /* You'll only ever find one controller on a SparcStation anyways. */ |
50 | static struct sun_flpy_controller *sun_fdc = NULL; | 50 | static struct sun_flpy_controller *sun_fdc = NULL; |
51 | volatile unsigned char *fdc_status; | 51 | extern volatile unsigned char *fdc_status; |
52 | 52 | ||
53 | struct sun_floppy_ops { | 53 | struct sun_floppy_ops { |
54 | unsigned char (*fd_inb)(int port); | 54 | unsigned char (*fd_inb)(int port); |
@@ -225,13 +225,13 @@ static void sun_82077_fd_outb(unsigned char value, int port) | |||
225 | * underruns. If non-zero, doing_pdma encodes the direction of | 225 | * underruns. If non-zero, doing_pdma encodes the direction of |
226 | * the transfer for debugging. 1=read 2=write | 226 | * the transfer for debugging. 1=read 2=write |
227 | */ | 227 | */ |
228 | char *pdma_vaddr; | 228 | extern char *pdma_vaddr; |
229 | unsigned long pdma_size; | 229 | extern unsigned long pdma_size; |
230 | volatile int doing_pdma = 0; | 230 | extern volatile int doing_pdma; |
231 | 231 | ||
232 | /* This is software state */ | 232 | /* This is software state */ |
233 | char *pdma_base = NULL; | 233 | extern char *pdma_base; |
234 | unsigned long pdma_areasize; | 234 | extern unsigned long pdma_areasize; |
235 | 235 | ||
236 | /* Common routines to all controller types on the Sparc. */ | 236 | /* Common routines to all controller types on the Sparc. */ |
237 | static __inline__ void virtual_dma_init(void) | 237 | static __inline__ void virtual_dma_init(void) |
@@ -281,7 +281,8 @@ static __inline__ void sun_fd_enable_dma(void) | |||
281 | } | 281 | } |
282 | 282 | ||
283 | /* Our low-level entry point in arch/sparc/kernel/entry.S */ | 283 | /* Our low-level entry point in arch/sparc/kernel/entry.S */ |
284 | irqreturn_t floppy_hardint(int irq, void *unused); | 284 | extern int sparc_floppy_request_irq(int irq, unsigned long flags, |
285 | irqreturn_t (*irq_handler)(int irq, void *)); | ||
285 | 286 | ||
286 | static int sun_fd_request_irq(void) | 287 | static int sun_fd_request_irq(void) |
287 | { | 288 | { |
@@ -290,8 +291,9 @@ static int sun_fd_request_irq(void) | |||
290 | 291 | ||
291 | if(!once) { | 292 | if(!once) { |
292 | once = 1; | 293 | once = 1; |
293 | error = request_fast_irq(FLOPPY_IRQ, floppy_hardint, | 294 | error = sparc_floppy_request_irq(FLOPPY_IRQ, |
294 | IRQF_DISABLED, "floppy"); | 295 | IRQF_DISABLED, |
296 | floppy_interrupt); | ||
295 | return ((error == 0) ? 0 : -1); | 297 | return ((error == 0) ? 0 : -1); |
296 | } else return 0; | 298 | } else return 0; |
297 | } | 299 | } |
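The pattern behind the floppy.h change: a header included by several translation units should only declare globals, with exactly one .c file providing the definition; otherwise every includer emits its own copy and the link fails with multiple-definition errors. A minimal sketch of the split (hypothetical names):

/* widget.h -- declarations only; safe to include from any .c file */
extern volatile int widget_busy;
extern char *widget_buffer;

/* widget.c -- the single definition the linker resolves against */
volatile int widget_busy;
char *widget_buffer;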
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h index 61fb99643afd..fe205cc444b8 100644 --- a/include/asm-sparc/irq.h +++ b/include/asm-sparc/irq.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: irq.h,v 1.32 2000/08/26 02:42:28 anton Exp $ | 1 | /* irq.h: IRQ registers on the Sparc. |
2 | * irq.h: IRQ registers on the Sparc. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #ifndef _SPARC_IRQ_H | 6 | #ifndef _SPARC_IRQ_H |
@@ -13,6 +12,4 @@ | |||
13 | 12 | ||
14 | #define irq_canonicalize(irq) (irq) | 13 | #define irq_canonicalize(irq) (irq) |
15 | 14 | ||
16 | extern int request_fast_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, __const__ char *devname); | ||
17 | |||
18 | #endif | 15 | #endif |
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h index a72a5f271f31..1fc655452b81 100644 --- a/include/asm-sparc64/dma-mapping.h +++ b/include/asm-sparc64/dma-mapping.h | |||
@@ -108,6 +108,25 @@ static inline void dma_sync_single_for_device(struct device *dev, | |||
108 | dma_ops->sync_single_for_device(dev, dma_handle, size, direction); | 108 | dma_ops->sync_single_for_device(dev, dma_handle, size, direction); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
112 | dma_addr_t dma_handle, | ||
113 | unsigned long offset, | ||
114 | size_t size, | ||
115 | enum dma_data_direction direction) | ||
116 | { | ||
117 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); | ||
118 | } | ||
119 | |||
120 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
121 | dma_addr_t dma_handle, | ||
122 | unsigned long offset, | ||
123 | size_t size, | ||
124 | enum dma_data_direction direction) | ||
125 | { | ||
126 | dma_sync_single_for_device(dev, dma_handle+offset, size, direction); | ||
127 | } | ||
128 | |||
129 | |||
111 | static inline void dma_sync_sg_for_cpu(struct device *dev, | 130 | static inline void dma_sync_sg_for_cpu(struct device *dev, |
112 | struct scatterlist *sg, int nelems, | 131 | struct scatterlist *sg, int nelems, |
113 | enum dma_data_direction direction) | 132 | enum dma_data_direction direction) |
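A hedged sketch of how a driver might use the new range helpers, syncing only the window of a streaming mapping it is about to read (hypothetical helper, sizes and offsets invented):

#include <linux/dma-mapping.h>

/* Assumes cpu_buf was mapped earlier with dma_map_single(dev, cpu_buf,
 * 4096, DMA_FROM_DEVICE) and the device has just written 256 bytes at
 * offset 1024; only that window needs to be made visible to the CPU. */
static void consume_rx_window(struct device *dev, dma_addr_t dma_handle,
			      void *cpu_buf)
{
	dma_sync_single_range_for_cpu(dev, dma_handle, 1024, 256,
				      DMA_FROM_DEVICE);
	/* ... CPU may now safely read cpu_buf + 1024 ... */
	dma_sync_single_range_for_device(dev, dma_handle, 1024, 256,
					 DMA_FROM_DEVICE);
}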
diff --git a/include/asm-sparc64/fcntl.h b/include/asm-sparc64/fcntl.h index b2aecf0054bd..111f6b3b8925 100644 --- a/include/asm-sparc64/fcntl.h +++ b/include/asm-sparc64/fcntl.h | |||
@@ -16,7 +16,7 @@ | |||
16 | #define O_LARGEFILE 0x40000 | 16 | #define O_LARGEFILE 0x40000 |
17 | #define O_DIRECT 0x100000 /* direct disk access hint */ | 17 | #define O_DIRECT 0x100000 /* direct disk access hint */ |
18 | #define O_NOATIME 0x200000 | 18 | #define O_NOATIME 0x200000 |
19 | 19 | #define O_CLOEXEC 0x400000 | |
20 | 20 | ||
21 | #define F_GETOWN 5 /* for sockets. */ | 21 | #define F_GETOWN 5 /* for sockets. */ |
22 | #define F_SETOWN 6 /* for sockets. */ | 22 | #define F_SETOWN 6 /* for sockets. */ |
diff --git a/include/linux/pci.h b/include/linux/pci.h index d8f8a3a96644..e7d8d4e19a53 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -534,7 +534,7 @@ static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val | |||
534 | 534 | ||
535 | int __must_check pci_enable_device(struct pci_dev *dev); | 535 | int __must_check pci_enable_device(struct pci_dev *dev); |
536 | int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask); | 536 | int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask); |
537 | int __must_check __pci_reenable_device(struct pci_dev *); | 537 | int __must_check pci_reenable_device(struct pci_dev *); |
538 | int __must_check pcim_enable_device(struct pci_dev *pdev); | 538 | int __must_check pcim_enable_device(struct pci_dev *pdev); |
539 | void pcim_pin_device(struct pci_dev *pdev); | 539 | void pcim_pin_device(struct pci_dev *pdev); |
540 | 540 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 2e490271acf6..17249fae5014 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -734,7 +734,6 @@ struct sched_domain { | |||
734 | unsigned long max_interval; /* Maximum balance interval ms */ | 734 | unsigned long max_interval; /* Maximum balance interval ms */ |
735 | unsigned int busy_factor; /* less balancing by factor if busy */ | 735 | unsigned int busy_factor; /* less balancing by factor if busy */ |
736 | unsigned int imbalance_pct; /* No balance until over watermark */ | 736 | unsigned int imbalance_pct; /* No balance until over watermark */ |
737 | unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ | ||
738 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ | 737 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ |
739 | unsigned int busy_idx; | 738 | unsigned int busy_idx; |
740 | unsigned int idle_idx; | 739 | unsigned int idle_idx; |
@@ -875,7 +874,7 @@ struct sched_class { | |||
875 | 874 | ||
876 | void (*set_curr_task) (struct rq *rq); | 875 | void (*set_curr_task) (struct rq *rq); |
877 | void (*task_tick) (struct rq *rq, struct task_struct *p); | 876 | void (*task_tick) (struct rq *rq, struct task_struct *p); |
878 | void (*task_new) (struct rq *rq, struct task_struct *p); | 877 | void (*task_new) (struct rq *rq, struct task_struct *p, u64 now); |
879 | }; | 878 | }; |
880 | 879 | ||
881 | struct load_weight { | 880 | struct load_weight { |
@@ -905,23 +904,28 @@ struct sched_entity { | |||
905 | struct rb_node run_node; | 904 | struct rb_node run_node; |
906 | unsigned int on_rq; | 905 | unsigned int on_rq; |
907 | 906 | ||
907 | u64 exec_start; | ||
908 | u64 sum_exec_runtime; | ||
908 | u64 wait_start_fair; | 909 | u64 wait_start_fair; |
910 | u64 sleep_start_fair; | ||
911 | |||
912 | #ifdef CONFIG_SCHEDSTATS | ||
909 | u64 wait_start; | 913 | u64 wait_start; |
910 | u64 exec_start; | 914 | u64 wait_max; |
915 | s64 sum_wait_runtime; | ||
916 | |||
911 | u64 sleep_start; | 917 | u64 sleep_start; |
912 | u64 sleep_start_fair; | ||
913 | u64 block_start; | ||
914 | u64 sleep_max; | 918 | u64 sleep_max; |
919 | s64 sum_sleep_runtime; | ||
920 | |||
921 | u64 block_start; | ||
915 | u64 block_max; | 922 | u64 block_max; |
916 | u64 exec_max; | 923 | u64 exec_max; |
917 | u64 wait_max; | ||
918 | u64 last_ran; | ||
919 | 924 | ||
920 | u64 sum_exec_runtime; | ||
921 | s64 sum_wait_runtime; | ||
922 | s64 sum_sleep_runtime; | ||
923 | unsigned long wait_runtime_overruns; | 925 | unsigned long wait_runtime_overruns; |
924 | unsigned long wait_runtime_underruns; | 926 | unsigned long wait_runtime_underruns; |
927 | #endif | ||
928 | |||
925 | #ifdef CONFIG_FAIR_GROUP_SCHED | 929 | #ifdef CONFIG_FAIR_GROUP_SCHED |
926 | struct sched_entity *parent; | 930 | struct sched_entity *parent; |
927 | /* rq on which this entity is (to be) queued: */ | 931 | /* rq on which this entity is (to be) queued: */ |
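With the statistics-only fields now compiled out when CONFIG_SCHEDSTATS is unset, any code reading them needs the same guard. A minimal sketch of the accessor pattern (hypothetical helper, not part of the patch):

static inline u64 se_wait_max(const struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	return se->wait_max;	/* field exists only with schedstats */
#else
	return 0;		/* keeps callers unconditional */
#endif
}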
diff --git a/include/linux/topology.h b/include/linux/topology.h index d0890a7e5bab..525d437b1253 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -185,7 +185,6 @@ | |||
185 | .max_interval = 64*num_online_cpus(), \ | 185 | .max_interval = 64*num_online_cpus(), \ |
186 | .busy_factor = 128, \ | 186 | .busy_factor = 128, \ |
187 | .imbalance_pct = 133, \ | 187 | .imbalance_pct = 133, \ |
188 | .cache_hot_time = (10*1000000), \ | ||
189 | .cache_nice_tries = 1, \ | 188 | .cache_nice_tries = 1, \ |
190 | .busy_idx = 3, \ | 189 | .busy_idx = 3, \ |
191 | .idle_idx = 3, \ | 190 | .idle_idx = 3, \ |
diff --git a/include/net/netlabel.h b/include/net/netlabel.h index ffbc7f28335a..2e5b2f6f9fa0 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h | |||
@@ -132,6 +132,8 @@ struct netlbl_lsm_secattr_catmap { | |||
132 | #define NETLBL_SECATTR_CACHE 0x00000002 | 132 | #define NETLBL_SECATTR_CACHE 0x00000002 |
133 | #define NETLBL_SECATTR_MLS_LVL 0x00000004 | 133 | #define NETLBL_SECATTR_MLS_LVL 0x00000004 |
134 | #define NETLBL_SECATTR_MLS_CAT 0x00000008 | 134 | #define NETLBL_SECATTR_MLS_CAT 0x00000008 |
135 | #define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \ | ||
136 | NETLBL_SECATTR_MLS_CAT) | ||
135 | struct netlbl_lsm_secattr { | 137 | struct netlbl_lsm_secattr { |
136 | u32 flags; | 138 | u32 flags; |
137 | 139 | ||
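The new mask groups the MLS attribute bits that make a security attribute worth caching; a caller would test it roughly like this (hedged sketch, hypothetical function):

/* Hypothetical check: only attributes carrying MLS level/category data
 * are candidates for an LSM label cache entry. */
static int secattr_is_cacheable(const struct netlbl_lsm_secattr *secattr)
{
	return (secattr->flags & NETLBL_SECATTR_CACHEABLE) != 0;
}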
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 16baef4dab7e..d529045c1679 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -190,6 +190,16 @@ void sctp_assocs_proc_exit(void); | |||
190 | 190 | ||
191 | 191 | ||
192 | /* | 192 | /* |
193 | * Module global variables | ||
194 | */ | ||
195 | |||
196 | /* | ||
197 | * sctp/protocol.c | ||
198 | */ | ||
199 | extern struct kmem_cache *sctp_chunk_cachep __read_mostly; | ||
200 | extern struct kmem_cache *sctp_bucket_cachep __read_mostly; | ||
201 | |||
202 | /* | ||
193 | * Section: Macros, externs, and inlines | 203 | * Section: Macros, externs, and inlines |
194 | */ | 204 | */ |
195 | 205 | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h index c209361ab74a..185c7ecce4cc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -281,7 +281,7 @@ extern int tcp_v4_remember_stamp(struct sock *sk); | |||
281 | 281 | ||
282 | extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); | 282 | extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); |
283 | 283 | ||
284 | extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, | 284 | extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, |
285 | struct msghdr *msg, size_t size); | 285 | struct msghdr *msg, size_t size); |
286 | extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); | 286 | extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); |
287 | 287 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 464c2b172f07..9578c1ae19ca 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -813,7 +813,7 @@ static void exit_notify(struct task_struct *tsk) | |||
813 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); | 813 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); |
814 | } | 814 | } |
815 | 815 | ||
816 | /* Let father know we died | 816 | /* Let father know we died |
817 | * | 817 | * |
818 | * Thread signals are configurable, but you aren't going to use | 818 | * Thread signals are configurable, but you aren't going to use |
819 | * that to send signals to arbitrary processes. | 819 | * that to send signals to arbitrary processes. |
@@ -826,9 +826,7 @@ static void exit_notify(struct task_struct *tsk) | |||
826 | * If our self_exec id doesn't match our parent_exec_id then | 826 | * If our self_exec id doesn't match our parent_exec_id then |
827 | * we have changed execution domain as these two values started | 827 | * we have changed execution domain as these two values started |
828 | * the same after a fork. | 828 | * the same after a fork. |
829 | * | ||
830 | */ | 829 | */ |
831 | |||
832 | if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 && | 830 | if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 && |
833 | ( tsk->parent_exec_id != t->self_exec_id || | 831 | ( tsk->parent_exec_id != t->self_exec_id || |
834 | tsk->self_exec_id != tsk->parent_exec_id) | 832 | tsk->self_exec_id != tsk->parent_exec_id) |
@@ -848,9 +846,7 @@ static void exit_notify(struct task_struct *tsk) | |||
848 | } | 846 | } |
849 | 847 | ||
850 | state = EXIT_ZOMBIE; | 848 | state = EXIT_ZOMBIE; |
851 | if (tsk->exit_signal == -1 && | 849 | if (tsk->exit_signal == -1 && likely(!tsk->ptrace)) |
852 | (likely(tsk->ptrace == 0) || | ||
853 | unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT))) | ||
854 | state = EXIT_DEAD; | 850 | state = EXIT_DEAD; |
855 | tsk->exit_state = state; | 851 | tsk->exit_state = state; |
856 | 852 | ||
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 5bfeaed7e487..c38272746887 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -62,6 +62,15 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
62 | */ | 62 | */ |
63 | desc->chip->enable(irq); | 63 | desc->chip->enable(irq); |
64 | 64 | ||
65 | /* | ||
66 | * Temporary hack to figure out more about the problem, which | ||
67 | * is causing the ancient network cards to die. | ||
68 | */ | ||
69 | if (desc->handle_irq != handle_edge_irq) { | ||
70 | WARN_ON_ONCE(1); | ||
71 | return; | ||
72 | } | ||
73 | |||
65 | if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { | 74 | if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { |
66 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; | 75 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; |
67 | 76 | ||
diff --git a/kernel/printk.c b/kernel/printk.c index 051d27e36a6c..bd2cd062878d 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -732,7 +732,7 @@ int __init add_preferred_console(char *name, int idx, char *options) | |||
732 | return 0; | 732 | return 0; |
733 | } | 733 | } |
734 | 734 | ||
735 | int __init update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) | 735 | int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options) |
736 | { | 736 | { |
737 | struct console_cmdline *c; | 737 | struct console_cmdline *c; |
738 | int i; | 738 | int i; |
diff --git a/kernel/sched.c b/kernel/sched.c index 238a76957e86..72bb9483d949 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -637,7 +637,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor) | |||
637 | 637 | ||
638 | #define WMULT_SHIFT 32 | 638 | #define WMULT_SHIFT 32 |
639 | 639 | ||
640 | static inline unsigned long | 640 | static unsigned long |
641 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | 641 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, |
642 | struct load_weight *lw) | 642 | struct load_weight *lw) |
643 | { | 643 | { |
@@ -657,7 +657,7 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
657 | tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT; | 657 | tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT; |
658 | } | 658 | } |
659 | 659 | ||
660 | return (unsigned long)min(tmp, (u64)sysctl_sched_runtime_limit); | 660 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); |
661 | } | 661 | } |
662 | 662 | ||
663 | static inline unsigned long | 663 | static inline unsigned long |
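The new clamp bounds the 64-bit intermediate to LONG_MAX before it is narrowed back to the unsigned long return type, presumably so the cast cannot truncate on 32-bit and the value stays positive if a caller later treats it as signed. A hedged standalone illustration of the same double cast:

#include <stdint.h>
#include <limits.h>

static unsigned long clamp_delta(uint64_t tmp)
{
	/* (uint64_t)(unsigned long)LONG_MAX is 2^31-1 on 32-bit and
	 * 2^63-1 on 64-bit, i.e. the largest safely representable value. */
	uint64_t limit = (uint64_t)(unsigned long)LONG_MAX;

	return (unsigned long)(tmp < limit ? tmp : limit);
}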
@@ -678,46 +678,6 @@ static void update_load_sub(struct load_weight *lw, unsigned long dec) | |||
678 | lw->inv_weight = 0; | 678 | lw->inv_weight = 0; |
679 | } | 679 | } |
680 | 680 | ||
681 | static void __update_curr_load(struct rq *rq, struct load_stat *ls) | ||
682 | { | ||
683 | if (rq->curr != rq->idle && ls->load.weight) { | ||
684 | ls->delta_exec += ls->delta_stat; | ||
685 | ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load); | ||
686 | ls->delta_stat = 0; | ||
687 | } | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Update delta_exec, delta_fair fields for rq. | ||
692 | * | ||
693 | * delta_fair clock advances at a rate inversely proportional to | ||
694 | * total load (rq->ls.load.weight) on the runqueue, while | ||
695 | * delta_exec advances at the same rate as wall-clock (provided | ||
696 | * cpu is not idle). | ||
697 | * | ||
698 | * delta_exec / delta_fair is a measure of the (smoothened) load on this | ||
699 | * runqueue over any given interval. This (smoothened) load is used | ||
700 | * during load balance. | ||
701 | * | ||
702 | * This function is called /before/ updating rq->ls.load | ||
703 | * and when switching tasks. | ||
704 | */ | ||
705 | static void update_curr_load(struct rq *rq, u64 now) | ||
706 | { | ||
707 | struct load_stat *ls = &rq->ls; | ||
708 | u64 start; | ||
709 | |||
710 | start = ls->load_update_start; | ||
711 | ls->load_update_start = now; | ||
712 | ls->delta_stat += now - start; | ||
713 | /* | ||
714 | * Stagger updates to ls->delta_fair. Very frequent updates | ||
715 | * can be expensive. | ||
716 | */ | ||
717 | if (ls->delta_stat >= sysctl_sched_stat_granularity) | ||
718 | __update_curr_load(rq, ls); | ||
719 | } | ||
720 | |||
721 | /* | 681 | /* |
722 | * To aid in avoiding the subversion of "niceness" due to uneven distribution | 682 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
723 | * of tasks with abnormal "nice" values across CPUs the contribution that | 683 | * of tasks with abnormal "nice" values across CPUs the contribution that |
@@ -727,19 +687,6 @@ static void update_curr_load(struct rq *rq, u64 now) | |||
727 | * slice expiry etc. | 687 | * slice expiry etc. |
728 | */ | 688 | */ |
729 | 689 | ||
730 | /* | ||
731 | * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE | ||
732 | * If static_prio_timeslice() is ever changed to break this assumption then | ||
733 | * this code will need modification | ||
734 | */ | ||
735 | #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE | ||
736 | #define load_weight(lp) \ | ||
737 | (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO) | ||
738 | #define PRIO_TO_LOAD_WEIGHT(prio) \ | ||
739 | load_weight(static_prio_timeslice(prio)) | ||
740 | #define RTPRIO_TO_LOAD_WEIGHT(rp) \ | ||
741 | (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp)) | ||
742 | |||
743 | #define WEIGHT_IDLEPRIO 2 | 690 | #define WEIGHT_IDLEPRIO 2 |
744 | #define WMULT_IDLEPRIO (1 << 31) | 691 | #define WMULT_IDLEPRIO (1 << 31) |
745 | 692 | ||
@@ -781,32 +728,6 @@ static const u32 prio_to_wmult[40] = { | |||
781 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, | 728 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
782 | }; | 729 | }; |
783 | 730 | ||
784 | static inline void | ||
785 | inc_load(struct rq *rq, const struct task_struct *p, u64 now) | ||
786 | { | ||
787 | update_curr_load(rq, now); | ||
788 | update_load_add(&rq->ls.load, p->se.load.weight); | ||
789 | } | ||
790 | |||
791 | static inline void | ||
792 | dec_load(struct rq *rq, const struct task_struct *p, u64 now) | ||
793 | { | ||
794 | update_curr_load(rq, now); | ||
795 | update_load_sub(&rq->ls.load, p->se.load.weight); | ||
796 | } | ||
797 | |||
798 | static inline void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now) | ||
799 | { | ||
800 | rq->nr_running++; | ||
801 | inc_load(rq, p, now); | ||
802 | } | ||
803 | |||
804 | static inline void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now) | ||
805 | { | ||
806 | rq->nr_running--; | ||
807 | dec_load(rq, p, now); | ||
808 | } | ||
809 | |||
810 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); | 731 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); |
811 | 732 | ||
812 | /* | 733 | /* |
@@ -837,6 +758,72 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
837 | 758 | ||
838 | #define sched_class_highest (&rt_sched_class) | 759 | #define sched_class_highest (&rt_sched_class) |
839 | 760 | ||
761 | static void __update_curr_load(struct rq *rq, struct load_stat *ls) | ||
762 | { | ||
763 | if (rq->curr != rq->idle && ls->load.weight) { | ||
764 | ls->delta_exec += ls->delta_stat; | ||
765 | ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load); | ||
766 | ls->delta_stat = 0; | ||
767 | } | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * Update delta_exec, delta_fair fields for rq. | ||
772 | * | ||
773 | * delta_fair clock advances at a rate inversely proportional to | ||
774 | * total load (rq->ls.load.weight) on the runqueue, while | ||
775 | * delta_exec advances at the same rate as wall-clock (provided | ||
776 | * cpu is not idle). | ||
777 | * | ||
778 | * delta_exec / delta_fair is a measure of the (smoothened) load on this | ||
779 | * runqueue over any given interval. This (smoothened) load is used | ||
780 | * during load balance. | ||
781 | * | ||
782 | * This function is called /before/ updating rq->ls.load | ||
783 | * and when switching tasks. | ||
784 | */ | ||
785 | static void update_curr_load(struct rq *rq, u64 now) | ||
786 | { | ||
787 | struct load_stat *ls = &rq->ls; | ||
788 | u64 start; | ||
789 | |||
790 | start = ls->load_update_start; | ||
791 | ls->load_update_start = now; | ||
792 | ls->delta_stat += now - start; | ||
793 | /* | ||
794 | * Stagger updates to ls->delta_fair. Very frequent updates | ||
795 | * can be expensive. | ||
796 | */ | ||
797 | if (ls->delta_stat >= sysctl_sched_stat_granularity) | ||
798 | __update_curr_load(rq, ls); | ||
799 | } | ||
800 | |||
801 | static inline void | ||
802 | inc_load(struct rq *rq, const struct task_struct *p, u64 now) | ||
803 | { | ||
804 | update_curr_load(rq, now); | ||
805 | update_load_add(&rq->ls.load, p->se.load.weight); | ||
806 | } | ||
807 | |||
808 | static inline void | ||
809 | dec_load(struct rq *rq, const struct task_struct *p, u64 now) | ||
810 | { | ||
811 | update_curr_load(rq, now); | ||
812 | update_load_sub(&rq->ls.load, p->se.load.weight); | ||
813 | } | ||
814 | |||
815 | static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now) | ||
816 | { | ||
817 | rq->nr_running++; | ||
818 | inc_load(rq, p, now); | ||
819 | } | ||
820 | |||
821 | static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now) | ||
822 | { | ||
823 | rq->nr_running--; | ||
824 | dec_load(rq, p, now); | ||
825 | } | ||
826 | |||
840 | static void set_load_weight(struct task_struct *p) | 827 | static void set_load_weight(struct task_struct *p) |
841 | { | 828 | { |
842 | task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime; | 829 | task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime; |
@@ -996,18 +983,21 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
996 | u64 clock_offset, fair_clock_offset; | 983 | u64 clock_offset, fair_clock_offset; |
997 | 984 | ||
998 | clock_offset = old_rq->clock - new_rq->clock; | 985 | clock_offset = old_rq->clock - new_rq->clock; |
999 | fair_clock_offset = old_rq->cfs.fair_clock - | 986 | fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock; |
1000 | new_rq->cfs.fair_clock; | 987 | |
1001 | if (p->se.wait_start) | ||
1002 | p->se.wait_start -= clock_offset; | ||
1003 | if (p->se.wait_start_fair) | 988 | if (p->se.wait_start_fair) |
1004 | p->se.wait_start_fair -= fair_clock_offset; | 989 | p->se.wait_start_fair -= fair_clock_offset; |
990 | if (p->se.sleep_start_fair) | ||
991 | p->se.sleep_start_fair -= fair_clock_offset; | ||
992 | |||
993 | #ifdef CONFIG_SCHEDSTATS | ||
994 | if (p->se.wait_start) | ||
995 | p->se.wait_start -= clock_offset; | ||
1005 | if (p->se.sleep_start) | 996 | if (p->se.sleep_start) |
1006 | p->se.sleep_start -= clock_offset; | 997 | p->se.sleep_start -= clock_offset; |
1007 | if (p->se.block_start) | 998 | if (p->se.block_start) |
1008 | p->se.block_start -= clock_offset; | 999 | p->se.block_start -= clock_offset; |
1009 | if (p->se.sleep_start_fair) | 1000 | #endif |
1010 | p->se.sleep_start_fair -= fair_clock_offset; | ||
1011 | 1001 | ||
1012 | __set_task_cpu(p, new_cpu); | 1002 | __set_task_cpu(p, new_cpu); |
1013 | } | 1003 | } |
@@ -1568,17 +1558,19 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state) | |||
1568 | static void __sched_fork(struct task_struct *p) | 1558 | static void __sched_fork(struct task_struct *p) |
1569 | { | 1559 | { |
1570 | p->se.wait_start_fair = 0; | 1560 | p->se.wait_start_fair = 0; |
1571 | p->se.wait_start = 0; | ||
1572 | p->se.exec_start = 0; | 1561 | p->se.exec_start = 0; |
1573 | p->se.sum_exec_runtime = 0; | 1562 | p->se.sum_exec_runtime = 0; |
1574 | p->se.delta_exec = 0; | 1563 | p->se.delta_exec = 0; |
1575 | p->se.delta_fair_run = 0; | 1564 | p->se.delta_fair_run = 0; |
1576 | p->se.delta_fair_sleep = 0; | 1565 | p->se.delta_fair_sleep = 0; |
1577 | p->se.wait_runtime = 0; | 1566 | p->se.wait_runtime = 0; |
1567 | p->se.sleep_start_fair = 0; | ||
1568 | |||
1569 | #ifdef CONFIG_SCHEDSTATS | ||
1570 | p->se.wait_start = 0; | ||
1578 | p->se.sum_wait_runtime = 0; | 1571 | p->se.sum_wait_runtime = 0; |
1579 | p->se.sum_sleep_runtime = 0; | 1572 | p->se.sum_sleep_runtime = 0; |
1580 | p->se.sleep_start = 0; | 1573 | p->se.sleep_start = 0; |
1581 | p->se.sleep_start_fair = 0; | ||
1582 | p->se.block_start = 0; | 1574 | p->se.block_start = 0; |
1583 | p->se.sleep_max = 0; | 1575 | p->se.sleep_max = 0; |
1584 | p->se.block_max = 0; | 1576 | p->se.block_max = 0; |
@@ -1586,6 +1578,7 @@ static void __sched_fork(struct task_struct *p) | |||
1586 | p->se.wait_max = 0; | 1578 | p->se.wait_max = 0; |
1587 | p->se.wait_runtime_overruns = 0; | 1579 | p->se.wait_runtime_overruns = 0; |
1588 | p->se.wait_runtime_underruns = 0; | 1580 | p->se.wait_runtime_underruns = 0; |
1581 | #endif | ||
1589 | 1582 | ||
1590 | INIT_LIST_HEAD(&p->run_list); | 1583 | INIT_LIST_HEAD(&p->run_list); |
1591 | p->se.on_rq = 0; | 1584 | p->se.on_rq = 0; |
@@ -1654,22 +1647,27 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
1654 | unsigned long flags; | 1647 | unsigned long flags; |
1655 | struct rq *rq; | 1648 | struct rq *rq; |
1656 | int this_cpu; | 1649 | int this_cpu; |
1650 | u64 now; | ||
1657 | 1651 | ||
1658 | rq = task_rq_lock(p, &flags); | 1652 | rq = task_rq_lock(p, &flags); |
1659 | BUG_ON(p->state != TASK_RUNNING); | 1653 | BUG_ON(p->state != TASK_RUNNING); |
1660 | this_cpu = smp_processor_id(); /* parent's CPU */ | 1654 | this_cpu = smp_processor_id(); /* parent's CPU */ |
1655 | now = rq_clock(rq); | ||
1661 | 1656 | ||
1662 | p->prio = effective_prio(p); | 1657 | p->prio = effective_prio(p); |
1663 | 1658 | ||
1664 | if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) || | 1659 | if (!p->sched_class->task_new || !sysctl_sched_child_runs_first || |
1665 | task_cpu(p) != this_cpu || !current->se.on_rq) { | 1660 | (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu || |
1661 | !current->se.on_rq) { | ||
1662 | |||
1666 | activate_task(rq, p, 0); | 1663 | activate_task(rq, p, 0); |
1667 | } else { | 1664 | } else { |
1668 | /* | 1665 | /* |
1669 | * Let the scheduling class do new task startup | 1666 | * Let the scheduling class do new task startup |
1670 | * management (if any): | 1667 | * management (if any): |
1671 | */ | 1668 | */ |
1672 | p->sched_class->task_new(rq, p); | 1669 | p->sched_class->task_new(rq, p, now); |
1670 | inc_nr_running(p, rq, now); | ||
1673 | } | 1671 | } |
1674 | check_preempt_curr(rq, p); | 1672 | check_preempt_curr(rq, p); |
1675 | task_rq_unlock(rq, &flags); | 1673 | task_rq_unlock(rq, &flags); |
@@ -2908,8 +2906,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
2908 | schedstat_inc(sd, alb_cnt); | 2906 | schedstat_inc(sd, alb_cnt); |
2909 | 2907 | ||
2910 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, | 2908 | if (move_tasks(target_rq, target_cpu, busiest_rq, 1, |
2911 | RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE, | 2909 | ULONG_MAX, sd, CPU_IDLE, NULL)) |
2912 | NULL)) | ||
2913 | schedstat_inc(sd, alb_pushed); | 2910 | schedstat_inc(sd, alb_pushed); |
2914 | else | 2911 | else |
2915 | schedstat_inc(sd, alb_failed); | 2912 | schedstat_inc(sd, alb_failed); |
@@ -5269,8 +5266,6 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) | |||
5269 | sizeof(int), 0644, proc_dointvec_minmax); | 5266 | sizeof(int), 0644, proc_dointvec_minmax); |
5270 | set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct, | 5267 | set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct, |
5271 | sizeof(int), 0644, proc_dointvec_minmax); | 5268 | sizeof(int), 0644, proc_dointvec_minmax); |
5272 | set_table_entry(&table[9], 10, "cache_hot_time", &sd->cache_hot_time, | ||
5273 | sizeof(long long), 0644, proc_doulongvec_minmax); | ||
5274 | set_table_entry(&table[10], 11, "cache_nice_tries", | 5269 | set_table_entry(&table[10], 11, "cache_nice_tries", |
5275 | &sd->cache_nice_tries, | 5270 | &sd->cache_nice_tries, |
5276 | sizeof(int), 0644, proc_dointvec_minmax); | 5271 | sizeof(int), 0644, proc_dointvec_minmax); |
@@ -6590,12 +6585,14 @@ void normalize_rt_tasks(void) | |||
6590 | do_each_thread(g, p) { | 6585 | do_each_thread(g, p) { |
6591 | p->se.fair_key = 0; | 6586 | p->se.fair_key = 0; |
6592 | p->se.wait_runtime = 0; | 6587 | p->se.wait_runtime = 0; |
6588 | p->se.exec_start = 0; | ||
6593 | p->se.wait_start_fair = 0; | 6589 | p->se.wait_start_fair = 0; |
6590 | p->se.sleep_start_fair = 0; | ||
6591 | #ifdef CONFIG_SCHEDSTATS | ||
6594 | p->se.wait_start = 0; | 6592 | p->se.wait_start = 0; |
6595 | p->se.exec_start = 0; | ||
6596 | p->se.sleep_start = 0; | 6593 | p->se.sleep_start = 0; |
6597 | p->se.sleep_start_fair = 0; | ||
6598 | p->se.block_start = 0; | 6594 | p->se.block_start = 0; |
6595 | #endif | ||
6599 | task_rq(p)->cfs.fair_clock = 0; | 6596 | task_rq(p)->cfs.fair_clock = 0; |
6600 | task_rq(p)->clock = 0; | 6597 | task_rq(p)->clock = 0; |
6601 | 6598 | ||
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 0eca442b7792..1c61e5315ad2 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -44,11 +44,16 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now) | |||
44 | (long long)p->se.wait_runtime, | 44 | (long long)p->se.wait_runtime, |
45 | (long long)(p->nvcsw + p->nivcsw), | 45 | (long long)(p->nvcsw + p->nivcsw), |
46 | p->prio, | 46 | p->prio, |
47 | #ifdef CONFIG_SCHEDSTATS | ||
47 | (long long)p->se.sum_exec_runtime, | 48 | (long long)p->se.sum_exec_runtime, |
48 | (long long)p->se.sum_wait_runtime, | 49 | (long long)p->se.sum_wait_runtime, |
49 | (long long)p->se.sum_sleep_runtime, | 50 | (long long)p->se.sum_sleep_runtime, |
50 | (long long)p->se.wait_runtime_overruns, | 51 | (long long)p->se.wait_runtime_overruns, |
51 | (long long)p->se.wait_runtime_underruns); | 52 | (long long)p->se.wait_runtime_underruns |
53 | #else | ||
54 | 0LL, 0LL, 0LL, 0LL, 0LL | ||
55 | #endif | ||
56 | ); | ||
52 | } | 57 | } |
53 | 58 | ||
54 | static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now) | 59 | static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now) |
@@ -171,7 +176,7 @@ static int sched_debug_show(struct seq_file *m, void *v) | |||
171 | u64 now = ktime_to_ns(ktime_get()); | 176 | u64 now = ktime_to_ns(ktime_get()); |
172 | int cpu; | 177 | int cpu; |
173 | 178 | ||
174 | SEQ_printf(m, "Sched Debug Version: v0.05, %s %.*s\n", | 179 | SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n", |
175 | init_utsname()->release, | 180 | init_utsname()->release, |
176 | (int)strcspn(init_utsname()->version, " "), | 181 | (int)strcspn(init_utsname()->version, " "), |
177 | init_utsname()->version); | 182 | init_utsname()->version); |
@@ -235,21 +240,24 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
235 | #define P(F) \ | 240 | #define P(F) \ |
236 | SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F) | 241 | SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F) |
237 | 242 | ||
238 | P(se.wait_start); | 243 | P(se.wait_runtime); |
239 | P(se.wait_start_fair); | 244 | P(se.wait_start_fair); |
240 | P(se.exec_start); | 245 | P(se.exec_start); |
241 | P(se.sleep_start); | ||
242 | P(se.sleep_start_fair); | 246 | P(se.sleep_start_fair); |
247 | P(se.sum_exec_runtime); | ||
248 | |||
249 | #ifdef CONFIG_SCHEDSTATS | ||
250 | P(se.wait_start); | ||
251 | P(se.sleep_start); | ||
243 | P(se.block_start); | 252 | P(se.block_start); |
244 | P(se.sleep_max); | 253 | P(se.sleep_max); |
245 | P(se.block_max); | 254 | P(se.block_max); |
246 | P(se.exec_max); | 255 | P(se.exec_max); |
247 | P(se.wait_max); | 256 | P(se.wait_max); |
248 | P(se.wait_runtime); | ||
249 | P(se.wait_runtime_overruns); | 257 | P(se.wait_runtime_overruns); |
250 | P(se.wait_runtime_underruns); | 258 | P(se.wait_runtime_underruns); |
251 | P(se.sum_wait_runtime); | 259 | P(se.sum_wait_runtime); |
252 | P(se.sum_exec_runtime); | 260 | #endif |
253 | SEQ_printf(m, "%-25s:%20Ld\n", | 261 | SEQ_printf(m, "%-25s:%20Ld\n", |
254 | "nr_switches", (long long)(p->nvcsw + p->nivcsw)); | 262 | "nr_switches", (long long)(p->nvcsw + p->nivcsw)); |
255 | P(se.load.weight); | 263 | P(se.load.weight); |
@@ -269,7 +277,9 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
269 | 277 | ||
270 | void proc_sched_set_task(struct task_struct *p) | 278 | void proc_sched_set_task(struct task_struct *p) |
271 | { | 279 | { |
280 | #ifdef CONFIG_SCHEDSTATS | ||
272 | p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0; | 281 | p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0; |
273 | p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0; | 282 | p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0; |
283 | #endif | ||
274 | p->se.sum_exec_runtime = 0; | 284 | p->se.sum_exec_runtime = 0; |
275 | } | 285 | } |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 6971db0a7160..6f579ff5a9bc 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -292,10 +292,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now) | |||
292 | return; | 292 | return; |
293 | 293 | ||
294 | delta_exec = curr->delta_exec; | 294 | delta_exec = curr->delta_exec; |
295 | #ifdef CONFIG_SCHEDSTATS | 295 | schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); |
296 | if (unlikely(delta_exec > curr->exec_max)) | ||
297 | curr->exec_max = delta_exec; | ||
298 | #endif | ||
299 | 296 | ||
300 | curr->sum_exec_runtime += delta_exec; | 297 | curr->sum_exec_runtime += delta_exec; |
301 | cfs_rq->exec_clock += delta_exec; | 298 | cfs_rq->exec_clock += delta_exec; |
@@ -352,7 +349,7 @@ static inline void | |||
352 | update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now) | 349 | update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now) |
353 | { | 350 | { |
354 | se->wait_start_fair = cfs_rq->fair_clock; | 351 | se->wait_start_fair = cfs_rq->fair_clock; |
355 | se->wait_start = now; | 352 | schedstat_set(se->wait_start, now); |
356 | } | 353 | } |
357 | 354 | ||
358 | /* | 355 | /* |
@@ -425,13 +422,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now) | |||
425 | { | 422 | { |
426 | unsigned long delta_fair = se->delta_fair_run; | 423 | unsigned long delta_fair = se->delta_fair_run; |
427 | 424 | ||
428 | #ifdef CONFIG_SCHEDSTATS | 425 | schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start)); |
429 | { | ||
430 | s64 delta_wait = now - se->wait_start; | ||
431 | if (unlikely(delta_wait > se->wait_max)) | ||
432 | se->wait_max = delta_wait; | ||
433 | } | ||
434 | #endif | ||
435 | 426 | ||
436 | if (unlikely(se->load.weight != NICE_0_LOAD)) | 427 | if (unlikely(se->load.weight != NICE_0_LOAD)) |
437 | delta_fair = calc_weighted(delta_fair, se->load.weight, | 428 | delta_fair = calc_weighted(delta_fair, se->load.weight, |
@@ -456,7 +447,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now) | |||
456 | } | 447 | } |
457 | 448 | ||
458 | se->wait_start_fair = 0; | 449 | se->wait_start_fair = 0; |
459 | se->wait_start = 0; | 450 | schedstat_set(se->wait_start, 0); |
460 | } | 451 | } |
461 | 452 | ||
462 | static inline void | 453 | static inline void |
@@ -1041,11 +1032,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr) | |||
1041 | * monopolize the CPU. Note: the parent runqueue is locked, | 1032 | * monopolize the CPU. Note: the parent runqueue is locked, |
1042 | * the child is not running yet. | 1033 | * the child is not running yet. |
1043 | */ | 1034 | */ |
1044 | static void task_new_fair(struct rq *rq, struct task_struct *p) | 1035 | static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now) |
1045 | { | 1036 | { |
1046 | struct cfs_rq *cfs_rq = task_cfs_rq(p); | 1037 | struct cfs_rq *cfs_rq = task_cfs_rq(p); |
1047 | struct sched_entity *se = &p->se; | 1038 | struct sched_entity *se = &p->se; |
1048 | u64 now = rq_clock(rq); | ||
1049 | 1039 | ||
1050 | sched_info_queued(p); | 1040 | sched_info_queued(p); |
1051 | 1041 | ||
@@ -1072,7 +1062,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) | |||
1072 | p->se.wait_runtime = -(sysctl_sched_granularity / 2); | 1062 | p->se.wait_runtime = -(sysctl_sched_granularity / 2); |
1073 | 1063 | ||
1074 | __enqueue_entity(cfs_rq, se); | 1064 | __enqueue_entity(cfs_rq, se); |
1075 | inc_nr_running(p, rq, now); | ||
1076 | } | 1065 | } |
1077 | 1066 | ||
1078 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1067 | #ifdef CONFIG_FAIR_GROUP_SCHED |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 1192a2741b99..002fcf8d3f64 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -18,8 +18,8 @@ static inline void update_curr_rt(struct rq *rq, u64 now) | |||
18 | delta_exec = now - curr->se.exec_start; | 18 | delta_exec = now - curr->se.exec_start; |
19 | if (unlikely((s64)delta_exec < 0)) | 19 | if (unlikely((s64)delta_exec < 0)) |
20 | delta_exec = 0; | 20 | delta_exec = 0; |
21 | if (unlikely(delta_exec > curr->se.exec_max)) | 21 | |
22 | curr->se.exec_max = delta_exec; | 22 | schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); |
23 | 23 | ||
24 | curr->se.sum_exec_runtime += delta_exec; | 24 | curr->se.sum_exec_runtime += delta_exec; |
25 | curr->se.exec_start = now; | 25 | curr->se.exec_start = now; |
@@ -229,15 +229,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p) | |||
229 | requeue_task_rt(rq, p); | 229 | requeue_task_rt(rq, p); |
230 | } | 230 | } |
231 | 231 | ||
232 | /* | ||
233 | * No parent/child timeslice management necessary for RT tasks, | ||
234 | * just activate them: | ||
235 | */ | ||
236 | static void task_new_rt(struct rq *rq, struct task_struct *p) | ||
237 | { | ||
238 | activate_task(rq, p, 1); | ||
239 | } | ||
240 | |||
241 | static struct sched_class rt_sched_class __read_mostly = { | 232 | static struct sched_class rt_sched_class __read_mostly = { |
242 | .enqueue_task = enqueue_task_rt, | 233 | .enqueue_task = enqueue_task_rt, |
243 | .dequeue_task = dequeue_task_rt, | 234 | .dequeue_task = dequeue_task_rt, |
@@ -251,5 +242,4 @@ static struct sched_class rt_sched_class __read_mostly = { | |||
251 | .load_balance = load_balance_rt, | 242 | .load_balance = load_balance_rt, |
252 | 243 | ||
253 | .task_tick = task_tick_rt, | 244 | .task_tick = task_tick_rt, |
254 | .task_new = task_new_rt, | ||
255 | }; | 245 | }; |
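With task_new_rt gone, .task_new becomes an optional method and the core scheduler (see the wake_up_new_task hunk above) falls back to a plain activation when a class does not provide it. A simplified, hypothetical sketch of that optional-callback dispatch; the real call site adds further conditions (child-runs-first policy, CPU placement):

static void start_new_task(struct rq *rq, struct task_struct *p, u64 now)
{
	if (p->sched_class->task_new)
		p->sched_class->task_new(rq, p, now);	/* class-specific startup */
	else
		activate_task(rq, p, 0);		/* generic fallback */
}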
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index c63c38f6fa6e..c20a94dda61e 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
@@ -116,6 +116,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
116 | } | 116 | } |
117 | # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) | 117 | # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) |
118 | # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) | 118 | # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) |
119 | # define schedstat_set(var, val) do { var = (val); } while (0) | ||
119 | #else /* !CONFIG_SCHEDSTATS */ | 120 | #else /* !CONFIG_SCHEDSTATS */ |
120 | static inline void | 121 | static inline void |
121 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | 122 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) |
@@ -125,6 +126,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
125 | {} | 126 | {} |
126 | # define schedstat_inc(rq, field) do { } while (0) | 127 | # define schedstat_inc(rq, field) do { } while (0) |
127 | # define schedstat_add(rq, field, amt) do { } while (0) | 128 | # define schedstat_add(rq, field, amt) do { } while (0) |
129 | # define schedstat_set(var, val) do { } while (0) | ||
128 | #endif | 130 | #endif |
129 | 131 | ||
130 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 132 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
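schedstat_set() completes the schedstat_inc()/schedstat_add() family: with CONFIG_SCHEDSTATS it is a plain assignment, without it the whole statement disappears and its arguments are never expanded, so callers need no #ifdef of their own. A hedged usage sketch:

/* With CONFIG_SCHEDSTATS=y this records the worst-case wait; with it
 * off, the line compiles to nothing and wait_max need not even exist. */
schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));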
diff --git a/kernel/signal.c b/kernel/signal.c index ef8156a6aad5..b27c01a66448 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1561,10 +1561,6 @@ static inline int may_ptrace_stop(void) | |||
1561 | (current->ptrace & PT_ATTACHED))) | 1561 | (current->ptrace & PT_ATTACHED))) |
1562 | return 0; | 1562 | return 0; |
1563 | 1563 | ||
1564 | if (unlikely(current->signal == current->parent->signal) && | ||
1565 | unlikely(current->signal->flags & SIGNAL_GROUP_EXIT)) | ||
1566 | return 0; | ||
1567 | |||
1568 | /* | 1564 | /* |
1569 | * Are we in the middle of do_coredump? | 1565 | * Are we in the middle of do_coredump? |
1570 | * If so and our tracer is also part of the coredump stopping | 1566 | * If so and our tracer is also part of the coredump stopping |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 06c08e5740fb..e68103475cca 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -831,7 +831,7 @@ const struct proto_ops inet_stream_ops = { | |||
831 | .shutdown = inet_shutdown, | 831 | .shutdown = inet_shutdown, |
832 | .setsockopt = sock_common_setsockopt, | 832 | .setsockopt = sock_common_setsockopt, |
833 | .getsockopt = sock_common_getsockopt, | 833 | .getsockopt = sock_common_getsockopt, |
834 | .sendmsg = inet_sendmsg, | 834 | .sendmsg = tcp_sendmsg, |
835 | .recvmsg = sock_common_recvmsg, | 835 | .recvmsg = sock_common_recvmsg, |
836 | .mmap = sock_no_mmap, | 836 | .mmap = sock_no_mmap, |
837 | .sendpage = tcp_sendpage, | 837 | .sendpage = tcp_sendpage, |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 27c7918e442a..b3dd5de9a258 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
@@ -294,15 +294,14 @@ static int exp_open(struct inode *inode, struct file *file) | |||
294 | struct ct_expect_iter_state *st; | 294 | struct ct_expect_iter_state *st; |
295 | int ret; | 295 | int ret; |
296 | 296 | ||
297 | st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); | 297 | st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); |
298 | if (st == NULL) | 298 | if (!st) |
299 | return -ENOMEM; | 299 | return -ENOMEM; |
300 | ret = seq_open(file, &exp_seq_ops); | 300 | ret = seq_open(file, &exp_seq_ops); |
301 | if (ret) | 301 | if (ret) |
302 | goto out_free; | 302 | goto out_free; |
303 | seq = file->private_data; | 303 | seq = file->private_data; |
304 | seq->private = st; | 304 | seq->private = st; |
305 | memset(st, 0, sizeof(struct ct_expect_iter_state)); | ||
306 | return ret; | 305 | return ret; |
307 | out_free: | 306 | out_free: |
308 | kfree(st); | 307 | kfree(st); |
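This conversion (and the two that follow for raw.c and route.c) folds the kmalloc()+memset() pair into kzalloc(), so the seq_file private state is zeroed at allocation time and never visible uninitialised. The idiom as a short sketch (hypothetical type):

#include <linux/slab.h>

/* Hypothetical type standing in for ct_expect_iter_state and friends. */
struct iter_state {
	unsigned int bucket;
};

static struct iter_state *iter_state_alloc(void)
{
	/* kzalloc() = kmalloc() + memset(0): no separate clearing step,
	 * every field starts at zero. */
	return kzalloc(sizeof(struct iter_state), GFP_KERNEL);
}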
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 24d7c9f31918..c6d71526f625 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -900,8 +900,9 @@ static int raw_seq_open(struct inode *inode, struct file *file) | |||
900 | { | 900 | { |
901 | struct seq_file *seq; | 901 | struct seq_file *seq; |
902 | int rc = -ENOMEM; | 902 | int rc = -ENOMEM; |
903 | struct raw_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); | 903 | struct raw_iter_state *s; |
904 | 904 | ||
905 | s = kzalloc(sizeof(*s), GFP_KERNEL); | ||
905 | if (!s) | 906 | if (!s) |
906 | goto out; | 907 | goto out; |
907 | rc = seq_open(file, &raw_seq_ops); | 908 | rc = seq_open(file, &raw_seq_ops); |
@@ -910,7 +911,6 @@ static int raw_seq_open(struct inode *inode, struct file *file) | |||
910 | 911 | ||
911 | seq = file->private_data; | 912 | seq = file->private_data; |
912 | seq->private = s; | 913 | seq->private = s; |
913 | memset(s, 0, sizeof(*s)); | ||
914 | out: | 914 | out: |
915 | return rc; | 915 | return rc; |
916 | out_kfree: | 916 | out_kfree: |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index df42b7fb3268..c7ca94bd152c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -374,8 +374,9 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file) | |||
374 | { | 374 | { |
375 | struct seq_file *seq; | 375 | struct seq_file *seq; |
376 | int rc = -ENOMEM; | 376 | int rc = -ENOMEM; |
377 | struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); | 377 | struct rt_cache_iter_state *s; |
378 | 378 | ||
379 | s = kzalloc(sizeof(*s), GFP_KERNEL); | ||
379 | if (!s) | 380 | if (!s) |
380 | goto out; | 381 | goto out; |
381 | rc = seq_open(file, &rt_cache_seq_ops); | 382 | rc = seq_open(file, &rt_cache_seq_ops); |
@@ -383,7 +384,6 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file) | |||
383 | goto out_kfree; | 384 | goto out_kfree; |
384 | seq = file->private_data; | 385 | seq = file->private_data; |
385 | seq->private = s; | 386 | seq->private = s; |
386 | memset(s, 0, sizeof(*s)); | ||
387 | out: | 387 | out: |
388 | return rc; | 388 | return rc; |
389 | out_kfree: | 389 | out_kfree: |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index da4c0b6ab79a..7e740112b238 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk) | |||
658 | return tmp; | 658 | return tmp; |
659 | } | 659 | } |
660 | 660 | ||
661 | int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | 661 | int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
662 | size_t size) | 662 | size_t size) |
663 | { | 663 | { |
664 | struct sock *sk = sock->sk; | ||
664 | struct iovec *iov; | 665 | struct iovec *iov; |
665 | struct tcp_sock *tp = tcp_sk(sk); | 666 | struct tcp_sock *tp = tcp_sk(sk); |
666 | struct sk_buff *skb; | 667 | struct sk_buff *skb; |
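Switching tcp_sendmsg() from struct sock * to struct socket * lets it be wired directly into the proto_ops tables (see the af_inet.c and af_inet6.c hunks), dropping the inet_sendmsg() indirection; the handler simply recovers the sock pointer itself. A hedged sketch of that shape (hypothetical handler):

/* Hypothetical proto_ops-level sendmsg handler. */
static int my_sendmsg(struct kiocb *iocb, struct socket *sock,
		      struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;	/* derive the sock, as tcp_sendmsg() now does */

	/* ... lock sk, build and queue skbs from msg (size bytes) ... */
	return size;			/* bytes queued on success */
}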
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 378ca8a086a3..f030435e0eb4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -102,11 +102,14 @@ int sysctl_tcp_abc __read_mostly; | |||
102 | #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */ | 102 | #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */ |
103 | #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ | 103 | #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ |
104 | #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ | 104 | #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ |
105 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ | ||
106 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */ | ||
105 | 107 | ||
106 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) | 108 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
107 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) | 109 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) |
108 | #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) | 110 | #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) |
109 | #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) | 111 | #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) |
112 | #define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) | ||
110 | 113 | ||
111 | #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0) | 114 | #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0) |
112 | #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2) | 115 | #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2) |
@@ -964,12 +967,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
964 | 967 | ||
965 | /* Check for D-SACK. */ | 968 | /* Check for D-SACK. */ |
966 | if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) { | 969 | if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) { |
970 | flag |= FLAG_DSACKING_ACK; | ||
967 | found_dup_sack = 1; | 971 | found_dup_sack = 1; |
968 | tp->rx_opt.sack_ok |= 4; | 972 | tp->rx_opt.sack_ok |= 4; |
969 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); | 973 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); |
970 | } else if (num_sacks > 1 && | 974 | } else if (num_sacks > 1 && |
971 | !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) && | 975 | !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) && |
972 | !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) { | 976 | !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) { |
977 | flag |= FLAG_DSACKING_ACK; | ||
973 | found_dup_sack = 1; | 978 | found_dup_sack = 1; |
974 | tp->rx_opt.sack_ok |= 4; | 979 | tp->rx_opt.sack_ok |= 4; |
975 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); | 980 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); |
@@ -1856,7 +1861,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag) | |||
1856 | struct tcp_sock *tp = tcp_sk(sk); | 1861 | struct tcp_sock *tp = tcp_sk(sk); |
1857 | int decr = tp->snd_cwnd_cnt + 1; | 1862 | int decr = tp->snd_cwnd_cnt + 1; |
1858 | 1863 | ||
1859 | if ((flag&FLAG_FORWARD_PROGRESS) || | 1864 | if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) || |
1860 | (IsReno(tp) && !(flag&FLAG_NOT_DUP))) { | 1865 | (IsReno(tp) && !(flag&FLAG_NOT_DUP))) { |
1861 | tp->snd_cwnd_cnt = decr&1; | 1866 | tp->snd_cwnd_cnt = decr&1; |
1862 | decr >>= 1; | 1867 | decr >>= 1; |
@@ -2107,15 +2112,13 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb) | |||
2107 | * tcp_xmit_retransmit_queue(). | 2112 | * tcp_xmit_retransmit_queue(). |
2108 | */ | 2113 | */ |
2109 | static void | 2114 | static void |
2110 | tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | 2115 | tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) |
2111 | int prior_packets, int flag) | ||
2112 | { | 2116 | { |
2113 | struct inet_connection_sock *icsk = inet_csk(sk); | 2117 | struct inet_connection_sock *icsk = inet_csk(sk); |
2114 | struct tcp_sock *tp = tcp_sk(sk); | 2118 | struct tcp_sock *tp = tcp_sk(sk); |
2115 | int is_dupack = (tp->snd_una == prior_snd_una && | 2119 | int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP)); |
2116 | (!(flag&FLAG_NOT_DUP) || | 2120 | int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) && |
2117 | ((flag&FLAG_DATA_SACKED) && | 2121 | (tp->fackets_out > tp->reordering)); |
2118 | (tp->fackets_out > tp->reordering)))); | ||
2119 | 2122 | ||
2120 | /* Some technical things: | 2123 | /* Some technical things: |
2121 | * 1. Reno does not count dupacks (sacked_out) automatically. */ | 2124 | * 1. Reno does not count dupacks (sacked_out) automatically. */ |
@@ -2192,14 +2195,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2192 | /* F. Process state. */ | 2195 | /* F. Process state. */ |
2193 | switch (icsk->icsk_ca_state) { | 2196 | switch (icsk->icsk_ca_state) { |
2194 | case TCP_CA_Recovery: | 2197 | case TCP_CA_Recovery: |
2195 | if (prior_snd_una == tp->snd_una) { | 2198 | if (!(flag & FLAG_SND_UNA_ADVANCED)) { |
2196 | if (IsReno(tp) && is_dupack) | 2199 | if (IsReno(tp) && is_dupack) |
2197 | tcp_add_reno_sack(sk); | 2200 | tcp_add_reno_sack(sk); |
2198 | } else { | 2201 | } else { |
2199 | int acked = prior_packets - tp->packets_out; | 2202 | int acked = prior_packets - tp->packets_out; |
2200 | if (IsReno(tp)) | 2203 | if (IsReno(tp)) |
2201 | tcp_remove_reno_sacks(sk, acked); | 2204 | tcp_remove_reno_sacks(sk, acked); |
2202 | is_dupack = tcp_try_undo_partial(sk, acked); | 2205 | do_lost = tcp_try_undo_partial(sk, acked); |
2203 | } | 2206 | } |
2204 | break; | 2207 | break; |
2205 | case TCP_CA_Loss: | 2208 | case TCP_CA_Loss: |
@@ -2215,7 +2218,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2215 | /* Loss is undone; fall through to processing in Open state. */ | 2218 | /* Loss is undone; fall through to processing in Open state. */ |
2216 | default: | 2219 | default: |
2217 | if (IsReno(tp)) { | 2220 | if (IsReno(tp)) { |
2218 | if (tp->snd_una != prior_snd_una) | 2221 | if (flag & FLAG_SND_UNA_ADVANCED) |
2219 | tcp_reset_reno_sack(tp); | 2222 | tcp_reset_reno_sack(tp); |
2220 | if (is_dupack) | 2223 | if (is_dupack) |
2221 | tcp_add_reno_sack(sk); | 2224 | tcp_add_reno_sack(sk); |
@@ -2264,7 +2267,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2264 | tcp_set_ca_state(sk, TCP_CA_Recovery); | 2267 | tcp_set_ca_state(sk, TCP_CA_Recovery); |
2265 | } | 2268 | } |
2266 | 2269 | ||
2267 | if (is_dupack || tcp_head_timedout(sk)) | 2270 | if (do_lost || tcp_head_timedout(sk)) |
2268 | tcp_update_scoreboard(sk); | 2271 | tcp_update_scoreboard(sk); |
2269 | tcp_cwnd_down(sk, flag); | 2272 | tcp_cwnd_down(sk, flag); |
2270 | tcp_xmit_retransmit_queue(sk); | 2273 | tcp_xmit_retransmit_queue(sk); |
@@ -2684,7 +2687,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag) | |||
2684 | * to prove that the RTO is indeed spurious. It transfers the control | 2687 | * to prove that the RTO is indeed spurious. It transfers the control |
2685 | * from F-RTO to the conventional RTO recovery | 2688 | * from F-RTO to the conventional RTO recovery |
2686 | */ | 2689 | */ |
2687 | static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag) | 2690 | static int tcp_process_frto(struct sock *sk, int flag) |
2688 | { | 2691 | { |
2689 | struct tcp_sock *tp = tcp_sk(sk); | 2692 | struct tcp_sock *tp = tcp_sk(sk); |
2690 | 2693 | ||
@@ -2704,8 +2707,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag) | |||
2704 | * ACK isn't duplicate nor advances window, e.g., opposite dir | 2707 | * ACK isn't duplicate nor advances window, e.g., opposite dir |
2705 | * data, winupdate | 2708 | * data, winupdate |
2706 | */ | 2709 | */ |
2707 | if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) && | 2710 | if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP)) |
2708 | !(flag&FLAG_FORWARD_PROGRESS)) | ||
2709 | return 1; | 2711 | return 1; |
2710 | 2712 | ||
2711 | if (!(flag&FLAG_DATA_ACKED)) { | 2713 | if (!(flag&FLAG_DATA_ACKED)) { |
@@ -2785,6 +2787,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
2785 | if (before(ack, prior_snd_una)) | 2787 | if (before(ack, prior_snd_una)) |
2786 | goto old_ack; | 2788 | goto old_ack; |
2787 | 2789 | ||
2790 | if (after(ack, prior_snd_una)) | ||
2791 | flag |= FLAG_SND_UNA_ADVANCED; | ||
2792 | |||
2788 | if (sysctl_tcp_abc) { | 2793 | if (sysctl_tcp_abc) { |
2789 | if (icsk->icsk_ca_state < TCP_CA_CWR) | 2794 | if (icsk->icsk_ca_state < TCP_CA_CWR) |
2790 | tp->bytes_acked += ack - prior_snd_una; | 2795 | tp->bytes_acked += ack - prior_snd_una; |
@@ -2837,14 +2842,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
2837 | flag |= tcp_clean_rtx_queue(sk, &seq_rtt); | 2842 | flag |= tcp_clean_rtx_queue(sk, &seq_rtt); |
2838 | 2843 | ||
2839 | if (tp->frto_counter) | 2844 | if (tp->frto_counter) |
2840 | frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag); | 2845 | frto_cwnd = tcp_process_frto(sk, flag); |
2841 | 2846 | ||
2842 | if (tcp_ack_is_dubious(sk, flag)) { | 2847 | if (tcp_ack_is_dubious(sk, flag)) { |
2843 | /* Advance CWND, if state allows this. */ | 2848 | /* Advance CWND, if state allows this. */ |
2844 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && | 2849 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && |
2845 | tcp_may_raise_cwnd(sk, flag)) | 2850 | tcp_may_raise_cwnd(sk, flag)) |
2846 | tcp_cong_avoid(sk, ack, prior_in_flight, 0); | 2851 | tcp_cong_avoid(sk, ack, prior_in_flight, 0); |
2847 | tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); | 2852 | tcp_fastretrans_alert(sk, prior_packets, flag); |
2848 | } else { | 2853 | } else { |
2849 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) | 2854 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) |
2850 | tcp_cong_avoid(sk, ack, prior_in_flight, 1); | 2855 | tcp_cong_avoid(sk, ack, prior_in_flight, 1); |
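The tcp_input.c hunks above belong together: tcp_ack() now records once, as FLAG_SND_UNA_ADVANCED, whether the ACK moved snd_una, and tcp_fastretrans_alert() and tcp_process_frto() test that bit (or FLAG_ANY_PROGRESS) instead of carrying prior_snd_una around; the alert path also keeps a separate do_lost result so a partial undo no longer masquerades as a dupack. A minimal sketch of the flag idea follows, assuming FLAG_ANY_PROGRESS is FLAG_FORWARD_PROGRESS with the new bit OR'ed in; the bit values and helper names below are illustrative, not the kernel's definitions.

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef int32_t  s32;

    /* wrap-safe "seq1 comes after seq2", mirroring the kernel's after() */
    static inline int after(u32 seq1, u32 seq2)
    {
            return (s32)(seq2 - seq1) < 0;
    }

    /* illustrative bit values; the real ones live in net/ipv4/tcp_input.c */
    #define FLAG_DATA_ACKED         0x04    /* this ACK acknowledged new data */
    #define FLAG_SND_UNA_ADVANCED   0x400   /* this ACK moved snd_una forward */
    #define FLAG_FORWARD_PROGRESS   FLAG_DATA_ACKED
    #define FLAG_ANY_PROGRESS       (FLAG_FORWARD_PROGRESS | FLAG_SND_UNA_ADVANCED)

    /* tcp_ack() derives the bit once from the sequence numbers ... */
    static int classify_ack(int flag, u32 ack, u32 prior_snd_una)
    {
            if (after(ack, prior_snd_una))
                    flag |= FLAG_SND_UNA_ADVANCED;
            return flag;
    }

    /*
     * ... so the later consumers reduce to plain flag tests, e.g.
     *
     *     if (!(flag & FLAG_SND_UNA_ADVANCED))     // was: prior_snd_una == snd_una
     *     if (!(flag & FLAG_ANY_PROGRESS) && ...)  // the rewritten F-RTO check
     */
    int main(void)
    {
            int flag = classify_ack(0, 1001, 1000);

            assert(flag & FLAG_SND_UNA_ADVANCED);
            assert(flag & FLAG_ANY_PROGRESS);
            return 0;
    }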
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3f5f7423b95c..9c94627c8c7e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -2425,7 +2425,6 @@ struct proto tcp_prot = { | |||
2425 | .shutdown = tcp_shutdown, | 2425 | .shutdown = tcp_shutdown, |
2426 | .setsockopt = tcp_setsockopt, | 2426 | .setsockopt = tcp_setsockopt, |
2427 | .getsockopt = tcp_getsockopt, | 2427 | .getsockopt = tcp_getsockopt, |
2428 | .sendmsg = tcp_sendmsg, | ||
2429 | .recvmsg = tcp_recvmsg, | 2428 | .recvmsg = tcp_recvmsg, |
2430 | .backlog_rcv = tcp_v4_do_rcv, | 2429 | .backlog_rcv = tcp_v4_do_rcv, |
2431 | .hash = tcp_v4_hash, | 2430 | .hash = tcp_v4_hash, |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index eed09373a45d..b5f96372ad73 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -484,7 +484,7 @@ const struct proto_ops inet6_stream_ops = { | |||
484 | .shutdown = inet_shutdown, /* ok */ | 484 | .shutdown = inet_shutdown, /* ok */ |
485 | .setsockopt = sock_common_setsockopt, /* ok */ | 485 | .setsockopt = sock_common_setsockopt, /* ok */ |
486 | .getsockopt = sock_common_getsockopt, /* ok */ | 486 | .getsockopt = sock_common_getsockopt, /* ok */ |
487 | .sendmsg = inet_sendmsg, /* ok */ | 487 | .sendmsg = tcp_sendmsg, /* ok */ |
488 | .recvmsg = sock_common_recvmsg, /* ok */ | 488 | .recvmsg = sock_common_recvmsg, /* ok */ |
489 | .mmap = sock_no_mmap, | 489 | .mmap = sock_no_mmap, |
490 | .sendpage = tcp_sendpage, | 490 | .sendpage = tcp_sendpage, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f10f3689d671..cbdb78487915 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -2115,7 +2115,6 @@ struct proto tcpv6_prot = { | |||
2115 | .shutdown = tcp_shutdown, | 2115 | .shutdown = tcp_shutdown, |
2116 | .setsockopt = tcp_setsockopt, | 2116 | .setsockopt = tcp_setsockopt, |
2117 | .getsockopt = tcp_getsockopt, | 2117 | .getsockopt = tcp_getsockopt, |
2118 | .sendmsg = tcp_sendmsg, | ||
2119 | .recvmsg = tcp_recvmsg, | 2118 | .recvmsg = tcp_recvmsg, |
2120 | .backlog_rcv = tcp_v6_do_rcv, | 2119 | .backlog_rcv = tcp_v6_do_rcv, |
2121 | .hash = tcp_v6_hash, | 2120 | .hash = tcp_v6_hash, |
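The tcp_ipv4.c, af_inet6.c and tcp_ipv6.c hunks are one change seen from three sides: .sendmsg is dropped from the TCP struct proto tables and the stream proto_ops entry now points at tcp_sendmsg() directly, so a send on a TCP socket no longer detours through the generic inet_sendmsg() trampoline (which, among other things, attempts an autobind that TCP never wants). The toy program below only illustrates the shape of the indirection being removed; the stub types and names are mine, not the kernel's.

    #include <stdio.h>

    struct sock;                    /* opaque stand-ins for the sketch */
    struct msghdr;

    static int tcp_sendmsg_stub(struct sock *sk, struct msghdr *msg)
    {
            (void)sk; (void)msg;
            puts("tcp_sendmsg");
            return 0;
    }

    /* before: ops table -> generic trampoline -> per-protocol table -> TCP */
    struct proto_stub { int (*sendmsg)(struct sock *, struct msghdr *); };
    static struct proto_stub tcp_prot_stub = { .sendmsg = tcp_sendmsg_stub };

    static int inet_sendmsg_stub(struct sock *sk, struct msghdr *msg)
    {
            return tcp_prot_stub.sendmsg(sk, msg);  /* the extra hop */
    }

    struct proto_ops_stub { int (*sendmsg)(struct sock *, struct msghdr *); };

    static struct proto_ops_stub stream_ops_before = { .sendmsg = inet_sendmsg_stub };
    static struct proto_ops_stub stream_ops_after  = { .sendmsg = tcp_sendmsg_stub };

    int main(void)
    {
            stream_ops_before.sendmsg(NULL, NULL);  /* two indirections */
            stream_ops_after.sendmsg(NULL, NULL);   /* one */
            return 0;
    }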
diff --git a/net/key/af_key.c b/net/key/af_key.c index 7b0a95abe934..5502df115a63 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -1206,6 +1206,9 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr, | |||
1206 | x->sel.prefixlen_s = addr->sadb_address_prefixlen; | 1206 | x->sel.prefixlen_s = addr->sadb_address_prefixlen; |
1207 | } | 1207 | } |
1208 | 1208 | ||
1209 | if (!x->sel.family) | ||
1210 | x->sel.family = x->props.family; | ||
1211 | |||
1209 | if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) { | 1212 | if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) { |
1210 | struct sadb_x_nat_t_type* n_type; | 1213 | struct sadb_x_nat_t_type* n_type; |
1211 | struct xfrm_encap_tmpl *natt; | 1214 | struct xfrm_encap_tmpl *natt; |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index eb6695dcd73b..3ac64e25f10c 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -477,15 +477,14 @@ static int exp_open(struct inode *inode, struct file *file) | |||
477 | struct ct_expect_iter_state *st; | 477 | struct ct_expect_iter_state *st; |
478 | int ret; | 478 | int ret; |
479 | 479 | ||
480 | st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); | 480 | st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); |
481 | if (st == NULL) | 481 | if (!st) |
482 | return -ENOMEM; | 482 | return -ENOMEM; |
483 | ret = seq_open(file, &exp_seq_ops); | 483 | ret = seq_open(file, &exp_seq_ops); |
484 | if (ret) | 484 | if (ret) |
485 | goto out_free; | 485 | goto out_free; |
486 | seq = file->private_data; | 486 | seq = file->private_data; |
487 | seq->private = st; | 487 | seq->private = st; |
488 | memset(st, 0, sizeof(struct ct_expect_iter_state)); | ||
489 | return ret; | 488 | return ret; |
490 | out_free: | 489 | out_free: |
491 | kfree(st); | 490 | kfree(st); |
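The conntrack hunk is a standard cleanup: kzalloc() is kmalloc() plus zeroing in one call, which shortens exp_open() and guarantees the iterator state is zeroed before it is published through seq->private. A sketch of the before/after pattern (kernel-style fragment, not a standalone module):

            struct ct_expect_iter_state *st;

            /* before: allocate, then remember to zero by hand */
            st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
            if (st == NULL)
                    return -ENOMEM;
            memset(st, 0, sizeof(struct ct_expect_iter_state));

            /* after: one call hands back already-zeroed memory */
            st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
            if (!st)
                    return -ENOMEM;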
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c index 89dcc485653b..85a96a3fddaa 100644 --- a/net/netlabel/netlabel_user.c +++ b/net/netlabel/netlabel_user.c | |||
@@ -113,8 +113,10 @@ struct audit_buffer *netlbl_audit_start_common(int type, | |||
113 | if (audit_info->secid != 0 && | 113 | if (audit_info->secid != 0 && |
114 | security_secid_to_secctx(audit_info->secid, | 114 | security_secid_to_secctx(audit_info->secid, |
115 | &secctx, | 115 | &secctx, |
116 | &secctx_len) == 0) | 116 | &secctx_len) == 0) { |
117 | audit_log_format(audit_buf, " subj=%s", secctx); | 117 | audit_log_format(audit_buf, " subj=%s", secctx); |
118 | security_release_secctx(secctx, secctx_len); | ||
119 | } | ||
118 | 120 | ||
119 | return audit_buf; | 121 | return audit_buf; |
120 | } | 122 | } |
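This hunk, and the xfrm_policy.c hunk further down, plug the same leak: security_secid_to_secctx() hands back an LSM-allocated string, so the caller must pair it with security_release_secctx() once the audit record has been formatted. The pattern, as a sketch (audit_buf and secid are assumed from the surrounding function):

            char *secctx;
            u32 secctx_len;

            if (secid != 0 &&
                security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
                    audit_log_format(audit_buf, " subj=%s", secctx);
                    /* the context string belongs to the LSM; hand it back */
                    security_release_secctx(secctx, secctx_len);
            }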
diff --git a/net/sctp/input.c b/net/sctp/input.c index d57ff7f3c576..47e56017f4ce 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -590,7 +590,7 @@ out_unlock: | |||
590 | * Return 0 - If further processing is needed. | 590 | * Return 0 - If further processing is needed. |
591 | * Return 1 - If the packet can be discarded right away. | 591 | * Return 1 - If the packet can be discarded right away. |
592 | */ | 592 | */ |
593 | int sctp_rcv_ootb(struct sk_buff *skb) | 593 | static int sctp_rcv_ootb(struct sk_buff *skb) |
594 | { | 594 | { |
595 | sctp_chunkhdr_t *ch; | 595 | sctp_chunkhdr_t *ch; |
596 | __u8 *ch_end; | 596 | __u8 *ch_end; |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 2c29394fd92e..f8aa23dda1c1 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -641,6 +641,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
641 | newsctp6sk = (struct sctp6_sock *)newsk; | 641 | newsctp6sk = (struct sctp6_sock *)newsk; |
642 | inet_sk(newsk)->pinet6 = &newsctp6sk->inet6; | 642 | inet_sk(newsk)->pinet6 = &newsctp6sk->inet6; |
643 | 643 | ||
644 | sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped; | ||
645 | |||
644 | newinet = inet_sk(newsk); | 646 | newinet = inet_sk(newsk); |
645 | newnp = inet6_sk(newsk); | 647 | newnp = inet6_sk(newsk); |
646 | 648 | ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 8d18f570c2e6..51c4d7fef1d2 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -65,8 +65,6 @@ | |||
65 | #include <net/sctp/sctp.h> | 65 | #include <net/sctp/sctp.h> |
66 | #include <net/sctp/sm.h> | 66 | #include <net/sctp/sm.h> |
67 | 67 | ||
68 | extern struct kmem_cache *sctp_chunk_cachep; | ||
69 | |||
70 | SCTP_STATIC | 68 | SCTP_STATIC |
71 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, | 69 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, |
72 | __u8 type, __u8 flags, int paylen); | 70 | __u8 type, __u8 flags, int paylen); |
@@ -115,15 +113,12 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, | |||
115 | const void *payload, size_t paylen) | 113 | const void *payload, size_t paylen) |
116 | { | 114 | { |
117 | sctp_errhdr_t err; | 115 | sctp_errhdr_t err; |
118 | int padlen; | ||
119 | __u16 len; | 116 | __u16 len; |
120 | 117 | ||
121 | /* Cause code constants are now defined in network order. */ | 118 | /* Cause code constants are now defined in network order. */ |
122 | err.cause = cause_code; | 119 | err.cause = cause_code; |
123 | len = sizeof(sctp_errhdr_t) + paylen; | 120 | len = sizeof(sctp_errhdr_t) + paylen; |
124 | padlen = len % 4; | ||
125 | err.length = htons(len); | 121 | err.length = htons(len); |
126 | len += padlen; | ||
127 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); | 122 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); |
128 | sctp_addto_chunk(chunk, paylen, payload); | 123 | sctp_addto_chunk(chunk, paylen, payload); |
129 | } | 124 | } |
@@ -1454,7 +1449,6 @@ no_hmac: | |||
1454 | do_gettimeofday(&tv); | 1449 | do_gettimeofday(&tv); |
1455 | 1450 | ||
1456 | if (!asoc && tv_lt(bear_cookie->expiration, tv)) { | 1451 | if (!asoc && tv_lt(bear_cookie->expiration, tv)) { |
1457 | __u16 len; | ||
1458 | /* | 1452 | /* |
1459 | * Section 3.3.10.3 Stale Cookie Error (3) | 1453 | * Section 3.3.10.3 Stale Cookie Error (3) |
1460 | * | 1454 | * |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index fd2dfdd7d7fd..71cad56dd73f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -97,6 +97,13 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | |||
97 | const struct sctp_association *asoc, | 97 | const struct sctp_association *asoc, |
98 | struct sctp_transport *transport); | 98 | struct sctp_transport *transport); |
99 | 99 | ||
100 | static sctp_disposition_t sctp_sf_abort_violation( | ||
101 | const struct sctp_association *asoc, | ||
102 | void *arg, | ||
103 | sctp_cmd_seq_t *commands, | ||
104 | const __u8 *payload, | ||
105 | const size_t paylen); | ||
106 | |||
100 | static sctp_disposition_t sctp_sf_violation_chunklen( | 107 | static sctp_disposition_t sctp_sf_violation_chunklen( |
101 | const struct sctp_endpoint *ep, | 108 | const struct sctp_endpoint *ep, |
102 | const struct sctp_association *asoc, | 109 | const struct sctp_association *asoc, |
@@ -104,6 +111,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
104 | void *arg, | 111 | void *arg, |
105 | sctp_cmd_seq_t *commands); | 112 | sctp_cmd_seq_t *commands); |
106 | 113 | ||
114 | static sctp_disposition_t sctp_sf_violation_ctsn( | ||
115 | const struct sctp_endpoint *ep, | ||
116 | const struct sctp_association *asoc, | ||
117 | const sctp_subtype_t type, | ||
118 | void *arg, | ||
119 | sctp_cmd_seq_t *commands); | ||
120 | |||
107 | /* Small helper function that checks if the chunk length | 121 | /* Small helper function that checks if the chunk length |
108 | * is of the appropriate length. The 'required_length' argument | 122 | * is of the appropriate length. The 'required_length' argument |
109 | * is set to be the size of a specific chunk we are testing. | 123 | * is set to be the size of a specific chunk we are testing. |
@@ -2880,6 +2894,13 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep, | |||
2880 | return SCTP_DISPOSITION_DISCARD; | 2894 | return SCTP_DISPOSITION_DISCARD; |
2881 | } | 2895 | } |
2882 | 2896 | ||
2897 | /* If the Cumulative TSN Ack is beyond the max TSN currently | ||
2898 | * sent, terminate the association and respond to the | ||
2899 | * sender with an ABORT. | ||
2900 | */ | ||
2901 | if (!TSN_lt(ctsn, asoc->next_tsn)) | ||
2902 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); | ||
2903 | |||
2883 | /* Return this SACK for further processing. */ | 2904 | /* Return this SACK for further processing. */ |
2884 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh)); | 2905 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh)); |
2885 | 2906 | ||
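The new SACK sanity check refuses a Cumulative TSN Ack that is not strictly below asoc->next_tsn, i.e. one that claims to acknowledge data the association never sent; such a SACK is now treated as a protocol violation and answered with an ABORT via the new sctp_sf_violation_ctsn() handler declared above. TSN_lt() is wrap-safe serial arithmetic on 32-bit TSNs; the self-contained helper below shows what the comparison amounts to (the helper is mine, the kernel uses its own TSN_lt() macro):

    #include <assert.h>
    #include <stdint.h>

    /* wrap-safe "a < b" for 32-bit TSNs, in the spirit of TSN_lt() */
    static inline int tsn_lt(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
            uint32_t next_tsn = 100;

            assert(tsn_lt(99, next_tsn));       /* acceptable ctsn        */
            assert(!tsn_lt(100, next_tsn));     /* ctsn == next_tsn: bad  */
            assert(tsn_lt(0xfffffff0u, 5));     /* survives wraparound    */
            return 0;
    }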
@@ -3691,40 +3712,21 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, | |||
3691 | return SCTP_DISPOSITION_VIOLATION; | 3712 | return SCTP_DISPOSITION_VIOLATION; |
3692 | } | 3713 | } |
3693 | 3714 | ||
3694 | |||
3695 | /* | 3715 | /* |
3696 | * Handle a protocol violation when the chunk length is invalid. | 3716 | * Common function to handle a protocol violation. |
3697 | * "Invalid" length is identified as smaller then the minimal length a | ||
3698 | * given chunk can be. For example, a SACK chunk has invalid length | ||
3699 | * if it's length is set to be smaller then the size of sctp_sack_chunk_t. | ||
3700 | * | ||
3701 | * We inform the other end by sending an ABORT with a Protocol Violation | ||
3702 | * error code. | ||
3703 | * | ||
3704 | * Section: Not specified | ||
3705 | * Verification Tag: Nothing to do | ||
3706 | * Inputs | ||
3707 | * (endpoint, asoc, chunk) | ||
3708 | * | ||
3709 | * Outputs | ||
3710 | * (reply_msg, msg_up, counters) | ||
3711 | * | ||
3712 | * Generate an ABORT chunk and terminate the association. | ||
3713 | */ | 3717 | */ |
3714 | static sctp_disposition_t sctp_sf_violation_chunklen( | 3718 | static sctp_disposition_t sctp_sf_abort_violation( |
3715 | const struct sctp_endpoint *ep, | ||
3716 | const struct sctp_association *asoc, | 3719 | const struct sctp_association *asoc, |
3717 | const sctp_subtype_t type, | ||
3718 | void *arg, | 3720 | void *arg, |
3719 | sctp_cmd_seq_t *commands) | 3721 | sctp_cmd_seq_t *commands, |
3722 | const __u8 *payload, | ||
3723 | const size_t paylen) | ||
3720 | { | 3724 | { |
3721 | struct sctp_chunk *chunk = arg; | 3725 | struct sctp_chunk *chunk = arg; |
3722 | struct sctp_chunk *abort = NULL; | 3726 | struct sctp_chunk *abort = NULL; |
3723 | char err_str[]="The following chunk had invalid length:"; | ||
3724 | 3727 | ||
3725 | /* Make the abort chunk. */ | 3728 | /* Make the abort chunk. */ |
3726 | abort = sctp_make_abort_violation(asoc, chunk, err_str, | 3729 | abort = sctp_make_abort_violation(asoc, chunk, payload, paylen); |
3727 | sizeof(err_str)); | ||
3728 | if (!abort) | 3730 | if (!abort) |
3729 | goto nomem; | 3731 | goto nomem; |
3730 | 3732 | ||
@@ -3756,6 +3758,57 @@ nomem: | |||
3756 | return SCTP_DISPOSITION_NOMEM; | 3758 | return SCTP_DISPOSITION_NOMEM; |
3757 | } | 3759 | } |
3758 | 3760 | ||
3761 | /* | ||
3762 | * Handle a protocol violation when the chunk length is invalid. | ||
3763 | * "Invalid" length is identified as smaller then the minimal length a | ||
3764 | * given chunk can be. For example, a SACK chunk has invalid length | ||
3765 | * if it's length is set to be smaller then the size of sctp_sack_chunk_t. | ||
3766 | * | ||
3767 | * We inform the other end by sending an ABORT with a Protocol Violation | ||
3768 | * error code. | ||
3769 | * | ||
3770 | * Section: Not specified | ||
3771 | * Verification Tag: Nothing to do | ||
3772 | * Inputs | ||
3773 | * (endpoint, asoc, chunk) | ||
3774 | * | ||
3775 | * Outputs | ||
3776 | * (reply_msg, msg_up, counters) | ||
3777 | * | ||
3778 | * Generate an ABORT chunk and terminate the association. | ||
3779 | */ | ||
3780 | static sctp_disposition_t sctp_sf_violation_chunklen( | ||
3781 | const struct sctp_endpoint *ep, | ||
3782 | const struct sctp_association *asoc, | ||
3783 | const sctp_subtype_t type, | ||
3784 | void *arg, | ||
3785 | sctp_cmd_seq_t *commands) | ||
3786 | { | ||
3787 | char err_str[]="The following chunk had invalid length:"; | ||
3788 | |||
3789 | return sctp_sf_abort_violation(asoc, arg, commands, err_str, | ||
3790 | sizeof(err_str)); | ||
3791 | } | ||
3792 | |||
3793 | /* Handle a protocol violation when the peer tries to advance the | ||
3794 | * cumulative tsn ack to a point beyond the max tsn currently sent. | ||
3795 | * | ||
3796 | * We inform the other end by sending an ABORT with a Protocol Violation | ||
3797 | * error code. | ||
3798 | */ | ||
3799 | static sctp_disposition_t sctp_sf_violation_ctsn( | ||
3800 | const struct sctp_endpoint *ep, | ||
3801 | const struct sctp_association *asoc, | ||
3802 | const sctp_subtype_t type, | ||
3803 | void *arg, | ||
3804 | sctp_cmd_seq_t *commands) | ||
3805 | { | ||
3806 | char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; | ||
3807 | |||
3808 | return sctp_sf_abort_violation(asoc, arg, commands, err_str, | ||
3809 | sizeof(err_str)); | ||
3810 | } | ||
3811 | |||
3759 | /*************************************************************************** | 3812 | /*************************************************************************** |
3760 | * These are the state functions for handling primitive (Section 10) events. | 3813 | * These are the state functions for handling primitive (Section 10) events. |
3761 | ***************************************************************************/ | 3814 | ***************************************************************************/ |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ee88f2ea5101..01c6364245b7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -107,8 +107,6 @@ static void sctp_sock_migrate(struct sock *, struct sock *, | |||
107 | struct sctp_association *, sctp_socket_type_t); | 107 | struct sctp_association *, sctp_socket_type_t); |
108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | 108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; |
109 | 109 | ||
110 | extern struct kmem_cache *sctp_bucket_cachep; | ||
111 | |||
112 | /* Get the sndbuf space available at the time on the association. */ | 110 | /* Get the sndbuf space available at the time on the association. */ |
113 | static inline int sctp_wspace(struct sctp_association *asoc) | 111 | static inline int sctp_wspace(struct sctp_association *asoc) |
114 | { | 112 | { |
@@ -433,7 +431,7 @@ out: | |||
433 | * | 431 | * |
434 | * Only sctp_setsockopt_bindx() is supposed to call this function. | 432 | * Only sctp_setsockopt_bindx() is supposed to call this function. |
435 | */ | 433 | */ |
436 | int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) | 434 | static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) |
437 | { | 435 | { |
438 | int cnt; | 436 | int cnt; |
439 | int retval = 0; | 437 | int retval = 0; |
@@ -602,7 +600,7 @@ out: | |||
602 | * | 600 | * |
603 | * Only sctp_setsockopt_bindx() is supposed to call this function. | 601 | * Only sctp_setsockopt_bindx() is supposed to call this function. |
604 | */ | 602 | */ |
605 | int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | 603 | static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) |
606 | { | 604 | { |
607 | struct sctp_sock *sp = sctp_sk(sk); | 605 | struct sctp_sock *sp = sctp_sk(sk); |
608 | struct sctp_endpoint *ep = sp->ep; | 606 | struct sctp_endpoint *ep = sp->ep; |
@@ -977,7 +975,7 @@ static int __sctp_connect(struct sock* sk, | |||
977 | int err = 0; | 975 | int err = 0; |
978 | int addrcnt = 0; | 976 | int addrcnt = 0; |
979 | int walk_size = 0; | 977 | int walk_size = 0; |
980 | union sctp_addr *sa_addr; | 978 | union sctp_addr *sa_addr = NULL; |
981 | void *addr_buf; | 979 | void *addr_buf; |
982 | unsigned short port; | 980 | unsigned short port; |
983 | unsigned int f_flags = 0; | 981 | unsigned int f_flags = 0; |
@@ -1011,7 +1009,10 @@ static int __sctp_connect(struct sock* sk, | |||
1011 | goto out_free; | 1009 | goto out_free; |
1012 | } | 1010 | } |
1013 | 1011 | ||
1014 | err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len); | 1012 | /* Save current address so we can work with it */ |
1013 | memcpy(&to, sa_addr, af->sockaddr_len); | ||
1014 | |||
1015 | err = sctp_verify_addr(sk, &to, af->sockaddr_len); | ||
1015 | if (err) | 1016 | if (err) |
1016 | goto out_free; | 1017 | goto out_free; |
1017 | 1018 | ||
@@ -1021,12 +1022,11 @@ static int __sctp_connect(struct sock* sk, | |||
1021 | if (asoc && asoc->peer.port && asoc->peer.port != port) | 1022 | if (asoc && asoc->peer.port && asoc->peer.port != port) |
1022 | goto out_free; | 1023 | goto out_free; |
1023 | 1024 | ||
1024 | memcpy(&to, sa_addr, af->sockaddr_len); | ||
1025 | 1025 | ||
1026 | /* Check if there already is a matching association on the | 1026 | /* Check if there already is a matching association on the |
1027 | * endpoint (other than the one created here). | 1027 | * endpoint (other than the one created here). |
1028 | */ | 1028 | */ |
1029 | asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport); | 1029 | asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); |
1030 | if (asoc2 && asoc2 != asoc) { | 1030 | if (asoc2 && asoc2 != asoc) { |
1031 | if (asoc2->state >= SCTP_STATE_ESTABLISHED) | 1031 | if (asoc2->state >= SCTP_STATE_ESTABLISHED) |
1032 | err = -EISCONN; | 1032 | err = -EISCONN; |
@@ -1039,7 +1039,7 @@ static int __sctp_connect(struct sock* sk, | |||
1039 | * make sure that there is no peeled-off association matching | 1039 | * make sure that there is no peeled-off association matching |
1040 | * the peer address even on another socket. | 1040 | * the peer address even on another socket. |
1041 | */ | 1041 | */ |
1042 | if (sctp_endpoint_is_peeled_off(ep, sa_addr)) { | 1042 | if (sctp_endpoint_is_peeled_off(ep, &to)) { |
1043 | err = -EADDRNOTAVAIL; | 1043 | err = -EADDRNOTAVAIL; |
1044 | goto out_free; | 1044 | goto out_free; |
1045 | } | 1045 | } |
@@ -1070,7 +1070,7 @@ static int __sctp_connect(struct sock* sk, | |||
1070 | } | 1070 | } |
1071 | } | 1071 | } |
1072 | 1072 | ||
1073 | scope = sctp_scope(sa_addr); | 1073 | scope = sctp_scope(&to); |
1074 | asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); | 1074 | asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); |
1075 | if (!asoc) { | 1075 | if (!asoc) { |
1076 | err = -ENOMEM; | 1076 | err = -ENOMEM; |
@@ -1079,7 +1079,7 @@ static int __sctp_connect(struct sock* sk, | |||
1079 | } | 1079 | } |
1080 | 1080 | ||
1081 | /* Prime the peer's transport structures. */ | 1081 | /* Prime the peer's transport structures. */ |
1082 | transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL, | 1082 | transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, |
1083 | SCTP_UNKNOWN); | 1083 | SCTP_UNKNOWN); |
1084 | if (!transport) { | 1084 | if (!transport) { |
1085 | err = -ENOMEM; | 1085 | err = -ENOMEM; |
@@ -1103,8 +1103,8 @@ static int __sctp_connect(struct sock* sk, | |||
1103 | 1103 | ||
1104 | /* Initialize sk's dport and daddr for getpeername() */ | 1104 | /* Initialize sk's dport and daddr for getpeername() */ |
1105 | inet_sk(sk)->dport = htons(asoc->peer.port); | 1105 | inet_sk(sk)->dport = htons(asoc->peer.port); |
1106 | af = sctp_get_af_specific(to.sa.sa_family); | 1106 | af = sctp_get_af_specific(sa_addr->sa.sa_family); |
1107 | af->to_sk_daddr(&to, sk); | 1107 | af->to_sk_daddr(sa_addr, sk); |
1108 | sk->sk_err = 0; | 1108 | sk->sk_err = 0; |
1109 | 1109 | ||
1110 | /* in-kernel sockets don't generally have a file allocated to them | 1110 | /* in-kernel sockets don't generally have a file allocated to them |
@@ -1531,7 +1531,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
1531 | goto out_unlock; | 1531 | goto out_unlock; |
1532 | } | 1532 | } |
1533 | if (sinfo_flags & SCTP_ABORT) { | 1533 | if (sinfo_flags & SCTP_ABORT) { |
1534 | struct sctp_chunk *chunk; | ||
1535 | 1534 | ||
1536 | chunk = sctp_make_abort_user(asoc, msg, msg_len); | 1535 | chunk = sctp_make_abort_user(asoc, msg, msg_len); |
1537 | if (!chunk) { | 1536 | if (!chunk) { |
@@ -4353,7 +4352,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4353 | space_left, &bytes_copied); | 4352 | space_left, &bytes_copied); |
4354 | if (cnt < 0) { | 4353 | if (cnt < 0) { |
4355 | err = cnt; | 4354 | err = cnt; |
4356 | goto error; | 4355 | goto error_lock; |
4357 | } | 4356 | } |
4358 | goto copy_getaddrs; | 4357 | goto copy_getaddrs; |
4359 | } | 4358 | } |
@@ -4367,7 +4366,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4367 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4366 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
4368 | if (space_left < addrlen) { | 4367 | if (space_left < addrlen) { |
4369 | err = -ENOMEM; /*fixme: right error?*/ | 4368 | err = -ENOMEM; /*fixme: right error?*/ |
4370 | goto error; | 4369 | goto error_lock; |
4371 | } | 4370 | } |
4372 | memcpy(buf, &temp, addrlen); | 4371 | memcpy(buf, &temp, addrlen); |
4373 | buf += addrlen; | 4372 | buf += addrlen; |
@@ -4381,15 +4380,21 @@ copy_getaddrs: | |||
4381 | 4380 | ||
4382 | if (copy_to_user(to, addrs, bytes_copied)) { | 4381 | if (copy_to_user(to, addrs, bytes_copied)) { |
4383 | err = -EFAULT; | 4382 | err = -EFAULT; |
4384 | goto error; | 4383 | goto out; |
4385 | } | 4384 | } |
4386 | if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { | 4385 | if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { |
4387 | err = -EFAULT; | 4386 | err = -EFAULT; |
4388 | goto error; | 4387 | goto out; |
4389 | } | 4388 | } |
4390 | if (put_user(bytes_copied, optlen)) | 4389 | if (put_user(bytes_copied, optlen)) |
4391 | err = -EFAULT; | 4390 | err = -EFAULT; |
4392 | error: | 4391 | |
4392 | goto out; | ||
4393 | |||
4394 | error_lock: | ||
4395 | sctp_read_unlock(addr_lock); | ||
4396 | |||
4397 | out: | ||
4393 | kfree(addrs); | 4398 | kfree(addrs); |
4394 | return err; | 4399 | return err; |
4395 | } | 4400 | } |
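The relabelled gotos in sctp_getsockopt_local_addrs() sort the error paths by what still needs undoing: failures that occur while the address-list read lock is held now jump to error_lock (unlock, then free), while failures after the unlock, in the copy_to_user()/put_user() stage, jump straight to out (free only). A condensed sketch of the resulting shape, with everything but the control flow elided (label, lock, and buffer names follow the patch):

            sctp_read_lock(addr_lock);

            /* ... gathering addresses; any failure here still holds the lock */
            if (cnt < 0) {
                    err = cnt;
                    goto error_lock;
            }

            sctp_read_unlock(addr_lock);

            /* ... copying out to userspace; the lock is already gone */
            if (copy_to_user(to, addrs, bytes_copied)) {
                    err = -EFAULT;
                    goto out;
            }

            goto out;

    error_lock:
            sctp_read_unlock(addr_lock);
    out:
            kfree(addrs);
            return err;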
@@ -5964,7 +5969,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo) | |||
5964 | return err; | 5969 | return err; |
5965 | } | 5970 | } |
5966 | 5971 | ||
5967 | void sctp_wait_for_close(struct sock *sk, long timeout) | 5972 | static void sctp_wait_for_close(struct sock *sk, long timeout) |
5968 | { | 5973 | { |
5969 | DEFINE_WAIT(wait); | 5974 | DEFINE_WAIT(wait); |
5970 | 5975 | ||
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index d3192a1babcc..1ff0daade304 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -161,7 +161,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, | |||
161 | __u16 *start, __u16 *end) | 161 | __u16 *start, __u16 *end) |
162 | { | 162 | { |
163 | int started, ended; | 163 | int started, ended; |
164 | __u16 _start, _end, offset; | 164 | __u16 start_, end_, offset; |
165 | 165 | ||
166 | /* We haven't found a gap yet. */ | 166 | /* We haven't found a gap yet. */ |
167 | started = ended = 0; | 167 | started = ended = 0; |
@@ -175,7 +175,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, | |||
175 | 175 | ||
176 | offset = iter->start - map->base_tsn; | 176 | offset = iter->start - map->base_tsn; |
177 | sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0, | 177 | sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0, |
178 | &started, &_start, &ended, &_end); | 178 | &started, &start_, &ended, &end_); |
179 | } | 179 | } |
180 | 180 | ||
181 | /* Do we need to check the overflow map? */ | 181 | /* Do we need to check the overflow map? */ |
@@ -193,8 +193,8 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, | |||
193 | offset, | 193 | offset, |
194 | map->len, | 194 | map->len, |
195 | map->len, | 195 | map->len, |
196 | &started, &_start, | 196 | &started, &start_, |
197 | &ended, &_end); | 197 | &ended, &end_); |
198 | } | 198 | } |
199 | 199 | ||
200 | /* The Gap Ack Block happens to end at the end of the | 200 | /* The Gap Ack Block happens to end at the end of the |
@@ -202,7 +202,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, | |||
202 | */ | 202 | */ |
203 | if (started && !ended) { | 203 | if (started && !ended) { |
204 | ended++; | 204 | ended++; |
205 | _end = map->len + map->len - 1; | 205 | end_ = map->len + map->len - 1; |
206 | } | 206 | } |
207 | 207 | ||
208 | /* If we found a Gap Ack Block, return the start and end and | 208 | /* If we found a Gap Ack Block, return the start and end and |
@@ -215,8 +215,8 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, | |||
215 | int gap = map->cumulative_tsn_ack_point - | 215 | int gap = map->cumulative_tsn_ack_point - |
216 | map->base_tsn; | 216 | map->base_tsn; |
217 | 217 | ||
218 | *start = _start - gap; | 218 | *start = start_ - gap; |
219 | *end = _end - gap; | 219 | *end = end_ - gap; |
220 | 220 | ||
221 | /* Move the iterator forward. */ | 221 | /* Move the iterator forward. */ |
222 | iter->start = map->cumulative_tsn_ack_point + *end + 1; | 222 | iter->start = map->cumulative_tsn_ack_point + *end + 1; |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 1d674e0848fa..1b17fecee747 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -2383,10 +2383,10 @@ void tipc_link_changeover(struct link *l_ptr) | |||
2383 | struct tipc_msg *msg = buf_msg(crs); | 2383 | struct tipc_msg *msg = buf_msg(crs); |
2384 | 2384 | ||
2385 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { | 2385 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { |
2386 | u32 msgcount = msg_msgcnt(msg); | ||
2387 | struct tipc_msg *m = msg_get_wrapped(msg); | 2386 | struct tipc_msg *m = msg_get_wrapped(msg); |
2388 | unchar* pos = (unchar*)m; | 2387 | unchar* pos = (unchar*)m; |
2389 | 2388 | ||
2389 | msgcount = msg_msgcnt(msg); | ||
2390 | while (msgcount--) { | 2390 | while (msgcount--) { |
2391 | msg_set_seqno(m,msg_seqno(msg)); | 2391 | msg_set_seqno(m,msg_seqno(msg)); |
2392 | tipc_link_tunnel(l_ptr, &tunnel_hdr, m, | 2392 | tipc_link_tunnel(l_ptr, &tunnel_hdr, m, |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index d8473eefcd23..ac7dfdda7973 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -501,7 +501,7 @@ end_node: | |||
501 | * sequence overlapping with the requested sequence | 501 | * sequence overlapping with the requested sequence |
502 | */ | 502 | */ |
503 | 503 | ||
504 | void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s) | 504 | static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s) |
505 | { | 505 | { |
506 | struct sub_seq *sseq = nseq->sseqs; | 506 | struct sub_seq *sseq = nseq->sseqs; |
507 | 507 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index e2e452a62ba1..598f4d3a0098 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -241,8 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr) | |||
241 | char addr_string[16]; | 241 | char addr_string[16]; |
242 | 242 | ||
243 | if (n_ptr->link_cnt >= 2) { | 243 | if (n_ptr->link_cnt >= 2) { |
244 | char addr_string[16]; | ||
245 | |||
246 | err("Attempt to create third link to %s\n", | 244 | err("Attempt to create third link to %s\n", |
247 | addr_string_fill(addr_string, n_ptr->addr)); | 245 | addr_string_fill(addr_string, n_ptr->addr)); |
248 | return NULL; | 246 | return NULL; |
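The tipc_node_attach_link() hunk deletes an inner addr_string that shadowed the one declared at the top of the function; the inner copy was redundant and is exactly the kind of duplicate declaration that -Wshadow exists to catch. A small, self-contained illustration of shadowing (generic example, nothing TIPC-specific):

    #include <stdio.h>

    int main(void)
    {
            char addr_string[16] = "outer";

            {
                    /* this declaration shadows the outer array; whatever is
                     * written here disappears when the block ends */
                    char addr_string[16] = "inner";
                    (void)addr_string;
            }

            printf("%s\n", addr_string);        /* prints "outer" */
            return 0;
    }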
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 849cc06bd914..9ab31a3ce3ad 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/capability.h> | 46 | #include <linux/capability.h> |
47 | #include <linux/errno.h> /* return codes */ | 47 | #include <linux/errno.h> /* return codes */ |
48 | #include <linux/kernel.h> | 48 | #include <linux/kernel.h> |
49 | #include <linux/init.h> | ||
50 | #include <linux/module.h> /* support for loadable modules */ | 49 | #include <linux/module.h> /* support for loadable modules */ |
51 | #include <linux/slab.h> /* kmalloc(), kfree() */ | 50 | #include <linux/slab.h> /* kmalloc(), kfree() */ |
52 | #include <linux/mm.h> | 51 | #include <linux/mm.h> |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 95a47304336d..e5a3be03aa0d 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2195,9 +2195,10 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result, | |||
2195 | } | 2195 | } |
2196 | 2196 | ||
2197 | if (sid != 0 && | 2197 | if (sid != 0 && |
2198 | security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) | 2198 | security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) { |
2199 | audit_log_format(audit_buf, " subj=%s", secctx); | 2199 | audit_log_format(audit_buf, " subj=%s", secctx); |
2200 | else | 2200 | security_release_secctx(secctx, secctx_len); |
2201 | } else | ||
2201 | audit_log_task_context(audit_buf); | 2202 | audit_log_task_context(audit_buf); |
2202 | 2203 | ||
2203 | if (xp) { | 2204 | if (xp) { |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 0fac6829c63a..6237933f7d82 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -4658,8 +4658,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) | |||
4658 | 4658 | ||
4659 | static void selinux_release_secctx(char *secdata, u32 seclen) | 4659 | static void selinux_release_secctx(char *secdata, u32 seclen) |
4660 | { | 4660 | { |
4661 | if (secdata) | 4661 | kfree(secdata); |
4662 | kfree(secdata); | ||
4663 | } | 4662 | } |
4664 | 4663 | ||
4665 | #ifdef CONFIG_KEYS | 4664 | #ifdef CONFIG_KEYS |
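The selinux_release_secctx() change leans on the fact that kfree(NULL) is a documented no-op, so the NULL guard added nothing. The same simplification applies to any unconditional free on a cleanup path; a kernel-style sketch:

    static void release_secdata(char *secdata)
    {
            kfree(secdata);         /* safe even when secdata is NULL */
    }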
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index 051b14c88e2d..d243ddc723a5 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c | |||
@@ -162,9 +162,13 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid) | |||
162 | 162 | ||
163 | netlbl_secattr_init(&secattr); | 163 | netlbl_secattr_init(&secattr); |
164 | rc = netlbl_skbuff_getattr(skb, &secattr); | 164 | rc = netlbl_skbuff_getattr(skb, &secattr); |
165 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) | 165 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) { |
166 | rc = security_netlbl_secattr_to_sid(&secattr, base_sid, sid); | 166 | rc = security_netlbl_secattr_to_sid(&secattr, base_sid, sid); |
167 | else | 167 | if (rc == 0 && |
168 | (secattr.flags & NETLBL_SECATTR_CACHEABLE) && | ||
169 | (secattr.flags & NETLBL_SECATTR_CACHE)) | ||
170 | netlbl_cache_add(skb, &secattr); | ||
171 | } else | ||
168 | *sid = SECSID_NULL; | 172 | *sid = SECSID_NULL; |
169 | netlbl_secattr_destroy(&secattr); | 173 | netlbl_secattr_destroy(&secattr); |
170 | 174 | ||
@@ -307,11 +311,15 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, | |||
307 | 311 | ||
308 | netlbl_secattr_init(&secattr); | 312 | netlbl_secattr_init(&secattr); |
309 | rc = netlbl_skbuff_getattr(skb, &secattr); | 313 | rc = netlbl_skbuff_getattr(skb, &secattr); |
310 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) | 314 | if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE) { |
311 | rc = security_netlbl_secattr_to_sid(&secattr, | 315 | rc = security_netlbl_secattr_to_sid(&secattr, |
312 | SECINITSID_NETMSG, | 316 | SECINITSID_NETMSG, |
313 | &nlbl_sid); | 317 | &nlbl_sid); |
314 | else | 318 | if (rc == 0 && |
319 | (secattr.flags & NETLBL_SECATTR_CACHEABLE) && | ||
320 | (secattr.flags & NETLBL_SECATTR_CACHE)) | ||
321 | netlbl_cache_add(skb, &secattr); | ||
322 | } else | ||
315 | nlbl_sid = SECINITSID_UNLABELED; | 323 | nlbl_sid = SECINITSID_UNLABELED; |
316 | netlbl_secattr_destroy(&secattr); | 324 | netlbl_secattr_destroy(&secattr); |
317 | if (rc != 0) | 325 | if (rc != 0) |
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c index dca0344cc1bc..f2950cab74a6 100644 --- a/sound/sparc/cs4231.c +++ b/sound/sparc/cs4231.c | |||
@@ -74,7 +74,6 @@ struct cs4231_dma_control { | |||
74 | void (*enable)(struct cs4231_dma_control *dma_cont, int on); | 74 | void (*enable)(struct cs4231_dma_control *dma_cont, int on); |
75 | int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); | 75 | int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); |
76 | unsigned int (*address)(struct cs4231_dma_control *dma_cont); | 76 | unsigned int (*address)(struct cs4231_dma_control *dma_cont); |
77 | void (*reset)(struct snd_cs4231 *chip); | ||
78 | void (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); | 77 | void (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); |
79 | #ifdef EBUS_SUPPORT | 78 | #ifdef EBUS_SUPPORT |
80 | struct ebus_dma_info ebus_info; | 79 | struct ebus_dma_info ebus_info; |
@@ -1214,10 +1213,6 @@ static int __init snd_cs4231_probe(struct snd_cs4231 *chip) | |||
1214 | 1213 | ||
1215 | spin_lock_irqsave(&chip->lock, flags); | 1214 | spin_lock_irqsave(&chip->lock, flags); |
1216 | 1215 | ||
1217 | |||
1218 | /* Reset DMA engine (sbus only). */ | ||
1219 | chip->p_dma.reset(chip); | ||
1220 | |||
1221 | __cs4231_readb(chip, CS4231P(chip, STATUS)); /* clear any pending IRQs */ | 1216 | __cs4231_readb(chip, CS4231P(chip, STATUS)); /* clear any pending IRQs */ |
1222 | __cs4231_writeb(chip, 0, CS4231P(chip, STATUS)); | 1217 | __cs4231_writeb(chip, 0, CS4231P(chip, STATUS)); |
1223 | mb(); | 1218 | mb(); |
@@ -1861,14 +1856,13 @@ static void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on) | |||
1861 | if (!on) { | 1856 | if (!on) { |
1862 | sbus_writel(0, base->regs + base->dir + APCNC); | 1857 | sbus_writel(0, base->regs + base->dir + APCNC); |
1863 | sbus_writel(0, base->regs + base->dir + APCNVA); | 1858 | sbus_writel(0, base->regs + base->dir + APCNVA); |
1864 | sbus_writel(0, base->regs + base->dir + APCC); | 1859 | if ( base->dir == APC_PLAY ) { |
1865 | sbus_writel(0, base->regs + base->dir + APCVA); | 1860 | sbus_writel(0, base->regs + base->dir + APCC); |
1861 | sbus_writel(0, base->regs + base->dir + APCVA); | ||
1862 | } | ||
1866 | 1863 | ||
1867 | /* ACK any APC interrupts. */ | 1864 | udelay(1200); |
1868 | csr = sbus_readl(base->regs + APCCSR); | ||
1869 | sbus_writel(csr, base->regs + APCCSR); | ||
1870 | } | 1865 | } |
1871 | udelay(1000); | ||
1872 | csr = sbus_readl(base->regs + APCCSR); | 1866 | csr = sbus_readl(base->regs + APCCSR); |
1873 | shift = 0; | 1867 | shift = 0; |
1874 | if ( base->dir == APC_PLAY ) | 1868 | if ( base->dir == APC_PLAY ) |
@@ -1894,23 +1888,6 @@ static unsigned int sbus_dma_addr(struct cs4231_dma_control *dma_cont) | |||
1894 | return sbus_readl(base->regs + base->dir + APCVA); | 1888 | return sbus_readl(base->regs + base->dir + APCVA); |
1895 | } | 1889 | } |
1896 | 1890 | ||
1897 | static void sbus_dma_reset(struct snd_cs4231 *chip) | ||
1898 | { | ||
1899 | sbus_writel(APC_CHIP_RESET, chip->port + APCCSR); | ||
1900 | sbus_writel(0x00, chip->port + APCCSR); | ||
1901 | sbus_writel(sbus_readl(chip->port + APCCSR) | APC_CDC_RESET, | ||
1902 | chip->port + APCCSR); | ||
1903 | |||
1904 | udelay(20); | ||
1905 | |||
1906 | sbus_writel(sbus_readl(chip->port + APCCSR) & ~APC_CDC_RESET, | ||
1907 | chip->port + APCCSR); | ||
1908 | sbus_writel(sbus_readl(chip->port + APCCSR) | (APC_XINT_ENA | | ||
1909 | APC_XINT_PENA | | ||
1910 | APC_XINT_CENA), | ||
1911 | chip->port + APCCSR); | ||
1912 | } | ||
1913 | |||
1914 | static void sbus_dma_preallocate(struct snd_cs4231 *chip, struct snd_pcm *pcm) | 1891 | static void sbus_dma_preallocate(struct snd_cs4231 *chip, struct snd_pcm *pcm) |
1915 | { | 1892 | { |
1916 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_SBUS, | 1893 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_SBUS, |
@@ -1986,14 +1963,12 @@ static int __init snd_cs4231_sbus_create(struct snd_card *card, | |||
1986 | chip->p_dma.enable = sbus_dma_enable; | 1963 | chip->p_dma.enable = sbus_dma_enable; |
1987 | chip->p_dma.request = sbus_dma_request; | 1964 | chip->p_dma.request = sbus_dma_request; |
1988 | chip->p_dma.address = sbus_dma_addr; | 1965 | chip->p_dma.address = sbus_dma_addr; |
1989 | chip->p_dma.reset = sbus_dma_reset; | ||
1990 | chip->p_dma.preallocate = sbus_dma_preallocate; | 1966 | chip->p_dma.preallocate = sbus_dma_preallocate; |
1991 | 1967 | ||
1992 | chip->c_dma.prepare = sbus_dma_prepare; | 1968 | chip->c_dma.prepare = sbus_dma_prepare; |
1993 | chip->c_dma.enable = sbus_dma_enable; | 1969 | chip->c_dma.enable = sbus_dma_enable; |
1994 | chip->c_dma.request = sbus_dma_request; | 1970 | chip->c_dma.request = sbus_dma_request; |
1995 | chip->c_dma.address = sbus_dma_addr; | 1971 | chip->c_dma.address = sbus_dma_addr; |
1996 | chip->c_dma.reset = sbus_dma_reset; | ||
1997 | chip->c_dma.preallocate = sbus_dma_preallocate; | 1972 | chip->c_dma.preallocate = sbus_dma_preallocate; |
1998 | 1973 | ||
1999 | if (request_irq(sdev->irqs[0], snd_cs4231_sbus_interrupt, | 1974 | if (request_irq(sdev->irqs[0], snd_cs4231_sbus_interrupt, |
@@ -2087,11 +2062,6 @@ static unsigned int _ebus_dma_addr(struct cs4231_dma_control *dma_cont) | |||
2087 | return ebus_dma_addr(&dma_cont->ebus_info); | 2062 | return ebus_dma_addr(&dma_cont->ebus_info); |
2088 | } | 2063 | } |
2089 | 2064 | ||
2090 | static void _ebus_dma_reset(struct snd_cs4231 *chip) | ||
2091 | { | ||
2092 | return; | ||
2093 | } | ||
2094 | |||
2095 | static void _ebus_dma_preallocate(struct snd_cs4231 *chip, struct snd_pcm *pcm) | 2065 | static void _ebus_dma_preallocate(struct snd_cs4231 *chip, struct snd_pcm *pcm) |
2096 | { | 2066 | { |
2097 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, | 2067 | snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, |
@@ -2171,14 +2141,12 @@ static int __init snd_cs4231_ebus_create(struct snd_card *card, | |||
2171 | chip->p_dma.enable = _ebus_dma_enable; | 2141 | chip->p_dma.enable = _ebus_dma_enable; |
2172 | chip->p_dma.request = _ebus_dma_request; | 2142 | chip->p_dma.request = _ebus_dma_request; |
2173 | chip->p_dma.address = _ebus_dma_addr; | 2143 | chip->p_dma.address = _ebus_dma_addr; |
2174 | chip->p_dma.reset = _ebus_dma_reset; | ||
2175 | chip->p_dma.preallocate = _ebus_dma_preallocate; | 2144 | chip->p_dma.preallocate = _ebus_dma_preallocate; |
2176 | 2145 | ||
2177 | chip->c_dma.prepare = _ebus_dma_prepare; | 2146 | chip->c_dma.prepare = _ebus_dma_prepare; |
2178 | chip->c_dma.enable = _ebus_dma_enable; | 2147 | chip->c_dma.enable = _ebus_dma_enable; |
2179 | chip->c_dma.request = _ebus_dma_request; | 2148 | chip->c_dma.request = _ebus_dma_request; |
2180 | chip->c_dma.address = _ebus_dma_addr; | 2149 | chip->c_dma.address = _ebus_dma_addr; |
2181 | chip->c_dma.reset = _ebus_dma_reset; | ||
2182 | chip->c_dma.preallocate = _ebus_dma_preallocate; | 2150 | chip->c_dma.preallocate = _ebus_dma_preallocate; |
2183 | 2151 | ||
2184 | chip->port = ioremap(edev->resource[0].start, 0x10); | 2152 | chip->port = ioremap(edev->resource[0].start, 0x10); |