-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/mips/Kconfig | 4
-rw-r--r--  arch/mips/au1000/common/dbdma.c | 2
-rw-r--r--  arch/mips/au1000/common/irq.c | 16
-rw-r--r--  arch/mips/au1000/pb1200/irqmap.c | 2
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 44
-rw-r--r--  arch/mips/pci/pci-bcm1480.c | 5
-rw-r--r--  arch/s390/kernel/entry.S | 2
-rw-r--r--  arch/s390/kernel/setup.c | 4
-rw-r--r--  arch/sparc/kernel/devices.c | 4
-rw-r--r--  arch/sparc/kernel/pcic.c | 8
-rw-r--r--  arch/sparc64/defconfig | 9
-rw-r--r--  arch/sparc64/kernel/isa.c | 2
-rw-r--r--  arch/sparc64/kernel/ldc.c | 15
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 4
-rw-r--r--  arch/sparc64/kernel/smp.c | 5
-rw-r--r--  arch/um/Makefile-i386 | 5
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r--  arch/x86/kernel/nmi_32.c | 3
-rw-r--r--  arch/x86/kernel/nmi_64.c | 3
-rw-r--r--  arch/x86/kernel/topology.c | 5
-rw-r--r--  crypto/fcrypt.c | 88
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/pata_amd.c | 5
-rw-r--r--  drivers/ata/pata_via.c | 4
-rw-r--r--  drivers/ata/sata_mv.c | 9
-rw-r--r--  drivers/ata/sata_nv.c | 32
-rw-r--r--  drivers/char/cs5535_gpio.c | 5
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 13
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 10
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/cxgb2.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/pm3393.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/sge.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/sge.h | 0
-rw-r--r--  drivers/net/fec_mpc52xx.c | 4
-rw-r--r--  drivers/net/gianfar.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/pasemi_mac.c | 4
-rw-r--r--  drivers/net/phy/mdio_bus.c | 9
-rw-r--r--  drivers/net/phy/phy_device.c | 12
-rw-r--r--  drivers/net/sky2.c | 6
-rw-r--r--  drivers/net/smc911x.c | 2
-rw-r--r--  drivers/rtc/interface.c | 4
-rw-r--r--  drivers/rtc/rtc-dev.c | 6
-rw-r--r--  drivers/rtc/rtc-max6902.c | 12
-rw-r--r--  drivers/s390/block/dcssblk.c | 4
-rw-r--r--  drivers/s390/cio/css.c | 1
-rw-r--r--  drivers/s390/cio/device_id.c | 37
-rw-r--r--  drivers/s390/net/ctcmain.c | 1
-rw-r--r--  drivers/spi/at25.c | 7
-rw-r--r--  drivers/spi/spi.c | 19
-rw-r--r--  drivers/spi/spi_bfin5xx.c | 866
-rw-r--r--  fs/aio.c | 7
-rw-r--r--  fs/bfs/inode.c | 3
-rw-r--r--  fs/cifs/cifsacl.c | 33
-rw-r--r--  fs/jbd/checkpoint.c | 12
-rw-r--r--  fs/jbd/commit.c | 8
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 20
-rw-r--r--  fs/proc/generic.c | 9
-rw-r--r--  fs/proc/inode.c | 9
-rw-r--r--  fs/proc/root.c | 1
-rw-r--r--  fs/reiserfs/procfs.c | 6
-rw-r--r--  fs/ufs/dir.c | 2
-rw-r--r--  fs/ufs/super.c | 4
-rw-r--r--  include/asm-blackfin/bfin5xx_spi.h | 3
-rw-r--r--  include/asm-blackfin/mach-bf533/portmux.h | 2
-rw-r--r--  include/asm-blackfin/mach-bf548/defBF54x_base.h | 17
-rw-r--r--  include/asm-mips/mach-au1x00/au1000.h | 21
-rw-r--r--  include/linux/inet_lro.h | 3
-rw-r--r--  include/linux/jbd.h | 2
-rw-r--r--  include/linux/mm.h | 16
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/proc_fs.h | 1
-rw-r--r--  include/linux/thread_info.h | 17
-rw-r--r--  kernel/Kconfig.instrumentation | 8
-rw-r--r--  kernel/fork.c | 21
-rw-r--r--  kernel/futex.c | 27
-rw-r--r--  kernel/lockdep.c | 29
-rw-r--r--  kernel/sched.c | 146
-rw-r--r--  kernel/sched_fair.c | 7
-rw-r--r--  kernel/sysctl.c | 4
-rw-r--r--  kernel/sysctl_check.c | 2
-rw-r--r--  mm/backing-dev.c | 4
-rw-r--r--  mm/filemap_xip.c | 2
-rw-r--r--  mm/mmap.c | 15
-rw-r--r--  mm/nommu.c | 3
-rw-r--r--  mm/slab.c | 1
-rw-r--r--  mm/slob.c | 1
-rw-r--r--  mm/slub.c | 6
-rw-r--r--  net/ipv4/inet_lro.c | 3
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 7
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 7
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c | 27
-rw-r--r--  net/ipv4/tcp_input.c | 17
-rw-r--r--  net/ipv4/tcp_output.c | 3
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r--  net/rose/rose_dev.c | 2
-rw-r--r--  security/dummy.c | 2
-rw-r--r--  security/selinux/selinuxfs.c | 65
100 files changed, 1129 insertions, 805 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 98f4d897a9f9..f3d7256bc74e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2597,13 +2597,6 @@ L: https://tango.0pointer.de/mailman/listinfo/s270-linux
 W:	http://0pointer.de/lennart/tchibo.html
 S:	Maintained
 
-MTRR AND SIMILAR SUPPORT [i386]
-P:	Richard Gooch
-M:	rgooch@atnf.csiro.au
-L:	linux-kernel@vger.kernel.org
-W:	http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
-S:	Maintained
-
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
 P:	Pierre Ossman
 M:	drzeus-mmc@drzeus.cx
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 455bd1f560aa..c6fc405a6c8e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -714,6 +714,10 @@ config ARCH_HAS_ILOG2_U64
 	bool
 	default n
 
+config ARCH_SUPPORTS_OPROFILE
+	bool
+	default y if !MIPS_MT_SMTC
+
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/mips/au1000/common/dbdma.c b/arch/mips/au1000/common/dbdma.c
index 9d6ad43fded6..edf91f41a786 100644
--- a/arch/mips/au1000/common/dbdma.c
+++ b/arch/mips/au1000/common/dbdma.c
@@ -859,7 +859,7 @@ dbdma_interrupt(int irq, void *dev_id)
 
 	intstat = dbdma_gptr->ddma_intstat;
 	au_sync();
-	chan_index = ffs(intstat);
+	chan_index = __ffs(intstat);
 
 	ctp = chan_tab_ptr[chan_index];
 	cp = ctp->chan_ptr;
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index ddfb7f0a17a6..3c7714f057ac 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -462,9 +462,9 @@ static void intc0_req0_irqdispatch(void)
 		return;
 	}
 #endif
-	bit = ffs(intc0_req0);
+	bit = __ffs(intc0_req0);
 	intc0_req0 &= ~(1 << bit);
-	do_IRQ(MIPS_CPU_IRQ_BASE + bit);
+	do_IRQ(AU1000_INTC0_INT_BASE + bit);
 }
 
 
@@ -478,9 +478,9 @@ static void intc0_req1_irqdispatch(void)
 	if (!intc0_req1)
 		return;
 
-	bit = ffs(intc0_req1);
+	bit = __ffs(intc0_req1);
 	intc0_req1 &= ~(1 << bit);
-	do_IRQ(bit);
+	do_IRQ(AU1000_INTC0_INT_BASE + bit);
 }
 
 
@@ -498,9 +498,9 @@ static void intc1_req0_irqdispatch(void)
 	if (!intc1_req0)
 		return;
 
-	bit = ffs(intc1_req0);
+	bit = __ffs(intc1_req0);
 	intc1_req0 &= ~(1 << bit);
-	do_IRQ(MIPS_CPU_IRQ_BASE + 32 + bit);
+	do_IRQ(AU1000_INTC1_INT_BASE + bit);
 }
 
 
@@ -514,9 +514,9 @@ static void intc1_req1_irqdispatch(void)
 	if (!intc1_req1)
 		return;
 
-	bit = ffs(intc1_req1);
+	bit = __ffs(intc1_req1);
 	intc1_req1 &= ~(1 << bit);
-	do_IRQ(MIPS_CPU_IRQ_BASE + 32 + bit);
+	do_IRQ(AU1000_INTC1_INT_BASE + bit);
 }
 
 
 asmlinkage void plat_irq_dispatch(void)
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index c096be4ed4e7..8fcd0df86f93 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -74,7 +74,7 @@ irqreturn_t pb1200_cascade_handler( int irq, void *dev_id)
 	bcsr->int_status = bisr;
 	for( ; bisr; bisr &= (bisr-1) )
 	{
-		extirq_nr = PB1200_INT_BEGIN + ffs(bisr);
+		extirq_nr = PB1200_INT_BEGIN + __ffs(bisr);
 		/* Ack and dispatch IRQ */
 		do_IRQ(extirq_nr);
 	}
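The dbdma and IRQ-dispatch fixes above all replace ffs() with __ffs(). The difference is that ffs() returns a 1-based bit index (ffs(0x1) == 1, ffs(0) == 0), while __ffs() is 0-based and undefined for 0, so the old code dispatched every interrupt one IRQ past its real source. A minimal sketch of the corrected pattern; dispatch_pending() and irq_base are illustrative names, not part of the patch:

    /* Sketch: why the dispatchers must use the 0-based __ffs(). */
    static void dispatch_pending(unsigned long pending, unsigned int irq_base)
    {
    	unsigned int bit;

    	if (!pending)			/* __ffs(0) is undefined, so bail out first */
    		return;
    	bit = __ffs(pending);		/* 0-based index of the lowest set bit */
    	do_IRQ(irq_base + bit);		/* ffs() here would be off by one */
    }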
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 423bc2c473df..bdfa07aecd97 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2004, 05, 06 by Ralf Baechle
  * Copyright (C) 2005 by MIPS Technologies, Inc.
  */
+#include <linux/cpumask.h>
 #include <linux/oprofile.h>
 #include <linux/interrupt.h>
 #include <linux/smp.h>
@@ -33,11 +34,45 @@
 #ifdef CONFIG_MIPS_MT_SMP
 #define WHAT		(M_TC_EN_VPE | M_PERFCTL_VPEID(smp_processor_id()))
 #define vpe_id()	smp_processor_id()
+
+/*
+ * The number of bits to shift to convert between counters per core and
+ * counters per VPE. There is no reasonable interface atm to obtain the
+ * number of VPEs used by Linux and in the 34K this number is fixed to two
+ * anyways so we hardcore a few things here for the moment. The way it's
+ * done here will ensure that oprofile VSMP kernel will run right on a lesser
+ * core like a 24K also or with maxcpus=1.
+ */
+static inline unsigned int vpe_shift(void)
+{
+	if (num_possible_cpus() > 1)
+		return 1;
+
+	return 0;
+}
+
 #else
+
 #define WHAT		0
 #define vpe_id()	0
+
+static inline unsigned int vpe_shift(void)
+{
+	return 0;
+}
+
 #endif
 
+static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+	return counters >> vpe_shift();
+}
+
+static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
+{
+	return counters << vpe_shift();
+}
+
 #define __define_perf_accessors(r, n, np)				\
 									\
 static inline unsigned int r_c0_ ## r ## n(void)			\
@@ -269,9 +304,7 @@ static int __init mipsxx_init(void)
 
 	reset_counters(counters);
 
-#ifdef CONFIG_MIPS_MT_SMP
-	counters >>= 1;
-#endif
+	counters = counters_total_to_per_cpu(counters);
 
 	op_model_mipsxx_ops.num_counters = counters;
 	switch (current_cpu_type()) {
@@ -330,9 +363,8 @@ static int __init mipsxx_init(void)
 static void mipsxx_exit(void)
 {
 	int counters = op_model_mipsxx_ops.num_counters;
-#ifdef CONFIG_MIPS_MT_SMP
-	counters <<= 1;
-#endif
+
+	counters = counters_per_cpu_to_total(counters);
 	reset_counters(counters);
 
 	perf_irq = null_perf_irq;
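The vpe_shift() helpers above replace the open-coded #ifdef shifts: on an MT kernel two VPEs share a core's performance counters, so the per-CPU view is the per-core total shifted right by one, and the exit path shifts back before reset_counters(). A hedged sketch of the round trip (demo_counters() is illustrative only):

    /* Sketch: per-core <-> per-VPE counter conversion.  With
     * num_possible_cpus() == 1 both shifts are no-ops. */
    static unsigned int demo_counters(unsigned int per_core)
    {
    	unsigned int per_cpu = counters_total_to_per_cpu(per_core); /* e.g. 4 -> 2 */

    	return counters_per_cpu_to_total(per_cpu);                  /* back to 4 */
    }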
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index bc647cb77298..47f316c86ab1 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -76,7 +76,10 @@ static inline void WRITECFG32(u32 addr, u32 data)
 
 int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-	return K_BCM1480_INT_PCI_INTA + pin;
+	if (pin == 0)
+		return -1;
+
+	return K_BCM1480_INT_PCI_INTA - 1 + pin;
 }
 
 /* Do platform specific device initialization at pci_enable_device() time */
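This mapping fix reflects that a PCI interrupt pin value is 1-based: 0 means the function uses no interrupt pin, 1 is INTA, up to 4 for INTD. The old K_BCM1480_INT_PCI_INTA + pin therefore returned the INTB line for an INTA device. A sketch of the corrected arithmetic (map_pci_pin() and inta_irq are illustrative names):

    /* Sketch: map a 1-based PCI pin onto IRQ lines starting at INTA. */
    static int map_pci_pin(u8 pin, int inta_irq)
    {
    	if (pin == 0)			/* no interrupt pin wired */
    		return -1;
    	return inta_irq - 1 + pin;	/* pin 1 -> INTA, pin 2 -> INTB, ... */
    }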
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b2b2edc40eb1..1a6dac8df6fb 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1079,8 +1079,10 @@ cleanup_io_leave_insn:
 .Lexecve_tail:	.long	execve_tail
 .Ljump_table:	.long	pgm_check_table
 .Lschedule:	.long	schedule
+#ifdef CONFIG_PREEMPT
 .Lpreempt_schedule_irq:
 	.long	preempt_schedule_irq
+#endif
 .Ltrace:	.long	syscall_trace
 .Lschedtail:	.long	schedule_tail
 .Lsysc_table:	.long	sys_call_table
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 50f8f1e3760e..577aa7dd660e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -486,9 +486,7 @@ static void setup_addressing_mode(void)
 	if (s390_noexec) {
 		printk("S390 execute protection active, ");
 		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
-		return;
-	}
-	if (switch_amode) {
+	} else if (switch_amode) {
 		printk("S390 address spaces switched, ");
 		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
 	}
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index af90a5f9ab57..b240b8863fd0 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -62,8 +62,10 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
 		int err = check_cpu_node(dp->node, &cur_inst,
 					 compare, compare_arg,
 					 prom_node, mid);
-		if (!err)
+		if (!err) {
+			of_node_put(dp);
 			return 0;
+		}
 	}
 
 	return -ENODEV;
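The added of_node_put() balances the reference the device-node iterator takes on each node it hands out; returning early without dropping it leaks the node. A sketch of the rule, with node_matches() as an assumed, hypothetical predicate:

    /* Sketch: an early exit from a node iteration must drop the reference. */
    static int find_matching_cpu_node(void)
    {
    	struct device_node *dp;

    	for_each_node_by_type(dp, "cpu") {
    		if (node_matches(dp)) {		/* node_matches() is hypothetical */
    			of_node_put(dp);	/* balance the iterator's get */
    			return 0;
    		}
    	}	/* on normal termination the iterator has dropped all refs */
    	return -ENODEV;
    }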
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index f2d432edc92d..4cd5d7818dc6 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -329,7 +329,7 @@ int __init pcic_probe(void)
 	pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
 	if ((pcic->pcic_config_space_addr =
 	    ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
-		prom_printf("PCIC: Error, cannot map"
+		prom_printf("PCIC: Error, cannot map "
 			"PCI Configuration Space Address.\n");
 		prom_halt();
 	}
@@ -341,7 +341,7 @@ int __init pcic_probe(void)
 	pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
 	if ((pcic->pcic_config_space_data =
 	    ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
-		prom_printf("PCIC: Error, cannot map"
+		prom_printf("PCIC: Error, cannot map "
 			"PCI Configuration Space Data.\n");
 		prom_halt();
 	}
@@ -518,8 +518,8 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
 		 * board in a PCI slot. We must remap it
 		 * under 64K but it is not done yet. XXX
 		 */
-		printk("PCIC: Skipping I/O space at 0x%lx,"
-			"this will Oops if a driver attaches;"
+		printk("PCIC: Skipping I/O space at 0x%lx, "
+			"this will Oops if a driver attaches "
 			"device '%s' at %02x:%02x)\n", address,
 			namebuf, dev->bus->number, dev->devfn);
 	}
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 22734ac08c8a..f62d9f6c5e2a 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24-rc1
-# Wed Oct 31 15:36:47 2007
+# Linux kernel version: 2.6.24-rc4
+# Tue Dec  4 00:37:59 2007
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
@@ -47,6 +47,7 @@ CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
 # CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=18
@@ -154,6 +155,7 @@ CONFIG_PCI_DOMAINS=y
 CONFIG_PCI_SYSCALL=y
 CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
+# CONFIG_PCI_LEGACY is not set
 # CONFIG_PCI_DEBUG is not set
 CONFIG_SUN_OPENPROMFS=m
 CONFIG_SPARC32_COMPAT=y
@@ -359,7 +361,6 @@ CONFIG_IDE_GENERIC=y
 CONFIG_BLK_DEV_IDEPCI=y
 # CONFIG_IDEPCI_SHARE_IRQ is not set
 CONFIG_IDEPCI_PCIBUS_ORDER=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
 # CONFIG_BLK_DEV_GENERIC is not set
 # CONFIG_BLK_DEV_OPTI621 is not set
 CONFIG_BLK_DEV_IDEDMA_PCI=y
@@ -584,7 +585,6 @@ CONFIG_NIU=m
 # CONFIG_USB_KAWETH is not set
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
@@ -780,6 +780,7 @@ CONFIG_HWMON=y
 # CONFIG_SENSORS_ADT7470 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
 # CONFIG_SENSORS_F71805F is not set
 # CONFIG_SENSORS_F71882FG is not set
 # CONFIG_SENSORS_F75375S is not set
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 0f19dce1c905..b5f7b354084f 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -155,6 +155,7 @@ void __init isa_init(void)
 	isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
 	if (!isa_br) {
 		printk(KERN_DEBUG "isa: cannot allocate sparc_isa_bridge");
+		pci_dev_put(pdev);
 		return;
 	}
 
@@ -168,6 +169,7 @@
 		printk(KERN_DEBUG "isa: device registration error for %s!\n",
 		       dp->path_component_name);
 		kfree(isa_br);
+		pci_dev_put(pdev);
 		return;
 	}
 
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index 217478a94128..63969f610284 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -2338,6 +2338,7 @@ static int __init ldc_init(void)
 	unsigned long major, minor;
 	struct mdesc_handle *hp;
 	const u64 *v;
+	int err;
 	u64 mp;
 
 	hp = mdesc_grab();
@@ -2345,29 +2346,33 @@
 		return -ENODEV;
 
 	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
+	err = -ENODEV;
 	if (mp == MDESC_NODE_NULL)
-		return -ENODEV;
+		goto out;
 
 	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
 	if (!v)
-		return -ENODEV;
+		goto out;
 
 	major = 1;
 	minor = 0;
 	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
 		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
-		return -ENODEV;
+		goto out;
 	}
 
 	printk(KERN_INFO "%s", version);
 
 	if (!*v) {
 		printk(KERN_INFO PFX "Domaining disabled.\n");
-		return -ENODEV;
+		goto out;
 	}
 	ldom_domaining_enabled = 1;
+	err = 0;
 
-	return 0;
+out:
+	mdesc_release(hp);
+	return err;
 }
 
 core_initcall(ldc_init);
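The rewrite converts ldc_init()'s early returns into the usual single-exit idiom so the mdesc handle grabbed at the top is always released. A minimal sketch of the shape, with resource_get(), resource_put() and use_resource() as illustrative stand-ins:

    /* Sketch: acquire once, funnel every failure through one label. */
    static int init_with_cleanup(void)
    {
    	struct resource_handle *hp = resource_get();
    	int err = -ENODEV;

    	if (!hp)
    		return -ENODEV;		/* nothing acquired yet, plain return is fine */

    	if (use_resource(hp) < 0)
    		goto out;		/* every later failure releases hp */

    	err = 0;
    out:
    	resource_put(hp);
    	return err;
    }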
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 8c4875bdb4a8..e587a372f3fe 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1022,6 +1022,10 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
 	}
 
 	prop = of_find_property(dp, "reg", NULL);
+	if (!prop) {
+		prom_printf("SUN4V_PCI: Could not find config registers\n");
+		prom_halt();
+	}
 	regs = prop->value;
 
 	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7cd8d94df0dc..894b506f9636 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -236,8 +236,9 @@ void smp_synchronize_tick_client(void)
 		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 #endif
 
-	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
-	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
+	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
+	       "(last diff %ld cycles, maxerr %lu cycles)\n",
+	       smp_processor_id(), delta, rt);
 }
 
 static void smp_start_sync_tick_client(int cpu);
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 67290117d909..561e373bd850 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -22,11 +22,6 @@ export LDFLAGS HOSTCFLAGS HOSTLDFLAGS UML_OBJCOPYFLAGS
 endif
 endif
 
-KBUILD_CFLAGS += -DCONFIG_X86_32
-KBUILD_AFLAGS += -DCONFIG_X86_32
-CONFIG_X86_32 := y
-export CONFIG_X86_32
-
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 368864dfe6eb..80b7ba4056db 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,8 +112,9 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default X86_64
 
-
-
+config ARCH_SUPPORTS_OPROFILE
+	bool
+	default y
 
 
 config ZONE_DMA32
@@ -148,7 +149,8 @@ config X86_SMP
 
 config X86_HT
 	bool
-	depends on SMP && !(X86_VISWS || X86_VOYAGER || MK8)
+	depends on SMP
+	depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8)
 	default y
 
 config X86_BIOS_REBOOT
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9921b01fe199..606fe4d55a91 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -497,7 +497,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
 #endif
 
-static void free_cache_attributes(unsigned int cpu)
+static void __cpuinit free_cache_attributes(unsigned int cpu)
 {
 	int i;
 
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index f5cc47c60b13..80ca72e5ac29 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -106,7 +106,8 @@ static int __init check_nmi_watchdog(void)
 		if (!per_cpu(wd_enabled, cpu))
 			continue;
 		if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
-			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+				"appears to be stuck (%d->%d)!\n",
 				cpu,
 				prev_nmi_count[cpu],
 				nmi_count(cpu));
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index a576fd740062..4253c4e8849c 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -109,7 +109,8 @@ int __init check_nmi_watchdog (void)
 		if (!per_cpu(wd_enabled, cpu))
 			continue;
 		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
-			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+				"appears to be stuck (%d->%d)!\n",
 				cpu,
 				counts[cpu],
 				cpu_pda(cpu)->__nmi_count);
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 8caa0b777466..7e16d675eb85 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -33,7 +33,7 @@
 
 static struct i386_cpu cpu_devices[NR_CPUS];
 
-int arch_register_cpu(int num)
+int __cpuinit arch_register_cpu(int num)
 {
 	/*
 	 * CPU0 cannot be offlined due to several
@@ -53,7 +53,8 @@ int arch_register_cpu(int num)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void arch_unregister_cpu(int num) {
+void arch_unregister_cpu(int num)
+{
 	return unregister_cpu(&cpu_devices[num].cpu);
 }
 EXPORT_SYMBOL(arch_register_cpu);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index d161949fdb94..a32cb68bbc60 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -51,7 +51,7 @@
 #define ROUNDS 16
 
 struct fcrypt_ctx {
-	u32 sched[ROUNDS];
+	__be32 sched[ROUNDS];
 };
 
 /* Rotate right two 32 bit numbers as a 56 bit number */
@@ -73,8 +73,8 @@ do { \
  * /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h
  */
 #undef Z
-#define Z(x) __constant_be32_to_cpu(x << 3)
-static const u32 sbox0[256] = {
+#define Z(x) __constant_cpu_to_be32(x << 3)
+static const __be32 sbox0[256] = {
 	Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11),
 	Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06),
 	Z(0x0e), Z(0x06), Z(0xd2), Z(0x65), Z(0x73), Z(0xc5), Z(0x28), Z(0x60),
@@ -110,8 +110,8 @@ static const u32 sbox0[256] = {
 };
 
 #undef Z
-#define Z(x) __constant_be32_to_cpu((x << 27) | (x >> 5))
-static const u32 sbox1[256] = {
+#define Z(x) __constant_cpu_to_be32((x << 27) | (x >> 5))
+static const __be32 sbox1[256] = {
 	Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
 	Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),
 	Z(0x6c), Z(0x7b), Z(0x67), Z(0xc6), Z(0x23), Z(0xe3), Z(0xf2), Z(0x89),
@@ -147,8 +147,8 @@ static const u32 sbox1[256] = {
 };
 
 #undef Z
-#define Z(x) __constant_be32_to_cpu(x << 11)
-static const u32 sbox2[256] = {
+#define Z(x) __constant_cpu_to_be32(x << 11)
+static const __be32 sbox2[256] = {
 	Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86),
 	Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d),
 	Z(0xbf), Z(0x80), Z(0x87), Z(0x27), Z(0x95), Z(0xe2), Z(0xc5), Z(0x5d),
@@ -184,8 +184,8 @@ static const u32 sbox2[256] = {
 };
 
 #undef Z
-#define Z(x) __constant_be32_to_cpu(x << 19)
-static const u32 sbox3[256] = {
+#define Z(x) __constant_cpu_to_be32(x << 19)
+static const __be32 sbox3[256] = {
 	Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2),
 	Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12),
 	Z(0x44), Z(0x48), Z(0x6d), Z(0x28), Z(0xaa), Z(0x20), Z(0x6d), Z(0x57),
@@ -225,7 +225,7 @@ static const u32 sbox3[256] = {
  */
 #define F_ENCRYPT(R, L, sched) \
 do { \
-	union lc4 { u32 l; u8 c[4]; } u; \
+	union lc4 { __be32 l; u8 c[4]; } u; \
 	u.l = sched ^ R; \
 	L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
 } while(0)
@@ -237,7 +237,7 @@ static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct {
-		u32 l, r;
+		__be32 l, r;
 	} X;
 
 	memcpy(&X, src, sizeof(X));
@@ -269,7 +269,7 @@ static void fcrypt_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct {
-		u32 l, r;
+		__be32 l, r;
 	} X;
 
 	memcpy(&X, src, sizeof(X));
@@ -328,22 +328,22 @@ static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key
 	k |= (*key) >> 1;
 
 	/* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
-	ctx->sched[0x0] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x1] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x2] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x3] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x4] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x5] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x6] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x7] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x8] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0x9] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0xa] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0xb] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0xc] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0xd] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0xe] = be32_to_cpu(k); ror56_64(k, 11);
-	ctx->sched[0xf] = be32_to_cpu(k);
+	ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11);
+	ctx->sched[0xf] = cpu_to_be32(k);
 
 	return 0;
 #else
@@ -369,22 +369,22 @@ static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key
 	lo |= (*key) >> 1;
 
 	/* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
-	ctx->sched[0x0] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x1] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x2] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x3] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x4] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x5] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x6] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x7] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x8] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0x9] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0xa] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0xb] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0xc] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0xd] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0xe] = be32_to_cpu(lo); ror56(hi, lo, 11);
-	ctx->sched[0xf] = be32_to_cpu(lo);
+	ctx->sched[0x0] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x1] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x2] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x3] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x4] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x5] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x6] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x7] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x8] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0x9] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0xa] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0xb] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0xc] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0xd] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0xe] = cpu_to_be32(lo); ror56(hi, lo, 11);
+	ctx->sched[0xf] = cpu_to_be32(lo);
 	return 0;
 #endif
 }
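The fcrypt changes are endianness annotations rather than a behavioural fix: the key schedule and S-boxes hold big-endian words, so they are now typed __be32 and filled with cpu_to_be32(). On any host be32_to_cpu() and cpu_to_be32() perform the same byte swap, so the generated code should be unchanged; the point is that sparse can now type-check the conversions. A small illustration (store_word() and struct sched_demo are illustrative, not from the patch):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct sched_demo {
    	__be32 w;			/* annotated as big-endian for sparse */
    };

    static void store_word(struct sched_demo *d, u32 cpu_val)
    {
    	d->w = cpu_to_be32(cpu_val);	/* result type is __be32, direction is explicit */
    }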
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ed9b407e42d4..4688dbf2d111 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -536,6 +536,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
 	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
 	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
 	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
 	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
 	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index c5779ad4abca..3cc27b514654 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.9"
+#define DRV_VERSION "0.3.10"
 
 /**
  *	timing_setup	-	shared timing computation and load
@@ -115,7 +115,8 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
 	}
 
 	/* UDMA timing */
-	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
+	if (at.udma)
+		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
 }
 
 /**
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index a4175fbdd170..453d72bf2598 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -63,7 +63,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_via"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.3.3"
 
 /*
  * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -296,7 +296,7 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
 	}
 
 	/* Set UDMA unless device is not UDMA capable */
-	if (udma_type) {
+	if (udma_type && t.udma) {
 		u8 cable80_status;
 
 		/* Get 80-wire cable detection bit */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 8d864e5e97ed..fe0105d35bae 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2503,6 +2503,15 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 
 	case chip_7042:
 		hp_flags |= MV_HP_PCIE;
+		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
+		    (pdev->device == 0x2300 || pdev->device == 0x2310))
+		{
+			printk(KERN_WARNING "sata_mv: Highpoint RocketRAID BIOS"
+				" will CORRUPT DATA on attached drives when"
+				" configured as \"Legacy\". BEWARE!\n");
+			printk(KERN_WARNING "sata_mv: Use BIOS \"JBOD\" volumes"
+				" instead for safety.\n");
+		}
 	case chip_6042:
 		hpriv->ops = &mv6xxx_ops;
 		hp_flags |= MV_HP_GEN_IIE;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 44f9e5d9e362..ed5dc7cb50cd 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -791,11 +791,13 @@ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 
 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 {
-	/* Since commands where a result TF is requested are not
-	   executed in ADMA mode, the only time this function will be called
-	   in ADMA mode will be if a command fails. In this case we
-	   don't care about going into register mode with ADMA commands
-	   pending, as the commands will all shortly be aborted anyway. */
+	/* Other than when internal or pass-through commands are executed,
+	   the only time this function will be called in ADMA mode will be
+	   if a command fails. In the failure case we don't care about going
+	   into register mode with ADMA commands pending, as the commands will
+	   all shortly be aborted anyway. We assume that NCQ commands are not
+	   issued via passthrough, which is the only way that switching into
+	   ADMA mode could abort outstanding commands. */
 	nv_adma_register_mode(ap);
 
 	ata_tf_read(ap, tf);
@@ -1359,11 +1361,9 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 
 	/* ADMA engine can only be used for non-ATAPI DMA commands,
-	   or interrupt-driven no-data commands, where a result taskfile
-	   is not required. */
+	   or interrupt-driven no-data commands. */
 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
-	   (qc->tf.flags & ATA_TFLAG_POLLING) ||
-	   (qc->flags & ATA_QCFLAG_RESULT_TF))
+	   (qc->tf.flags & ATA_TFLAG_POLLING))
 		return 1;
 
 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
@@ -1381,6 +1381,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 			NV_CPB_CTL_IEN;
 
 	if (nv_adma_use_reg_mode(qc)) {
+		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		ata_qc_prep(qc);
 		return;
@@ -1425,9 +1427,21 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
 
 	VPRINTK("ENTER\n");
 
+	/* We can't handle result taskfile with NCQ commands, since
+	   retrieving the taskfile switches us out of ADMA mode and would abort
+	   existing commands. */
+	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
+		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
+		ata_dev_printk(qc->dev, KERN_ERR,
+			"NCQ w/ RESULT_TF not allowed\n");
+		return AC_ERR_SYSTEM;
+	}
+
 	if (nv_adma_use_reg_mode(qc)) {
 		/* use ATA register mode */
 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
+		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		return ata_qc_issue_prot(qc);
 	} else
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index fe6d2407baed..c2d23cae9515 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -104,6 +104,11 @@ static ssize_t cs5535_gpio_write(struct file *file, const char __user *data,
 	for (j = 0; j < ARRAY_SIZE(rm); j++) {
 		if (c == rm[j].on) {
 			outl(m1, base + rm[j].wr_offset);
+			/* If enabling output, turn off AUX 1 and AUX 2 */
+			if (c == 'O') {
+				outl(m0, base + 0x10);
+				outl(m0, base + 0x14);
+			}
 			break;
 		} else if (c == rm[j].off) {
 			outl(m0, base + rm[j].wr_offset);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index f59aecf5ec15..fd9c5d51870a 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -267,13 +267,12 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	ts->irq_disabled = 0;
 	enable_irq(spi->irq);
 
-	if (req->msg.status)
-		status = req->msg.status;
-
-	/* on-wire is a must-ignore bit, a BE12 value, then padding */
-	sample = be16_to_cpu(req->sample);
-	sample = sample >> 3;
-	sample &= 0x0fff;
+	if (status == 0) {
+		/* on-wire is a must-ignore bit, a BE12 value, then padding */
+		sample = be16_to_cpu(req->sample);
+		sample = sample >> 3;
+		sample &= 0x0fff;
+	}
 
 	kfree(req);
 	return status ? status : sample;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a6469218f194..365024b83d3d 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -176,8 +176,6 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
 			DMA_FROM_DEVICE);
 
 	status = spi_sync(host->spi, &host->readback);
-	if (status == 0)
-		status = host->readback.status;
 
 	if (host->dma_dev)
 		dma_sync_single_for_cpu(host->dma_dev,
@@ -480,8 +478,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
 				DMA_BIDIRECTIONAL);
 	}
 	status = spi_sync(host->spi, &host->m);
-	if (status == 0)
-		status = host->m.status;
 
 	if (host->dma_dev)
 		dma_sync_single_for_cpu(host->dma_dev,
@@ -624,8 +620,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
 			DMA_BIDIRECTIONAL);
 
 	status = spi_sync(spi, &host->m);
-	if (status == 0)
-		status = host->m.status;
 
 	if (status != 0) {
 		dev_dbg(&spi->dev, "write error (%d)\n", status);
@@ -726,8 +720,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t)
 	}
 
 	status = spi_sync(spi, &host->m);
-	if (status == 0)
-		status = host->m.status;
 
 	if (host->dma_dev) {
 		dma_sync_single_for_cpu(host->dma_dev,
@@ -905,8 +897,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
 			DMA_BIDIRECTIONAL);
 
 	tmp = spi_sync(spi, &host->m);
-	if (tmp == 0)
-		tmp = host->m.status;
 
 	if (host->dma_dev)
 		dma_sync_single_for_cpu(host->dma_dev,
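These deletions (and the matching ones in ads7846 and rtc-max6902) track an SPI core change under which spi_sync() itself returns the message's completion status, making the follow-up read of the message's status field redundant. A sketch of the resulting calling convention (do_transfer() is an illustrative name):

    /* Sketch: one status check is now enough after spi_sync(). */
    static int do_transfer(struct spi_device *spi, struct spi_message *m)
    {
    	int status = spi_sync(spi, m);	/* already the message status */

    	if (status)
    		dev_dbg(&spi->dev, "transfer failed (%d)\n", status);
    	return status;
    }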
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index c5975047c89b..c5975047c89b 100755..100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 2117c4fbb107..2117c4fbb107 100755..100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index b301c0428ae0..b301c0428ae0 100755..100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index cced9dff91c5..cced9dff91c5 100755..100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index bf5a7caa5b52..79f7eade4773 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -422,7 +422,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 
 		rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
 				(struct bcom_bd **)&bd);
-		dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE);
+		dma_unmap_single(&dev->dev, bd->skb_pa, rskb->len, DMA_FROM_DEVICE);
 
 		/* Test for errors in received frame */
 		if (status & BCOM_FEC_RX_BD_ERRORS) {
@@ -467,7 +467,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 		bcom_prepare_next_buffer(priv->rx_dmatsk);
 
 		bd->status = FEC_RX_BUFFER_SIZE;
-		bd->skb_pa = dma_map_single(&dev->dev, rskb->data,
+		bd->skb_pa = dma_map_single(&dev->dev, skb->data,
 				FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
 
 		bcom_submit_next_buffer(priv->rx_dmatsk, skb);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 38268d7335a8..0431e9ed0fac 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -696,7 +696,7 @@ int startup_gfar(struct net_device *dev)
 {
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;
 	unsigned long vaddr;
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 0f306ddb5630..8def8657251f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1979,6 +1979,7 @@ static int myri10ge_open(struct net_device *dev)
 	lro_mgr->lro_arr = mgp->rx_done.lro_desc;
 	lro_mgr->get_frag_header = myri10ge_get_frag_header;
 	lro_mgr->max_aggr = myri10ge_lro_max_pkts;
+	lro_mgr->frag_align_pad = 2;
 	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
 		lro_mgr->max_aggr = MAX_SKB_FRAGS;
 
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 09b4fde8d924..816a59e801b2 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -586,7 +586,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 			/* CRC error flagged */
 			mac->netdev->stats.rx_errors++;
 			mac->netdev->stats.rx_crc_errors++;
-			dev_kfree_skb_irq(skb);
+			/* No need to free skb, it'll be reused */
 			goto next;
 		}
 
@@ -1362,7 +1362,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
 
-	dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG;
+	dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG;
 
 	/* These should come out of the device tree eventually */
 	mac->dma_txch = index;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index fc2f0e695a13..c30196d0ad16 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -91,9 +91,12 @@ int mdiobus_register(struct mii_bus *bus)
 
 			err = device_register(&phydev->dev);
 
-			if (err)
+			if (err) {
 				printk(KERN_ERR "phy %d failed to register\n",
 						i);
+				phy_device_free(phydev);
+				phydev = NULL;
+			}
 		}
 
 		bus->phy_map[i] = phydev;
@@ -110,10 +113,8 @@
 	int i;
 
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		if (bus->phy_map[i]) {
+		if (bus->phy_map[i])
 			device_unregister(&bus->phy_map[i]->dev);
-			kfree(bus->phy_map[i]);
-		}
 	}
 }
 EXPORT_SYMBOL(mdiobus_unregister);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f6e484812a98..5b9e1751e1b4 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -44,6 +44,16 @@ static struct phy_driver genphy_driver;
 extern int mdio_bus_init(void);
 extern void mdio_bus_exit(void);
 
+void phy_device_free(struct phy_device *phydev)
+{
+	kfree(phydev);
+}
+
+static void phy_device_release(struct device *dev)
+{
+	phy_device_free(to_phy_device(dev));
+}
+
 struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 {
 	struct phy_device *dev;
@@ -54,6 +64,8 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 	if (NULL == dev)
 		return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
 
+	dev->dev.release = phy_device_release;
+
 	dev->speed = 0;
 	dev->duplex = -1;
 	dev->pause = dev->asym_pause = 0;
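Together the mdio_bus and phy_device hunks move the phy onto the driver-core lifetime rules: memory behind a registered struct device may only be freed from its release() callback, and a device that failed to register is disposed of with phy_device_free() rather than device_unregister() plus a bare kfree(). The general pattern, with struct demo_dev and demo_release() as illustrative stand-ins:

    /* Sketch: embed the struct device and free it in its release() hook. */
    struct demo_dev {
    	struct device dev;
    	/* driver-private state ... */
    };

    static void demo_release(struct device *dev)
    {
    	/* invoked by the driver core when the last reference is dropped */
    	kfree(container_of(dev, struct demo_dev, dev));
    }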
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3d1dfc948405..6197afb3ed83 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2906,16 +2906,14 @@ static void sky2_restart(struct work_struct *work)
 	int i, err;
 
 	rtnl_lock();
-	sky2_write32(hw, B0_IMSK, 0);
-	sky2_read32(hw, B0_IMSK);
-	napi_disable(&hw->napi);
-
 	for (i = 0; i < hw->ports; i++) {
 		dev = hw->dev[i];
 		if (netif_running(dev))
 			sky2_down(dev);
 	}
 
+	napi_disable(&hw->napi);
+	sky2_write32(hw, B0_IMSK, 0);
 	sky2_reset(hw);
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
 	napi_enable(&hw->napi);
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 1a3d80bfe9ea..76cc1d3adf71 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1299,9 +1299,9 @@ smc911x_rx_dma_irq(int dma, void *data)
 		PRINT_PKT(skb->data, skb->len);
 		dev->last_rx = jiffies;
 		skb->protocol = eth_type_trans(skb, dev);
-		netif_rx(skb);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += skb->len;
+		netif_rx(skb);
 
 		spin_lock_irqsave(&lp->lock, flags);
 		pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
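The smc911x reorder exists because netif_rx() passes ownership of the skb to the network stack, which may free it immediately; skb->len must therefore be read into the statistics beforehand. A short sketch (demo_rx() is illustrative):

    /* Sketch: touch the skb before netif_rx(), never after. */
    static void demo_rx(struct net_device *dev, struct sk_buff *skb)
    {
    	dev->stats.rx_packets++;
    	dev->stats.rx_bytes += skb->len;	/* we still own the skb here */
    	netif_rx(skb);				/* stack owns (and may free) it now */
    }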
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a4f56e95cf96..f1e00ff54ce8 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -293,7 +293,7 @@ int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
 		return -EINVAL;
 
 	/* Cannot register while the char dev is in use */
-	if (test_and_set_bit(RTC_DEV_BUSY, &rtc->flags))
+	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
 		return -EBUSY;
 
 	spin_lock_irq(&rtc->irq_task_lock);
@@ -303,7 +303,7 @@ int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
 	}
 	spin_unlock_irq(&rtc->irq_task_lock);
 
-	clear_bit(RTC_DEV_BUSY, &rtc->flags);
+	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
 
 	return retval;
 }
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index ae1bf177d625..025c60a17a4a 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -26,7 +26,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
26 struct rtc_device, char_dev); 26 struct rtc_device, char_dev);
27 const struct rtc_class_ops *ops = rtc->ops; 27 const struct rtc_class_ops *ops = rtc->ops;
28 28
29 if (test_and_set_bit(RTC_DEV_BUSY, &rtc->flags)) 29 if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
30 return -EBUSY; 30 return -EBUSY;
31 31
32 file->private_data = rtc; 32 file->private_data = rtc;
@@ -41,7 +41,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
41 } 41 }
42 42
43 /* something has gone wrong */ 43 /* something has gone wrong */
44 clear_bit(RTC_DEV_BUSY, &rtc->flags); 44 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
45 return err; 45 return err;
46} 46}
47 47
@@ -402,7 +402,7 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
402 if (rtc->ops->release) 402 if (rtc->ops->release)
403 rtc->ops->release(rtc->dev.parent); 403 rtc->ops->release(rtc->dev.parent);
404 404
405 clear_bit(RTC_DEV_BUSY, &rtc->flags); 405 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
406 return 0; 406 return 0;
407} 407}
408 408
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 3e183cfee10f..1f956dc5d56e 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -89,13 +89,9 @@ static int max6902_get_reg(struct device *dev, unsigned char address,
89 89
90 /* do the i/o */ 90 /* do the i/o */
91 status = spi_sync(spi, &message); 91 status = spi_sync(spi, &message);
92 if (status == 0)
93 status = message.status;
94 else
95 return status;
96
97 *data = chip->rx_buf[1];
98 92
93 if (status == 0)
94 *data = chip->rx_buf[1];
99 return status; 95 return status;
100} 96}
101 97
@@ -125,9 +121,7 @@ static int max6902_get_datetime(struct device *dev, struct rtc_time *dt)
125 121
126 /* do the i/o */ 122 /* do the i/o */
127 status = spi_sync(spi, &message); 123 status = spi_sync(spi, &message);
128 if (status == 0) 124 if (status)
129 status = message.status;
130 else
131 return status; 125 return status;
132 126
133 /* The chip sends data in this order: 127 /* The chip sends data in this order:
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5e083d1f57e7..15a5789b7734 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -472,11 +472,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
472 if (rc) 472 if (rc)
473 goto unregister_dev; 473 goto unregister_dev;
474 474
475 add_disk(dev_info->gd);
476
477 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 475 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
478 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 476 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
479 477
478 add_disk(dev_info->gd);
479
480 switch (dev_info->segment_type) { 480 switch (dev_info->segment_type) {
481 case SEG_TYPE_SR: 481 case SEG_TYPE_SR:
482 case SEG_TYPE_ER: 482 case SEG_TYPE_ER:
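
add_disk() publishes the gendisk immediately, so I/O can arrive as soon as it returns; the request queue therefore has to be fully configured first, which is all this hunk changes. A hedged sketch of the bring-up order (names hypothetical):

	#include <linux/blkdev.h>
	#include <linux/genhd.h>

	static void demo_disk_up(struct gendisk *gd, struct request_queue *q,
				 make_request_fn *fn)
	{
		blk_queue_make_request(q, fn);		/* queue config comes first */
		blk_queue_hardsect_size(q, 4096);
		add_disk(gd);				/* disk is live from here on */
	}
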
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 6db31089d2d7..c3df2cd009a4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -451,6 +451,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
451 break; 451 break;
452 case -ENXIO: 452 case -ENXIO:
453 case -ENOMEM: 453 case -ENOMEM:
454 case -EIO:
454 /* These should abort looping */ 455 /* These should abort looping */
455 break; 456 break;
456 default: 457 default:
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 2f6bf462425e..156f3f9786b5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -113,6 +113,7 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
113{ 113{
114 struct subchannel *sch; 114 struct subchannel *sch;
115 struct ccw1 *ccw; 115 struct ccw1 *ccw;
116 int ret;
116 117
117 sch = to_subchannel(cdev->dev.parent); 118 sch = to_subchannel(cdev->dev.parent);
118 /* Setup sense channel program. */ 119 /* Setup sense channel program. */
@@ -124,9 +125,25 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
124 125
125 /* Reset device status. */ 126 /* Reset device status. */
126 memset(&cdev->private->irb, 0, sizeof(struct irb)); 127 memset(&cdev->private->irb, 0, sizeof(struct irb));
127 cdev->private->flags.intretry = 0;
128 128
129 return cio_start(sch, ccw, LPM_ANYPATH); 129 /* Try on every path. */
130 ret = -ENODEV;
131 while (cdev->private->imask != 0) {
132 if ((sch->opm & cdev->private->imask) != 0 &&
133 cdev->private->iretry > 0) {
134 cdev->private->iretry--;
135 /* Reset internal retry indication. */
136 cdev->private->flags.intretry = 0;
137 ret = cio_start (sch, cdev->private->iccws,
138 cdev->private->imask);
139 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
140 if (ret != -EACCES)
141 return ret;
142 }
143 cdev->private->imask >>= 1;
144 cdev->private->iretry = 5;
145 }
146 return ret;
130} 147}
131 148
132void 149void
@@ -136,7 +153,8 @@ ccw_device_sense_id_start(struct ccw_device *cdev)
136 153
137 memset (&cdev->private->senseid, 0, sizeof (struct senseid)); 154 memset (&cdev->private->senseid, 0, sizeof (struct senseid));
138 cdev->private->senseid.cu_type = 0xFFFF; 155 cdev->private->senseid.cu_type = 0xFFFF;
139 cdev->private->iretry = 3; 156 cdev->private->imask = 0x80;
157 cdev->private->iretry = 5;
140 ret = __ccw_device_sense_id_start(cdev); 158 ret = __ccw_device_sense_id_start(cdev);
141 if (ret && ret != -EBUSY) 159 if (ret && ret != -EBUSY)
142 ccw_device_sense_id_done(cdev, ret); 160 ccw_device_sense_id_done(cdev, ret);
@@ -252,13 +270,14 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
252 ccw_device_sense_id_done(cdev, ret); 270 ccw_device_sense_id_done(cdev, ret);
253 break; 271 break;
254 case -EACCES: /* channel is not operational. */ 272 case -EACCES: /* channel is not operational. */
273 sch->lpm &= ~cdev->private->imask;
274 cdev->private->imask >>= 1;
275 cdev->private->iretry = 5;
276 /* fall through. */
255 case -EAGAIN: /* try again. */ 277 case -EAGAIN: /* try again. */
256 cdev->private->iretry--; 278 ret = __ccw_device_sense_id_start(cdev);
257 if (cdev->private->iretry > 0) { 279 if (ret == 0 || ret == -EBUSY)
258 ret = __ccw_device_sense_id_start(cdev); 280 break;
259 if (ret == 0 || ret == -EBUSY)
260 break;
261 }
262 /* fall through. */ 281 /* fall through. */
263 default: /* Sense ID failed. Try asking VM. */ 282 default: /* Sense ID failed. Try asking VM. */
264 if (MACHINE_IS_VM) { 283 if (MACHINE_IS_VM) {
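
The rewritten sense-ID start walks the channel paths itself: imask begins at the highest-order path bit, each usable path gets up to five tries, and -EACCES (path unusable) shifts on to the next bit. A hedged restatement of that loop outside the cio internals (names and the try_path callback are illustrative):

	#include <linux/errno.h>

	static int demo_start_on_some_path(unsigned char opm,	/* operational path mask */
					   unsigned char *imask, int *iretry,
					   int (*try_path)(unsigned char mask))
	{
		int ret = -ENODEV;

		while (*imask != 0) {
			if ((opm & *imask) != 0 && *iretry > 0) {
				(*iretry)--;
				ret = try_path(*imask);
				if (ret != -EACCES)	/* success or hard error: stop */
					return ret;
			}
			*imask >>= 1;	/* path exhausted, try the next one */
			*iretry = 5;
		}
		return ret;
	}
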
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index b3b6f654365c..97adc701a819 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2802,7 +2802,6 @@ void ctc_init_netdevice(struct net_device * dev)
2802 dev->type = ARPHRD_SLIP; 2802 dev->type = ARPHRD_SLIP;
2803 dev->tx_queue_len = 100; 2803 dev->tx_queue_len = 100;
2804 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 2804 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2805 SET_MODULE_OWNER(dev);
2806} 2805}
2807 2806
2808 2807
diff --git a/drivers/spi/at25.c b/drivers/spi/at25.c
index e007833cca59..290dbe99647a 100644
--- a/drivers/spi/at25.c
+++ b/drivers/spi/at25.c
@@ -21,6 +21,13 @@
21#include <linux/spi/eeprom.h> 21#include <linux/spi/eeprom.h>
22 22
23 23
24/*
25 * NOTE: this is an *EEPROM* driver. The vagaries of product naming
26 * mean that some AT25 products are EEPROMs, and others are FLASH.
27 * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
28 * not this one!
29 */
30
24struct at25_data { 31struct at25_data {
25 struct spi_device *spi; 32 struct spi_device *spi;
26 struct mutex lock; 33 struct mutex lock;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b31f4431849b..93e9de46977a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -541,10 +541,7 @@ static void spi_complete(void *arg)
541 * Also, the caller is guaranteeing that the memory associated with the 541 * Also, the caller is guaranteeing that the memory associated with the
542 * message will not be freed before this call returns. 542 * message will not be freed before this call returns.
543 * 543 *
544 * The return value is a negative error code if the message could not be 544 * It returns zero on success, else a negative error code.
545 * submitted, else zero. When the value is zero, then message->status is
546 * also defined; it's the completion code for the transfer, either zero
547 * or a negative error code from the controller driver.
548 */ 545 */
549int spi_sync(struct spi_device *spi, struct spi_message *message) 546int spi_sync(struct spi_device *spi, struct spi_message *message)
550{ 547{
@@ -554,8 +551,10 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
554 message->complete = spi_complete; 551 message->complete = spi_complete;
555 message->context = &done; 552 message->context = &done;
556 status = spi_async(spi, message); 553 status = spi_async(spi, message);
557 if (status == 0) 554 if (status == 0) {
558 wait_for_completion(&done); 555 wait_for_completion(&done);
556 status = message->status;
557 }
559 message->context = NULL; 558 message->context = NULL;
560 return status; 559 return status;
561} 560}
@@ -589,7 +588,7 @@ int spi_write_then_read(struct spi_device *spi,
589 const u8 *txbuf, unsigned n_tx, 588 const u8 *txbuf, unsigned n_tx,
590 u8 *rxbuf, unsigned n_rx) 589 u8 *rxbuf, unsigned n_rx)
591{ 590{
592 static DECLARE_MUTEX(lock); 591 static DEFINE_MUTEX(lock);
593 592
594 int status; 593 int status;
595 struct spi_message message; 594 struct spi_message message;
@@ -615,7 +614,7 @@ int spi_write_then_read(struct spi_device *spi,
615 } 614 }
616 615
617 /* ... unless someone else is using the pre-allocated buffer */ 616 /* ... unless someone else is using the pre-allocated buffer */
618 if (down_trylock(&lock)) { 617 if (!mutex_trylock(&lock)) {
619 local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 618 local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
620 if (!local_buf) 619 if (!local_buf)
621 return -ENOMEM; 620 return -ENOMEM;
@@ -628,13 +627,11 @@ int spi_write_then_read(struct spi_device *spi,
628 627
629 /* do the i/o */ 628 /* do the i/o */
630 status = spi_sync(spi, &message); 629 status = spi_sync(spi, &message);
631 if (status == 0) { 630 if (status == 0)
632 memcpy(rxbuf, x[1].rx_buf, n_rx); 631 memcpy(rxbuf, x[1].rx_buf, n_rx);
633 status = message.status;
634 }
635 632
636 if (x[0].tx_buf == buf) 633 if (x[0].tx_buf == buf)
637 up(&lock); 634 mutex_unlock(&lock);
638 else 635 else
639 kfree(local_buf); 636 kfree(local_buf);
640 637
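
With this change spi_sync() folds the transfer's completion code into its own return value, so callers (like rtc-max6902 above) no longer read message.status; the one-off semaphore also becomes a proper mutex. A hedged sketch of a caller under the new convention (register layout hypothetical; a real caller would also use DMA-safe buffers rather than the stack):

	#include <linux/spi/spi.h>

	static int demo_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val)
	{
		struct spi_transfer x[2] = {
			{ .tx_buf = &reg, .len = 1, },
			{ .rx_buf = val,  .len = 1, },
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&x[0], &m);
		spi_message_add_tail(&x[1], &m);

		/* Zero now means submission *and* completion succeeded. */
		return spi_sync(spi, &m);
	}
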
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 2ef11bb70b2e..22697b812205 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1,17 +1,22 @@
1/* 1/*
2 * File: drivers/spi/bfin5xx_spi.c 2 * File: drivers/spi/bfin5xx_spi.c
3 * Based on: N/A 3 * Maintainer:
4 * Author: Luke Yang (Analog Devices Inc.) 4 * Bryan Wu <bryan.wu@analog.com>
5 * Original Author:
6 * Luke Yang (Analog Devices Inc.)
5 * 7 *
6 * Created: March. 10th 2006 8 * Created: March. 10th 2006
7 * Description: SPI controller driver for Blackfin 5xx 9 * Description: SPI controller driver for Blackfin BF5xx
8 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 10 * Bugs: Enter bugs at http://blackfin.uclinux.org/
9 * 11 *
10 * Modified: 12 * Modified:
11 * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang) 13 * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang)
12 * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang) 14 * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang)
15 * July 17, 2007 add support for BF54x SPI0 controller (Bryan Wu)
 16 * July 30, 2007 add platform_resource interface to support multi-port
17 * SPI controller (Bryan Wu)
13 * 18 *
14 * Copyright 2004-2006 Analog Devices Inc. 19 * Copyright 2004-2007 Analog Devices Inc.
15 * 20 *
16 * This program is free software ; you can redistribute it and/or modify 21 * This program is free software ; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by 22 * it under the terms of the GNU General Public License as published by
@@ -31,50 +36,39 @@
31 36
32#include <linux/init.h> 37#include <linux/init.h>
33#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/delay.h>
34#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/io.h>
35#include <linux/ioport.h> 42#include <linux/ioport.h>
43#include <linux/irq.h>
36#include <linux/errno.h> 44#include <linux/errno.h>
37#include <linux/interrupt.h> 45#include <linux/interrupt.h>
38#include <linux/platform_device.h> 46#include <linux/platform_device.h>
39#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
40#include <linux/spi/spi.h> 48#include <linux/spi/spi.h>
41#include <linux/workqueue.h> 49#include <linux/workqueue.h>
42#include <linux/delay.h>
43 50
44#include <asm/io.h>
45#include <asm/irq.h>
46#include <asm/delay.h>
47#include <asm/dma.h> 51#include <asm/dma.h>
48 52#include <asm/portmux.h>
49#include <asm/bfin5xx_spi.h> 53#include <asm/bfin5xx_spi.h>
50 54
51MODULE_AUTHOR("Luke Yang"); 55#define DRV_NAME "bfin-spi"
 52MODULE_DESCRIPTION("Blackfin 5xx SPI Controller"); 56#define DRV_AUTHOR "Bryan Wu, Luke Yang"
 53MODULE_LICENSE("GPL"); 57#define DRV_DESC "Blackfin BF5xx on-chip SPI Controller Driver"
58#define DRV_VERSION "1.0"
54 59
55#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) 60MODULE_AUTHOR(DRV_AUTHOR);
61MODULE_DESCRIPTION(DRV_DESC);
62MODULE_LICENSE("GPL");
56 63
57#define DEFINE_SPI_REG(reg, off) \ 64#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07) == 0)
58static inline u16 read_##reg(void) \
59 { return *(volatile unsigned short*)(SPI0_REGBASE + off); } \
60static inline void write_##reg(u16 v) \
61 {*(volatile unsigned short*)(SPI0_REGBASE + off) = v;\
62 SSYNC();}
63 65
64DEFINE_SPI_REG(CTRL, 0x00) 66#define START_STATE ((void *)0)
65DEFINE_SPI_REG(FLAG, 0x04) 67#define RUNNING_STATE ((void *)1)
66DEFINE_SPI_REG(STAT, 0x08) 68#define DONE_STATE ((void *)2)
67DEFINE_SPI_REG(TDBR, 0x0C) 69#define ERROR_STATE ((void *)-1)
68DEFINE_SPI_REG(RDBR, 0x10) 70#define QUEUE_RUNNING 0
69DEFINE_SPI_REG(BAUD, 0x14) 71#define QUEUE_STOPPED 1
70DEFINE_SPI_REG(SHAW, 0x18)
71#define START_STATE ((void*)0)
72#define RUNNING_STATE ((void*)1)
73#define DONE_STATE ((void*)2)
74#define ERROR_STATE ((void*)-1)
75#define QUEUE_RUNNING 0
76#define QUEUE_STOPPED 1
77int dma_requested;
78 72
79struct driver_data { 73struct driver_data {
80 /* Driver model hookup */ 74 /* Driver model hookup */
@@ -83,6 +77,12 @@ struct driver_data {
83 /* SPI framework hookup */ 77 /* SPI framework hookup */
84 struct spi_master *master; 78 struct spi_master *master;
85 79
80 /* Regs base of SPI controller */
81 void __iomem *regs_base;
82
83 /* Pin request list */
84 u16 *pin_req;
85
86 /* BFIN hookup */ 86 /* BFIN hookup */
87 struct bfin5xx_spi_master *master_info; 87 struct bfin5xx_spi_master *master_info;
88 88
@@ -107,12 +107,18 @@ struct driver_data {
107 void *tx_end; 107 void *tx_end;
108 void *rx; 108 void *rx;
109 void *rx_end; 109 void *rx_end;
110
111 /* DMA stuffs */
112 int dma_channel;
110 int dma_mapped; 113 int dma_mapped;
114 int dma_requested;
111 dma_addr_t rx_dma; 115 dma_addr_t rx_dma;
112 dma_addr_t tx_dma; 116 dma_addr_t tx_dma;
117
113 size_t rx_map_len; 118 size_t rx_map_len;
114 size_t tx_map_len; 119 size_t tx_map_len;
115 u8 n_bytes; 120 u8 n_bytes;
121 int cs_change;
116 void (*write) (struct driver_data *); 122 void (*write) (struct driver_data *);
117 void (*read) (struct driver_data *); 123 void (*read) (struct driver_data *);
118 void (*duplex) (struct driver_data *); 124 void (*duplex) (struct driver_data *);
@@ -129,28 +135,40 @@ struct chip_data {
129 u8 enable_dma; 135 u8 enable_dma;
130 u8 bits_per_word; /* 8 or 16 */ 136 u8 bits_per_word; /* 8 or 16 */
131 u8 cs_change_per_word; 137 u8 cs_change_per_word;
132 u8 cs_chg_udelay; 138 u16 cs_chg_udelay; /* Some devices require > 255usec delay */
133 void (*write) (struct driver_data *); 139 void (*write) (struct driver_data *);
134 void (*read) (struct driver_data *); 140 void (*read) (struct driver_data *);
135 void (*duplex) (struct driver_data *); 141 void (*duplex) (struct driver_data *);
136}; 142};
137 143
144#define DEFINE_SPI_REG(reg, off) \
145static inline u16 read_##reg(struct driver_data *drv_data) \
146 { return bfin_read16(drv_data->regs_base + off); } \
147static inline void write_##reg(struct driver_data *drv_data, u16 v) \
148 { bfin_write16(drv_data->regs_base + off, v); }
149
150DEFINE_SPI_REG(CTRL, 0x00)
151DEFINE_SPI_REG(FLAG, 0x04)
152DEFINE_SPI_REG(STAT, 0x08)
153DEFINE_SPI_REG(TDBR, 0x0C)
154DEFINE_SPI_REG(RDBR, 0x10)
155DEFINE_SPI_REG(BAUD, 0x14)
156DEFINE_SPI_REG(SHAW, 0x18)
157
138static void bfin_spi_enable(struct driver_data *drv_data) 158static void bfin_spi_enable(struct driver_data *drv_data)
139{ 159{
140 u16 cr; 160 u16 cr;
141 161
142 cr = read_CTRL(); 162 cr = read_CTRL(drv_data);
143 write_CTRL(cr | BIT_CTL_ENABLE); 163 write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
144 SSYNC();
145} 164}
146 165
147static void bfin_spi_disable(struct driver_data *drv_data) 166static void bfin_spi_disable(struct driver_data *drv_data)
148{ 167{
149 u16 cr; 168 u16 cr;
150 169
151 cr = read_CTRL(); 170 cr = read_CTRL(drv_data);
152 write_CTRL(cr & (~BIT_CTL_ENABLE)); 171 write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE)));
153 SSYNC();
154} 172}
155 173
156/* Calculate the SPI_BAUD register value based on input HZ */ 174/* Calculate the SPI_BAUD register value based on input HZ */
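
Relocating DEFINE_SPI_REG below struct driver_data and threading the instance through it is what unhooks the driver from the fixed SPI0_REGBASE: register addresses now come from the per-controller ioremap()ed regs_base. One expansion of the relocated macro looks roughly like this:

	/* DEFINE_SPI_REG(CTRL, 0x00) expands to approximately: */
	static inline u16 read_CTRL(struct driver_data *drv_data)
	{
		return bfin_read16(drv_data->regs_base + 0x00);
	}

	static inline void write_CTRL(struct driver_data *drv_data, u16 v)
	{
		bfin_write16(drv_data->regs_base + 0x00, v);
	}
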
@@ -170,83 +188,71 @@ static int flush(struct driver_data *drv_data)
170 unsigned long limit = loops_per_jiffy << 1; 188 unsigned long limit = loops_per_jiffy << 1;
171 189
172 /* wait for stop and clear stat */ 190 /* wait for stop and clear stat */
173 while (!(read_STAT() & BIT_STAT_SPIF) && limit--) 191 while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && limit--)
174 continue; 192 cpu_relax();
175 193
176 write_STAT(BIT_STAT_CLR); 194 write_STAT(drv_data, BIT_STAT_CLR);
177 195
178 return limit; 196 return limit;
179} 197}
180 198
199/* Chip select operation functions for cs_change flag */
200static void cs_active(struct driver_data *drv_data, struct chip_data *chip)
201{
202 u16 flag = read_FLAG(drv_data);
203
204 flag |= chip->flag;
205 flag &= ~(chip->flag << 8);
206
207 write_FLAG(drv_data, flag);
208}
209
210static void cs_deactive(struct driver_data *drv_data, struct chip_data *chip)
211{
212 u16 flag = read_FLAG(drv_data);
213
214 flag |= (chip->flag << 8);
215
216 write_FLAG(drv_data, flag);
217
218 /* Move delay here for consistency */
219 if (chip->cs_chg_udelay)
220 udelay(chip->cs_chg_udelay);
221}
222
223#define MAX_SPI_SSEL 7
224
181/* stop controller and re-config current chip*/ 225/* stop controller and re-config current chip*/
182static void restore_state(struct driver_data *drv_data) 226static int restore_state(struct driver_data *drv_data)
183{ 227{
184 struct chip_data *chip = drv_data->cur_chip; 228 struct chip_data *chip = drv_data->cur_chip;
229 int ret = 0;
185 230
186 /* Clear status and disable clock */ 231 /* Clear status and disable clock */
187 write_STAT(BIT_STAT_CLR); 232 write_STAT(drv_data, BIT_STAT_CLR);
188 bfin_spi_disable(drv_data); 233 bfin_spi_disable(drv_data);
189 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); 234 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
190 235
191#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537) 236 /* Load the registers */
192 dev_dbg(&drv_data->pdev->dev, 237 write_CTRL(drv_data, chip->ctl_reg);
193 "chip select number is %d\n", chip->chip_select_num); 238 write_BAUD(drv_data, chip->baud);
194
195 switch (chip->chip_select_num) {
196 case 1:
197 bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00);
198 SSYNC();
199 break;
200
201 case 2:
202 case 3:
203 bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI);
204 SSYNC();
205 bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
206 SSYNC();
207 break;
208
209 case 4:
210 bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI);
211 SSYNC();
212 bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840);
213 SSYNC();
214 break;
215 239
216 case 5: 240 bfin_spi_enable(drv_data);
217 bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI); 241 cs_active(drv_data, chip);
218 SSYNC();
219 bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820);
220 SSYNC();
221 break;
222 242
223 case 6: 243 if (ret)
224 bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI); 244 dev_dbg(&drv_data->pdev->dev,
225 SSYNC(); 245 ": request chip select number %d failed\n",
226 bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810); 246 chip->chip_select_num);
227 SSYNC();
228 break;
229 247
230 case 7: 248 return ret;
231 bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI);
232 SSYNC();
233 bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
234 SSYNC();
235 break;
236 }
237#endif
238
239 /* Load the registers */
240 write_CTRL(chip->ctl_reg);
241 write_BAUD(chip->baud);
242 write_FLAG(chip->flag);
243} 249}
244 250
245/* used to kick off transfer in rx mode */ 251/* used to kick off transfer in rx mode */
246static unsigned short dummy_read(void) 252static unsigned short dummy_read(struct driver_data *drv_data)
247{ 253{
248 unsigned short tmp; 254 unsigned short tmp;
249 tmp = read_RDBR(); 255 tmp = read_RDBR(drv_data);
250 return tmp; 256 return tmp;
251} 257}
252 258
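
cs_active()/cs_deactive() centralize what the scattered write_FLAG(0xFF00 | chip->flag) sequences used to do. Per my reading of the BF5xx FLAG register (hedged): bit N in the low byte enables slave-select line N and bit N+8 drives its level, so asserting an active-low select means setting the enable bit and clearing the level bit. In sketch form:

	static inline u16 demo_flag_assert(u16 flag, u16 cs_bit)	/* cs_bit = chip->flag */
	{
		flag |= cs_bit;		/* enable the SSEL pin */
		flag &= ~(cs_bit << 8);	/* drive it low: select asserted */
		return flag;
	}

	static inline u16 demo_flag_deassert(u16 flag, u16 cs_bit)
	{
		flag |= (cs_bit << 8);	/* drive it high: select released */
		return flag;
	}
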
@@ -255,9 +261,9 @@ static void null_writer(struct driver_data *drv_data)
255 u8 n_bytes = drv_data->n_bytes; 261 u8 n_bytes = drv_data->n_bytes;
256 262
257 while (drv_data->tx < drv_data->tx_end) { 263 while (drv_data->tx < drv_data->tx_end) {
258 write_TDBR(0); 264 write_TDBR(drv_data, 0);
259 while ((read_STAT() & BIT_STAT_TXS)) 265 while ((read_STAT(drv_data) & BIT_STAT_TXS))
260 continue; 266 cpu_relax();
261 drv_data->tx += n_bytes; 267 drv_data->tx += n_bytes;
262 } 268 }
263} 269}
@@ -265,75 +271,78 @@ static void null_writer(struct driver_data *drv_data)
265static void null_reader(struct driver_data *drv_data) 271static void null_reader(struct driver_data *drv_data)
266{ 272{
267 u8 n_bytes = drv_data->n_bytes; 273 u8 n_bytes = drv_data->n_bytes;
268 dummy_read(); 274 dummy_read(drv_data);
269 275
270 while (drv_data->rx < drv_data->rx_end) { 276 while (drv_data->rx < drv_data->rx_end) {
271 while (!(read_STAT() & BIT_STAT_RXS)) 277 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
272 continue; 278 cpu_relax();
273 dummy_read(); 279 dummy_read(drv_data);
274 drv_data->rx += n_bytes; 280 drv_data->rx += n_bytes;
275 } 281 }
276} 282}
277 283
278static void u8_writer(struct driver_data *drv_data) 284static void u8_writer(struct driver_data *drv_data)
279{ 285{
280 dev_dbg(&drv_data->pdev->dev, 286 dev_dbg(&drv_data->pdev->dev,
281 "cr8-s is 0x%x\n", read_STAT()); 287 "cr8-s is 0x%x\n", read_STAT(drv_data));
288
289 /* poll for SPI completion before start */
290 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
291 cpu_relax();
292
282 while (drv_data->tx < drv_data->tx_end) { 293 while (drv_data->tx < drv_data->tx_end) {
283 write_TDBR(*(u8 *) (drv_data->tx)); 294 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
284 while (read_STAT() & BIT_STAT_TXS) 295 while (read_STAT(drv_data) & BIT_STAT_TXS)
285 continue; 296 cpu_relax();
286 ++drv_data->tx; 297 ++drv_data->tx;
287 } 298 }
288
289 /* poll for SPI completion before returning */
290 while (!(read_STAT() & BIT_STAT_SPIF))
291 continue;
292} 299}
293 300
294static void u8_cs_chg_writer(struct driver_data *drv_data) 301static void u8_cs_chg_writer(struct driver_data *drv_data)
295{ 302{
296 struct chip_data *chip = drv_data->cur_chip; 303 struct chip_data *chip = drv_data->cur_chip;
297 304
305 /* poll for SPI completion before start */
306 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
307 cpu_relax();
308
298 while (drv_data->tx < drv_data->tx_end) { 309 while (drv_data->tx < drv_data->tx_end) {
299 write_FLAG(chip->flag); 310 cs_active(drv_data, chip);
300 SSYNC(); 311
301 312 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
302 write_TDBR(*(u8 *) (drv_data->tx)); 313 while (read_STAT(drv_data) & BIT_STAT_TXS)
303 while (read_STAT() & BIT_STAT_TXS) 314 cpu_relax();
304 continue; 315
305 while (!(read_STAT() & BIT_STAT_SPIF)) 316 cs_deactive(drv_data, chip);
306 continue; 317
307 write_FLAG(0xFF00 | chip->flag);
308 SSYNC();
309 if (chip->cs_chg_udelay)
310 udelay(chip->cs_chg_udelay);
311 ++drv_data->tx; 318 ++drv_data->tx;
312 } 319 }
313 write_FLAG(0xFF00);
314 SSYNC();
315} 320}
316 321
317static void u8_reader(struct driver_data *drv_data) 322static void u8_reader(struct driver_data *drv_data)
318{ 323{
319 dev_dbg(&drv_data->pdev->dev, 324 dev_dbg(&drv_data->pdev->dev,
320 "cr-8 is 0x%x\n", read_STAT()); 325 "cr-8 is 0x%x\n", read_STAT(drv_data));
326
327 /* poll for SPI completion before start */
328 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
329 cpu_relax();
321 330
322 /* clear TDBR buffer before read(else it will be shifted out) */ 331 /* clear TDBR buffer before read(else it will be shifted out) */
323 write_TDBR(0xFFFF); 332 write_TDBR(drv_data, 0xFFFF);
324 333
325 dummy_read(); 334 dummy_read(drv_data);
326 335
327 while (drv_data->rx < drv_data->rx_end - 1) { 336 while (drv_data->rx < drv_data->rx_end - 1) {
328 while (!(read_STAT() & BIT_STAT_RXS)) 337 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
329 continue; 338 cpu_relax();
330 *(u8 *) (drv_data->rx) = read_RDBR(); 339 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
331 ++drv_data->rx; 340 ++drv_data->rx;
332 } 341 }
333 342
334 while (!(read_STAT() & BIT_STAT_RXS)) 343 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
335 continue; 344 cpu_relax();
336 *(u8 *) (drv_data->rx) = read_SHAW(); 345 *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
337 ++drv_data->rx; 346 ++drv_data->rx;
338} 347}
339 348
@@ -341,36 +350,47 @@ static void u8_cs_chg_reader(struct driver_data *drv_data)
341{ 350{
342 struct chip_data *chip = drv_data->cur_chip; 351 struct chip_data *chip = drv_data->cur_chip;
343 352
344 while (drv_data->rx < drv_data->rx_end) { 353 /* poll for SPI completion before start */
345 write_FLAG(chip->flag); 354 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
346 SSYNC(); 355 cpu_relax();
347 356
348 read_RDBR(); /* kick off */ 357 /* clear TDBR buffer before read(else it will be shifted out) */
349 while (!(read_STAT() & BIT_STAT_RXS)) 358 write_TDBR(drv_data, 0xFFFF);
350 continue; 359
351 while (!(read_STAT() & BIT_STAT_SPIF)) 360 cs_active(drv_data, chip);
352 continue; 361 dummy_read(drv_data);
353 *(u8 *) (drv_data->rx) = read_SHAW(); 362
354 write_FLAG(0xFF00 | chip->flag); 363 while (drv_data->rx < drv_data->rx_end - 1) {
355 SSYNC(); 364 cs_deactive(drv_data, chip);
356 if (chip->cs_chg_udelay) 365
357 udelay(chip->cs_chg_udelay); 366 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
367 cpu_relax();
368 cs_active(drv_data, chip);
369 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
358 ++drv_data->rx; 370 ++drv_data->rx;
359 } 371 }
360 write_FLAG(0xFF00); 372 cs_deactive(drv_data, chip);
361 SSYNC(); 373
374 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
375 cpu_relax();
376 *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
377 ++drv_data->rx;
362} 378}
363 379
364static void u8_duplex(struct driver_data *drv_data) 380static void u8_duplex(struct driver_data *drv_data)
365{ 381{
382 /* poll for SPI completion before start */
383 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
384 cpu_relax();
385
366 /* in duplex mode, clk is triggered by writing of TDBR */ 386 /* in duplex mode, clk is triggered by writing of TDBR */
367 while (drv_data->rx < drv_data->rx_end) { 387 while (drv_data->rx < drv_data->rx_end) {
368 write_TDBR(*(u8 *) (drv_data->tx)); 388 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
369 while (!(read_STAT() & BIT_STAT_SPIF)) 389 while (read_STAT(drv_data) & BIT_STAT_TXS)
370 continue; 390 cpu_relax();
371 while (!(read_STAT() & BIT_STAT_RXS)) 391 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
372 continue; 392 cpu_relax();
373 *(u8 *) (drv_data->rx) = read_RDBR(); 393 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
374 ++drv_data->rx; 394 ++drv_data->rx;
375 ++drv_data->tx; 395 ++drv_data->tx;
376 } 396 }
@@ -380,83 +400,89 @@ static void u8_cs_chg_duplex(struct driver_data *drv_data)
380{ 400{
381 struct chip_data *chip = drv_data->cur_chip; 401 struct chip_data *chip = drv_data->cur_chip;
382 402
403 /* poll for SPI completion before start */
404 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
405 cpu_relax();
406
383 while (drv_data->rx < drv_data->rx_end) { 407 while (drv_data->rx < drv_data->rx_end) {
384 write_FLAG(chip->flag); 408 cs_active(drv_data, chip);
385 SSYNC(); 409
386 410 write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
387 write_TDBR(*(u8 *) (drv_data->tx)); 411 while (read_STAT(drv_data) & BIT_STAT_TXS)
388 while (!(read_STAT() & BIT_STAT_SPIF)) 412 cpu_relax();
389 continue; 413 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
390 while (!(read_STAT() & BIT_STAT_RXS)) 414 cpu_relax();
391 continue; 415 *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
392 *(u8 *) (drv_data->rx) = read_RDBR(); 416
393 write_FLAG(0xFF00 | chip->flag); 417 cs_deactive(drv_data, chip);
394 SSYNC(); 418
395 if (chip->cs_chg_udelay)
396 udelay(chip->cs_chg_udelay);
397 ++drv_data->rx; 419 ++drv_data->rx;
398 ++drv_data->tx; 420 ++drv_data->tx;
399 } 421 }
400 write_FLAG(0xFF00);
401 SSYNC();
402} 422}
403 423
404static void u16_writer(struct driver_data *drv_data) 424static void u16_writer(struct driver_data *drv_data)
405{ 425{
406 dev_dbg(&drv_data->pdev->dev, 426 dev_dbg(&drv_data->pdev->dev,
407 "cr16 is 0x%x\n", read_STAT()); 427 "cr16 is 0x%x\n", read_STAT(drv_data));
428
429 /* poll for SPI completion before start */
430 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
431 cpu_relax();
408 432
409 while (drv_data->tx < drv_data->tx_end) { 433 while (drv_data->tx < drv_data->tx_end) {
410 write_TDBR(*(u16 *) (drv_data->tx)); 434 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
411 while ((read_STAT() & BIT_STAT_TXS)) 435 while ((read_STAT(drv_data) & BIT_STAT_TXS))
412 continue; 436 cpu_relax();
413 drv_data->tx += 2; 437 drv_data->tx += 2;
414 } 438 }
415
416 /* poll for SPI completion before returning */
417 while (!(read_STAT() & BIT_STAT_SPIF))
418 continue;
419} 439}
420 440
421static void u16_cs_chg_writer(struct driver_data *drv_data) 441static void u16_cs_chg_writer(struct driver_data *drv_data)
422{ 442{
423 struct chip_data *chip = drv_data->cur_chip; 443 struct chip_data *chip = drv_data->cur_chip;
424 444
445 /* poll for SPI completion before start */
446 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
447 cpu_relax();
448
425 while (drv_data->tx < drv_data->tx_end) { 449 while (drv_data->tx < drv_data->tx_end) {
426 write_FLAG(chip->flag); 450 cs_active(drv_data, chip);
427 SSYNC(); 451
428 452 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
429 write_TDBR(*(u16 *) (drv_data->tx)); 453 while ((read_STAT(drv_data) & BIT_STAT_TXS))
430 while ((read_STAT() & BIT_STAT_TXS)) 454 cpu_relax();
431 continue; 455
432 while (!(read_STAT() & BIT_STAT_SPIF)) 456 cs_deactive(drv_data, chip);
433 continue; 457
434 write_FLAG(0xFF00 | chip->flag);
435 SSYNC();
436 if (chip->cs_chg_udelay)
437 udelay(chip->cs_chg_udelay);
438 drv_data->tx += 2; 458 drv_data->tx += 2;
439 } 459 }
440 write_FLAG(0xFF00);
441 SSYNC();
442} 460}
443 461
444static void u16_reader(struct driver_data *drv_data) 462static void u16_reader(struct driver_data *drv_data)
445{ 463{
446 dev_dbg(&drv_data->pdev->dev, 464 dev_dbg(&drv_data->pdev->dev,
447 "cr-16 is 0x%x\n", read_STAT()); 465 "cr-16 is 0x%x\n", read_STAT(drv_data));
448 dummy_read(); 466
467 /* poll for SPI completion before start */
468 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
469 cpu_relax();
470
471 /* clear TDBR buffer before read(else it will be shifted out) */
472 write_TDBR(drv_data, 0xFFFF);
473
474 dummy_read(drv_data);
449 475
450 while (drv_data->rx < (drv_data->rx_end - 2)) { 476 while (drv_data->rx < (drv_data->rx_end - 2)) {
451 while (!(read_STAT() & BIT_STAT_RXS)) 477 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
452 continue; 478 cpu_relax();
453 *(u16 *) (drv_data->rx) = read_RDBR(); 479 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
454 drv_data->rx += 2; 480 drv_data->rx += 2;
455 } 481 }
456 482
457 while (!(read_STAT() & BIT_STAT_RXS)) 483 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
458 continue; 484 cpu_relax();
459 *(u16 *) (drv_data->rx) = read_SHAW(); 485 *(u16 *) (drv_data->rx) = read_SHAW(drv_data);
460 drv_data->rx += 2; 486 drv_data->rx += 2;
461} 487}
462 488
@@ -464,36 +490,47 @@ static void u16_cs_chg_reader(struct driver_data *drv_data)
464{ 490{
465 struct chip_data *chip = drv_data->cur_chip; 491 struct chip_data *chip = drv_data->cur_chip;
466 492
467 while (drv_data->rx < drv_data->rx_end) { 493 /* poll for SPI completion before start */
468 write_FLAG(chip->flag); 494 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
469 SSYNC(); 495 cpu_relax();
470 496
471 read_RDBR(); /* kick off */ 497 /* clear TDBR buffer before read(else it will be shifted out) */
472 while (!(read_STAT() & BIT_STAT_RXS)) 498 write_TDBR(drv_data, 0xFFFF);
473 continue; 499
474 while (!(read_STAT() & BIT_STAT_SPIF)) 500 cs_active(drv_data, chip);
475 continue; 501 dummy_read(drv_data);
476 *(u16 *) (drv_data->rx) = read_SHAW(); 502
477 write_FLAG(0xFF00 | chip->flag); 503 while (drv_data->rx < drv_data->rx_end - 2) {
478 SSYNC(); 504 cs_deactive(drv_data, chip);
479 if (chip->cs_chg_udelay) 505
480 udelay(chip->cs_chg_udelay); 506 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
507 cpu_relax();
508 cs_active(drv_data, chip);
509 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
481 drv_data->rx += 2; 510 drv_data->rx += 2;
482 } 511 }
483 write_FLAG(0xFF00); 512 cs_deactive(drv_data, chip);
484 SSYNC(); 513
514 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
515 cpu_relax();
516 *(u16 *) (drv_data->rx) = read_SHAW(drv_data);
517 drv_data->rx += 2;
485} 518}
486 519
487static void u16_duplex(struct driver_data *drv_data) 520static void u16_duplex(struct driver_data *drv_data)
488{ 521{
522 /* poll for SPI completion before start */
523 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
524 cpu_relax();
525
489 /* in duplex mode, clk is triggered by writing of TDBR */ 526 /* in duplex mode, clk is triggered by writing of TDBR */
490 while (drv_data->tx < drv_data->tx_end) { 527 while (drv_data->tx < drv_data->tx_end) {
491 write_TDBR(*(u16 *) (drv_data->tx)); 528 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
492 while (!(read_STAT() & BIT_STAT_SPIF)) 529 while (read_STAT(drv_data) & BIT_STAT_TXS)
493 continue; 530 cpu_relax();
494 while (!(read_STAT() & BIT_STAT_RXS)) 531 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
495 continue; 532 cpu_relax();
496 *(u16 *) (drv_data->rx) = read_RDBR(); 533 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
497 drv_data->rx += 2; 534 drv_data->rx += 2;
498 drv_data->tx += 2; 535 drv_data->tx += 2;
499 } 536 }
@@ -503,25 +540,25 @@ static void u16_cs_chg_duplex(struct driver_data *drv_data)
503{ 540{
504 struct chip_data *chip = drv_data->cur_chip; 541 struct chip_data *chip = drv_data->cur_chip;
505 542
543 /* poll for SPI completion before start */
544 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
545 cpu_relax();
546
506 while (drv_data->tx < drv_data->tx_end) { 547 while (drv_data->tx < drv_data->tx_end) {
507 write_FLAG(chip->flag); 548 cs_active(drv_data, chip);
508 SSYNC(); 549
509 550 write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
510 write_TDBR(*(u16 *) (drv_data->tx)); 551 while (read_STAT(drv_data) & BIT_STAT_TXS)
511 while (!(read_STAT() & BIT_STAT_SPIF)) 552 cpu_relax();
512 continue; 553 while (!(read_STAT(drv_data) & BIT_STAT_RXS))
513 while (!(read_STAT() & BIT_STAT_RXS)) 554 cpu_relax();
514 continue; 555 *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
515 *(u16 *) (drv_data->rx) = read_RDBR(); 556
516 write_FLAG(0xFF00 | chip->flag); 557 cs_deactive(drv_data, chip);
517 SSYNC(); 558
518 if (chip->cs_chg_udelay)
519 udelay(chip->cs_chg_udelay);
520 drv_data->rx += 2; 559 drv_data->rx += 2;
521 drv_data->tx += 2; 560 drv_data->tx += 2;
522 } 561 }
523 write_FLAG(0xFF00);
524 SSYNC();
525} 562}
526 563
527/* test if there is more transfer to be done */ 564/* test if there is more transfer to be done */
@@ -546,6 +583,7 @@ static void *next_transfer(struct driver_data *drv_data)
546 */ 583 */
547static void giveback(struct driver_data *drv_data) 584static void giveback(struct driver_data *drv_data)
548{ 585{
586 struct chip_data *chip = drv_data->cur_chip;
549 struct spi_transfer *last_transfer; 587 struct spi_transfer *last_transfer;
550 unsigned long flags; 588 unsigned long flags;
551 struct spi_message *msg; 589 struct spi_message *msg;
@@ -565,10 +603,13 @@ static void giveback(struct driver_data *drv_data)
565 603
566 /* disable chip select signal. And not stop spi in autobuffer mode */ 604 /* disable chip select signal. And not stop spi in autobuffer mode */
567 if (drv_data->tx_dma != 0xFFFF) { 605 if (drv_data->tx_dma != 0xFFFF) {
568 write_FLAG(0xFF00); 606 cs_deactive(drv_data, chip);
569 bfin_spi_disable(drv_data); 607 bfin_spi_disable(drv_data);
570 } 608 }
571 609
610 if (!drv_data->cs_change)
611 cs_deactive(drv_data, chip);
612
572 if (msg->complete) 613 if (msg->complete)
573 msg->complete(msg->context); 614 msg->complete(msg->context);
574} 615}
@@ -576,14 +617,15 @@ static void giveback(struct driver_data *drv_data)
576static irqreturn_t dma_irq_handler(int irq, void *dev_id) 617static irqreturn_t dma_irq_handler(int irq, void *dev_id)
577{ 618{
578 struct driver_data *drv_data = (struct driver_data *)dev_id; 619 struct driver_data *drv_data = (struct driver_data *)dev_id;
620 struct chip_data *chip = drv_data->cur_chip;
579 struct spi_message *msg = drv_data->cur_msg; 621 struct spi_message *msg = drv_data->cur_msg;
580 622
581 dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n"); 623 dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n");
582 clear_dma_irqstat(CH_SPI); 624 clear_dma_irqstat(drv_data->dma_channel);
583 625
584 /* Wait for DMA to complete */ 626 /* Wait for DMA to complete */
585 while (get_dma_curr_irqstat(CH_SPI) & DMA_RUN) 627 while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN)
586 continue; 628 cpu_relax();
587 629
588 /* 630 /*
589 * wait for the last transaction shifted out. HRM states: 631 * wait for the last transaction shifted out. HRM states:
@@ -592,18 +634,19 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
592 * register until it goes low for 2 successive reads 634 * register until it goes low for 2 successive reads
593 */ 635 */
594 if (drv_data->tx != NULL) { 636 if (drv_data->tx != NULL) {
595 while ((bfin_read_SPI_STAT() & TXS) || 637 while ((read_STAT(drv_data) & TXS) ||
596 (bfin_read_SPI_STAT() & TXS)) 638 (read_STAT(drv_data) & TXS))
597 continue; 639 cpu_relax();
598 } 640 }
599 641
600 while (!(bfin_read_SPI_STAT() & SPIF)) 642 while (!(read_STAT(drv_data) & SPIF))
601 continue; 643 cpu_relax();
602
603 bfin_spi_disable(drv_data);
604 644
605 msg->actual_length += drv_data->len_in_bytes; 645 msg->actual_length += drv_data->len_in_bytes;
606 646
647 if (drv_data->cs_change)
648 cs_deactive(drv_data, chip);
649
607 /* Move to next transfer */ 650 /* Move to next transfer */
608 msg->state = next_transfer(drv_data); 651 msg->state = next_transfer(drv_data);
609 652
@@ -613,8 +656,8 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
613 /* free the irq handler before next transfer */ 656 /* free the irq handler before next transfer */
614 dev_dbg(&drv_data->pdev->dev, 657 dev_dbg(&drv_data->pdev->dev,
615 "disable dma channel irq%d\n", 658 "disable dma channel irq%d\n",
616 CH_SPI); 659 drv_data->dma_channel);
617 dma_disable_irq(CH_SPI); 660 dma_disable_irq(drv_data->dma_channel);
618 661
619 return IRQ_HANDLED; 662 return IRQ_HANDLED;
620} 663}
@@ -690,31 +733,67 @@ static void pump_transfers(unsigned long data)
690 drv_data->rx_dma = transfer->rx_dma; 733 drv_data->rx_dma = transfer->rx_dma;
691 drv_data->tx_dma = transfer->tx_dma; 734 drv_data->tx_dma = transfer->tx_dma;
692 drv_data->len_in_bytes = transfer->len; 735 drv_data->len_in_bytes = transfer->len;
736 drv_data->cs_change = transfer->cs_change;
737
738 /* Bits per word setup */
739 switch (transfer->bits_per_word) {
740 case 8:
741 drv_data->n_bytes = 1;
742 width = CFG_SPI_WORDSIZE8;
743 drv_data->read = chip->cs_change_per_word ?
744 u8_cs_chg_reader : u8_reader;
745 drv_data->write = chip->cs_change_per_word ?
746 u8_cs_chg_writer : u8_writer;
747 drv_data->duplex = chip->cs_change_per_word ?
748 u8_cs_chg_duplex : u8_duplex;
749 break;
750
751 case 16:
752 drv_data->n_bytes = 2;
753 width = CFG_SPI_WORDSIZE16;
754 drv_data->read = chip->cs_change_per_word ?
755 u16_cs_chg_reader : u16_reader;
756 drv_data->write = chip->cs_change_per_word ?
757 u16_cs_chg_writer : u16_writer;
758 drv_data->duplex = chip->cs_change_per_word ?
759 u16_cs_chg_duplex : u16_duplex;
760 break;
761
762 default:
763 /* No change, the same as default setting */
764 drv_data->n_bytes = chip->n_bytes;
765 width = chip->width;
766 drv_data->write = drv_data->tx ? chip->write : null_writer;
767 drv_data->read = drv_data->rx ? chip->read : null_reader;
768 drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
769 break;
770 }
771 cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
772 cr |= (width << 8);
773 write_CTRL(drv_data, cr);
693 774
694 width = chip->width;
695 if (width == CFG_SPI_WORDSIZE16) { 775 if (width == CFG_SPI_WORDSIZE16) {
696 drv_data->len = (transfer->len) >> 1; 776 drv_data->len = (transfer->len) >> 1;
697 } else { 777 } else {
698 drv_data->len = transfer->len; 778 drv_data->len = transfer->len;
699 } 779 }
700 drv_data->write = drv_data->tx ? chip->write : null_writer; 780 dev_dbg(&drv_data->pdev->dev, "transfer: "
701 drv_data->read = drv_data->rx ? chip->read : null_reader; 781 "drv_data->write is %p, chip->write is %p, null_wr is %p\n",
702 drv_data->duplex = chip->duplex ? chip->duplex : null_writer; 782 drv_data->write, chip->write, null_writer);
703 dev_dbg(&drv_data->pdev->dev,
704 "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
705 drv_data->write, chip->write, null_writer);
706 783
707 /* speed and width have been set per message */ 784 /* speed and width have been set per message */
708 message->state = RUNNING_STATE; 785 message->state = RUNNING_STATE;
709 dma_config = 0; 786 dma_config = 0;
710 787
711 /* restore spi status for each spi transfer */ 788 /* Speed setup (surely valid because already checked) */
712 if (transfer->speed_hz) { 789 if (transfer->speed_hz)
713 write_BAUD(hz_to_spi_baud(transfer->speed_hz)); 790 write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz));
714 } else { 791 else
715 write_BAUD(chip->baud); 792 write_BAUD(drv_data, chip->baud);
716 } 793
717 write_FLAG(chip->flag); 794 write_STAT(drv_data, BIT_STAT_CLR);
795 cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
796 cs_active(drv_data, chip);
718 797
719 dev_dbg(&drv_data->pdev->dev, 798 dev_dbg(&drv_data->pdev->dev,
720 "now pumping a transfer: width is %d, len is %d\n", 799 "now pumping a transfer: width is %d, len is %d\n",
@@ -727,25 +806,25 @@ static void pump_transfers(unsigned long data)
727 */ 806 */
728 if (drv_data->cur_chip->enable_dma && drv_data->len > 6) { 807 if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
729 808
730 write_STAT(BIT_STAT_CLR); 809 disable_dma(drv_data->dma_channel);
731 disable_dma(CH_SPI); 810 clear_dma_irqstat(drv_data->dma_channel);
732 clear_dma_irqstat(CH_SPI);
733 bfin_spi_disable(drv_data); 811 bfin_spi_disable(drv_data);
734 812
735 /* config dma channel */ 813 /* config dma channel */
736 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); 814 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
737 if (width == CFG_SPI_WORDSIZE16) { 815 if (width == CFG_SPI_WORDSIZE16) {
738 set_dma_x_count(CH_SPI, drv_data->len); 816 set_dma_x_count(drv_data->dma_channel, drv_data->len);
739 set_dma_x_modify(CH_SPI, 2); 817 set_dma_x_modify(drv_data->dma_channel, 2);
740 dma_width = WDSIZE_16; 818 dma_width = WDSIZE_16;
741 } else { 819 } else {
742 set_dma_x_count(CH_SPI, drv_data->len); 820 set_dma_x_count(drv_data->dma_channel, drv_data->len);
743 set_dma_x_modify(CH_SPI, 1); 821 set_dma_x_modify(drv_data->dma_channel, 1);
744 dma_width = WDSIZE_8; 822 dma_width = WDSIZE_8;
745 } 823 }
746 824
747 /* set transfer width,direction. And enable spi */ 825 /* poll for SPI completion before start */
748 cr = (read_CTRL() & (~BIT_CTL_TIMOD)); 826 while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
827 cpu_relax();
749 828
750 /* dirty hack for autobuffer DMA mode */ 829 /* dirty hack for autobuffer DMA mode */
751 if (drv_data->tx_dma == 0xFFFF) { 830 if (drv_data->tx_dma == 0xFFFF) {
@@ -755,13 +834,18 @@ static void pump_transfers(unsigned long data)
755 /* no irq in autobuffer mode */ 834 /* no irq in autobuffer mode */
756 dma_config = 835 dma_config =
757 (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); 836 (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
758 set_dma_config(CH_SPI, dma_config); 837 set_dma_config(drv_data->dma_channel, dma_config);
759 set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx); 838 set_dma_start_addr(drv_data->dma_channel,
760 enable_dma(CH_SPI); 839 (unsigned long)drv_data->tx);
761 write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) | 840 enable_dma(drv_data->dma_channel);
762 (CFG_SPI_ENABLE << 14)); 841
763 842 /* start SPI transfer */
764 /* just return here, there can only be one transfer in this mode */ 843 write_CTRL(drv_data,
844 (cr | CFG_SPI_DMAWRITE | BIT_CTL_ENABLE));
845
846 /* just return here, there can only be one transfer
847 * in this mode
848 */
765 message->status = 0; 849 message->status = 0;
766 giveback(drv_data); 850 giveback(drv_data);
767 return; 851 return;
@@ -772,58 +856,51 @@ static void pump_transfers(unsigned long data)
772 /* set transfer mode, and enable SPI */ 856 /* set transfer mode, and enable SPI */
773 dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n"); 857 dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n");
774 858
775 /* disable SPI before write to TDBR */
776 write_CTRL(cr & ~BIT_CTL_ENABLE);
777
 778 /* clear tx reg so former data is not shifted out */ 859 /* clear tx reg so former data is not shifted out */
779 write_TDBR(0xFF); 860 write_TDBR(drv_data, 0xFFFF);
780 861
781 set_dma_x_count(CH_SPI, drv_data->len); 862 set_dma_x_count(drv_data->dma_channel, drv_data->len);
782 863
783 /* start dma */ 864 /* start dma */
784 dma_enable_irq(CH_SPI); 865 dma_enable_irq(drv_data->dma_channel);
785 dma_config = (WNR | RESTART | dma_width | DI_EN); 866 dma_config = (WNR | RESTART | dma_width | DI_EN);
786 set_dma_config(CH_SPI, dma_config); 867 set_dma_config(drv_data->dma_channel, dma_config);
787 set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx); 868 set_dma_start_addr(drv_data->dma_channel,
788 enable_dma(CH_SPI); 869 (unsigned long)drv_data->rx);
870 enable_dma(drv_data->dma_channel);
871
872 /* start SPI transfer */
873 write_CTRL(drv_data,
874 (cr | CFG_SPI_DMAREAD | BIT_CTL_ENABLE));
789 875
790 cr |=
791 CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE <<
792 14);
793 /* set transfer mode, and enable SPI */
794 write_CTRL(cr);
795 } else if (drv_data->tx != NULL) { 876 } else if (drv_data->tx != NULL) {
796 dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); 877 dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
797 878
798 /* start dma */ 879 /* start dma */
799 dma_enable_irq(CH_SPI); 880 dma_enable_irq(drv_data->dma_channel);
800 dma_config = (RESTART | dma_width | DI_EN); 881 dma_config = (RESTART | dma_width | DI_EN);
801 set_dma_config(CH_SPI, dma_config); 882 set_dma_config(drv_data->dma_channel, dma_config);
802 set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx); 883 set_dma_start_addr(drv_data->dma_channel,
803 enable_dma(CH_SPI); 884 (unsigned long)drv_data->tx);
804 885 enable_dma(drv_data->dma_channel);
805 write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) | 886
806 (CFG_SPI_ENABLE << 14)); 887 /* start SPI transfer */
807 888 write_CTRL(drv_data,
889 (cr | CFG_SPI_DMAWRITE | BIT_CTL_ENABLE));
808 } 890 }
809 } else { 891 } else {
810 /* IO mode write then read */ 892 /* IO mode write then read */
811 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); 893 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
812 894
813 write_STAT(BIT_STAT_CLR);
814
815 if (drv_data->tx != NULL && drv_data->rx != NULL) { 895 if (drv_data->tx != NULL && drv_data->rx != NULL) {
816 /* full duplex mode */ 896 /* full duplex mode */
817 BUG_ON((drv_data->tx_end - drv_data->tx) != 897 BUG_ON((drv_data->tx_end - drv_data->tx) !=
818 (drv_data->rx_end - drv_data->rx)); 898 (drv_data->rx_end - drv_data->rx));
819 cr = (read_CTRL() & (~BIT_CTL_TIMOD));
820 cr |= CFG_SPI_WRITE | (width << 8) |
821 (CFG_SPI_ENABLE << 14);
822 dev_dbg(&drv_data->pdev->dev, 899 dev_dbg(&drv_data->pdev->dev,
823 "IO duplex: cr is 0x%x\n", cr); 900 "IO duplex: cr is 0x%x\n", cr);
824 901
825 write_CTRL(cr); 902 /* set SPI transfer mode */
826 SSYNC(); 903 write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
827 904
828 drv_data->duplex(drv_data); 905 drv_data->duplex(drv_data);
829 906
@@ -831,14 +908,11 @@ static void pump_transfers(unsigned long data)
831 tranf_success = 0; 908 tranf_success = 0;
832 } else if (drv_data->tx != NULL) { 909 } else if (drv_data->tx != NULL) {
833 /* write only half duplex */ 910 /* write only half duplex */
834 cr = (read_CTRL() & (~BIT_CTL_TIMOD)); 911 dev_dbg(&drv_data->pdev->dev,
835 cr |= CFG_SPI_WRITE | (width << 8) |
836 (CFG_SPI_ENABLE << 14);
837 dev_dbg(&drv_data->pdev->dev,
838 "IO write: cr is 0x%x\n", cr); 912 "IO write: cr is 0x%x\n", cr);
839 913
840 write_CTRL(cr); 914 /* set SPI transfer mode */
841 SSYNC(); 915 write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
842 916
843 drv_data->write(drv_data); 917 drv_data->write(drv_data);
844 918
@@ -846,14 +920,11 @@ static void pump_transfers(unsigned long data)
846 tranf_success = 0; 920 tranf_success = 0;
847 } else if (drv_data->rx != NULL) { 921 } else if (drv_data->rx != NULL) {
848 /* read only half duplex */ 922 /* read only half duplex */
849 cr = (read_CTRL() & (~BIT_CTL_TIMOD)); 923 dev_dbg(&drv_data->pdev->dev,
850 cr |= CFG_SPI_READ | (width << 8) |
851 (CFG_SPI_ENABLE << 14);
852 dev_dbg(&drv_data->pdev->dev,
853 "IO read: cr is 0x%x\n", cr); 924 "IO read: cr is 0x%x\n", cr);
854 925
855 write_CTRL(cr); 926 /* set SPI transfer mode */
856 SSYNC(); 927 write_CTRL(drv_data, (cr | CFG_SPI_READ));
857 928
858 drv_data->read(drv_data); 929 drv_data->read(drv_data);
859 if (drv_data->rx != drv_data->rx_end) 930 if (drv_data->rx != drv_data->rx_end)
@@ -861,7 +932,7 @@ static void pump_transfers(unsigned long data)
861 } 932 }
862 933
863 if (!tranf_success) { 934 if (!tranf_success) {
864 dev_dbg(&drv_data->pdev->dev, 935 dev_dbg(&drv_data->pdev->dev,
865 "IO write error!\n"); 936 "IO write error!\n");
866 message->state = ERROR_STATE; 937 message->state = ERROR_STATE;
867 } else { 938 } else {
@@ -881,9 +952,11 @@ static void pump_transfers(unsigned long data)
881/* pop a msg from queue and kick off real transfer */ 952/* pop a msg from queue and kick off real transfer */
882static void pump_messages(struct work_struct *work) 953static void pump_messages(struct work_struct *work)
883{ 954{
884 struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages); 955 struct driver_data *drv_data;
885 unsigned long flags; 956 unsigned long flags;
886 957
958 drv_data = container_of(work, struct driver_data, pump_messages);
959
887 /* Lock queue and check for queue work */ 960 /* Lock queue and check for queue work */
888 spin_lock_irqsave(&drv_data->lock, flags); 961 spin_lock_irqsave(&drv_data->lock, flags);
889 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { 962 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
@@ -902,6 +975,14 @@ static void pump_messages(struct work_struct *work)
902 /* Extract head of queue */ 975 /* Extract head of queue */
903 drv_data->cur_msg = list_entry(drv_data->queue.next, 976 drv_data->cur_msg = list_entry(drv_data->queue.next,
904 struct spi_message, queue); 977 struct spi_message, queue);
978
979 /* Setup the SSP using the per chip configuration */
980 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
981 if (restore_state(drv_data)) {
982 spin_unlock_irqrestore(&drv_data->lock, flags);
983 return;
 984 }
985
905 list_del_init(&drv_data->cur_msg->queue); 986 list_del_init(&drv_data->cur_msg->queue);
906 987
907 /* Initial message state */ 988 /* Initial message state */
@@ -909,15 +990,12 @@ static void pump_messages(struct work_struct *work)
909 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 990 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
910 struct spi_transfer, transfer_list); 991 struct spi_transfer, transfer_list);
911 992
912 /* Setup the SSP using the per chip configuration */ 993 dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
913 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 994 "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
914 restore_state(drv_data); 995 drv_data->cur_chip->baud, drv_data->cur_chip->flag,
996 drv_data->cur_chip->ctl_reg);
997
915 dev_dbg(&drv_data->pdev->dev, 998 dev_dbg(&drv_data->pdev->dev,
916 "got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
917 drv_data->cur_chip->baud, drv_data->cur_chip->flag,
918 drv_data->cur_chip->ctl_reg);
919
920 dev_dbg(&drv_data->pdev->dev,
921 "the first transfer len is %d\n", 999 "the first transfer len is %d\n",
922 drv_data->cur_transfer->len); 1000 drv_data->cur_transfer->len);
923 1001
@@ -959,6 +1037,22 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
959 return 0; 1037 return 0;
960} 1038}
961 1039
1040#define MAX_SPI_SSEL 7
1041
1042static u16 ssel[3][MAX_SPI_SSEL] = {
1043 {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
1044 P_SPI0_SSEL4, P_SPI0_SSEL5,
1045 P_SPI0_SSEL6, P_SPI0_SSEL7},
1046
1047 {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
1048 P_SPI1_SSEL4, P_SPI1_SSEL5,
1049 P_SPI1_SSEL6, P_SPI1_SSEL7},
1050
1051 {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
1052 P_SPI2_SSEL4, P_SPI2_SSEL5,
1053 P_SPI2_SSEL6, P_SPI2_SSEL7},
1054};
1055
962/* first setup for new devices */ 1056/* first setup for new devices */
963static int setup(struct spi_device *spi) 1057static int setup(struct spi_device *spi)
964{ 1058{
@@ -993,6 +1087,18 @@ static int setup(struct spi_device *spi)
993 1087
994 /* chip_info isn't always needed */ 1088 /* chip_info isn't always needed */
995 if (chip_info) { 1089 if (chip_info) {
1090 /* Make sure people stop trying to set fields via ctl_reg
1091 * when they should actually be using common SPI framework.
1092 * Currently we let through: WOM EMISO PSSE GM SZ TIMOD.
1093 * Not sure if a user actually needs/uses any of these,
1094 * but let's assume (for now) they do.
1095 */
1096 if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) {
1097 dev_err(&spi->dev, "do not set bits in ctl_reg "
1098 "that the SPI framework manages\n");
1099 return -EINVAL;
1100 }
1101
996 chip->enable_dma = chip_info->enable_dma != 0 1102 chip->enable_dma = chip_info->enable_dma != 0
997 && drv_data->master_info->enable_dma; 1103 && drv_data->master_info->enable_dma;
998 chip->ctl_reg = chip_info->ctl_reg; 1104 chip->ctl_reg = chip_info->ctl_reg;
@@ -1015,20 +1121,20 @@ static int setup(struct spi_device *spi)
1015 * if any one SPI chip is registered and wants DMA, request the 1121 * if any one SPI chip is registered and wants DMA, request the
1016 * DMA channel for it 1122 * DMA channel for it
1017 */ 1123 */
1018 if (chip->enable_dma && !dma_requested) { 1124 if (chip->enable_dma && !drv_data->dma_requested) {
1019 /* register dma irq handler */ 1125 /* register dma irq handler */
1020 if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) { 1126 if (request_dma(drv_data->dma_channel, "BF53x_SPI_DMA") < 0) {
1021 dev_dbg(&spi->dev, 1127 dev_dbg(&spi->dev,
1022 "Unable to request BlackFin SPI DMA channel\n"); 1128 "Unable to request BlackFin SPI DMA channel\n");
1023 return -ENODEV; 1129 return -ENODEV;
1024 } 1130 }
1025 if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data) 1131 if (set_dma_callback(drv_data->dma_channel,
1026 < 0) { 1132 (void *)dma_irq_handler, drv_data) < 0) {
1027 dev_dbg(&spi->dev, "Unable to set dma callback\n"); 1133 dev_dbg(&spi->dev, "Unable to set dma callback\n");
1028 return -EPERM; 1134 return -EPERM;
1029 } 1135 }
1030 dma_disable_irq(CH_SPI); 1136 dma_disable_irq(drv_data->dma_channel);
1031 dma_requested = 1; 1137 drv_data->dma_requested = 1;
1032 } 1138 }
1033 1139
1034 /* 1140 /*
@@ -1077,6 +1183,14 @@ static int setup(struct spi_device *spi)
1077 1183
1078 spi_set_ctldata(spi, chip); 1184 spi_set_ctldata(spi, chip);
1079 1185
1186 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
1187 if ((chip->chip_select_num > 0)
1188 && (chip->chip_select_num <= spi->master->num_chipselect))
1189 peripheral_request(ssel[spi->master->bus_num]
1190 [chip->chip_select_num-1], DRV_NAME);
1191
1192 cs_deactive(drv_data, chip);
1193
1080 return 0; 1194 return 0;
1081} 1195}
1082 1196
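
The chip-select bookkeeping added to setup() and cleanup() boils down to one table lookup; a condensed sketch (assuming the ssel[][] table defined earlier, with the driver's 1-based chip selects):

	/* Hedged sketch of the indexing used by setup()/cleanup() above */
	static u16 cs_to_pin(unsigned int bus_num, unsigned int cs)
	{
		return ssel[bus_num][cs - 1];	/* e.g. bus 0, cs 1 -> P_SPI0_SSEL1 */
	}
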
@@ -1088,6 +1202,11 @@ static void cleanup(struct spi_device *spi)
1088{ 1202{
1089 struct chip_data *chip = spi_get_ctldata(spi); 1203 struct chip_data *chip = spi_get_ctldata(spi);
1090 1204
1205 if ((chip->chip_select_num > 0)
1206 && (chip->chip_select_num <= spi->master->num_chipselect))
1207 peripheral_free(ssel[spi->master->bus_num]
1208 [chip->chip_select_num-1]);
1209
1091 kfree(chip); 1210 kfree(chip);
1092} 1211}
1093 1212
@@ -1183,6 +1302,7 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
1183 struct bfin5xx_spi_master *platform_info; 1302 struct bfin5xx_spi_master *platform_info;
1184 struct spi_master *master; 1303 struct spi_master *master;
1185 struct driver_data *drv_data = 0; 1304 struct driver_data *drv_data = 0;
1305 struct resource *res;
1186 int status = 0; 1306 int status = 0;
1187 1307
1188 platform_info = dev->platform_data; 1308 platform_info = dev->platform_data;
@@ -1193,10 +1313,12 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
1193 dev_err(&pdev->dev, "can not alloc spi_master\n"); 1313 dev_err(&pdev->dev, "can not alloc spi_master\n");
1194 return -ENOMEM; 1314 return -ENOMEM;
1195 } 1315 }
1316
1196 drv_data = spi_master_get_devdata(master); 1317 drv_data = spi_master_get_devdata(master);
1197 drv_data->master = master; 1318 drv_data->master = master;
1198 drv_data->master_info = platform_info; 1319 drv_data->master_info = platform_info;
1199 drv_data->pdev = pdev; 1320 drv_data->pdev = pdev;
1321 drv_data->pin_req = platform_info->pin_req;
1200 1322
1201 master->bus_num = pdev->id; 1323 master->bus_num = pdev->id;
1202 master->num_chipselect = platform_info->num_chipselect; 1324 master->num_chipselect = platform_info->num_chipselect;
@@ -1204,15 +1326,38 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
1204 master->setup = setup; 1326 master->setup = setup;
1205 master->transfer = transfer; 1327 master->transfer = transfer;
1206 1328
1329 /* Find and map our resources */
1330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1331 if (res == NULL) {
1332 dev_err(dev, "Cannot get IORESOURCE_MEM\n");
1333 status = -ENOENT;
1334 goto out_error_get_res;
1335 }
1336
1337 drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1));
1338 if (drv_data->regs_base == NULL) {
1339 dev_err(dev, "Cannot map IO\n");
1340 status = -ENXIO;
1341 goto out_error_ioremap;
1342 }
1343
1344 drv_data->dma_channel = platform_get_irq(pdev, 0);
1345 if (drv_data->dma_channel < 0) {
1346 dev_err(dev, "No DMA channel specified\n");
1347 status = -ENOENT;
1348 goto out_error_no_dma_ch;
1349 }
1350
1207 /* Initialize and start the queue */ 1351 /* Initialize and start the queue */
1208 status = init_queue(drv_data); 1352 status = init_queue(drv_data);
1209 if (status != 0) { 1353 if (status != 0) {
1210 dev_err(&pdev->dev, "problem initializing queue\n"); 1354 dev_err(dev, "problem initializing queue\n");
1211 goto out_error_queue_alloc; 1355 goto out_error_queue_alloc;
1212 } 1356 }
1357
1213 status = start_queue(drv_data); 1358 status = start_queue(drv_data);
1214 if (status != 0) { 1359 if (status != 0) {
1215 dev_err(&pdev->dev, "problem starting queue\n"); 1360 dev_err(dev, "problem starting queue\n");
1216 goto out_error_queue_alloc; 1361 goto out_error_queue_alloc;
1217 } 1362 }
1218 1363
@@ -1220,15 +1365,30 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
1220 platform_set_drvdata(pdev, drv_data); 1365 platform_set_drvdata(pdev, drv_data);
1221 status = spi_register_master(master); 1366 status = spi_register_master(master);
1222 if (status != 0) { 1367 if (status != 0) {
1223 dev_err(&pdev->dev, "problem registering spi master\n"); 1368 dev_err(dev, "problem registering spi master\n");
1224 goto out_error_queue_alloc; 1369 goto out_error_queue_alloc;
1225 } 1370 }
1226 dev_dbg(&pdev->dev, "controller probe successfully\n"); 1371
1372 status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
1373 if (status != 0) {
1374 dev_err(&pdev->dev, "requesting peripherals failed\n");
1375 goto out_error;
1376 }
1377
1378 dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n",
1379 DRV_DESC, DRV_VERSION, drv_data->regs_base,
1380 drv_data->dma_channel);
1227 return status; 1381 return status;
1228 1382
1229 out_error_queue_alloc: 1383out_error_queue_alloc:
1230 destroy_queue(drv_data); 1384 destroy_queue(drv_data);
1385out_error_no_dma_ch:
1386 iounmap((void *) drv_data->regs_base);
1387out_error_ioremap:
1388out_error_get_res:
1389out_error:
1231 spi_master_put(master); 1390 spi_master_put(master);
1391
1232 return status; 1392 return status;
1233} 1393}
1234 1394
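
The reworked probe expects the platform device to describe the controller instead of hard-coding CH_SPI and the register block. A hypothetical registration consistent with the calls above (addresses illustrative, not from this patch):

	static struct resource bfin_spi0_resource[] = {
		{
			.start = 0xFFC00500,	/* illustrative SPI MMIO base */
			.end   = 0xFFC005FF,
			.flags = IORESOURCE_MEM,	/* found via platform_get_resource() */
		},
		{
			.start = CH_SPI,	/* DMA channel, read via platform_get_irq() */
			.end   = CH_SPI,
			.flags = IORESOURCE_IRQ,
		},
	};
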
@@ -1251,13 +1411,15 @@ static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)
1251 1411
1252 /* Release DMA */ 1412 /* Release DMA */
1253 if (drv_data->master_info->enable_dma) { 1413 if (drv_data->master_info->enable_dma) {
1254 if (dma_channel_active(CH_SPI)) 1414 if (dma_channel_active(drv_data->dma_channel))
1255 free_dma(CH_SPI); 1415 free_dma(drv_data->dma_channel);
1256 } 1416 }
1257 1417
1258 /* Disconnect from the SPI framework */ 1418 /* Disconnect from the SPI framework */
1259 spi_unregister_master(drv_data->master); 1419 spi_unregister_master(drv_data->master);
1260 1420
1421 peripheral_free_list(drv_data->pin_req);
1422
1261 /* Prevent double remove */ 1423 /* Prevent double remove */
1262 platform_set_drvdata(pdev, NULL); 1424 platform_set_drvdata(pdev, NULL);
1263 1425
@@ -1305,7 +1467,7 @@ static int bfin5xx_spi_resume(struct platform_device *pdev)
1305MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */ 1467MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */
1306static struct platform_driver bfin5xx_spi_driver = { 1468static struct platform_driver bfin5xx_spi_driver = {
1307 .driver = { 1469 .driver = {
1308 .name = "bfin-spi-master", 1470 .name = DRV_NAME,
1309 .owner = THIS_MODULE, 1471 .owner = THIS_MODULE,
1310 }, 1472 },
1311 .suspend = bfin5xx_spi_suspend, 1473 .suspend = bfin5xx_spi_suspend,
diff --git a/fs/aio.c b/fs/aio.c
index f12db415c0f6..9dec7d2d546e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1161,7 +1161,12 @@ retry:
1161 ret = 0; 1161 ret = 0;
1162 if (to.timed_out) /* Only check after read evt */ 1162 if (to.timed_out) /* Only check after read evt */
1163 break; 1163 break;
1164 io_schedule(); 1164 /* Try to only show up in io wait if there are ops
1165 * in flight */
1166 if (ctx->reqs_active)
1167 io_schedule();
1168 else
1169 schedule();
1165 if (signal_pending(tsk)) { 1170 if (signal_pending(tsk)) {
1166 ret = -EINTR; 1171 ret = -EINTR;
1167 break; 1172 break;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 294c41baef6e..a64a71d444f5 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -178,7 +178,8 @@ static void bfs_delete_inode(struct inode *inode)
178 brelse(bh); 178 brelse(bh);
179 179
180 if (bi->i_dsk_ino) { 180 if (bi->i_dsk_ino) {
181 info->si_freeb += BFS_FILEBLOCKS(bi); 181 if (bi->i_sblock)
182 info->si_freeb += bi->i_eblock + 1 - bi->i_sblock;
182 info->si_freei++; 183 info->si_freei++;
183 clear_bit(ino, info->si_imap); 184 clear_bit(ino, info->si_imap);
184 dump_imap("delete_inode", s); 185 dump_imap("delete_inode", s);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index f02fdef463a7..c312adcba4fc 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -134,9 +134,10 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
134 pmode is the existing mode (we only want to overwrite part of this 134 pmode is the existing mode (we only want to overwrite part of this
135 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007 135 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
136*/ 136*/
137static void access_flags_to_mode(__u32 ace_flags, int type, umode_t *pmode, 137static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
138 umode_t *pbits_to_set) 138 umode_t *pbits_to_set)
139{ 139{
140 __u32 flags = le32_to_cpu(ace_flags);
140 /* the order of ACEs is important. The canonical order is to begin with 141 /* the order of ACEs is important. The canonical order is to begin with
141 DENY entries followed by ALLOW, otherwise an allow entry could be 142 DENY entries followed by ALLOW, otherwise an allow entry could be
142 encountered first, making the subsequent deny entry like "dead code" 143 encountered first, making the subsequent deny entry like "dead code"
@@ -146,17 +147,17 @@ static void access_flags_to_mode(__u32 ace_flags, int type, umode_t *pmode,
146 /* For deny ACEs we change the mask so that subsequent allow access 147 /* For deny ACEs we change the mask so that subsequent allow access
147 control entries do not turn on the bits we are denying */ 148 control entries do not turn on the bits we are denying */
148 if (type == ACCESS_DENIED) { 149 if (type == ACCESS_DENIED) {
149 if (ace_flags & GENERIC_ALL) { 150 if (flags & GENERIC_ALL) {
150 *pbits_to_set &= ~S_IRWXUGO; 151 *pbits_to_set &= ~S_IRWXUGO;
151 } 152 }
152 if ((ace_flags & GENERIC_WRITE) || 153 if ((flags & GENERIC_WRITE) ||
153 ((ace_flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) 154 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
154 *pbits_to_set &= ~S_IWUGO; 155 *pbits_to_set &= ~S_IWUGO;
155 if ((ace_flags & GENERIC_READ) || 156 if ((flags & GENERIC_READ) ||
156 ((ace_flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) 157 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
157 *pbits_to_set &= ~S_IRUGO; 158 *pbits_to_set &= ~S_IRUGO;
158 if ((ace_flags & GENERIC_EXECUTE) || 159 if ((flags & GENERIC_EXECUTE) ||
159 ((ace_flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) 160 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
160 *pbits_to_set &= ~S_IXUGO; 161 *pbits_to_set &= ~S_IXUGO;
161 return; 162 return;
162 } else if (type != ACCESS_ALLOWED) { 163 } else if (type != ACCESS_ALLOWED) {
@@ -165,25 +166,25 @@ static void access_flags_to_mode(__u32 ace_flags, int type, umode_t *pmode,
165 } 166 }
166 /* else ACCESS_ALLOWED type */ 167 /* else ACCESS_ALLOWED type */
167 168
168 if (ace_flags & GENERIC_ALL) { 169 if (flags & GENERIC_ALL) {
169 *pmode |= (S_IRWXUGO & (*pbits_to_set)); 170 *pmode |= (S_IRWXUGO & (*pbits_to_set));
170#ifdef CONFIG_CIFS_DEBUG2 171#ifdef CONFIG_CIFS_DEBUG2
171 cFYI(1, ("all perms")); 172 cFYI(1, ("all perms"));
172#endif 173#endif
173 return; 174 return;
174 } 175 }
175 if ((ace_flags & GENERIC_WRITE) || 176 if ((flags & GENERIC_WRITE) ||
176 ((ace_flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) 177 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
177 *pmode |= (S_IWUGO & (*pbits_to_set)); 178 *pmode |= (S_IWUGO & (*pbits_to_set));
178 if ((ace_flags & GENERIC_READ) || 179 if ((flags & GENERIC_READ) ||
179 ((ace_flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) 180 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
180 *pmode |= (S_IRUGO & (*pbits_to_set)); 181 *pmode |= (S_IRUGO & (*pbits_to_set));
181 if ((ace_flags & GENERIC_EXECUTE) || 182 if ((flags & GENERIC_EXECUTE) ||
182 ((ace_flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) 183 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
183 *pmode |= (S_IXUGO & (*pbits_to_set)); 184 *pmode |= (S_IXUGO & (*pbits_to_set));
184 185
185#ifdef CONFIG_CIFS_DEBUG2 186#ifdef CONFIG_CIFS_DEBUG2
186 cFYI(1, ("access flags 0x%x mode now 0x%x", ace_flags, *pmode)); 187 cFYI(1, ("access flags 0x%x mode now 0x%x", flags, *pmode));
187#endif 188#endif
188 return; 189 return;
189} 190}
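
The pattern behind this hunk, sketched once: convert the little-endian wire value a single time, then test host-order flags, so the comparisons also work on big-endian machines.

	/* Hedged illustration, not literal cifs code */
	static int wire_allows_read(__le32 ace_flags)
	{
		u32 flags = le32_to_cpu(ace_flags);	/* one conversion up front */

		/* testing ace_flags directly would be wrong on big-endian hosts */
		return (flags & GENERIC_READ) ||
		       ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS);
	}
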
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 47552d4a6324..0f69c416eebc 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -602,15 +602,15 @@ int __journal_remove_checkpoint(struct journal_head *jh)
602 602
603 /* 603 /*
604 * There is one special case to worry about: if we have just pulled the 604 * There is one special case to worry about: if we have just pulled the
605 * buffer off a committing transaction's forget list, then even if the 605 * buffer off a running or committing transaction's checkpoint list,
606 * checkpoint list is empty, the transaction obviously cannot be 606 * then even if the checkpoint list is empty, the transaction obviously
607 * dropped! 607 * cannot be dropped!
608 * 608 *
609 * The locking here around j_committing_transaction is a bit sleazy. 609 * The locking here around t_state is a bit sleazy.
610 * See the comment at the end of journal_commit_transaction(). 610 * See the comment at the end of journal_commit_transaction().
611 */ 611 */
612 if (transaction == journal->j_committing_transaction) { 612 if (transaction->t_state != T_FINISHED) {
613 JBUFFER_TRACE(jh, "belongs to committing transaction"); 613 JBUFFER_TRACE(jh, "belongs to running/committing transaction");
614 goto out; 614 goto out;
615 } 615 }
616 616
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 8f1f2aa5fb39..610264b99a8e 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -858,10 +858,10 @@ restart_loop:
858 } 858 }
859 spin_unlock(&journal->j_list_lock); 859 spin_unlock(&journal->j_list_lock);
860 /* 860 /*
861 * This is a bit sleazy. We borrow j_list_lock to protect 861 * This is a bit sleazy. We use j_list_lock to protect transition
862 * journal->j_committing_transaction in __journal_remove_checkpoint. 862 * of a transaction into T_FINISHED state and calling
863 * Really, __journal_remove_checkpoint should be using j_state_lock but 863 * __journal_drop_transaction(). Otherwise we could race with
864 * it's a bit hassle to hold that across __journal_remove_checkpoint 864 * other checkpointing code processing the transaction...
865 */ 865 */
866 spin_lock(&journal->j_state_lock); 866 spin_lock(&journal->j_state_lock);
867 spin_lock(&journal->j_list_lock); 867 spin_lock(&journal->j_list_lock);
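
Condensed, the rule the checkpoint.c and commit.c hunks establish (a hedged sketch; the real code also walks t_checkpoint_io_list): the flip to T_FINISHED and any __journal_drop_transaction() both happen under j_list_lock, so __journal_remove_checkpoint() can test t_state instead of j_committing_transaction.

	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;	/* guarded by j_list_lock */
	if (commit_transaction->t_checkpoint_list == NULL)
		__journal_drop_transaction(journal, commit_transaction);
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&journal->j_state_lock);
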
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index d84bd155997b..ee50c9610e7f 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -72,14 +72,6 @@
72 72
73#include "tcp_internal.h" 73#include "tcp_internal.h"
74 74
75/*
76 * The linux network stack isn't sparse endian clean.. It has macros like
77 * ntohs() which perform the endian checks and structs like sockaddr_in
78 * which aren't annotated. So __force is found here to get the build
79 * clean. When they emerge from the dark ages and annotate the code
80 * we can remove these.
81 */
82
83#define SC_NODEF_FMT "node %s (num %u) at %u.%u.%u.%u:%u" 75#define SC_NODEF_FMT "node %s (num %u) at %u.%u.%u.%u:%u"
84#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \ 76#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
85 NIPQUAD(sc->sc_node->nd_ipv4_address), \ 77 NIPQUAD(sc->sc_node->nd_ipv4_address), \
@@ -1500,7 +1492,7 @@ static void o2net_start_connect(struct work_struct *work)
1500 1492
1501 myaddr.sin_family = AF_INET; 1493 myaddr.sin_family = AF_INET;
1502 myaddr.sin_addr.s_addr = mynode->nd_ipv4_address; 1494 myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
1503 myaddr.sin_port = (__force u16)htons(0); /* any port */ 1495 myaddr.sin_port = htons(0); /* any port */
1504 1496
1505 ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr, 1497 ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
1506 sizeof(myaddr)); 1498 sizeof(myaddr));
@@ -1701,11 +1693,11 @@ static int o2net_accept_one(struct socket *sock)
1701 if (ret < 0) 1693 if (ret < 0)
1702 goto out; 1694 goto out;
1703 1695
1704 node = o2nm_get_node_by_ip((__force __be32)sin.sin_addr.s_addr); 1696 node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
1705 if (node == NULL) { 1697 if (node == NULL) {
1706 mlog(ML_NOTICE, "attempt to connect from unknown node at " 1698 mlog(ML_NOTICE, "attempt to connect from unknown node at "
1707 "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr), 1699 "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr),
1708 ntohs((__force __be16)sin.sin_port)); 1700 ntohs(sin.sin_port));
1709 ret = -EINVAL; 1701 ret = -EINVAL;
1710 goto out; 1702 goto out;
1711 } 1703 }
@@ -1714,7 +1706,7 @@ static int o2net_accept_one(struct socket *sock)
1714 mlog(ML_NOTICE, "unexpected connect attempted from a lower " 1706 mlog(ML_NOTICE, "unexpected connect attempted from a lower "
1715 "numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n", 1707 "numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n",
1716 node->nd_name, NIPQUAD(sin.sin_addr.s_addr), 1708 node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
1717 ntohs((__force __be16)sin.sin_port), node->nd_num); 1709 ntohs(sin.sin_port), node->nd_num);
1718 ret = -EINVAL; 1710 ret = -EINVAL;
1719 goto out; 1711 goto out;
1720 } 1712 }
@@ -1725,7 +1717,7 @@ static int o2net_accept_one(struct socket *sock)
1725 mlog(ML_CONN, "attempt to connect from node '%s' at " 1717 mlog(ML_CONN, "attempt to connect from node '%s' at "
1726 "%u.%u.%u.%u:%d but it isn't heartbeating\n", 1718 "%u.%u.%u.%u:%d but it isn't heartbeating\n",
1727 node->nd_name, NIPQUAD(sin.sin_addr.s_addr), 1719 node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
1728 ntohs((__force __be16)sin.sin_port)); 1720 ntohs(sin.sin_port));
1729 ret = -EINVAL; 1721 ret = -EINVAL;
1730 goto out; 1722 goto out;
1731 } 1723 }
@@ -1742,7 +1734,7 @@ static int o2net_accept_one(struct socket *sock)
1742 mlog(ML_NOTICE, "attempt to connect from node '%s' at " 1734 mlog(ML_NOTICE, "attempt to connect from node '%s' at "
1743 "%u.%u.%u.%u:%d but it already has an open connection\n", 1735 "%u.%u.%u.%u:%d but it already has an open connection\n",
1744 node->nd_name, NIPQUAD(sin.sin_addr.s_addr), 1736 node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
1745 ntohs((__force __be16)sin.sin_port)); 1737 ntohs(sin.sin_port));
1746 goto out; 1738 goto out;
1747 } 1739 }
1748 1740
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5fccfe222a63..8d49838e5554 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -595,6 +595,7 @@ static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
595 ent->namelen = len; 595 ent->namelen = len;
596 ent->mode = mode; 596 ent->mode = mode;
597 ent->nlink = nlink; 597 ent->nlink = nlink;
598 atomic_set(&ent->count, 1);
598 ent->pde_users = 0; 599 ent->pde_users = 0;
599 spin_lock_init(&ent->pde_unload_lock); 600 spin_lock_init(&ent->pde_unload_lock);
600 ent->pde_unload_completion = NULL; 601 ent->pde_unload_completion = NULL;
@@ -692,7 +693,6 @@ void free_proc_entry(struct proc_dir_entry *de)
692 693
693/* 694/*
694 * Remove a /proc entry and free it if it's not currently in use. 695 * Remove a /proc entry and free it if it's not currently in use.
695 * If it is in use, we set the 'deleted' flag.
696 */ 696 */
697void remove_proc_entry(const char *name, struct proc_dir_entry *parent) 697void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
698{ 698{
@@ -741,13 +741,8 @@ continue_removing:
741 parent->nlink--; 741 parent->nlink--;
742 de->nlink = 0; 742 de->nlink = 0;
743 WARN_ON(de->subdir); 743 WARN_ON(de->subdir);
744 if (!atomic_read(&de->count)) 744 if (atomic_dec_and_test(&de->count))
745 free_proc_entry(de); 745 free_proc_entry(de);
746 else {
747 de->deleted = 1;
748 printk("remove_proc_entry: %s/%s busy, count=%d\n",
749 parent->name, de->name, atomic_read(&de->count));
750 }
751 break; 746 break;
752 } 747 }
753 spin_unlock(&proc_subdir_lock); 748 spin_unlock(&proc_subdir_lock);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index abe6a3f04368..1a551d92e1d8 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -43,13 +43,8 @@ void de_put(struct proc_dir_entry *de)
43 return; 43 return;
44 } 44 }
45 45
46 if (atomic_dec_and_test(&de->count)) { 46 if (atomic_dec_and_test(&de->count))
47 if (de->deleted) { 47 free_proc_entry(de);
48 printk("de_put: deferred delete of %s\n",
49 de->name);
50 free_proc_entry(de);
51 }
52 }
53 unlock_kernel(); 48 unlock_kernel();
54 } 49 }
55} 50}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index ec9cb3b6c93b..81f99e691f99 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -207,6 +207,7 @@ struct proc_dir_entry proc_root = {
207 .name = "/proc", 207 .name = "/proc",
208 .mode = S_IFDIR | S_IRUGO | S_IXUGO, 208 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
209 .nlink = 2, 209 .nlink = 2,
210 .count = ATOMIC_INIT(1),
210 .proc_iops = &proc_root_inode_operations, 211 .proc_iops = &proc_root_inode_operations,
211 .proc_fops = &proc_root_operations, 212 .proc_fops = &proc_root_operations,
212 .parent = &proc_root, 213 .parent = &proc_root,
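
Taken together, the three proc changes replace the 'deleted' flag with plain reference counting; a sketch of the resulting lifetime (not literal kernel code):

	/* creation: proc_create() hands out the entry with one reference */
	atomic_set(&ent->count, 1);

	/* each de_get()/de_put() pair brackets a user; the creation
	 * reference is dropped by remove_proc_entry(), and whichever
	 * side reaches zero frees the entry
	 */
	if (atomic_dec_and_test(&de->count))
		free_proc_entry(de);
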
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 9aa7a06e093f..001144621672 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -420,12 +420,6 @@ static void *r_start(struct seq_file *m, loff_t * pos)
420 return NULL; 420 return NULL;
421 421
422 up_write(&s->s_umount); 422 up_write(&s->s_umount);
423
424 if (de->deleted) {
425 deactivate_super(s);
426 return NULL;
427 }
428
429 return s; 423 return s;
430} 424}
431 425
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 30f8c2bb0c3e..aaf2878305ce 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -179,7 +179,7 @@ bad_entry:
179 goto fail; 179 goto fail;
180Eend: 180Eend:
181 p = (struct ufs_dir_entry *)(kaddr + offs); 181 p = (struct ufs_dir_entry *)(kaddr + offs);
182 ufs_error (sb, "ext2_check_page", 182 ufs_error(sb, __FUNCTION__,
183 "entry in directory #%lu spans the page boundary" 183 "entry in directory #%lu spans the page boundary"
184 "offset=%lu", 184 "offset=%lu",
185 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs); 185 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index c78c04fd993f..0072cb33ebec 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -755,13 +755,13 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
755 break; 755 break;
756 756
757 case UFS_MOUNT_UFSTYPE_NEXTSTEP: 757 case UFS_MOUNT_UFSTYPE_NEXTSTEP:
758 /*TODO: check may be we need set special dir block size?*/
759 UFSD("ufstype=nextstep\n"); 758 UFSD("ufstype=nextstep\n");
760 uspi->s_fsize = block_size = 1024; 759 uspi->s_fsize = block_size = 1024;
761 uspi->s_fmask = ~(1024 - 1); 760 uspi->s_fmask = ~(1024 - 1);
762 uspi->s_fshift = 10; 761 uspi->s_fshift = 10;
763 uspi->s_sbsize = super_block_size = 2048; 762 uspi->s_sbsize = super_block_size = 2048;
764 uspi->s_sbbase = 0; 763 uspi->s_sbbase = 0;
764 uspi->s_dirblksize = 1024;
765 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD; 765 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
766 if (!(sb->s_flags & MS_RDONLY)) { 766 if (!(sb->s_flags & MS_RDONLY)) {
767 if (!silent) 767 if (!silent)
@@ -771,13 +771,13 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
771 break; 771 break;
772 772
773 case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD: 773 case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
774 /*TODO: check may be we need set special dir block size?*/
775 UFSD("ufstype=nextstep-cd\n"); 774 UFSD("ufstype=nextstep-cd\n");
776 uspi->s_fsize = block_size = 2048; 775 uspi->s_fsize = block_size = 2048;
777 uspi->s_fmask = ~(2048 - 1); 776 uspi->s_fmask = ~(2048 - 1);
778 uspi->s_fshift = 11; 777 uspi->s_fshift = 11;
779 uspi->s_sbsize = super_block_size = 2048; 778 uspi->s_sbsize = super_block_size = 2048;
780 uspi->s_sbbase = 0; 779 uspi->s_sbbase = 0;
780 uspi->s_dirblksize = 1024;
781 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD; 781 flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
782 if (!(sb->s_flags & MS_RDONLY)) { 782 if (!(sb->s_flags & MS_RDONLY)) {
783 if (!silent) 783 if (!silent)
diff --git a/include/asm-blackfin/bfin5xx_spi.h b/include/asm-blackfin/bfin5xx_spi.h
index f617d8765451..1a0b57f6a3d4 100644
--- a/include/asm-blackfin/bfin5xx_spi.h
+++ b/include/asm-blackfin/bfin5xx_spi.h
@@ -152,6 +152,7 @@
152struct bfin5xx_spi_master { 152struct bfin5xx_spi_master {
153 u16 num_chipselect; 153 u16 num_chipselect;
154 u8 enable_dma; 154 u8 enable_dma;
155 u16 pin_req[4];
155}; 156};
156 157
157/* spi_board_info.controller_data for SPI slave devices, 158/* spi_board_info.controller_data for SPI slave devices,
@@ -162,7 +163,7 @@ struct bfin5xx_spi_chip {
162 u8 enable_dma; 163 u8 enable_dma;
163 u8 bits_per_word; 164 u8 bits_per_word;
164 u8 cs_change_per_word; 165 u8 cs_change_per_word;
165 u8 cs_chg_udelay; 166 u16 cs_chg_udelay; /* Some devices require 16-bit delays */
166}; 167};
167 168
168#endif /* _SPI_CHANNEL_H_ */ 169#endif /* _SPI_CHANNEL_H_ */
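
A hypothetical board file filling in the two new fields above: pin_req is the zero-terminated list handed to peripheral_request_list() in probe, and cs_chg_udelay now has 16-bit range.

	static struct bfin5xx_spi_master bfin_spi0_info = {
		.num_chipselect = 8,
		.enable_dma     = 1,
		.pin_req        = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
	};
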
diff --git a/include/asm-blackfin/mach-bf533/portmux.h b/include/asm-blackfin/mach-bf533/portmux.h
index b88d7a03ee3e..137f4884acfe 100644
--- a/include/asm-blackfin/mach-bf533/portmux.h
+++ b/include/asm-blackfin/mach-bf533/portmux.h
@@ -42,7 +42,7 @@
42#define P_SPORT0_DRPRI (P_DONTCARE) 42#define P_SPORT0_DRPRI (P_DONTCARE)
43 43
44#define P_SPI0_MOSI (P_DONTCARE) 44#define P_SPI0_MOSI (P_DONTCARE)
45#define P_SPI0_MIS0 (P_DONTCARE) 45#define P_SPI0_MISO (P_DONTCARE)
46#define P_SPI0_SCK (P_DONTCARE) 46#define P_SPI0_SCK (P_DONTCARE)
47#define P_SPI0_SSEL7 (P_DEFINED | P_IDENT(GPIO_PF7)) 47#define P_SPI0_SSEL7 (P_DEFINED | P_IDENT(GPIO_PF7))
48#define P_SPI0_SSEL6 (P_DEFINED | P_IDENT(GPIO_PF6)) 48#define P_SPI0_SSEL6 (P_DEFINED | P_IDENT(GPIO_PF6))
diff --git a/include/asm-blackfin/mach-bf548/defBF54x_base.h b/include/asm-blackfin/mach-bf548/defBF54x_base.h
index da979cb62f7d..319a48590c9c 100644
--- a/include/asm-blackfin/mach-bf548/defBF54x_base.h
+++ b/include/asm-blackfin/mach-bf548/defBF54x_base.h
@@ -1644,8 +1644,25 @@
1644#define RESTART 0x20 /* Work Unit Transitions */ 1644#define RESTART 0x20 /* Work Unit Transitions */
1645#define DI_SEL 0x40 /* Data Interrupt Timing Select */ 1645#define DI_SEL 0x40 /* Data Interrupt Timing Select */
1646#define DI_EN 0x80 /* Data Interrupt Enable */ 1646#define DI_EN 0x80 /* Data Interrupt Enable */
1647
1647#define NDSIZE 0xf00 /* Flex Descriptor Size */ 1648#define NDSIZE 0xf00 /* Flex Descriptor Size */
1649#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
1650#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
1651#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
1652#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
1653#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
1654#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
1655#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
1656#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
1657#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
1658#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
1659
1648#define DMAFLOW 0xf000 /* Next Operation */ 1660#define DMAFLOW 0xf000 /* Next Operation */
1661#define DMAFLOW_STOP 0x0000 /* Stop Mode */
1662#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
1663#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
1664#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
1665#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
1649 1666
1650/* Bit masks for DMAx_IRQ_STATUS, MDMA_Sx_IRQ_STATUS, MDMA_Dx_IRQ_STATUS */ 1667/* Bit masks for DMAx_IRQ_STATUS, MDMA_Sx_IRQ_STATUS, MDMA_Dx_IRQ_STATUS */
1651 1668
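
With the new sub-field masks, a DMA configuration word can be composed symbolically instead of with magic numbers; for example (hedged, a real user would also set the enable and word-size bits):

	/* large-model descriptor list, 9-word descriptor fetch,
	 * interrupt on work-unit completion -- all masks defined above */
	u16 dma_config = DMAFLOW_LARGE | NDSIZE_9 | DI_EN;
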
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 3bdce9126f16..bf7701243d71 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -526,7 +526,7 @@ extern struct au1xxx_irqmap au1xxx_irq_map[];
526/* Au1000 */ 526/* Au1000 */
527#ifdef CONFIG_SOC_AU1000 527#ifdef CONFIG_SOC_AU1000
528enum soc_au1000_ints { 528enum soc_au1000_ints {
529 AU1000_FIRST_INT = MIPS_CPU_IRQ_BASE, 529 AU1000_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
530 AU1000_UART0_INT = AU1000_FIRST_INT, 530 AU1000_UART0_INT = AU1000_FIRST_INT,
531 AU1000_UART1_INT, /* au1000 */ 531 AU1000_UART1_INT, /* au1000 */
532 AU1000_UART2_INT, /* au1000 */ 532 AU1000_UART2_INT, /* au1000 */
@@ -605,7 +605,7 @@ enum soc_au1000_ints {
605/* Au1500 */ 605/* Au1500 */
606#ifdef CONFIG_SOC_AU1500 606#ifdef CONFIG_SOC_AU1500
607enum soc_au1500_ints { 607enum soc_au1500_ints {
608 AU1500_FIRST_INT = MIPS_CPU_IRQ_BASE, 608 AU1500_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
609 AU1500_UART0_INT = AU1500_FIRST_INT, 609 AU1500_UART0_INT = AU1500_FIRST_INT,
610 AU1000_PCI_INTA, /* au1500 */ 610 AU1000_PCI_INTA, /* au1500 */
611 AU1000_PCI_INTB, /* au1500 */ 611 AU1000_PCI_INTB, /* au1500 */
@@ -686,7 +686,7 @@ enum soc_au1500_ints {
686/* Au1100 */ 686/* Au1100 */
687#ifdef CONFIG_SOC_AU1100 687#ifdef CONFIG_SOC_AU1100
688enum soc_au1100_ints { 688enum soc_au1100_ints {
689 AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE, 689 AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
690 AU1100_UART0_INT, 690 AU1100_UART0_INT,
691 AU1100_UART1_INT, 691 AU1100_UART1_INT,
692 AU1100_SD_INT, 692 AU1100_SD_INT,
@@ -761,7 +761,7 @@ enum soc_au1100_ints {
761 761
762#ifdef CONFIG_SOC_AU1550 762#ifdef CONFIG_SOC_AU1550
763enum soc_au1550_ints { 763enum soc_au1550_ints {
764 AU1550_FIRST_INT = MIPS_CPU_IRQ_BASE, 764 AU1550_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
765 AU1550_UART0_INT = AU1550_FIRST_INT, 765 AU1550_UART0_INT = AU1550_FIRST_INT,
766 AU1550_PCI_INTA, 766 AU1550_PCI_INTA,
767 AU1550_PCI_INTB, 767 AU1550_PCI_INTB,
@@ -851,7 +851,7 @@ enum soc_au1550_ints {
851 851
852#ifdef CONFIG_SOC_AU1200 852#ifdef CONFIG_SOC_AU1200
853enum soc_au1200_ints { 853enum soc_au1200_ints {
854 AU1200_FIRST_INT = MIPS_CPU_IRQ_BASE, 854 AU1200_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
855 AU1200_UART0_INT = AU1200_FIRST_INT, 855 AU1200_UART0_INT = AU1200_FIRST_INT,
856 AU1200_SWT_INT, 856 AU1200_SWT_INT,
857 AU1200_SD_INT, 857 AU1200_SD_INT,
@@ -948,11 +948,12 @@ enum soc_au1200_ints {
948 948
949#endif /* CONFIG_SOC_AU1200 */ 949#endif /* CONFIG_SOC_AU1200 */
950 950
951#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 0) 951#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
952#define AU1000_INTC0_INT_LAST (MIPS_CPU_IRQ_BASE + 31) 952#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
953#define AU1000_INTC1_INT_BASE (MIPS_CPU_IRQ_BASE + 32) 953#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_BASE + 32)
954#define AU1000_INTC1_INT_LAST (MIPS_CPU_IRQ_BASE + 63) 954#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
955#define AU1000_MAX_INTR (MIPS_CPU_IRQ_BASE + 63) 955
956#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
956#define INTX 0xFF /* not valid */ 957#define INTX 0xFF /* not valid */
957 958
958/* Programmable Counters 0 and 1 */ 959/* Programmable Counters 0 and 1 */
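
The net effect of the "+ 8" shift, sketched: controller interrupts now start above the eight MIPS CPU IRQs rather than on top of them.

	/* Hedged sketch: INTC0 bit n maps to Linux IRQ
	 * MIPS_CPU_IRQ_BASE + 8 + n, leaving 0..7 to the CPU itself. */
	static inline unsigned int intc0_bit_to_irq(unsigned int bit)
	{
		return AU1000_INTC0_INT_BASE + bit;
	}
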
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
index 1246d46abbc0..80335b7d77c5 100644
--- a/include/linux/inet_lro.h
+++ b/include/linux/inet_lro.h
@@ -91,6 +91,9 @@ struct net_lro_mgr {
91 int max_desc; /* Max number of LRO descriptors */ 91 int max_desc; /* Max number of LRO descriptors */
92 int max_aggr; /* Max number of LRO packets to be aggregated */ 92 int max_aggr; /* Max number of LRO packets to be aggregated */
93 93
94 int frag_align_pad; /* Padding required to properly align layer 3
95 * headers in generated skb when using frags */
96
94 struct net_lro_desc *lro_arr; /* Array of LRO descriptors */ 97 struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
95 98
96 /* 99 /*
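
A driver that builds receive skbs from page fragments would set the new field at init time; a hedged sketch (mgr and lro_descs are the driver's own):

	mgr->max_desc       = 8;
	mgr->max_aggr       = 64;
	mgr->lro_arr        = lro_descs;
	mgr->frag_align_pad = NET_IP_ALIGN;	/* usually 2: aligns the IP header */
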
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 16e7ed855a18..d9ecd13393b0 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -439,6 +439,8 @@ struct transaction_s
439 /* 439 /*
440 * Transaction's current state 440 * Transaction's current state
441 * [no locking - only kjournald alters this] 441 * [no locking - only kjournald alters this]
442 * [j_list_lock] guards transition of a transaction into T_FINISHED
443 * state and subsequent call of __journal_drop_transaction()
442 * FIXME: needs barriers 444 * FIXME: needs barriers
443 * KLUDGE: [use j_state_lock] 445 * KLUDGE: [use j_state_lock]
444 */ 446 */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 520238cbae5d..1b7b95c67aca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
12#include <linux/prio_tree.h> 12#include <linux/prio_tree.h>
13#include <linux/debug_locks.h> 13#include <linux/debug_locks.h>
14#include <linux/mm_types.h> 14#include <linux/mm_types.h>
15#include <linux/security.h>
15 16
16struct mempolicy; 17struct mempolicy;
17struct anon_vma; 18struct anon_vma;
@@ -513,6 +514,21 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
513} 514}
514 515
515/* 516/*
 517 * If a hint addr is less than mmap_min_addr, change the hint to be as
518 * low as possible but still greater than mmap_min_addr
519 */
520static inline unsigned long round_hint_to_min(unsigned long hint)
521{
522#ifdef CONFIG_SECURITY
523 hint &= PAGE_MASK;
524 if (((void *)hint != NULL) &&
525 (hint < mmap_min_addr))
526 return PAGE_ALIGN(mmap_min_addr);
527#endif
528 return hint;
529}
530
531/*
516 * Some inline functions in vmstat.h depend on page_zone() 532 * Some inline functions in vmstat.h depend on page_zone()
517 */ 533 */
518#include <linux/vmstat.h> 534#include <linux/vmstat.h>
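
Intended use (hedged; the actual call sites are in the mm/mmap.c and mm/nommu.c changes listed in the diffstat): sanitize a user-supplied hint before it is range-checked, so non-NULL hints below mmap_min_addr are bumped rather than rejected.

	unsigned long addr = round_hint_to_min(requested_addr);
	/* a NULL hint stays NULL; 0 < hint < mmap_min_addr becomes
	 * PAGE_ALIGN(mmap_min_addr); anything else passes through */
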
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e10763d79181..554836edd915 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -403,6 +403,7 @@ int phy_mii_ioctl(struct phy_device *phydev,
403int phy_start_interrupts(struct phy_device *phydev); 403int phy_start_interrupts(struct phy_device *phydev);
404void phy_print_status(struct phy_device *phydev); 404void phy_print_status(struct phy_device *phydev);
405struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id); 405struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
406void phy_device_free(struct phy_device *phydev);
406 407
407extern struct bus_type mdio_bus_type; 408extern struct bus_type mdio_bus_type;
408#endif /* __PHY_H */ 409#endif /* __PHY_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 523528d237b0..a5316829215b 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -77,7 +77,6 @@ struct proc_dir_entry {
77 read_proc_t *read_proc; 77 read_proc_t *read_proc;
78 write_proc_t *write_proc; 78 write_proc_t *write_proc;
79 atomic_t count; /* use count */ 79 atomic_t count; /* use count */
80 int deleted; /* delete flag */
81 int pde_users; /* number of callers into module in progress */ 80 int pde_users; /* number of callers into module in progress */
82 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ 81 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
83 struct completion *pde_unload_completion; 82 struct completion *pde_unload_completion;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 1c4eb41dbd89..9c4ad755d7e5 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -7,12 +7,25 @@
7#ifndef _LINUX_THREAD_INFO_H 7#ifndef _LINUX_THREAD_INFO_H
8#define _LINUX_THREAD_INFO_H 8#define _LINUX_THREAD_INFO_H
9 9
10#include <linux/types.h>
11
10/* 12/*
11 * System call restart block. 13 * System call restart block.
12 */ 14 */
13struct restart_block { 15struct restart_block {
14 long (*fn)(struct restart_block *); 16 long (*fn)(struct restart_block *);
15 unsigned long arg0, arg1, arg2, arg3; 17 union {
18 struct {
19 unsigned long arg0, arg1, arg2, arg3;
20 };
21 /* For futex_wait */
22 struct {
23 u32 *uaddr;
24 u32 val;
25 u32 flags;
26 u64 time;
27 } futex;
28 };
16}; 29};
17 30
18extern long do_no_restart_syscall(struct restart_block *parm); 31extern long do_no_restart_syscall(struct restart_block *parm);
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
index 2ea1e347df45..468f47ad7503 100644
--- a/kernel/Kconfig.instrumentation
+++ b/kernel/Kconfig.instrumentation
@@ -20,8 +20,8 @@ config PROFILING
20 20
21config OPROFILE 21config OPROFILE
22 tristate "OProfile system profiling (EXPERIMENTAL)" 22 tristate "OProfile system profiling (EXPERIMENTAL)"
23 depends on PROFILING 23 depends on PROFILING && !UML
24 depends on (ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64) && !UML 24 depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
25 help 25 help
26 OProfile is a profiling system capable of profiling the 26 OProfile is a profiling system capable of profiling the
27 whole system, including the kernel, kernel modules, libraries, 27 whole system, including the kernel, kernel modules, libraries,
@@ -31,8 +31,8 @@ config OPROFILE
31 31
32config KPROBES 32config KPROBES
33 bool "Kprobes" 33 bool "Kprobes"
34 depends on KALLSYMS && MODULES 34 depends on KALLSYMS && MODULES && !UML
35 depends on (X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32) && !UML 35 depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
36 help 36 help
37 Kprobes allows you to trap at almost any kernel address and 37 Kprobes allows you to trap at almost any kernel address and
38 execute a callback function. register_kprobe() establishes 38 execute a callback function. register_kprobe() establishes
diff --git a/kernel/fork.c b/kernel/fork.c
index 8ca1a14cdc8c..8dd8ff281009 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1292,23 +1292,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1292 __ptrace_link(p, current->parent); 1292 __ptrace_link(p, current->parent);
1293 1293
1294 if (thread_group_leader(p)) { 1294 if (thread_group_leader(p)) {
1295 if (clone_flags & CLONE_NEWPID) { 1295 if (clone_flags & CLONE_NEWPID)
1296 p->nsproxy->pid_ns->child_reaper = p; 1296 p->nsproxy->pid_ns->child_reaper = p;
1297 p->signal->tty = NULL;
1298 set_task_pgrp(p, p->pid);
1299 set_task_session(p, p->pid);
1300 attach_pid(p, PIDTYPE_PGID, pid);
1301 attach_pid(p, PIDTYPE_SID, pid);
1302 } else {
1303 p->signal->tty = current->signal->tty;
1304 set_task_pgrp(p, task_pgrp_nr(current));
1305 set_task_session(p, task_session_nr(current));
1306 attach_pid(p, PIDTYPE_PGID,
1307 task_pgrp(current));
1308 attach_pid(p, PIDTYPE_SID,
1309 task_session(current));
1310 }
1311 1297
1298 p->signal->tty = current->signal->tty;
1299 set_task_pgrp(p, task_pgrp_nr(current));
1300 set_task_session(p, task_session_nr(current));
1301 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1302 attach_pid(p, PIDTYPE_SID, task_session(current));
1312 list_add_tail_rcu(&p->tasks, &init_task.tasks); 1303 list_add_tail_rcu(&p->tasks, &init_task.tasks);
1313 __get_cpu_var(process_counts)++; 1304 __get_cpu_var(process_counts)++;
1314 } 1305 }
diff --git a/kernel/futex.c b/kernel/futex.c
index 9dc591ab681a..172a1aeeafdb 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -658,7 +658,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
658 658
659 if (curval == -EFAULT) 659 if (curval == -EFAULT)
660 ret = -EFAULT; 660 ret = -EFAULT;
661 if (curval != uval) 661 else if (curval != uval)
662 ret = -EINVAL; 662 ret = -EINVAL;
663 if (ret) { 663 if (ret) {
664 spin_unlock(&pi_state->pi_mutex.wait_lock); 664 spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1149 1149
1150/* 1150/*
1151 * In case we must use restart_block to restart a futex_wait, 1151 * In case we must use restart_block to restart a futex_wait,
1152 * we encode in the 'arg3' shared capability 1152 * we encode in the 'flags' shared capability
1153 */ 1153 */
1154#define ARG3_SHARED 1 1154#define FLAGS_SHARED 1
1155 1155
1156static long futex_wait_restart(struct restart_block *restart); 1156static long futex_wait_restart(struct restart_block *restart);
1157 1157
@@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1290 struct restart_block *restart; 1290 struct restart_block *restart;
1291 restart = &current_thread_info()->restart_block; 1291 restart = &current_thread_info()->restart_block;
1292 restart->fn = futex_wait_restart; 1292 restart->fn = futex_wait_restart;
1293 restart->arg0 = (unsigned long)uaddr; 1293 restart->futex.uaddr = (u32 *)uaddr;
1294 restart->arg1 = (unsigned long)val; 1294 restart->futex.val = val;
1295 restart->arg2 = (unsigned long)abs_time; 1295 restart->futex.time = abs_time->tv64;
1296 restart->arg3 = 0; 1296 restart->futex.flags = 0;
1297
1297 if (fshared) 1298 if (fshared)
1298 restart->arg3 |= ARG3_SHARED; 1299 restart->futex.flags |= FLAGS_SHARED;
1299 return -ERESTART_RESTARTBLOCK; 1300 return -ERESTART_RESTARTBLOCK;
1300 } 1301 }
1301 1302
@@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1310 1311
1311static long futex_wait_restart(struct restart_block *restart) 1312static long futex_wait_restart(struct restart_block *restart)
1312{ 1313{
1313 u32 __user *uaddr = (u32 __user *)restart->arg0; 1314 u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1314 u32 val = (u32)restart->arg1;
1315 ktime_t *abs_time = (ktime_t *)restart->arg2;
1316 struct rw_semaphore *fshared = NULL; 1315 struct rw_semaphore *fshared = NULL;
1316 ktime_t t;
1317 1317
1318 t.tv64 = restart->futex.time;
1318 restart->fn = do_no_restart_syscall; 1319 restart->fn = do_no_restart_syscall;
1319 if (restart->arg3 & ARG3_SHARED) 1320 if (restart->futex.flags & FLAGS_SHARED)
1320 fshared = &current->mm->mmap_sem; 1321 fshared = &current->mm->mmap_sem;
1321 return (long)futex_wait(uaddr, fshared, val, abs_time); 1322 return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
1322} 1323}
1323 1324
1324 1325
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ed38bbfc48a3..0f389621bb6b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3054,11 +3054,6 @@ void __init lockdep_info(void)
3054#endif 3054#endif
3055} 3055}
3056 3056
3057static inline int in_range(const void *start, const void *addr, const void *end)
3058{
3059 return addr >= start && addr <= end;
3060}
3061
3062static void 3057static void
3063print_freed_lock_bug(struct task_struct *curr, const void *mem_from, 3058print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3064 const void *mem_to, struct held_lock *hlock) 3059 const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3075,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3080 dump_stack(); 3075 dump_stack();
3081} 3076}
3082 3077
3078static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3079 const void* lock_from, unsigned long lock_len)
3080{
3081 return lock_from + lock_len <= mem_from ||
3082 mem_from + mem_len <= lock_from;
3083}
3084
3083/* 3085/*
3084 * Called when kernel memory is freed (or unmapped), or if a lock 3086 * Called when kernel memory is freed (or unmapped), or if a lock
3085 * is destroyed or reinitialized - this code checks whether there is 3087 * is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3089,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3087 */ 3089 */
3088void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) 3090void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3089{ 3091{
3090 const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
3091 struct task_struct *curr = current; 3092 struct task_struct *curr = current;
3092 struct held_lock *hlock; 3093 struct held_lock *hlock;
3093 unsigned long flags; 3094 unsigned long flags;
@@ -3100,14 +3101,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3100 for (i = 0; i < curr->lockdep_depth; i++) { 3101 for (i = 0; i < curr->lockdep_depth; i++) {
3101 hlock = curr->held_locks + i; 3102 hlock = curr->held_locks + i;
3102 3103
3103 lock_from = (void *)hlock->instance; 3104 if (not_in_range(mem_from, mem_len, hlock->instance,
3104 lock_to = (void *)(hlock->instance + 1); 3105 sizeof(*hlock->instance)))
3105
3106 if (!in_range(mem_from, lock_from, mem_to) &&
3107 !in_range(mem_from, lock_to, mem_to))
3108 continue; 3106 continue;
3109 3107
3110 print_freed_lock_bug(curr, mem_from, mem_to, hlock); 3108 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3111 break; 3109 break;
3112 } 3110 }
3113 local_irq_restore(flags); 3111 local_irq_restore(flags);
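
The replacement not_in_range() is the standard interval-disjointness test; unlike the old in_range() pair it also catches a lock that merely straddles one end of the freed region. A standalone check of the logic:

	#include <assert.h>

	static int not_in_range(const char *mem_from, unsigned long mem_len,
				const char *lock_from, unsigned long lock_len)
	{
		/* disjoint iff one range ends before the other begins */
		return lock_from + lock_len <= mem_from ||
		       mem_from + mem_len <= lock_from;
	}

	int main(void)
	{
		char mem[64];

		assert(!not_in_range(mem, 64, mem + 16, 8));	/* inside: overlap */
		assert(!not_in_range(mem + 8, 8, mem, 64));	/* straddles: overlap */
		assert(not_in_range(mem, 32, mem + 32, 8));	/* adjacent: disjoint */
		return 0;
	}
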
@@ -3173,6 +3171,13 @@ retry:
3173 printk(" locked it.\n"); 3171 printk(" locked it.\n");
3174 3172
3175 do_each_thread(g, p) { 3173 do_each_thread(g, p) {
3174 /*
3175 * It's not reliable to print a task's held locks
3176 * if it's not sleeping (or if it's not the current
3177 * task):
3178 */
3179 if (p->state == TASK_RUNNING && p != current)
3180 continue;
3176 if (p->lockdep_depth) 3181 if (p->lockdep_depth)
3177 lockdep_print_held_locks(p); 3182 lockdep_print_held_locks(p);
3178 if (!unlock) 3183 if (!unlock)
diff --git a/kernel/sched.c b/kernel/sched.c
index 59ff6b140edb..67d9d1799d86 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,9 +209,8 @@ static inline struct task_group *task_group(struct task_struct *p)
209 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), 209 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
210 struct task_group, css); 210 struct task_group, css);
211#else 211#else
212 tg = &init_task_group; 212 tg = &init_task_group;
213#endif 213#endif
214
215 return tg; 214 return tg;
216} 215}
217 216
@@ -249,15 +248,16 @@ struct cfs_rq {
249#ifdef CONFIG_FAIR_GROUP_SCHED 248#ifdef CONFIG_FAIR_GROUP_SCHED
250 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ 249 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
251 250
252 /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 251 /*
252 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
253 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 253 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
254 * (like users, containers etc.) 254 * (like users, containers etc.)
255 * 255 *
256 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This 256 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
257 * list is used during load balance. 257 * list is used during load balance.
258 */ 258 */
259 struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */ 259 struct list_head leaf_cfs_rq_list;
260 struct task_group *tg; /* group that "owns" this runqueue */ 260 struct task_group *tg; /* group that "owns" this runqueue */
261#endif 261#endif
262}; 262};
263 263
@@ -300,7 +300,7 @@ struct rq {
300 /* list of leaf cfs_rq on this cpu: */ 300 /* list of leaf cfs_rq on this cpu: */
301 struct list_head leaf_cfs_rq_list; 301 struct list_head leaf_cfs_rq_list;
302#endif 302#endif
303 struct rt_rq rt; 303 struct rt_rq rt;
304 304
305 /* 305 /*
306 * This is part of a global counter where only the total sum 306 * This is part of a global counter where only the total sum
@@ -457,8 +457,8 @@ enum {
457 SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, 457 SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
458 SCHED_FEAT_WAKEUP_PREEMPT = 2, 458 SCHED_FEAT_WAKEUP_PREEMPT = 2,
459 SCHED_FEAT_START_DEBIT = 4, 459 SCHED_FEAT_START_DEBIT = 4,
460 SCHED_FEAT_TREE_AVG = 8, 460 SCHED_FEAT_TREE_AVG = 8,
461 SCHED_FEAT_APPROX_AVG = 16, 461 SCHED_FEAT_APPROX_AVG = 16,
462}; 462};
463 463
464const_debug unsigned int sysctl_sched_features = 464const_debug unsigned int sysctl_sched_features =
@@ -591,7 +591,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
591 591
592/* 592/*
593 * task_rq_lock - lock the runqueue a given task resides on and disable 593 * task_rq_lock - lock the runqueue a given task resides on and disable
594 * interrupts. Note the ordering: we can safely lookup the task_rq without 594 * interrupts. Note the ordering: we can safely lookup the task_rq without
595 * explicitly disabling preemption. 595 * explicitly disabling preemption.
596 */ 596 */
597static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) 597static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -779,7 +779,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
779 * To aid in avoiding the subversion of "niceness" due to uneven distribution 779 * To aid in avoiding the subversion of "niceness" due to uneven distribution
780 * of tasks with abnormal "nice" values across CPUs the contribution that 780 * of tasks with abnormal "nice" values across CPUs the contribution that
781 * each task makes to its run queue's load is weighted according to its 781 * each task makes to its run queue's load is weighted according to its
782 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 782 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
783 * scaled version of the new time slice allocation that they receive on time 783 * scaled version of the new time slice allocation that they receive on time
784 * slice expiry etc. 784 * slice expiry etc.
785 */ 785 */
@@ -1854,7 +1854,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
1854 * and do any other architecture-specific cleanup actions. 1854 * and do any other architecture-specific cleanup actions.
1855 * 1855 *
1856 * Note that we may have delayed dropping an mm in context_switch(). If 1856 * Note that we may have delayed dropping an mm in context_switch(). If
1857 * so, we finish that here outside of the runqueue lock. (Doing it 1857 * so, we finish that here outside of the runqueue lock. (Doing it
1858 * with the lock held can cause deadlocks; see schedule() for 1858 * with the lock held can cause deadlocks; see schedule() for
1859 * details.) 1859 * details.)
1860 */ 1860 */
@@ -2136,7 +2136,7 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
2136/* 2136/*
2137 * If dest_cpu is allowed for this process, migrate the task to it. 2137 * If dest_cpu is allowed for this process, migrate the task to it.
2138 * This is accomplished by forcing the cpu_allowed mask to only 2138 * This is accomplished by forcing the cpu_allowed mask to only
2139 * allow dest_cpu, which will force the cpu onto dest_cpu. Then 2139 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
2140 * the cpu_allowed mask is restored. 2140 * the cpu_allowed mask is restored.
2141 */ 2141 */
2142static void sched_migrate_task(struct task_struct *p, int dest_cpu) 2142static void sched_migrate_task(struct task_struct *p, int dest_cpu)
@@ -2581,7 +2581,7 @@ group_next:
2581 * tasks around. Thus we look for the minimum possible imbalance. 2581 * tasks around. Thus we look for the minimum possible imbalance.
2582 * Negative imbalances (*we* are more loaded than anyone else) will 2582 * Negative imbalances (*we* are more loaded than anyone else) will
2583 * be counted as no imbalance for these purposes -- we can't fix that 2583 * be counted as no imbalance for these purposes -- we can't fix that
2584 * by pulling tasks to us. Be careful of negative numbers as they'll 2584 * by pulling tasks to us. Be careful of negative numbers as they'll
2585 * appear as very large values with unsigned longs. 2585 * appear as very large values with unsigned longs.
2586 */ 2586 */
2587 if (max_load <= busiest_load_per_task) 2587 if (max_load <= busiest_load_per_task)
@@ -3016,7 +3016,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3016 3016
3017 /* 3017 /*
3018 * This condition is "impossible", if it occurs 3018 * This condition is "impossible", if it occurs
3019 * we need to fix it. Originally reported by 3019 * we need to fix it. Originally reported by
3020 * Bjorn Helgaas on a 128-cpu setup. 3020 * Bjorn Helgaas on a 128-cpu setup.
3021 */ 3021 */
3022 BUG_ON(busiest_rq == target_rq); 3022 BUG_ON(busiest_rq == target_rq);
@@ -3048,7 +3048,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3048#ifdef CONFIG_NO_HZ 3048#ifdef CONFIG_NO_HZ
3049static struct { 3049static struct {
3050 atomic_t load_balancer; 3050 atomic_t load_balancer;
3051 cpumask_t cpu_mask; 3051 cpumask_t cpu_mask;
3052} nohz ____cacheline_aligned = { 3052} nohz ____cacheline_aligned = {
3053 .load_balancer = ATOMIC_INIT(-1), 3053 .load_balancer = ATOMIC_INIT(-1),
3054 .cpu_mask = CPU_MASK_NONE, 3054 .cpu_mask = CPU_MASK_NONE,
@@ -3552,7 +3552,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
3552static inline void schedule_debug(struct task_struct *prev) 3552static inline void schedule_debug(struct task_struct *prev)
3553{ 3553{
3554 /* 3554 /*
3555 * Test if we are atomic. Since do_exit() needs to call into 3555 * Test if we are atomic. Since do_exit() needs to call into
3556 * schedule() atomically, we ignore that path for now. 3556 * schedule() atomically, we ignore that path for now.
3557 * Otherwise, whine if we are scheduling when we should not be. 3557 * Otherwise, whine if we are scheduling when we should not be.
3558 */ 3558 */
@@ -3674,7 +3674,7 @@ EXPORT_SYMBOL(schedule);
3674#ifdef CONFIG_PREEMPT 3674#ifdef CONFIG_PREEMPT
3675/* 3675/*
3676 * this is the entry point to schedule() from in-kernel preemption 3676 * this is the entry point to schedule() from in-kernel preemption
3677 * off of preempt_enable. Kernel preemptions off return from interrupt 3677 * off of preempt_enable. Kernel preemptions off return from interrupt
3678 * occur there and call schedule directly. 3678 * occur there and call schedule directly.
3679 */ 3679 */
3680asmlinkage void __sched preempt_schedule(void) 3680asmlinkage void __sched preempt_schedule(void)
@@ -3686,7 +3686,7 @@ asmlinkage void __sched preempt_schedule(void)
3686#endif 3686#endif
3687 /* 3687 /*
3688 * If there is a non-zero preempt_count or interrupts are disabled, 3688 * If there is a non-zero preempt_count or interrupts are disabled,
3689 * we do not want to preempt the current task. Just return.. 3689 * we do not want to preempt the current task. Just return..
3690 */ 3690 */
3691 if (likely(ti->preempt_count || irqs_disabled())) 3691 if (likely(ti->preempt_count || irqs_disabled()))
3692 return; 3692 return;
@@ -3772,12 +3772,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
3772EXPORT_SYMBOL(default_wake_function); 3772EXPORT_SYMBOL(default_wake_function);
3773 3773
3774/* 3774/*
3775 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just 3775 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3776 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve 3776 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3777 * number) then we wake all the non-exclusive tasks and one exclusive task. 3777 * number) then we wake all the non-exclusive tasks and one exclusive task.
3778 * 3778 *
3779 * There are circumstances in which we can try to wake a task which has already 3779 * There are circumstances in which we can try to wake a task which has already
3780 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns 3780 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
3781 * zero in this (rare) case, and we handle it by continuing to scan the queue. 3781 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3782 */ 3782 */
3783static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, 3783static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4390 * @policy: new policy. 4390 * @policy: new policy.
4391 * @param: structure containing the new RT priority. 4391 * @param: structure containing the new RT priority.
4392 */ 4392 */
4393asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, 4393asmlinkage long
4394 struct sched_param __user *param) 4394sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4395{ 4395{
4396 /* negative values for policy are not valid */ 4396 /* negative values for policy are not valid */
4397 if (policy < 0) 4397 if (policy < 0)
@@ -4491,7 +4491,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4491 4491
4492 /* 4492 /*
4493 * It is not safe to call set_cpus_allowed with the 4493 * It is not safe to call set_cpus_allowed with the
4494 * tasklist_lock held. We will bump the task_struct's 4494 * tasklist_lock held. We will bump the task_struct's
4495 * usage count and then drop tasklist_lock. 4495 * usage count and then drop tasklist_lock.
4496 */ 4496 */
4497 get_task_struct(p); 4497 get_task_struct(p);
@@ -4687,7 +4687,7 @@ EXPORT_SYMBOL(cond_resched);
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
- * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
@@ -4741,7 +4741,7 @@ void __sched yield(void)
 EXPORT_SYMBOL(yield);
 
 /*
- * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  *
  * But don't do that if it is a deliberate, throttling IO wait (this task
@@ -4850,17 +4850,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	if (retval)
 		goto out_unlock;
 
-	if (p->policy == SCHED_FIFO)
-		time_slice = 0;
-	else if (p->policy == SCHED_RR)
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
+	 * tasks that are on an otherwise idle runqueue:
+	 */
+	time_slice = 0;
+	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	else {
+	} else {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
 
 		rq = task_rq_lock(p, &flags);
-		time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+		if (rq->cfs.load.weight)
+			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
 		task_rq_unlock(rq, &flags);
 	}
 	read_unlock(&tasklist_lock);
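The hunk above is the functional fix in this series: sched_slice() was previously evaluated against a cfs_rq that could be empty, with a zero load weight. With the rewrite, SCHED_FIFO tasks and SCHED_OTHER tasks on an otherwise idle runqueue simply report a zero timeslice. A minimal userspace sketch of the affected syscall, using only the glibc wrapper (not part of the patch):

#include <sched.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timespec ts;

	/* Query this task's round-robin timeslice; on the patched
	 * kernel a SCHED_OTHER task on an idle runqueue reads 0. */
	if (sched_rr_get_interval(getpid(), &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}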
@@ -5046,7 +5050,7 @@ static inline void sched_init_granularity(void)
  * is removed from the allowed bitmask.
  *
  * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely.  The
+ * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -5083,7 +5087,7 @@ out:
 EXPORT_SYMBOL_GPL(set_cpus_allowed);
 
 /*
- * Move (not current) task off this cpu, onto dest cpu.  We're doing
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
  * this because either it can't run here any more (set_cpus_allowed()
  * away from this CPU, or CPU going down), or because we're
  * attempting to rebalance this task on exec (sched_exec).
@@ -5228,7 +5232,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 		 * Try to stay on the same cpuset, where the
 		 * current cpuset may be a subset of all cpus.
 		 * The cpuset_cpus_allowed_locked() variant of
-		 * cpuset_cpus_allowed() will not block.  It must be
+		 * cpuset_cpus_allowed() will not block. It must be
 		 * called within calls to cpuset_lock/cpuset_unlock.
 		 */
 		rq = task_rq_lock(p, &flags);
@@ -5241,10 +5245,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			 * kernel threads (both mm NULL), since they never
 			 * leave kernel.
 			 */
-			if (p->mm && printk_ratelimit())
+			if (p->mm && printk_ratelimit()) {
 				printk(KERN_INFO "process %d (%s) no "
 					"longer affine to cpu%d\n",
 					task_pid_nr(p), p->comm, dead_cpu);
+			}
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
@@ -5346,7 +5351,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 
 	/*
 	 * Drop lock around migration; if someone else moves it,
-	 * that's OK.  No task can be added to this CPU, so iteration is
+	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
 	 */
 	spin_unlock_irq(&rq->lock);
@@ -5410,7 +5415,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 	/*
 	 * In the intermediate directories, both the child directory and
 	 * procname are dynamically allocated and could fail but the mode
-	 * will always be set.  In the lowest directory the names are
+	 * will always be set. In the lowest directory the names are
 	 * static strings and all have proc handlers.
 	 */
 	for (entry = *tablep; entry->mode; entry++) {
@@ -5581,7 +5586,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_UP_CANCELED_FROZEN:
 		if (!cpu_rq(cpu)->migration_thread)
 			break;
-		/* Unbind it from offline cpu so it can run.  Fall thru. */
+		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
@@ -5608,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
-		/* No need to migrate the tasks: it was best-effort if
-		 * they didn't take sched_hotcpu_mutex. Just wake up
-		 * the requestors. */
+		/*
+		 * No need to migrate the tasks: it was best-effort if
+		 * they didn't take sched_hotcpu_mutex. Just wake up
+		 * the requestors.
+		 */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			struct migration_req *req;
@@ -5918,7 +5925,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  * @node: node whose sched_domain we're building
  * @used_nodes: nodes already in the sched_domain
  *
- * Find the next node to include in a given scheduling domain.  Simply
+ * Find the next node to include in a given scheduling domain. Simply
  * finds the closest node not already in the @used_nodes map.
  *
  * Should use nodemask_t.
@@ -5958,7 +5965,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  * @node: node whose cpumask we're constructing
  * @size: number of nodes to include in this span
  *
- * Given a node, construct a good cpumask for its sched_domain to span.  It
+ * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
@@ -5995,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
 
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
-			    struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6013,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6025,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_core, cpu);
@@ -6037,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
 
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
@@ -6218,7 +6225,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	 * Allocate the per-node list of sched groups
 	 */
 	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
-			GFP_KERNEL);
+				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
 		return -ENOMEM;
@@ -6465,7 +6472,7 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
 static cpumask_t fallback_doms;
 
 /*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
  */
@@ -6507,19 +6514,19 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 /*
  * Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks.  This compares
+ * cpumasks in the array doms_new[] of cpumasks. This compares
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
  * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.)  We should setup one
- * sched domain for each mask.  CPUs not in any of the cpumasks will
- * not be load balanced.  If the same cpumask appears both in the
+ * The masks don't intersect (don't overlap.) We should setup one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd.  This routine takes
- * ownership of it and will kfree it when done with it.  If the caller
+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
+ * ownership of it and will kfree it when done with it. If the caller
  * failed the kmalloc call, then it can pass in doms_new == NULL,
  * and partition_sched_domains() will fallback to the single partition
  * 'fallback_doms'.
@@ -6649,7 +6656,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 #endif
 
 /*
- * Force a reinitialization of the sched domains hierarchy.  The domains
+ * Force a reinitialization of the sched domains hierarchy. The domains
  * and groups cannot be updated in place without racing with the balancing
  * code, so we temporarily attach all running cpus to the NULL domain
  * which will prevent rebalancing while the sched domains are recalculated.
@@ -6939,8 +6946,8 @@ struct task_struct *curr_task(int cpu)
  * @p: the task pointer to set.
  *
  * Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack.  It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner.  This function
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
  * must be called with all CPU's synchronized, and interrupts disabled, the
  * and caller must save the original value of the current task (see
  * curr_task() above) and restore that value before reenabling interrupts and
@@ -7189,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-				 struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk)
 {
 	/* We don't support RT-tasks being in separate groups */
 	if (tsk->sched_class != &fair_sched_class)
@@ -7304,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	struct cpuacct *ca = cgroup_ca(cont);
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 37bb265598db..c33f0ceb3de9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
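The yield_task_fair() hunks do two things: cache rq->curr in a local, and make SCHED_BATCH tasks take the aggressive yield path regardless of sysctl_sched_compat_yield. For reference, the compat behaviour is switched globally through /proc/sys/kernel/sched_compat_yield; a small sketch of toggling it (assumes procfs is mounted at /proc):

#include <stdio.h>

/* Write 1 to select the 2.6.22-style aggressive yield for
 * ordinary CFS tasks, 0 to restore the default behaviour. */
static int set_compat_yield(int on)
{
	FILE *f = fopen("/proc/sys/kernel/sched_compat_yield", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}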
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0deed82a6156..8ac51714b08c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1588,6 +1588,10 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
 void unregister_sysctl_table(struct ctl_table_header * header)
 {
 	might_sleep();
+
+	if (header == NULL)
+		return;
+
 	spin_lock(&sysctl_lock);
 	start_unregistering(header);
 	spin_unlock(&sysctl_lock);
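With the NULL check above, unregister_sysctl_table() becomes a no-op for a NULL header, mirroring kfree(NULL); callers can then unwind unconditionally without tracking whether registration ever succeeded, which the ipvs init-error paths further down can lean on. The idiom, sketched with a hypothetical resource type:

struct resource_handle;

void release_handle(struct resource_handle *h); /* hypothetical */

/* NULL-tolerant teardown: safe to call whether or not the
 * handle was ever successfully acquired. */
static void put_handle(struct resource_handle *h)
{
	if (h == NULL)
		return;
	release_handle(h);
}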
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 6972f26c65f7..bed939f82c31 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -96,7 +96,7 @@ static struct trans_ctl_table trans_kern_table[] = {
 
 	{ KERN_PTY, "pty", trans_pty_table },
 	{ KERN_NGROUPS_MAX, "ngroups_max" },
-	{ KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
+	{ KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
 	{ KERN_HZ_TIMER, "hz_timer" },
 	{ KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
 	{ KERN_BOOTLOADER_TYPE, "bootloader_type" },
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b0ceb29da4c7..e8644b1e5527 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -7,7 +7,7 @@
 
 int bdi_init(struct backing_dev_info *bdi)
 {
-	int i, j;
+	int i;
 	int err;
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
@@ -21,7 +21,7 @@ int bdi_init(struct backing_dev_info *bdi)
 
 	if (err) {
 err:
-		for (j = 0; j < i; j++)
+		while (i--)
 			percpu_counter_destroy(&bdi->bdi_stat[i]);
 	}
 
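The bdi_init() error path previously destroyed bdi_stat[i], the slot whose initialization had just failed, i times over, and never released the counters that were actually set up. `while (i--)` walks back over exactly slots i-1..0. The idiom as a standalone sketch, with hypothetical init/destroy helpers:

#include <stddef.h>

#define NITEMS 8

int item_init(size_t idx);     /* hypothetical, 0 on success */
void item_destroy(size_t idx); /* hypothetical */

static int init_all(void)
{
	size_t i;

	for (i = 0; i < NITEMS; i++)
		if (item_init(i) != 0)
			goto unwind;
	return 0;

unwind:
	/* i names the slot that failed; destroy only the slots
	 * 0..i-1 that were actually initialized, in reverse. */
	while (i--)
		item_destroy(i);
	return -1;
}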
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 32132f3cd641..e233fff61b4b 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -314,7 +314,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 		fault_in_pages_readable(buf, bytes);
 		kaddr = kmap_atomic(page, KM_USER0);
 		copied = bytes -
-			__copy_from_user_inatomic_nocache(kaddr, buf, bytes);
+			__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
 		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(page);
 
diff --git a/mm/mmap.c b/mm/mmap.c
index facc1a75bd4f..15678aa6ec73 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -912,6 +912,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	if (!(flags & MAP_FIXED))
+		addr = round_hint_to_min(addr);
+
 	error = arch_mmap_check(addr, len, flags);
 	if (error)
 		return error;
@@ -1615,6 +1618,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
 	 */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
+
+	address &= PAGE_MASK;
+	error = security_file_mmap(0, 0, 0, 0, address, 1);
+	if (error)
+		return error;
+
 	anon_vma_lock(vma);
 
 	/*
@@ -1622,8 +1631,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
-	address &= PAGE_MASK;
-	error = 0;
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -1934,6 +1941,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (is_hugepage_only_range(mm, addr, len))
 		return -EINVAL;
 
+	error = security_file_mmap(0, 0, 0, 0, addr, 1);
+	if (error)
+		return error;
+
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
 	error = arch_mmap_check(addr, len, flags);
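Taken together, the mm/mmap.c hunks route both stack expansion and brk() mappings through the LSM address check: security_file_mmap(0, 0, 0, 0, addr, 1) passes a NULL file and sets the addr_only flag, so only the placement is vetted against mmap_min_addr, and a non-MAP_FIXED hint is now rounded up before the architecture check sees it. From userspace the effect shows as an ordinary mmap() failure; a small probe (the outcome depends on the configured mmap_min_addr and on CAP_SYS_RAWIO):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Try to map one page at virtual address 0; with a non-zero
	 * mmap_min_addr an unprivileged process is refused. */
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap at address 0");
		return 1;
	}
	printf("mapped page zero at %p\n", p);
	munmap(p, 4096);
	return 0;
}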
diff --git a/mm/nommu.c b/mm/nommu.c
index 35622c590925..b989cb928a7c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -829,6 +829,9 @@ unsigned long do_mmap_pgoff(struct file *file,
 	void *result;
 	int ret;
 
+	if (!(flags & MAP_FIXED))
+		addr = round_hint_to_min(addr);
+
 	/* decide whether we should attempt the mapping, and if so what sort of
 	 * mapping */
 	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
diff --git a/mm/slab.c b/mm/slab.c
index 202465a193c1..2e338a5f7b14 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4475,3 +4475,4 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index 08a9bd91a1aa..ee2ef8af0d43 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -495,6 +495,7 @@ size_t ksize(const void *block)
 	else
 		return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index 9acb413858ac..b9f37cb0f2e6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2558,8 +2558,12 @@ size_t ksize(const void *object)
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
-	page = get_object_page(object);
+	page = virt_to_head_page(object);
 	BUG_ON(!page);
+
+	if (unlikely(!PageSlab(page)))
+		return PAGE_SIZE << compound_order(page);
+
 	s = page->slab;
 	BUG_ON(!s);
 
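The three allocator hunks give ksize() the same exported behaviour in SLAB, SLOB and SLUB; the SLUB change additionally handles kmalloc()s that were satisfied directly by the page allocator (compound pages rather than slabs). A kernel-style sketch of the contract, illustrative only and not from this patch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* ksize() reports the usable size of a kmalloc'd object, which
 * may exceed what was asked for; every byte up to ksize(p) may
 * be used without reallocating. */
static int ksize_demo(void)
{
	char *p = kmalloc(100, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	printk(KERN_DEBUG "asked for 100 bytes, usable %zu\n", ksize(p));
	kfree(p);
	return 0;
}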
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index ac3b1d3dba2e..9a96c277393d 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -401,10 +401,11 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
 	int data_len = len;
 	int hdr_len = min(len, hlen);
 
-	skb = netdev_alloc_skb(lro_mgr->dev, hlen);
+	skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
 	if (!skb)
 		return NULL;
 
+	skb_reserve(skb, lro_mgr->frag_align_pad);
 	skb->len = len;
 	skb->data_len = len - hdr_len;
 	skb->truesize += true_size;
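The LRO fix allocates frag_align_pad extra bytes of headroom and immediately consumes them with skb_reserve(), so the aggregated skb inherits whatever alignment padding the driver requested in its net_lro_mgr. Allocate-then-reserve is the standard pairing; a driver-style sketch using the common NET_IP_ALIGN padding (receive details elided):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate an rx skb whose IP header will be 4-byte aligned:
 * request NET_IP_ALIGN extra bytes, then shift skb->data by
 * the same amount before any data is placed in the buffer. */
static struct sk_buff *alloc_aligned_rx_skb(struct net_device *dev,
					    unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}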
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index b843a11d7cf7..ad89644ef5d2 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -580,9 +580,14 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
 
 static int __init ip_vs_lblc_init(void)
 {
+	int ret;
+
 	INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
 	sysctl_header = register_sysctl_table(lblc_root_table);
-	return register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+	if (ret)
+		unregister_sysctl_table(sysctl_header);
+	return ret;
 }
 
 
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index e5b323a6b2f7..2a5ed85a3352 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -769,9 +769,14 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
 
 static int __init ip_vs_lblcr_init(void)
 {
+	int ret;
+
 	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
 	sysctl_header = register_sysctl_table(lblcr_root_table);
-	return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+	if (ret)
+		unregister_sysctl_table(sysctl_header);
+	return ret;
 }
 
 
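Both ipvs init fixes plug the same leak: the sysctl table was registered before the scheduler, so a registration failure must unregister it again before returning the error (the unregister_sysctl_table() NULL check added earlier keeps this safe even if sysctl registration itself failed). This is the usual acquire-in-order, release-in-reverse shape for init functions; a skeleton with hypothetical step functions:

int step_a(void);  /* hypothetical */
void undo_a(void); /* hypothetical */
int step_b(void);  /* hypothetical */

static int my_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto undo_step_a; /* unwind only what succeeded */

	return 0;

undo_step_a:
	undo_a();
	return ret;
}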
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index 1602304abbf9..432235861908 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -183,19 +183,6 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
-	/*
-	 *  Make sure that the scheduler with this name doesn't exist
-	 *  in the scheduler list.
-	 */
-	sched = ip_vs_sched_getbyname(scheduler->name);
-	if (sched) {
-		ip_vs_scheduler_put(sched);
-		ip_vs_use_count_dec();
-		IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
-			  "already existed in the system\n", scheduler->name);
-		return -EINVAL;
-	}
-
 	write_lock_bh(&__ip_vs_sched_lock);
 
 	if (scheduler->n_list.next != &scheduler->n_list) {
@@ -207,6 +194,20 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	}
 
 	/*
+	 *  Make sure that the scheduler with this name doesn't exist
+	 *  in the scheduler list.
+	 */
+	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
+		if (strcmp(scheduler->name, sched->name) == 0) {
+			write_unlock_bh(&__ip_vs_sched_lock);
+			ip_vs_use_count_dec();
+			IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
+				  "already existed in the system\n",
+				  scheduler->name);
+			return -EINVAL;
+		}
+	}
+	/*
 	 *	Add it into the d-linked scheduler list
 	 */
 	list_add(&scheduler->n_list, &ip_vs_schedulers);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0f0c1c9829a1..b9e429d2d1de 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3003,17 +3003,13 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	}
 
 	if (tp->frto_counter == 1) {
-		/* Sending of the next skb must be allowed or no F-RTO */
-		if (!tcp_send_head(sk) ||
-		    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
-			  tp->snd_una + tp->snd_wnd)) {
-			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3),
-					    flag);
-			return 1;
-		}
-
+		/* tcp_may_send_now needs to see updated state */
 		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
 		tp->frto_counter = 2;
+
+		if (!tcp_may_send_now(sk))
+			tcp_enter_frto_loss(sk, 2, flag);
+
 		return 1;
 	} else {
 		switch (sysctl_tcp_frto_response) {
@@ -3069,6 +3065,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	}
 
 	prior_fackets = tp->fackets_out;
+	prior_in_flight = tcp_packets_in_flight(tp);
 
 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.
@@ -3108,8 +3105,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	if (!prior_packets)
 		goto no_queue;
 
-	prior_in_flight = tcp_packets_in_flight(tp);
-
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e5130a7fe181..f4c1eef89af0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1162,8 +1162,7 @@ int tcp_may_send_now(struct sock *sk)
 	return (skb &&
 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
 			     (tcp_skb_is_last(sk, skb) ?
-			      TCP_NAGLE_PUSH :
-			      tp->nonagle)));
+			      tp->nonagle : TCP_NAGLE_PUSH)));
 }
 
 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
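The tcp_may_send_now() change is a pure operand swap: the old ternary applied TCP_NAGLE_PUSH (send regardless of Nagle) to the last skb in the write queue and the socket's Nagle mode to everything else, which is exactly backwards, since only the final, possibly short, segment should be subject to Nagle. Restated as a tiny helper (the demo macro stands in for the kernel's TCP_NAGLE_PUSH flag; its value here is an assumption):

#define DEMO_NAGLE_PUSH 4 /* stand-in, value assumed */

static unsigned int nagle_mode_for(int skb_is_last,
				   unsigned int sock_nonagle)
{
	/* An skb with more data queued behind it may always be
	 * pushed; only the last skb honours the socket setting. */
	return skb_is_last ? sock_nonagle : DEMO_NAGLE_PUSH;
}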
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 1120b150e211..be627e1f04d8 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -1245,6 +1245,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
 	self->flow = cmd;
 }
 
+#ifdef CONFIG_PROC_FS
 static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
 {
 	int ret=0;
@@ -1354,7 +1355,6 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
  *
  *
  */
-#ifdef CONFIG_PROC_FS
 static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
 				int *eof, void *unused)
 {
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 1b6741f1d746..12cfcf09556b 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -55,13 +55,13 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
 
 static int rose_rebuild_header(struct sk_buff *skb)
 {
+#ifdef CONFIG_INET
 	struct net_device *dev = skb->dev;
 	struct net_device_stats *stats = netdev_priv(dev);
 	unsigned char *bp = (unsigned char *)skb->data;
 	struct sk_buff *skbn;
 	unsigned int len;
 
-#ifdef CONFIG_INET
 	if (arp_find(bp + 7, skb)) {
 		return 1;
 	}
diff --git a/security/dummy.c b/security/dummy.c
index 6d895ade73de..3ccfbbe973b6 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -426,7 +426,7 @@ static int dummy_file_mmap (struct file *file, unsigned long reqprot,
 			    unsigned long addr,
 			    unsigned long addr_only)
 {
-	if (addr < mmap_min_addr)
+	if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO))
 		return -EACCES;
 	return 0;
 }
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index f5f3e6da5da7..2fa483f26113 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -65,6 +65,7 @@ static DEFINE_MUTEX(sel_mutex);
 /* global data for booleans */
 static struct dentry *bool_dir = NULL;
 static int bool_num = 0;
+static char **bool_pending_names;
 static int *bool_pending_values = NULL;
 
 /* global data for classes */
@@ -832,15 +833,16 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
 	ssize_t length;
 	ssize_t ret;
 	int cur_enforcing;
-	struct inode *inode;
+	struct inode *inode = filep->f_path.dentry->d_inode;
+	unsigned index = inode->i_ino & SEL_INO_MASK;
+	const char *name = filep->f_path.dentry->d_name.name;
 
 	mutex_lock(&sel_mutex);
 
-	ret = -EFAULT;
-
-	/* check to see if this file has been deleted */
-	if (!filep->f_op)
+	if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
+		ret = -EINVAL;
 		goto out;
+	}
 
 	if (count > PAGE_SIZE) {
 		ret = -EINVAL;
@@ -851,15 +853,13 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
 		goto out;
 	}
 
-	inode = filep->f_path.dentry->d_inode;
-	cur_enforcing = security_get_bool_value(inode->i_ino&SEL_INO_MASK);
+	cur_enforcing = security_get_bool_value(index);
 	if (cur_enforcing < 0) {
 		ret = cur_enforcing;
 		goto out;
 	}
-
 	length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
-			   bool_pending_values[inode->i_ino&SEL_INO_MASK]);
+			   bool_pending_values[index]);
 	ret = simple_read_from_buffer(buf, count, ppos, page, length);
 out:
 	mutex_unlock(&sel_mutex);
@@ -872,9 +872,11 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
 			      size_t count, loff_t *ppos)
 {
 	char *page = NULL;
-	ssize_t length = -EFAULT;
+	ssize_t length;
 	int new_value;
-	struct inode *inode;
+	struct inode *inode = filep->f_path.dentry->d_inode;
+	unsigned index = inode->i_ino & SEL_INO_MASK;
+	const char *name = filep->f_path.dentry->d_name.name;
 
 	mutex_lock(&sel_mutex);
 
@@ -882,16 +884,19 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
 	if (length)
 		goto out;
 
-	/* check to see if this file has been deleted */
-	if (!filep->f_op)
+	if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
+		length = -EINVAL;
 		goto out;
+	}
 
 	if (count >= PAGE_SIZE) {
 		length = -ENOMEM;
 		goto out;
 	}
+
 	if (*ppos != 0) {
 		/* No partial writes. */
+		length = -EINVAL;
 		goto out;
 	}
 	page = (char*)get_zeroed_page(GFP_KERNEL);
@@ -900,6 +905,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
 		goto out;
 	}
 
+	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
 		goto out;
 
@@ -910,8 +916,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
 	if (new_value)
 		new_value = 1;
 
-	inode = filep->f_path.dentry->d_inode;
-	bool_pending_values[inode->i_ino&SEL_INO_MASK] = new_value;
+	bool_pending_values[index] = new_value;
 	length = count;
 
 out:
@@ -931,7 +936,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
 			size_t count, loff_t *ppos)
 {
 	char *page = NULL;
-	ssize_t length = -EFAULT;
+	ssize_t length;
 	int new_value;
 
 	mutex_lock(&sel_mutex);
@@ -940,10 +945,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
 	if (length)
 		goto out;
 
-	/* check to see if this file has been deleted */
-	if (!filep->f_op)
-		goto out;
-
 	if (count >= PAGE_SIZE) {
 		length = -ENOMEM;
 		goto out;
@@ -958,6 +959,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
 		goto out;
 	}
 
+	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
 		goto out;
 
@@ -982,11 +984,9 @@ static const struct file_operations sel_commit_bools_ops = {
 	.write = sel_commit_bools_write,
 };
 
-/* partial revoke() from fs/proc/generic.c proc_kill_inodes */
 static void sel_remove_entries(struct dentry *de)
 {
-	struct list_head *p, *node;
-	struct super_block *sb = de->d_sb;
+	struct list_head *node;
 
 	spin_lock(&dcache_lock);
 	node = de->d_subdirs.next;
@@ -1006,18 +1006,6 @@ static void sel_remove_entries(struct dentry *de)
 	}
 
 	spin_unlock(&dcache_lock);
-
-	file_list_lock();
-	list_for_each(p, &sb->s_files) {
-		struct file * filp = list_entry(p, struct file, f_u.fu_list);
-		struct dentry * dentry = filp->f_path.dentry;
-
-		if (dentry->d_parent != de) {
-			continue;
-		}
-		filp->f_op = NULL;
-	}
-	file_list_unlock();
 }
 
 #define BOOL_DIR_NAME "booleans"
@@ -1036,7 +1024,9 @@ static int sel_make_bools(void)
 	u32 sid;
 
 	/* remove any existing files */
+	kfree(bool_pending_names);
 	kfree(bool_pending_values);
+	bool_pending_names = NULL;
 	bool_pending_values = NULL;
 
 	sel_remove_entries(dir);
@@ -1078,16 +1068,17 @@ static int sel_make_bools(void)
 		d_add(dentry, inode);
 	}
 	bool_num = num;
+	bool_pending_names = names;
 	bool_pending_values = values;
 out:
 	free_page((unsigned long)page);
+	return ret;
+err:
 	if (names) {
 		for (i = 0; i < num; i++)
 			kfree(names[i]);
 		kfree(names);
 	}
-	return ret;
-err:
 	kfree(values);
 	sel_remove_entries(dir);
 	ret = -ENOMEM;