 Documentation/filesystems/Locking | 3
 arch/alpha/Kconfig | 3
 arch/alpha/include/asm/io.h | 8
 arch/alpha/kernel/Makefile | 4
 arch/alpha/kernel/irq.c | 31
 arch/alpha/kernel/irq_alpha.c | 24
 arch/alpha/kernel/irq_i8259.c | 26
 arch/alpha/kernel/irq_pyxis.c | 27
 arch/alpha/kernel/irq_srm.c | 27
 arch/alpha/kernel/osf_sys.c | 3
 arch/alpha/kernel/sys_alcor.c | 27
 arch/alpha/kernel/sys_cabriolet.c | 28
 arch/alpha/kernel/sys_dp264.c | 50
 arch/alpha/kernel/sys_eb64p.c | 27
 arch/alpha/kernel/sys_eiger.c | 27
 arch/alpha/kernel/sys_jensen.c | 55
 arch/alpha/kernel/sys_marvel.c | 50
 arch/alpha/kernel/sys_mikasa.c | 27
 arch/alpha/kernel/sys_noritake.c | 27
 arch/alpha/kernel/sys_rawhide.c | 27
 arch/alpha/kernel/sys_rx164.c | 27
 arch/alpha/kernel/sys_sable.c | 28
 arch/alpha/kernel/sys_takara.c | 27
 arch/alpha/kernel/sys_titan.c | 31
 arch/alpha/kernel/sys_wildfire.c | 42
 arch/alpha/lib/Makefile | 4
 arch/alpha/math-emu/Makefile | 2
 arch/alpha/mm/Makefile | 2
 arch/alpha/oprofile/Makefile | 2
 arch/arm/mach-omap1/board-ams-delta.c | 17
 arch/arm/plat-nomadik/include/plat/ste_dma40.h | 8
 arch/arm/plat-omap/include/plat/onenand.h | 1
 arch/microblaze/kernel/prom.c | 4
 arch/mips/kernel/prom.c | 6
 arch/powerpc/kernel/prom.c | 4
 drivers/dma/Kconfig | 9
 drivers/dma/amba-pl08x.c | 1168
 drivers/dma/at_hdmac.c | 19
 drivers/dma/fsldma.c | 4
 drivers/dma/intel_mid_dma.c | 33
 drivers/dma/iop-adma.c | 4
 drivers/dma/pch_dma.c | 19
 drivers/dma/ste_dma40.c | 191
 drivers/dma/ste_dma40_ll.c | 246
 drivers/dma/ste_dma40_ll.h | 36
 drivers/gpu/drm/nouveau/nouveau_drv.h | 15
 drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
 drivers/gpu/drm/nouveau/nouveau_mem.c | 26
 drivers/gpu/drm/nouveau/nouveau_mm.c | 182
 drivers/gpu/drm/nouveau/nouveau_mm.h | 4
 drivers/gpu/drm/nouveau/nv40_graph.c | 3
 drivers/gpu/drm/nouveau/nv40_grctx.c | 21
 drivers/gpu/drm/nouveau/nv40_mc.c | 14
 drivers/gpu/drm/nouveau/nv50_instmem.c | 7
 drivers/gpu/drm/nouveau/nvc0_graph.c | 3
 drivers/gpu/drm/nouveau/nvc0_vm.c | 4
 drivers/gpu/drm/radeon/evergreen.c | 30
 drivers/gpu/drm/radeon/r100.c | 11
 drivers/gpu/drm/radeon/r300.c | 11
 drivers/gpu/drm/radeon/r600.c | 23
 drivers/gpu/drm/radeon/radeon.h | 1
 drivers/gpu/drm/radeon/radeon_drv.c | 4
 drivers/gpu/drm/radeon/reg_srcs/evergreen | 2
 drivers/gpu/drm/radeon/rs600.c | 16
 drivers/gpu/drm/radeon/rv770.c | 5
 drivers/mtd/Kconfig | 19
 drivers/mtd/Makefile | 2
 drivers/mtd/chips/cfi_cmdset_0001.c | 55
 drivers/mtd/chips/cfi_cmdset_0002.c | 116
 drivers/mtd/chips/cfi_cmdset_0020.c | 1
 drivers/mtd/chips/cfi_util.c | 2
 drivers/mtd/chips/fwh_lock.h | 2
 drivers/mtd/devices/m25p80.c | 39
 drivers/mtd/devices/sst25l.c | 4
 drivers/mtd/maps/amd76xrom.c | 7
 drivers/mtd/maps/bcm963xx-flash.c | 5
 drivers/mtd/maps/ck804xrom.c | 7
 drivers/mtd/maps/esb2rom.c | 9
 drivers/mtd/maps/ichxrom.c | 9
 drivers/mtd/maps/physmap_of.c | 4
 drivers/mtd/maps/scx200_docflash.c | 5
 drivers/mtd/maps/tqm8xxl.c | 2
 drivers/mtd/mtdchar.c | 10
 drivers/mtd/mtdconcat.c | 1
 drivers/mtd/mtdoops.c | 3
 drivers/mtd/mtdpart.c | 30
 drivers/mtd/nand/Kconfig | 1
 drivers/mtd/nand/ams-delta.c | 80
 drivers/mtd/nand/fsl_elbc_nand.c | 2
 drivers/mtd/nand/fsmc_nand.c | 89
 drivers/mtd/nand/jz4740_nand.c | 57
 drivers/mtd/nand/mxc_nand.c | 2
 drivers/mtd/nand/nand_base.c | 25
 drivers/mtd/nand/nand_bbt.c | 3
 drivers/mtd/nand/nandsim.c | 39
 drivers/mtd/nand/pasemi_nand.c | 2
 drivers/mtd/nand/pxa3xx_nand.c | 2
 drivers/mtd/nand/txx9ndfmc.c | 5
 drivers/mtd/onenand/omap2.c | 80
 drivers/mtd/onenand/onenand_base.c | 81
 drivers/mtd/onenand/onenand_bbt.c | 10
 drivers/mtd/onenand/samsung.c | 7
 drivers/mtd/ubi/build.c | 28
 drivers/mtd/ubi/vtbl.c | 6
 drivers/of/fdt.c | 8
 drivers/spi/spi_sh_msiof.c | 2
 drivers/staging/smbfs/dir.c | 4
 fs/Kconfig | 17
 fs/aio.c | 2
 fs/btrfs/file.c | 113
 fs/btrfs/inode.c | 111
 fs/cifs/connect.c | 2
 fs/cifs/netmisc.c | 4
 fs/compat.c | 10
 fs/ext4/ext4.h | 2
 fs/ext4/extents.c | 11
 fs/ext4/file.c | 2
 fs/file_table.c | 2
 fs/gfs2/file.c | 258
 fs/gfs2/ops_inode.c | 258
 fs/hpfs/inode.c | 2
 fs/internal.h | 3
 fs/ioctl.c | 10
 fs/jffs2/build.c | 5
 fs/jffs2/jffs2_fs_sb.h | 2
 fs/jffs2/xattr.c | 12
 fs/namei.c | 31
 fs/namespace.c | 63
 fs/ocfs2/file.c | 10
 fs/open.c | 4
 fs/xfs/linux-2.6/xfs_file.c | 56
 fs/xfs/linux-2.6/xfs_iops.c | 57
 fs/xfs/support/debug.c | 6
 include/linux/amba/pl08x.h | 99
 include/linux/dmaengine.h | 2
 include/linux/file.h | 2
 include/linux/fs.h | 8
 include/linux/mtd/cfi.h | 3
 include/linux/mtd/fsmc.h | 26
 include/linux/mtd/mtd.h | 11
 include/linux/mtd/nand.h | 2
 include/linux/mtd/onenand.h | 14
 include/linux/mtd/partitions.h | 2
 include/linux/of_fdt.h | 2
 144 files changed, 2403 insertions(+), 2507 deletions(-)
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 651d5237c155..4471a416c274 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -60,7 +60,6 @@ ata *);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
-	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 
 locking rules:
@@ -88,7 +87,6 @@ getxattr: no
 listxattr: no
 removexattr: yes
 truncate_range: yes
-fallocate: no
 fiemap: no
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
@@ -437,6 +435,7 @@ prototypes:
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
 			size_t, unsigned int);
 	int (*setlease)(struct file *, long, struct file_lock **);
+	long (*fallocate)(struct file *, int, loff_t, loff_t);
 };
 
 locking rules:
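The relocated hook now hangs off file_operations rather than inode_operations. A minimal sketch of what a filesystem's wiring looks like under the prototype documented above — "myfs" and myfs_fallocate are hypothetical names, not part of this patch:

#include <linux/fs.h>

/* Sketch only: the signature tracks the prototype documented above;
 * a real filesystem would reserve [offset, offset + len) here. */
static long myfs_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t len)
{
	return -EOPNOTSUPP;
}

static const struct file_operations myfs_file_ops = {
	.fallocate	= myfs_fallocate,
};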
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 943fe6930f77..fc95ee1bcf6f 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -68,6 +68,9 @@ config GENERIC_IOMAP
 	bool
 	default n
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	def_bool y
+
 config GENERIC_HARDIRQS
 	bool
 	default y
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index eda9b909aa05..56ff96501350 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -37,8 +37,9 @@
  */
 extern inline void __set_hae(unsigned long new_hae)
 {
-	unsigned long flags;
-	local_irq_save(flags);
+	unsigned long flags = swpipl(IPL_MAX);
+
+	barrier();
 
 	alpha_mv.hae_cache = new_hae;
 	*alpha_mv.hae_register = new_hae;
@@ -46,7 +47,8 @@ extern inline void __set_hae(unsigned long new_hae)
 	/* Re-read to make sure it was written. */
 	new_hae = *alpha_mv.hae_register;
 
-	local_irq_restore(flags);
+	setipl(flags);
+	barrier();
 }
 
 extern inline void set_hae(unsigned long new_hae)
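The change above trades local_irq_save() for explicit interrupt-priority-level management: swpipl(IPL_MAX) raises the IPL and returns the old one, setipl() restores it. Roughly, the idiom reads as follows — a sketch only, ipl_critical_sketch is not kernel code:

#include <asm/system.h>

/* Sketch of the IPL critical-section idiom used by __set_hae() above:
 * raise the priority level, do the racy work, restore the saved level. */
static inline void ipl_critical_sketch(void)
{
	unsigned long flags = swpipl(IPL_MAX);	/* block all interrupts */

	barrier();
	/* ... update state an interrupt handler might also touch ... */
	setipl(flags);				/* return to the saved IPL */
	barrier();
}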
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 1ee9b5b629b8..9bb7b858ed23 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -3,8 +3,8 @@
 #
 
 extra-y		:= head.o vmlinux.lds
-EXTRA_AFLAGS	:= $(KBUILD_CFLAGS)
-EXTRA_CFLAGS	:= -Werror -Wno-sign-compare
+asflags-y	:= $(KBUILD_CFLAGS)
+ccflags-y	:= -Werror -Wno-sign-compare
 
 obj-y	:= entry.o traps.o process.o init_task.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index fe912984d9b1..9ab234f48dd8 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,10 +44,11 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
+	struct irq_desc *desc = irq_to_desc[irq];
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
+	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
 		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -55,8 +56,8 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
-	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(desc->affinity, cpumask_of(cpu));
+	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
 #endif /* CONFIG_SMP */
@@ -67,6 +68,7 @@ show_interrupts(struct seq_file *p, void *v)
 	int j;
 	int irq = *(loff_t *) v;
 	struct irqaction * action;
+	struct irq_desc *desc;
 	unsigned long flags;
 
 #ifdef CONFIG_SMP
@@ -79,8 +81,13 @@ show_interrupts(struct seq_file *p, void *v)
 #endif
 
 	if (irq < ACTUAL_NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
-		action = irq_desc[irq].action;
+		desc = irq_to_desc(irq);
+
+		if (!desc)
+			return 0;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto unlock;
 		seq_printf(p, "%3d: ", irq);
@@ -90,7 +97,7 @@ show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[irq].chip->name);
+		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, " %c%s",
 			(action->flags & IRQF_DISABLED)?'+':' ',
 			action->name);
@@ -103,7 +110,7 @@ show_interrupts(struct seq_file *p, void *v)
 
 	seq_putc(p, '\n');
 unlock:
-	raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (irq == ACTUAL_NR_IRQS) {
 #ifdef CONFIG_SMP
 	seq_puts(p, "IPI: ");
@@ -142,8 +149,10 @@ handle_irq(int irq)
 	 * handled by some other CPU. (or is disabled)
 	 */
 	static unsigned int illegal_count=0;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) {
+	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
+	    illegal_count < MAX_ILLEGAL_IRQS)) {
 		irq_err_count++;
 		illegal_count++;
 		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
@@ -151,14 +160,14 @@ handle_irq(int irq)
 		return;
 	}
 
-	irq_enter();
 	/*
-	 * __do_IRQ() must be called with IPL_MAX. Note that we do not
+	 * From here we must proceed with IPL_MAX. Note that we do not
 	 * explicitly enable interrupts afterwards - some MILO PALcode
 	 * (namely LX164 one) seems to have severe problems with RTI
 	 * at IPL 0.
 	 */
 	local_irq_disable();
-	__do_IRQ(irq);
+	irq_enter();
+	generic_handle_irq_desc(irq, desc);
 	irq_exit();
 }
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 4c8bb374eb0a..2d0679b60939 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -219,31 +219,23 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
  * processed by PALcode, and comes in via entInt vector 1.
  */
 
-static void rtc_enable_disable(unsigned int irq) { }
-static unsigned int rtc_startup(unsigned int irq) { return 0; }
-
 struct irqaction timer_irqaction = {
 	.handler	= timer_interrupt,
 	.flags		= IRQF_DISABLED,
 	.name		= "timer",
 };
 
-static struct irq_chip rtc_irq_type = {
-	.name		= "RTC",
-	.startup	= rtc_startup,
-	.shutdown	= rtc_enable_disable,
-	.enable		= rtc_enable_disable,
-	.disable	= rtc_enable_disable,
-	.ack		= rtc_enable_disable,
-	.end		= rtc_enable_disable,
-};
-
 void __init
 init_rtc_irq(void)
 {
-	irq_desc[RTC_IRQ].status = IRQ_DISABLED;
-	irq_desc[RTC_IRQ].chip = &rtc_irq_type;
-	setup_irq(RTC_IRQ, &timer_irqaction);
+	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
+
+	if (desc) {
+		desc->status |= IRQ_DISABLED;
+		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+					      handle_simple_irq, "RTC");
+		setup_irq(RTC_IRQ, &timer_irqaction);
+	}
 }
 
 /* Dummy irqactions. */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 83a9ac280890..956ea0ed1694 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -69,28 +69,11 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 	spin_unlock(&i8259_irq_lock);
 }
 
-unsigned int
-i8259a_startup_irq(unsigned int irq)
-{
-	i8259a_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-void
-i8259a_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		i8259a_enable_irq(irq);
-}
-
 struct irq_chip i8259a_irq_type = {
 	.name		= "XT-PIC",
-	.startup	= i8259a_startup_irq,
-	.shutdown	= i8259a_disable_irq,
-	.enable		= i8259a_enable_irq,
-	.disable	= i8259a_disable_irq,
-	.ack		= i8259a_mask_and_ack_irq,
-	.end		= i8259a_end_irq,
+	.unmask		= i8259a_enable_irq,
+	.mask		= i8259a_disable_irq,
+	.mask_ack	= i8259a_mask_and_ack_irq,
 };
 
 void __init
@@ -107,8 +90,7 @@ init_i8259a_irqs(void)
 	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
 
 	for (i = 0; i < 16; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].chip = &i8259a_irq_type;
+		set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
 	}
 
 	setup_irq(2, &cascade);
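This conversion is the template for every board file below: the six ad-hoc startup/shutdown/enable/disable/ack/end callbacks collapse into mask/unmask/mask_ack because the generic level-triggered flow handler sequences them itself, which is what makes the old startup and end helpers redundant. In rough outline — a simplified sketch, not code from this patch; the real handle_level_irq() in kernel/irq/chip.c also takes desc->lock and tracks IRQ_INPROGRESS:

/* Simplified sketch of the generic level-IRQ flow that replaces the
 * per-board end_irq() logic removed throughout this series. */
static void handle_level_irq_sketch(unsigned int irq, struct irq_desc *desc)
{
	get_irq_desc_chip(desc)->mask_ack(irq);		/* quiesce the line */

	if (desc->action && !(desc->status & IRQ_DISABLED))
		handle_IRQ_event(irq, desc->action);	/* run the handlers */

	if (!(desc->status & IRQ_DISABLED))
		get_irq_desc_chip(desc)->unmask(irq);	/* reopen the line */
}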
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 989ce46a0cf3..2863458c853e 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -40,20 +40,6 @@ pyxis_disable_irq(unsigned int irq)
 	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 }
 
-static unsigned int
-pyxis_startup_irq(unsigned int irq)
-{
-	pyxis_enable_irq(irq);
-	return 0;
-}
-
-static void
-pyxis_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		pyxis_enable_irq(irq);
-}
-
 static void
 pyxis_mask_and_ack_irq(unsigned int irq)
 {
@@ -72,12 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
 	.name		= "PYXIS",
-	.startup	= pyxis_startup_irq,
-	.shutdown	= pyxis_disable_irq,
-	.enable		= pyxis_enable_irq,
-	.disable	= pyxis_disable_irq,
-	.ack		= pyxis_mask_and_ack_irq,
-	.end		= pyxis_end_irq,
+	.mask_ack	= pyxis_mask_and_ack_irq,
+	.mask		= pyxis_disable_irq,
+	.unmask		= pyxis_enable_irq,
 };
 
 void
@@ -119,8 +102,8 @@ init_pyxis_irqs(unsigned long ignore_mask)
 	for (i = 16; i < 48; ++i) {
 		if ((ignore_mask >> i) & 1)
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &pyxis_irq_type;
+		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
+		irq_to_desc(i)->status |= IRQ_LEVEL;
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index d63e93e1e8bf..0e57e828b413 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -33,29 +33,12 @@ srm_disable_irq(unsigned int irq)
 	spin_unlock(&srm_irq_lock);
 }
 
-static unsigned int
-srm_startup_irq(unsigned int irq)
-{
-	srm_enable_irq(irq);
-	return 0;
-}
-
-static void
-srm_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		srm_enable_irq(irq);
-}
-
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
 static struct irq_chip srm_irq_type = {
 	.name		= "SRM",
-	.startup	= srm_startup_irq,
-	.shutdown	= srm_disable_irq,
-	.enable		= srm_enable_irq,
-	.disable	= srm_disable_irq,
-	.ack		= srm_disable_irq,
-	.end		= srm_end_irq,
+	.unmask		= srm_enable_irq,
+	.mask		= srm_disable_irq,
+	.mask_ack	= srm_disable_irq,
 };
 
 void __init
@@ -68,8 +51,8 @@ init_srm_irqs(long max, unsigned long ignore_mask)
 	for (i = 16; i < max; ++i) {
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &srm_irq_type;
+		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
+		irq_to_desc(i)->status |= IRQ_LEVEL;
 	}
 }
 
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 547e8b84b2f7..fe698b5045e9 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -951,9 +951,6 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
 	return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
 }
 
-#define MAX_SELECT_SECONDS \
-	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-
 SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
 		fd_set __user *, exp, struct timeval32 __user *, tvp)
 {
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 20a30b8b9655..7bef61768236 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -65,13 +65,6 @@ alcor_mask_and_ack_irq(unsigned int irq)
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
-static unsigned int
-alcor_startup_irq(unsigned int irq)
-{
-	alcor_enable_irq(irq);
-	return 0;
-}
-
 static void
 alcor_isa_mask_and_ack_irq(unsigned int irq)
 {
@@ -82,21 +75,11 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
-static void
-alcor_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		alcor_enable_irq(irq);
-}
-
 static struct irq_chip alcor_irq_type = {
 	.name		= "ALCOR",
-	.startup	= alcor_startup_irq,
-	.shutdown	= alcor_disable_irq,
-	.enable		= alcor_enable_irq,
-	.disable	= alcor_disable_irq,
-	.ack		= alcor_mask_and_ack_irq,
-	.end		= alcor_end_irq,
+	.unmask		= alcor_enable_irq,
+	.mask		= alcor_disable_irq,
+	.mask_ack	= alcor_mask_and_ack_irq,
 };
 
 static void
@@ -142,8 +125,8 @@ alcor_init_irq(void)
 		   on while IRQ probing.  */
 		if (i >= 16+20 && i <= 16+30)
 			continue;
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &alcor_irq_type;
+		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
+		irq_to_desc(i)->status |= IRQ_LEVEL;
 	}
 	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
 
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 14c8898d19ec..b0c916493aea 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -57,28 +57,11 @@ cabriolet_disable_irq(unsigned int irq)
 	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
 }
 
-static unsigned int
-cabriolet_startup_irq(unsigned int irq)
-{
-	cabriolet_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-cabriolet_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		cabriolet_enable_irq(irq);
-}
-
 static struct irq_chip cabriolet_irq_type = {
 	.name		= "CABRIOLET",
-	.startup	= cabriolet_startup_irq,
-	.shutdown	= cabriolet_disable_irq,
-	.enable		= cabriolet_enable_irq,
-	.disable	= cabriolet_disable_irq,
-	.ack		= cabriolet_disable_irq,
-	.end		= cabriolet_end_irq,
+	.unmask		= cabriolet_enable_irq,
+	.mask		= cabriolet_disable_irq,
+	.mask_ack	= cabriolet_disable_irq,
 };
 
 static void
@@ -122,8 +105,9 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
 		outb(0xff, 0x806);
 
 		for (i = 16; i < 35; ++i) {
-			irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-			irq_desc[i].chip = &cabriolet_irq_type;
+			set_irq_chip_and_handler(i, &cabriolet_irq_type,
+						 handle_level_irq);
+			irq_to_desc(i)->status |= IRQ_LEVEL;
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 4026502ab707..edad5f759ccd 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -115,20 +115,6 @@ dp264_disable_irq(unsigned int irq)
 	spin_unlock(&dp264_irq_lock);
 }
 
-static unsigned int
-dp264_startup_irq(unsigned int irq)
-{
-	dp264_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-dp264_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		dp264_enable_irq(irq);
-}
-
 static void
 clipper_enable_irq(unsigned int irq)
 {
@@ -147,20 +133,6 @@ clipper_disable_irq(unsigned int irq)
 	spin_unlock(&dp264_irq_lock);
 }
 
-static unsigned int
-clipper_startup_irq(unsigned int irq)
-{
-	clipper_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-clipper_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		clipper_enable_irq(irq);
-}
-
 static void
 cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 {
@@ -200,23 +172,17 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 
 static struct irq_chip dp264_irq_type = {
 	.name		= "DP264",
-	.startup	= dp264_startup_irq,
-	.shutdown	= dp264_disable_irq,
-	.enable		= dp264_enable_irq,
-	.disable	= dp264_disable_irq,
-	.ack		= dp264_disable_irq,
-	.end		= dp264_end_irq,
+	.unmask		= dp264_enable_irq,
+	.mask		= dp264_disable_irq,
+	.mask_ack	= dp264_disable_irq,
 	.set_affinity	= dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
 	.name		= "CLIPPER",
-	.startup	= clipper_startup_irq,
-	.shutdown	= clipper_disable_irq,
-	.enable		= clipper_enable_irq,
-	.disable	= clipper_disable_irq,
-	.ack		= clipper_disable_irq,
-	.end		= clipper_end_irq,
+	.unmask		= clipper_enable_irq,
+	.mask		= clipper_disable_irq,
+	.mask_ack	= clipper_disable_irq,
 	.set_affinity	= clipper_set_affinity,
 };
 
@@ -302,8 +268,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = ops;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, ops, handle_level_irq);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index df2090ce5e7f..ae5f29d127b0 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -55,28 +55,11 @@ eb64p_disable_irq(unsigned int irq)
 	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
 }
 
-static unsigned int
-eb64p_startup_irq(unsigned int irq)
-{
-	eb64p_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-eb64p_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		eb64p_enable_irq(irq);
-}
-
 static struct irq_chip eb64p_irq_type = {
 	.name		= "EB64P",
-	.startup	= eb64p_startup_irq,
-	.shutdown	= eb64p_disable_irq,
-	.enable		= eb64p_enable_irq,
-	.disable	= eb64p_disable_irq,
-	.ack		= eb64p_disable_irq,
-	.end		= eb64p_end_irq,
+	.unmask		= eb64p_enable_irq,
+	.mask		= eb64p_disable_irq,
+	.mask_ack	= eb64p_disable_irq,
 };
 
 static void
@@ -135,8 +118,8 @@ eb64p_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &eb64p_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 3ca1dbcf4044..1121bc5c6c6c 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -66,28 +66,11 @@ eiger_disable_irq(unsigned int irq)
 	eiger_update_irq_hw(irq, mask);
 }
 
-static unsigned int
-eiger_startup_irq(unsigned int irq)
-{
-	eiger_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-eiger_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		eiger_enable_irq(irq);
-}
-
 static struct irq_chip eiger_irq_type = {
 	.name		= "EIGER",
-	.startup	= eiger_startup_irq,
-	.shutdown	= eiger_disable_irq,
-	.enable		= eiger_enable_irq,
-	.disable	= eiger_disable_irq,
-	.ack		= eiger_disable_irq,
-	.end		= eiger_end_irq,
+	.unmask		= eiger_enable_irq,
+	.mask		= eiger_disable_irq,
+	.mask_ack	= eiger_disable_irq,
 };
 
 static void
@@ -153,8 +136,8 @@ eiger_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &eiger_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 7a7ae36fff91..34f55e03d331 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -62,30 +62,6 @@
  * world.
  */
 
-static unsigned int
-jensen_local_startup(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_startup_irq(1);
-	else
-		/*
-		 * For all true local interrupts, set the flag that prevents
-		 * the IPL from being dropped during handler processing.
-		 */
-		if (irq_desc[irq].action)
-			irq_desc[irq].action->flags |= IRQF_DISABLED;
-	return 0;
-}
-
-static void
-jensen_local_shutdown(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_disable_irq(1);
-}
-
 static void
 jensen_local_enable(unsigned int irq)
 {
@@ -103,29 +79,18 @@ jensen_local_disable(unsigned int irq)
 }
 
 static void
-jensen_local_ack(unsigned int irq)
+jensen_local_mask_ack(unsigned int irq)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
 	if (irq == 7)
 		i8259a_mask_and_ack_irq(1);
 }
 
-static void
-jensen_local_end(unsigned int irq)
-{
-	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_end_irq(1);
-}
-
 static struct irq_chip jensen_local_irq_type = {
 	.name		= "LOCAL",
-	.startup	= jensen_local_startup,
-	.shutdown	= jensen_local_shutdown,
-	.enable		= jensen_local_enable,
-	.disable	= jensen_local_disable,
-	.ack		= jensen_local_ack,
-	.end		= jensen_local_end,
+	.unmask		= jensen_local_enable,
+	.mask		= jensen_local_disable,
+	.mask_ack	= jensen_local_mask_ack,
 };
 
 static void
@@ -158,7 +123,7 @@ jensen_device_interrupt(unsigned long vector)
 	}
 
 	/* If there is no handler yet... */
-	if (irq_desc[irq].action == NULL) {
+	if (!irq_has_action(irq)) {
 		/* If it is a local interrupt that cannot be masked... */
 		if (vector >= 0x900)
 		{
@@ -206,11 +171,11 @@ jensen_init_irq(void)
 {
 	init_i8259a_irqs();
 
-	irq_desc[1].chip = &jensen_local_irq_type;
-	irq_desc[4].chip = &jensen_local_irq_type;
-	irq_desc[3].chip = &jensen_local_irq_type;
-	irq_desc[7].chip = &jensen_local_irq_type;
-	irq_desc[9].chip = &jensen_local_irq_type;
+	set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
 
 	common_init_isa_dma();
 }
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 0bb3b5c4f693..2bfc9f1b1ddc 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -143,20 +143,6 @@ io7_disable_irq(unsigned int irq)
 	spin_unlock(&io7->irq_lock);
 }
 
-static unsigned int
-io7_startup_irq(unsigned int irq)
-{
-	io7_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-io7_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		io7_enable_irq(irq);
-}
-
 static void
 marvel_irq_noop(unsigned int irq)
 {
@@ -171,32 +157,22 @@ marvel_irq_noop_return(unsigned int irq)
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name		= "LEGACY",
-	.startup	= marvel_irq_noop_return,
-	.shutdown	= marvel_irq_noop,
-	.enable		= marvel_irq_noop,
-	.disable	= marvel_irq_noop,
-	.ack		= marvel_irq_noop,
-	.end		= marvel_irq_noop,
+	.mask		= marvel_irq_noop,
+	.unmask		= marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name		= "LSI",
-	.startup	= io7_startup_irq,
-	.shutdown	= io7_disable_irq,
-	.enable		= io7_enable_irq,
-	.disable	= io7_disable_irq,
-	.ack		= io7_disable_irq,
-	.end		= io7_end_irq,
+	.unmask		= io7_enable_irq,
+	.mask		= io7_disable_irq,
+	.mask_ack	= io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name		= "MSI",
-	.startup	= io7_startup_irq,
-	.shutdown	= io7_disable_irq,
-	.enable		= io7_enable_irq,
-	.disable	= io7_disable_irq,
+	.unmask		= io7_enable_irq,
+	.mask		= io7_disable_irq,
 	.ack		= marvel_irq_noop,
-	.end		= io7_end_irq,
 };
 
 static void
@@ -304,8 +280,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the lsi irqs.  */
 	for (i = 0; i < 128; ++i) {
-		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[base + i].chip = lsi_ops;
+		irq_to_desc(base + i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
 	}
 
 	/* Disable the implemented irqs in hardware.  */
@@ -318,8 +294,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the msi irqs.  */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[base + i].chip = msi_ops;
+		irq_to_desc(base + i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
 	}
 
 	for (i = 0; i < 16; ++i)
@@ -336,8 +312,8 @@ marvel_init_irq(void)
 
 	/* Reserve the legacy irqs.  */
 	for (i = 0; i < 16; ++i) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].chip = &marvel_legacy_irq_type;
+		set_irq_chip_and_handler(i, &marvel_legacy_irq_type,
+					 handle_level_irq);
 	}
 
 	/* Init the io7 irqs.  */
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index ee8865169811..bcc1639e8efb 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -54,28 +54,11 @@ mikasa_disable_irq(unsigned int irq)
 	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
 }
 
-static unsigned int
-mikasa_startup_irq(unsigned int irq)
-{
-	mikasa_enable_irq(irq);
-	return 0;
-}
-
-static void
-mikasa_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		mikasa_enable_irq(irq);
-}
-
 static struct irq_chip mikasa_irq_type = {
 	.name		= "MIKASA",
-	.startup	= mikasa_startup_irq,
-	.shutdown	= mikasa_disable_irq,
-	.enable		= mikasa_enable_irq,
-	.disable	= mikasa_disable_irq,
-	.ack		= mikasa_disable_irq,
-	.end		= mikasa_end_irq,
+	.unmask		= mikasa_enable_irq,
+	.mask		= mikasa_disable_irq,
+	.mask_ack	= mikasa_disable_irq,
 };
 
 static void
@@ -115,8 +98,8 @@ mikasa_init_irq(void)
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &mikasa_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 86503fe73a88..e88f4ae1260e 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -59,28 +59,11 @@ noritake_disable_irq(unsigned int irq)
 	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
 }
 
-static unsigned int
-noritake_startup_irq(unsigned int irq)
-{
-	noritake_enable_irq(irq);
-	return 0;
-}
-
-static void
-noritake_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		noritake_enable_irq(irq);
-}
-
 static struct irq_chip noritake_irq_type = {
 	.name		= "NORITAKE",
-	.startup	= noritake_startup_irq,
-	.shutdown	= noritake_disable_irq,
-	.enable		= noritake_enable_irq,
-	.disable	= noritake_disable_irq,
-	.ack		= noritake_disable_irq,
-	.end		= noritake_end_irq,
+	.unmask		= noritake_enable_irq,
+	.mask		= noritake_disable_irq,
+	.mask_ack	= noritake_disable_irq,
};
 
 static void
@@ -144,8 +127,8 @@ noritake_init_irq(void)
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &noritake_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 26c322bf89ee..6a51364dd1cc 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -121,28 +121,11 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 	spin_unlock(&rawhide_irq_lock);
 }
 
-static unsigned int
-rawhide_startup_irq(unsigned int irq)
-{
-	rawhide_enable_irq(irq);
-	return 0;
-}
-
-static void
-rawhide_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		rawhide_enable_irq(irq);
-}
-
 static struct irq_chip rawhide_irq_type = {
 	.name		= "RAWHIDE",
-	.startup	= rawhide_startup_irq,
-	.shutdown	= rawhide_disable_irq,
-	.enable		= rawhide_enable_irq,
-	.disable	= rawhide_disable_irq,
-	.ack		= rawhide_mask_and_ack_irq,
-	.end		= rawhide_end_irq,
+	.unmask		= rawhide_enable_irq,
+	.mask		= rawhide_disable_irq,
+	.mask_ack	= rawhide_mask_and_ack_irq,
 };
 
 static void
@@ -194,8 +177,8 @@ rawhide_init_irq(void)
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &rawhide_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index be161129eab9..89e7e37ec84c 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -58,28 +58,11 @@ rx164_disable_irq(unsigned int irq)
 	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
 }
 
-static unsigned int
-rx164_startup_irq(unsigned int irq)
-{
-	rx164_enable_irq(irq);
-	return 0;
-}
-
-static void
-rx164_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		rx164_enable_irq(irq);
-}
-
 static struct irq_chip rx164_irq_type = {
 	.name		= "RX164",
-	.startup	= rx164_startup_irq,
-	.shutdown	= rx164_disable_irq,
-	.enable		= rx164_enable_irq,
-	.disable	= rx164_disable_irq,
-	.ack		= rx164_disable_irq,
-	.end		= rx164_end_irq,
+	.unmask		= rx164_enable_irq,
+	.mask		= rx164_disable_irq,
+	.mask_ack	= rx164_disable_irq,
 };
 
 static void
@@ -116,8 +99,8 @@ rx164_init_irq(void)
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &rx164_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index b2abe27a23cf..5c4423d1b06c 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -474,20 +474,6 @@ sable_lynx_disable_irq(unsigned int irq)
 #endif
 }
 
-static unsigned int
-sable_lynx_startup_irq(unsigned int irq)
-{
-	sable_lynx_enable_irq(irq);
-	return 0;
-}
-
-static void
-sable_lynx_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		sable_lynx_enable_irq(irq);
-}
-
 static void
 sable_lynx_mask_and_ack_irq(unsigned int irq)
 {
@@ -503,12 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name		= "SABLE/LYNX",
-	.startup	= sable_lynx_startup_irq,
-	.shutdown	= sable_lynx_disable_irq,
-	.enable		= sable_lynx_enable_irq,
-	.disable	= sable_lynx_disable_irq,
-	.ack		= sable_lynx_mask_and_ack_irq,
-	.end		= sable_lynx_end_irq,
+	.unmask		= sable_lynx_enable_irq,
+	.mask		= sable_lynx_disable_irq,
+	.mask_ack	= sable_lynx_mask_and_ack_irq,
 };
 
 static void
@@ -535,8 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &sable_lynx_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
+					 handle_level_irq);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 4da596b6adbb..f8a1e8a862fb 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -60,28 +60,11 @@ takara_disable_irq(unsigned int irq)
 	takara_update_irq_hw(irq, mask);
 }
 
-static unsigned int
-takara_startup_irq(unsigned int irq)
-{
-	takara_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-takara_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		takara_enable_irq(irq);
-}
-
 static struct irq_chip takara_irq_type = {
 	.name		= "TAKARA",
-	.startup	= takara_startup_irq,
-	.shutdown	= takara_disable_irq,
-	.enable		= takara_enable_irq,
-	.disable	= takara_disable_irq,
-	.ack		= takara_disable_irq,
-	.end		= takara_end_irq,
+	.unmask		= takara_enable_irq,
+	.mask		= takara_disable_irq,
+	.mask_ack	= takara_disable_irq,
 };
 
 static void
@@ -153,8 +136,8 @@ takara_init_irq(void)
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = &takara_irq_type;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 9008d0f20c53..e02494bf5ef3 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -129,20 +129,6 @@ titan_disable_irq(unsigned int irq)
 	spin_unlock(&titan_irq_lock);
 }
 
-static unsigned int
-titan_startup_irq(unsigned int irq)
-{
-	titan_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-titan_end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		titan_enable_irq(irq);
-}
-
 static void
 titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 {
@@ -189,20 +175,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i].chip = ops;
+		irq_to_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, ops, handle_level_irq);
 	}
 }
 
 static struct irq_chip titan_irq_type = {
 	.name		= "TITAN",
-	.startup	= titan_startup_irq,
-	.shutdown	= titan_disable_irq,
-	.enable		= titan_enable_irq,
-	.disable	= titan_disable_irq,
-	.ack		= titan_disable_irq,
-	.end		= titan_end_irq,
-	.set_affinity	= titan_set_irq_affinity,
+	.unmask		= titan_enable_irq,
+	.mask		= titan_disable_irq,
+	.mask_ack	= titan_disable_irq,
+	.set_affinity	= titan_set_irq_affinity,
 };
 
 static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 62fd972e18ef..eec52594d410 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -139,32 +139,11 @@ wildfire_mask_and_ack_irq(unsigned int irq)
 	spin_unlock(&wildfire_irq_lock);
 }
 
-static unsigned int
-wildfire_startup_irq(unsigned int irq)
-{
-	wildfire_enable_irq(irq);
-	return 0; /* never anything pending */
-}
-
-static void
-wildfire_end_irq(unsigned int irq)
-{
-#if 0
-	if (!irq_desc[irq].action)
-		printk("got irq %d\n", irq);
-#endif
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		wildfire_enable_irq(irq);
-}
-
 static struct irq_chip wildfire_irq_type = {
 	.name		= "WILDFIRE",
-	.startup	= wildfire_startup_irq,
-	.shutdown	= wildfire_disable_irq,
-	.enable		= wildfire_enable_irq,
-	.disable	= wildfire_disable_irq,
-	.ack		= wildfire_mask_and_ack_irq,
-	.end		= wildfire_end_irq,
+	.unmask		= wildfire_enable_irq,
+	.mask		= wildfire_disable_irq,
+	.mask_ack	= wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -198,15 +177,18 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
 	for (i = 0; i < 16; ++i) {
 		if (i == 2)
 			continue;
-		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i+irq_bias].chip = &wildfire_irq_type;
+		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
+					 handle_level_irq);
 	}
 
-	irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-	irq_desc[36+irq_bias].chip = &wildfire_irq_type;
+	irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
+	set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
+				 handle_level_irq);
 	for (i = 40; i < 64; ++i) {
-		irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL;
-		irq_desc[i+irq_bias].chip = &wildfire_irq_type;
+		irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
+					 handle_level_irq);
 	}
 
 	setup_irq(32+irq_bias, &isa_enable);
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 9b72c59c95be..c0a83ab62b78 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -2,8 +2,8 @@
 # Makefile for alpha-specific library files..
 #
 
-EXTRA_AFLAGS := $(KBUILD_CFLAGS)
-EXTRA_CFLAGS := -Werror
+asflags-y := $(KBUILD_CFLAGS)
+ccflags-y := -Werror
 
 # Many of these routines have implementations tuned for ev6.
 # Choose them iff we're targeting ev6 specifically.
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
index 359ef087e69e..7f4671995245 100644
--- a/arch/alpha/math-emu/Makefile
+++ b/arch/alpha/math-emu/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the FPU instruction emulation.
 #
 
-EXTRA_CFLAGS := -w
+ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 09399c5386cb..c993d3f93cf6 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux alpha-specific parts of the memory manager.
 #
 
-EXTRA_CFLAGS := -Werror
+ccflags-y := -Werror
 
 obj-y	:= init.o fault.o extable.o
 
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
index 4aa56247bdc6..3473de751b03 100644
--- a/arch/alpha/oprofile/Makefile
+++ b/arch/alpha/oprofile/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS := -Werror -Wno-sign-compare
+ccflags-y := -Werror -Wno-sign-compare
 
 obj-$(CONFIG_OPROFILE) += oprofile.o
 
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index bd0495a9ac3b..22cc8c8df6cb 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -179,6 +179,22 @@ static struct omap_board_config_kernel ams_delta_config[] = {
 	{ OMAP_TAG_LCD,		&ams_delta_lcd_config },
 };
 
+static struct resource ams_delta_nand_resources[] = {
+	[0] = {
+		.start	= OMAP1_MPUIO_BASE,
+		.end	= OMAP1_MPUIO_BASE +
+				OMAP_MPUIO_IO_CNTL + sizeof(u32) - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device ams_delta_nand_device = {
+	.name	= "ams-delta-nand",
+	.id	= -1,
+	.num_resources	= ARRAY_SIZE(ams_delta_nand_resources),
+	.resource	= ams_delta_nand_resources,
+};
+
 static struct resource ams_delta_kp_resources[] = {
 	[0] = {
 		.start	= INT_KEYBOARD,
@@ -265,6 +281,7 @@ static struct omap1_cam_platform_data ams_delta_camera_platform_data = {
 };
 
 static struct platform_device *ams_delta_devices[] __initdata = {
+	&ams_delta_nand_device,
 	&ams_delta_kp_device,
 	&ams_delta_lcd_device,
 	&ams_delta_led_device,
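A driver claims the device registered above by matching the platform-device name; a hypothetical driver-side sketch of fetching the MPUIO window (this is not the actual ams-delta NAND driver added elsewhere in this series):

#include <linux/platform_device.h>
#include <linux/ioport.h>

/* Hypothetical probe: retrieve the IORESOURCE_MEM window the board
 * file above supplies, then map it and register the NAND chip. */
static int ams_delta_nand_probe_sketch(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENXIO;	/* board code did not supply the window */
	/* ioremap(res->start, resource_size(res)) and set up the chip */
	return 0;
}

static struct platform_driver ams_delta_nand_driver_sketch = {
	.probe	= ams_delta_nand_probe_sketch,
	.driver	= {
		.name	= "ams-delta-nand",	/* must match the device above */
	},
};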
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 74b62f10d07f..4d6dd4c39b75 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -13,6 +13,14 @@
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 
+/*
+ * Maxium size for a single dma descriptor
+ * Size is limited to 16 bits.
+ * Size is in the units of addr-widths (1,2,4,8 bytes)
+ * Larger transfers will be split up to multiple linked desc
+ */
+#define STEDMA40_MAX_SEG_SIZE 0xFFFF
+
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define	STEDMA40_DEV_SRC_MEMORY (-1)
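Given the cap above, a transfer larger than 0xFFFF address-width units is split into multiple linked descriptors; the count works out to ceil(bytes / (0xFFFF * addr_width)). As a sketch — d40_seg_count_sketch is illustrative, not a driver symbol:

#include <linux/kernel.h>

/* Illustrative only: number of linked descriptors a transfer needs
 * under STEDMA40_MAX_SEG_SIZE; data_width is the address width in
 * bytes (1, 2, 4 or 8). A 1 MiB copy at 4-byte width needs 4. */
static unsigned int d40_seg_count_sketch(size_t bytes, unsigned int data_width)
{
	size_t seg_max = (size_t)STEDMA40_MAX_SEG_SIZE * data_width;

	return DIV_ROUND_UP(bytes, seg_max);
}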
diff --git a/arch/arm/plat-omap/include/plat/onenand.h b/arch/arm/plat-omap/include/plat/onenand.h
index 72f433d7d827..affe87e9ece7 100644
--- a/arch/arm/plat-omap/include/plat/onenand.h
+++ b/arch/arm/plat-omap/include/plat/onenand.h
@@ -23,6 +23,7 @@ struct omap_onenand_platform_data {
 	int			(*onenand_setup)(void __iomem *, int freq);
 	int			dma_channel;
 	u8			flags;
+	u8			regulator_can_sleep;
 };
 
 #define ONENAND_MAX_PARTITIONS 8
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index c881393f07fd..bceaa5543e39 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -47,9 +47,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	memblock_add(base, size);
 }
 
-u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return memblock_alloc(size, align);
+	return __va(memblock_alloc(size, align));
 }
 
 #ifdef CONFIG_EARLY_PRINTK
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 9dbe58368953..a19811e98a41 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -45,11 +45,9 @@ void __init free_mem_mach(unsigned long addr, unsigned long size)
 	return free_bootmem(addr, size);
 }
 
-u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return virt_to_phys(
-		__alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS))
-	);
+	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9e3132db718b..7185f0da7dc3 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -519,9 +519,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
519 memblock_add(base, size); 519 memblock_add(base, size);
520} 520}
521 521
522u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 522void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
523{ 523{
524 return memblock_alloc(size, align); 524 return __va(memblock_alloc(size, align));
525} 525}
526 526
527#ifdef CONFIG_BLK_DEV_INITRD 527#ifdef CONFIG_BLK_DEV_INITRD
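
All three prom.c hunks make the same change: early_init_dt_alloc_memory_arch() now hands back a kernel virtual pointer rather than a physical address, so callers no longer convert it themselves. A sketch of the caller's side under the new contract (the wrapper name is an assumption):

#include <linux/string.h>

/* Hypothetical caller: the returned pointer is directly usable. */
static void *early_alloc_zeroed(u64 size, u64 align)
{
	void *mem = early_init_dt_alloc_memory_arch(size, align);

	if (mem)
		memset(mem, 0, size);
	return mem;
}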
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef138731c0ea..1c28816152fa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,11 +200,16 @@ config PL330_DMA
200 platform_data for a dma-pl330 device. 200 platform_data for a dma-pl330 device.
201 201
202config PCH_DMA 202config PCH_DMA
203 tristate "Topcliff (Intel EG20T) PCH DMA support" 203 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
204 depends on PCI && X86 204 depends on PCI && X86
205 select DMA_ENGINE 205 select DMA_ENGINE
206 help 206 help
207 Enable support for the Topcliff (Intel EG20T) PCH DMA engine. 207 Enable support for Intel EG20T PCH DMA engine.
208
 209 This driver can also be used for the OKI SEMICONDUCTOR ML7213 IOH
 210 (Input/Output Hub), which is intended for IVI (In-Vehicle Infotainment)
 211 use. The ML7213 is a companion chip for the Intel Atom E6xx series and
 212 is fully compatible with the Intel EG20T PCH.
208 213
209config IMX_SDMA 214config IMX_SDMA
210 tristate "i.MX SDMA support" 215 tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..297f48b0cba9 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
19 * this program; if not, write to the Free Software Foundation, Inc., 59 19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * 21 *
22 * The full GNU General Public License is iin this distribution in the 22 * The full GNU General Public License is in this distribution in the file
23 * file called COPYING. 23 * called COPYING.
24 * 24 *
25 * Documentation: ARM DDI 0196G == PL080 25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081 26 * Documentation: ARM DDI 0218E == PL081
27 * 27 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to 28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
29 * any channel. 29 * channel.
30 * 30 *
31 * The PL080 has 8 channels available for simultaneous use, and the PL081 31 * The PL080 has 8 channels available for simultaneous use, and the PL081
32 * has only two channels. So on these DMA controllers the number of channels 32 * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
53 * 53 *
54 * ASSUMES default (little) endianness for DMA transfers 54 * ASSUMES default (little) endianness for DMA transfers
55 * 55 *
56 * Only DMAC flow control is implemented 56 * The PL08x has two flow control settings:
57 * - DMAC flow control: the transfer size defines the number of transfers
58 * which occur for the current LLI entry, and the DMAC raises TC at the
59 * end of every LLI entry. Observed behaviour shows the DMAC listening
 60 * to both the BREQ and SREQ signals (contrary to the documentation),
61 * transferring data if either is active. The LBREQ and LSREQ signals
62 * are ignored.
63 *
64 * - Peripheral flow control: the transfer size is ignored (and should be
65 * zero). The data is transferred from the current LLI entry, until
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry.
68 *
69 * Only the former works sanely with scatter lists, so we only implement
70 * the DMAC flow control method. However, peripherals which use the LBREQ
 71 * and LSREQ signals (eg, MMCI) are unable to use this mode; this
 72 * hardware restriction prevents them from using scatter DMA.
57 * 73 *
58 * Global TODO: 74 * Global TODO:
59 * - Break out common code from arch/arm/mach-s3c64xx and share 75 * - Break out common code from arch/arm/mach-s3c64xx and share
@@ -61,50 +77,39 @@
61#include <linux/device.h> 77#include <linux/device.h>
62#include <linux/init.h> 78#include <linux/init.h>
63#include <linux/module.h> 79#include <linux/module.h>
64#include <linux/pci.h>
65#include <linux/interrupt.h> 80#include <linux/interrupt.h>
66#include <linux/slab.h> 81#include <linux/slab.h>
67#include <linux/dmapool.h> 82#include <linux/dmapool.h>
68#include <linux/amba/bus.h>
69#include <linux/dmaengine.h> 83#include <linux/dmaengine.h>
84#include <linux/amba/bus.h>
70#include <linux/amba/pl08x.h> 85#include <linux/amba/pl08x.h>
71#include <linux/debugfs.h> 86#include <linux/debugfs.h>
72#include <linux/seq_file.h> 87#include <linux/seq_file.h>
73 88
74#include <asm/hardware/pl080.h> 89#include <asm/hardware/pl080.h>
75#include <asm/dma.h>
76#include <asm/mach/dma.h>
77#include <asm/atomic.h>
78#include <asm/processor.h>
79#include <asm/cacheflush.h>
80 90
81#define DRIVER_NAME "pl08xdmac" 91#define DRIVER_NAME "pl08xdmac"
82 92
83/** 93/**
84 * struct vendor_data - vendor-specific config parameters 94 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
85 * for PL08x derivates
86 * @name: the name of this specific variant
87 * @channels: the number of channels available in this variant 95 * @channels: the number of channels available in this variant
88 * @dualmaster: whether this version supports dual AHB masters 96 * @dualmaster: whether this version supports dual AHB masters or not.
89 * or not.
90 */ 97 */
91struct vendor_data { 98struct vendor_data {
92 char *name;
93 u8 channels; 99 u8 channels;
94 bool dualmaster; 100 bool dualmaster;
95}; 101};
96 102
97/* 103/*
98 * PL08X private data structures 104 * PL08X private data structures
99 * An LLI struct - see pl08x TRM 105 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
100 * Note that next uses bit[0] as a bus bit, 106 * start & end do not - their bus bit info is in cctl. Also note that these
101 * start & end do not - their bus bit info 107 * are fixed 32-bit quantities.
102 * is in cctl
103 */ 108 */
104struct lli { 109struct pl08x_lli {
105 dma_addr_t src; 110 u32 src;
106 dma_addr_t dst; 111 u32 dst;
107 dma_addr_t next; 112 u32 lli;
108 u32 cctl; 113 u32 cctl;
109}; 114};
110 115
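
The renamed pl08x_lli's lli word carries both the bus address of the next descriptor and, in bit[0], the AHB master used to fetch it, which is why the driver later ORs in PL080_LLI_LM_AHB2. A minimal sketch of chaining consecutive array entries (the helper name is invented here):

#include <linux/types.h>

static void lli_chain(struct pl08x_lli *llis_va, dma_addr_t llis_bus,
		      int n, bool fetch_on_ahb2)
{
	/* Next-pointer = bus address of entry n+1 in the same array */
	llis_va[n].lli = llis_bus + (n + 1) * sizeof(struct pl08x_lli);
	if (fetch_on_ahb2)
		llis_va[n].lli |= PL080_LLI_LM_AHB2;	/* bit[0] selects the bus */
}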
@@ -119,6 +124,8 @@ struct lli {
119 * @phy_chans: array of data for the physical channels 124 * @phy_chans: array of data for the physical channels
120 * @pool: a pool for the LLI descriptors 125 * @pool: a pool for the LLI descriptors
121 * @pool_ctr: counter of LLIs in the pool 126 * @pool_ctr: counter of LLIs in the pool
 127 * @lli_buses: bitmask to OR into the LLI pointer, selecting the AHB port for LLI fetches
128 * @mem_buses: set to indicate memory transfers on AHB2.
122 * @lock: a spinlock for this struct 129 * @lock: a spinlock for this struct
123 */ 130 */
124struct pl08x_driver_data { 131struct pl08x_driver_data {
@@ -126,11 +133,13 @@ struct pl08x_driver_data {
126 struct dma_device memcpy; 133 struct dma_device memcpy;
127 void __iomem *base; 134 void __iomem *base;
128 struct amba_device *adev; 135 struct amba_device *adev;
129 struct vendor_data *vd; 136 const struct vendor_data *vd;
130 struct pl08x_platform_data *pd; 137 struct pl08x_platform_data *pd;
131 struct pl08x_phy_chan *phy_chans; 138 struct pl08x_phy_chan *phy_chans;
132 struct dma_pool *pool; 139 struct dma_pool *pool;
133 int pool_ctr; 140 int pool_ctr;
141 u8 lli_buses;
142 u8 mem_buses;
134 spinlock_t lock; 143 spinlock_t lock;
135}; 144};
136 145
@@ -152,9 +161,9 @@ struct pl08x_driver_data {
152/* Size (bytes) of each LLI buffer allocated for one transfer */ 161/* Size (bytes) of each LLI buffer allocated for one transfer */
153# define PL08X_LLI_TSFR_SIZE 0x2000 162# define PL08X_LLI_TSFR_SIZE 0x2000
154 163
155/* Maximimum times we call dma_pool_alloc on this pool without freeing */ 164/* Maximum times we call dma_pool_alloc on this pool without freeing */
156#define PL08X_MAX_ALLOCS 0x40 165#define PL08X_MAX_ALLOCS 0x40
157#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli)) 166#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
158#define PL08X_ALIGN 8 167#define PL08X_ALIGN 8
159 168
160static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 169static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +171,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
162 return container_of(chan, struct pl08x_dma_chan, chan); 171 return container_of(chan, struct pl08x_dma_chan, chan);
163} 172}
164 173
174static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
175{
176 return container_of(tx, struct pl08x_txd, tx);
177}
178
165/* 179/*
166 * Physical channel handling 180 * Physical channel handling
167 */ 181 */
@@ -177,88 +191,47 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
177 191
178/* 192/*
179 * Set the initial DMA register values i.e. those for the first LLI 193 * Set the initial DMA register values i.e. those for the first LLI
180 * The next lli pointer and the configuration interrupt bit have 194 * The next LLI pointer and the configuration interrupt bit have
181 * been set when the LLIs were constructed 195 * been set when the LLIs were constructed. Poke them into the hardware
196 * and start the transfer.
182 */ 197 */
183static void pl08x_set_cregs(struct pl08x_driver_data *pl08x, 198static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
184 struct pl08x_phy_chan *ch) 199 struct pl08x_txd *txd)
185{
186 /* Wait for channel inactive */
187 while (pl08x_phy_channel_busy(ch))
188 ;
189
190 dev_vdbg(&pl08x->adev->dev,
191 "WRITE channel %d: csrc=%08x, cdst=%08x, "
192 "cctl=%08x, clli=%08x, ccfg=%08x\n",
193 ch->id,
194 ch->csrc,
195 ch->cdst,
196 ch->cctl,
197 ch->clli,
198 ch->ccfg);
199
200 writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
201 writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
202 writel(ch->clli, ch->base + PL080_CH_LLI);
203 writel(ch->cctl, ch->base + PL080_CH_CONTROL);
204 writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
205}
206
207static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
208{ 200{
209 struct pl08x_channel_data *cd = plchan->cd; 201 struct pl08x_driver_data *pl08x = plchan->host;
210 struct pl08x_phy_chan *phychan = plchan->phychan; 202 struct pl08x_phy_chan *phychan = plchan->phychan;
211 struct pl08x_txd *txd = plchan->at; 203 struct pl08x_lli *lli = &txd->llis_va[0];
212
213 /* Copy the basic control register calculated at transfer config */
214 phychan->csrc = txd->csrc;
215 phychan->cdst = txd->cdst;
216 phychan->clli = txd->clli;
217 phychan->cctl = txd->cctl;
218
219 /* Assign the signal to the proper control registers */
220 phychan->ccfg = cd->ccfg;
221 phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
222 phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
223 /* If it wasn't set from AMBA, ignore it */
224 if (txd->direction == DMA_TO_DEVICE)
225 /* Select signal as destination */
226 phychan->ccfg |=
227 (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
228 else if (txd->direction == DMA_FROM_DEVICE)
229 /* Select signal as source */
230 phychan->ccfg |=
231 (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
232 /* Always enable error interrupts */
233 phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
234 /* Always enable terminal interrupts */
235 phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
236}
237
238/*
239 * Enable the DMA channel
240 * Assumes all other configuration bits have been set
241 * as desired before this code is called
242 */
243static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
244 struct pl08x_phy_chan *ch)
245{
246 u32 val; 204 u32 val;
247 205
248 /* 206 plchan->at = txd;
249 * Do not access config register until channel shows as disabled
250 */
251 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
252 ;
253 207
254 /* 208 /* Wait for channel inactive */
255 * Do not access config register until channel shows as inactive 209 while (pl08x_phy_channel_busy(phychan))
256 */ 210 cpu_relax();
257 val = readl(ch->base + PL080_CH_CONFIG); 211
212 dev_vdbg(&pl08x->adev->dev,
213 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
214 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
215 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
216 txd->ccfg);
217
218 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
219 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
220 writel(lli->lli, phychan->base + PL080_CH_LLI);
221 writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
222 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
223
224 /* Enable the DMA channel */
225 /* Do not access config register until channel shows as disabled */
226 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
227 cpu_relax();
228
229 /* Do not access config register until channel shows as inactive */
230 val = readl(phychan->base + PL080_CH_CONFIG);
258 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 231 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
259 val = readl(ch->base + PL080_CH_CONFIG); 232 val = readl(phychan->base + PL080_CH_CONFIG);
260 233
261 writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG); 234 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
262} 235}
263 236
264/* 237/*
@@ -266,10 +239,8 @@ static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
266 * 239 *
267 * Disabling individual channels could lose data. 240 * Disabling individual channels could lose data.
268 * 241 *
269 * Disable the peripheral DMA after disabling the DMAC 242 * Disable the peripheral DMA after disabling the DMAC in order to allow
270 * in order to allow the DMAC FIFO to drain, and 243 * the DMAC FIFO to drain, and hence allow the channel to show inactive
271 * hence allow the channel to show inactive
272 *
273 */ 244 */
274static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 245static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
275{ 246{
@@ -282,7 +253,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
282 253
283 /* Wait for channel inactive */ 254 /* Wait for channel inactive */
284 while (pl08x_phy_channel_busy(ch)) 255 while (pl08x_phy_channel_busy(ch))
285 ; 256 cpu_relax();
286} 257}
287 258
288static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 259static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -333,54 +304,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
333static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 304static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
334{ 305{
335 struct pl08x_phy_chan *ch; 306 struct pl08x_phy_chan *ch;
336 struct pl08x_txd *txdi = NULL;
337 struct pl08x_txd *txd; 307 struct pl08x_txd *txd;
338 unsigned long flags; 308 unsigned long flags;
339 u32 bytes = 0; 309 size_t bytes = 0;
340 310
341 spin_lock_irqsave(&plchan->lock, flags); 311 spin_lock_irqsave(&plchan->lock, flags);
342
343 ch = plchan->phychan; 312 ch = plchan->phychan;
344 txd = plchan->at; 313 txd = plchan->at;
345 314
346 /* 315 /*
347 * Next follow the LLIs to get the number of pending bytes in the 316 * Follow the LLIs to get the number of remaining
348 * currently active transaction. 317 * bytes in the currently active transaction.
349 */ 318 */
350 if (ch && txd) { 319 if (ch && txd) {
351 struct lli *llis_va = txd->llis_va; 320 u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
352 struct lli *llis_bus = (struct lli *) txd->llis_bus;
353 u32 clli = readl(ch->base + PL080_CH_LLI);
354 321
355 /* First get the bytes in the current active LLI */ 322 /* First get the remaining bytes in the active transfer */
356 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 323 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
357 324
358 if (clli) { 325 if (clli) {
359 int i = 0; 326 struct pl08x_lli *llis_va = txd->llis_va;
327 dma_addr_t llis_bus = txd->llis_bus;
328 int index;
329
330 BUG_ON(clli < llis_bus || clli >= llis_bus +
331 sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
332
333 /*
334 * Locate the next LLI - as this is an array,
335 * it's simple maths to find.
336 */
337 index = (clli - llis_bus) / sizeof(struct pl08x_lli);
360 338
361 /* Forward to the LLI pointed to by clli */ 339 for (; index < MAX_NUM_TSFR_LLIS; index++) {
362 while ((clli != (u32) &(llis_bus[i])) && 340 bytes += get_bytes_in_cctl(llis_va[index].cctl);
363 (i < MAX_NUM_TSFR_LLIS))
364 i++;
365 341
366 while (clli) {
367 bytes += get_bytes_in_cctl(llis_va[i].cctl);
368 /* 342 /*
369 * A clli of 0x00000000 will terminate the 343 * A LLI pointer of 0 terminates the LLI list
370 * LLI list
371 */ 344 */
372 clli = llis_va[i].next; 345 if (!llis_va[index].lli)
373 i++; 346 break;
374 } 347 }
375 } 348 }
376 } 349 }
377 350
378 /* Sum up all queued transactions */ 351 /* Sum up all queued transactions */
379 if (!list_empty(&plchan->desc_list)) { 352 if (!list_empty(&plchan->pend_list)) {
380 list_for_each_entry(txdi, &plchan->desc_list, node) { 353 struct pl08x_txd *txdi;
354 list_for_each_entry(txdi, &plchan->pend_list, node) {
381 bytes += txdi->len; 355 bytes += txdi->len;
382 } 356 }
383
384 } 357 }
385 358
386 spin_unlock_irqrestore(&plchan->lock, flags); 359 spin_unlock_irqrestore(&plchan->lock, flags);
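
The rewritten byte-count loop no longer walks pointers to find the in-flight entry: because the LLIs form one contiguous array at bus address llis_bus, the CH_LLI register value converts to an index directly. A standalone sketch of that arithmetic (helper name invented):

static int lli_index(u32 clli, dma_addr_t llis_bus)
{
	/* Strip the AHB master select bit before the pointer maths */
	clli &= ~PL080_LLI_LM_AHB2;

	return (clli - llis_bus) / sizeof(struct pl08x_lli);
}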
@@ -390,6 +363,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
390 363
391/* 364/*
392 * Allocate a physical channel for a virtual channel 365 * Allocate a physical channel for a virtual channel
366 *
367 * Try to locate a physical channel to be used for this transfer. If all
368 * are taken return NULL and the requester will have to cope by using
369 * some fallback PIO mode or retrying later.
393 */ 370 */
394static struct pl08x_phy_chan * 371static struct pl08x_phy_chan *
395pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 372pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +376,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
399 unsigned long flags; 376 unsigned long flags;
400 int i; 377 int i;
401 378
402 /*
403 * Try to locate a physical channel to be used for
404 * this transfer. If all are taken return NULL and
405 * the requester will have to cope by using some fallback
406 * PIO mode or retrying later.
407 */
408 for (i = 0; i < pl08x->vd->channels; i++) { 379 for (i = 0; i < pl08x->vd->channels; i++) {
409 ch = &pl08x->phy_chans[i]; 380 ch = &pl08x->phy_chans[i];
410 381
@@ -465,11 +436,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
465} 436}
466 437
467static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 438static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
468 u32 tsize) 439 size_t tsize)
469{ 440{
470 u32 retbits = cctl; 441 u32 retbits = cctl;
471 442
472 /* Remove all src, dst and transfersize bits */ 443 /* Remove all src, dst and transfer size bits */
473 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 444 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
474 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 445 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
475 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 446 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +480,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
509 return retbits; 480 return retbits;
510} 481}
511 482
483struct pl08x_lli_build_data {
484 struct pl08x_txd *txd;
485 struct pl08x_driver_data *pl08x;
486 struct pl08x_bus_data srcbus;
487 struct pl08x_bus_data dstbus;
488 size_t remainder;
489};
490
512/* 491/*
513 * Autoselect a master bus to use for the transfer 492 * Autoselect a master bus to use for the transfer. This prefers the
514 * this prefers the destination bus if both available 493 * destination bus if both are available; if there is a fixed address
515 * if fixed address on one bus the other will be chosen 494 * on one bus, the other will be chosen.
516 */ 495 */
517void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus, 496static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
518 struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus, 497 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
519 struct pl08x_bus_data **sbus, u32 cctl)
520{ 498{
521 if (!(cctl & PL080_CONTROL_DST_INCR)) { 499 if (!(cctl & PL080_CONTROL_DST_INCR)) {
522 *mbus = src_bus; 500 *mbus = &bd->srcbus;
523 *sbus = dst_bus; 501 *sbus = &bd->dstbus;
524 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 502 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
525 *mbus = dst_bus; 503 *mbus = &bd->dstbus;
526 *sbus = src_bus; 504 *sbus = &bd->srcbus;
527 } else { 505 } else {
528 if (dst_bus->buswidth == 4) { 506 if (bd->dstbus.buswidth == 4) {
529 *mbus = dst_bus; 507 *mbus = &bd->dstbus;
530 *sbus = src_bus; 508 *sbus = &bd->srcbus;
531 } else if (src_bus->buswidth == 4) { 509 } else if (bd->srcbus.buswidth == 4) {
532 *mbus = src_bus; 510 *mbus = &bd->srcbus;
533 *sbus = dst_bus; 511 *sbus = &bd->dstbus;
534 } else if (dst_bus->buswidth == 2) { 512 } else if (bd->dstbus.buswidth == 2) {
535 *mbus = dst_bus; 513 *mbus = &bd->dstbus;
536 *sbus = src_bus; 514 *sbus = &bd->srcbus;
537 } else if (src_bus->buswidth == 2) { 515 } else if (bd->srcbus.buswidth == 2) {
538 *mbus = src_bus; 516 *mbus = &bd->srcbus;
539 *sbus = dst_bus; 517 *sbus = &bd->dstbus;
540 } else { 518 } else {
541 /* src_bus->buswidth == 1 */ 519 /* bd->srcbus.buswidth == 1 */
542 *mbus = dst_bus; 520 *mbus = &bd->dstbus;
543 *sbus = src_bus; 521 *sbus = &bd->srcbus;
544 } 522 }
545 } 523 }
546} 524}
547 525
548/* 526/*
549 * Fills in one LLI for a certain transfer descriptor 527 * Fills in one LLI for a certain transfer descriptor and advance the counter
550 * and advance the counter
551 */ 528 */
552int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 529static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
553 struct pl08x_txd *txd, int num_llis, int len, 530 int num_llis, int len, u32 cctl)
554 u32 cctl, u32 *remainder)
555{ 531{
556 struct lli *llis_va = txd->llis_va; 532 struct pl08x_lli *llis_va = bd->txd->llis_va;
557 struct lli *llis_bus = (struct lli *) txd->llis_bus; 533 dma_addr_t llis_bus = bd->txd->llis_bus;
558 534
559 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 535 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
560 536
561 llis_va[num_llis].cctl = cctl; 537 llis_va[num_llis].cctl = cctl;
562 llis_va[num_llis].src = txd->srcbus.addr; 538 llis_va[num_llis].src = bd->srcbus.addr;
563 llis_va[num_llis].dst = txd->dstbus.addr; 539 llis_va[num_llis].dst = bd->dstbus.addr;
564 540 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
565 /* 541 if (bd->pl08x->lli_buses & PL08X_AHB2)
566 * On versions with dual masters, you can optionally AND on 542 llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
567 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
568 * in new LLIs with that controller, but we always try to
569 * choose AHB1 to point into memory. The idea is to have AHB2
570 * fixed on the peripheral and AHB1 messing around in the
571 * memory. So we don't manipulate this bit currently.
572 */
573
574 llis_va[num_llis].next =
575 (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
576 543
577 if (cctl & PL080_CONTROL_SRC_INCR) 544 if (cctl & PL080_CONTROL_SRC_INCR)
578 txd->srcbus.addr += len; 545 bd->srcbus.addr += len;
579 if (cctl & PL080_CONTROL_DST_INCR) 546 if (cctl & PL080_CONTROL_DST_INCR)
580 txd->dstbus.addr += len; 547 bd->dstbus.addr += len;
581 548
582 *remainder -= len; 549 BUG_ON(bd->remainder < len);
583 550
584 return num_llis + 1; 551 bd->remainder -= len;
585} 552}
586 553
587/* 554/*
588 * Return number of bytes to fill to boundary, or len 555 * Return number of bytes to fill to boundary, or len.
556 * This calculation works for any value of addr.
589 */ 557 */
590static inline u32 pl08x_pre_boundary(u32 addr, u32 len) 558static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
591{ 559{
592 u32 boundary; 560 size_t boundary_len = PL08X_BOUNDARY_SIZE -
593 561 (addr & (PL08X_BOUNDARY_SIZE - 1));
594 boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
595 << PL08X_BOUNDARY_SHIFT;
596 562
597 if (boundary < addr + len) 563 return min(boundary_len, len);
598 return boundary - addr;
599 else
600 return len;
601} 564}
602 565
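
A worked example of the new pl08x_pre_boundary() arithmetic, assuming a boundary size of 0x400 purely for illustration (the real value comes from PL08X_BOUNDARY_SIZE in the driver header):

/*
 * addr = 0x1000037c, len = 0x200, boundary size 0x400:
 *   addr & (0x400 - 1)           = 0x37c
 *   boundary_len = 0x400 - 0x37c = 0x84
 *   min(0x84, 0x200)             = 0x84  -> fill 0x84 bytes to the boundary
 *
 * addr = 0x10000400 (aligned), len = 0x200:
 *   boundary_len = 0x400 - 0     = 0x400
 *   min(0x400, 0x200)            = 0x200 -> the whole len fits ("or len")
 */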
603/* 566/*
@@ -608,20 +571,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
608static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 571static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
609 struct pl08x_txd *txd) 572 struct pl08x_txd *txd)
610{ 573{
611 struct pl08x_channel_data *cd = txd->cd;
612 struct pl08x_bus_data *mbus, *sbus; 574 struct pl08x_bus_data *mbus, *sbus;
613 u32 remainder; 575 struct pl08x_lli_build_data bd;
614 int num_llis = 0; 576 int num_llis = 0;
615 u32 cctl; 577 u32 cctl;
616 int max_bytes_per_lli; 578 size_t max_bytes_per_lli;
617 int total_bytes = 0; 579 size_t total_bytes = 0;
618 struct lli *llis_va; 580 struct pl08x_lli *llis_va;
619 struct lli *llis_bus;
620
621 if (!txd) {
622 dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
623 return 0;
624 }
625 581
626 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 582 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
627 &txd->llis_bus); 583 &txd->llis_bus);
@@ -632,121 +588,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
632 588
633 pl08x->pool_ctr++; 589 pl08x->pool_ctr++;
634 590
635 /* 591 /* Get the default CCTL */
636 * Initialize bus values for this transfer 592 cctl = txd->cctl;
637 * from the passed optimal values
638 */
639 if (!cd) {
640 dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
641 return 0;
642 }
643 593
644 /* Get the default CCTL from the platform data */ 594 bd.txd = txd;
645 cctl = cd->cctl; 595 bd.pl08x = pl08x;
646 596 bd.srcbus.addr = txd->src_addr;
647 /* 597 bd.dstbus.addr = txd->dst_addr;
648 * On the PL080 we have two bus masters and we
649 * should select one for source and one for
650 * destination. We try to use AHB2 for the
651 * bus which does not increment (typically the
652 * peripheral) else we just choose something.
653 */
654 cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
655 if (pl08x->vd->dualmaster) {
656 if (cctl & PL080_CONTROL_SRC_INCR)
657 /* Source increments, use AHB2 for destination */
658 cctl |= PL080_CONTROL_DST_AHB2;
659 else if (cctl & PL080_CONTROL_DST_INCR)
660 /* Destination increments, use AHB2 for source */
661 cctl |= PL080_CONTROL_SRC_AHB2;
662 else
663 /* Just pick something, source AHB1 dest AHB2 */
664 cctl |= PL080_CONTROL_DST_AHB2;
665 }
666 598
667 /* Find maximum width of the source bus */ 599 /* Find maximum width of the source bus */
668 txd->srcbus.maxwidth = 600 bd.srcbus.maxwidth =
669 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 601 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
670 PL080_CONTROL_SWIDTH_SHIFT); 602 PL080_CONTROL_SWIDTH_SHIFT);
671 603
672 /* Find maximum width of the destination bus */ 604 /* Find maximum width of the destination bus */
673 txd->dstbus.maxwidth = 605 bd.dstbus.maxwidth =
674 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 606 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
675 PL080_CONTROL_DWIDTH_SHIFT); 607 PL080_CONTROL_DWIDTH_SHIFT);
676 608
677 /* Set up the bus widths to the maximum */ 609 /* Set up the bus widths to the maximum */
678 txd->srcbus.buswidth = txd->srcbus.maxwidth; 610 bd.srcbus.buswidth = bd.srcbus.maxwidth;
679 txd->dstbus.buswidth = txd->dstbus.maxwidth; 611 bd.dstbus.buswidth = bd.dstbus.maxwidth;
680 dev_vdbg(&pl08x->adev->dev, 612 dev_vdbg(&pl08x->adev->dev,
681 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", 613 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
682 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); 614 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
683 615
684 616
685 /* 617 /*
686 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 618 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
687 */ 619 */
688 max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * 620 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
689 PL080_CONTROL_TRANSFER_SIZE_MASK; 621 PL080_CONTROL_TRANSFER_SIZE_MASK;
690 dev_vdbg(&pl08x->adev->dev, 622 dev_vdbg(&pl08x->adev->dev,
691 "%s max bytes per lli = %d\n", 623 "%s max bytes per lli = %zu\n",
692 __func__, max_bytes_per_lli); 624 __func__, max_bytes_per_lli);
693 625
694 /* We need to count this down to zero */ 626 /* We need to count this down to zero */
695 remainder = txd->len; 627 bd.remainder = txd->len;
696 dev_vdbg(&pl08x->adev->dev, 628 dev_vdbg(&pl08x->adev->dev,
697 "%s remainder = %d\n", 629 "%s remainder = %zu\n",
698 __func__, remainder); 630 __func__, bd.remainder);
699 631
700 /* 632 /*
701 * Choose bus to align to 633 * Choose bus to align to
702 * - prefers destination bus if both available 634 * - prefers destination bus if both available
703 * - if fixed address on one bus chooses other 635 * - if fixed address on one bus chooses other
704 * - modifies cctl to choose an apropriate master
705 */
706 pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
707 &mbus, &sbus, cctl);
708
709
710 /*
711 * The lowest bit of the LLI register
712 * is also used to indicate which master to
713 * use for reading the LLIs.
714 */ 636 */
637 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
715 638
716 if (txd->len < mbus->buswidth) { 639 if (txd->len < mbus->buswidth) {
717 /* 640 /* Less than a bus width available - send as single bytes */
718 * Less than a bus width available 641 while (bd.remainder) {
719 * - send as single bytes
720 */
721 while (remainder) {
722 dev_vdbg(&pl08x->adev->dev, 642 dev_vdbg(&pl08x->adev->dev,
723 "%s single byte LLIs for a transfer of " 643 "%s single byte LLIs for a transfer of "
724 "less than a bus width (remain %08x)\n", 644 "less than a bus width (remain 0x%08x)\n",
725 __func__, remainder); 645 __func__, bd.remainder);
726 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 646 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
727 num_llis = 647 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
728 pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
729 cctl, &remainder);
730 total_bytes++; 648 total_bytes++;
731 } 649 }
732 } else { 650 } else {
733 /* 651 /* Make one byte LLIs until master bus is aligned */
734 * Make one byte LLIs until master bus is aligned
735 * - slave will then be aligned also
736 */
737 while ((mbus->addr) % (mbus->buswidth)) { 652 while ((mbus->addr) % (mbus->buswidth)) {
738 dev_vdbg(&pl08x->adev->dev, 653 dev_vdbg(&pl08x->adev->dev,
739 "%s adjustment lli for less than bus width " 654 "%s adjustment lli for less than bus width "
740 "(remain %08x)\n", 655 "(remain 0x%08x)\n",
741 __func__, remainder); 656 __func__, bd.remainder);
742 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 657 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
743 num_llis = pl08x_fill_lli_for_desc 658 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
744 (pl08x, txd, num_llis, 1, cctl, &remainder);
745 total_bytes++; 659 total_bytes++;
746 } 660 }
747 661
748 /* 662 /*
749 * Master now aligned 663 * Master now aligned
750 * - if slave is not then we must set its width down 664 * - if slave is not then we must set its width down
751 */ 665 */
752 if (sbus->addr % sbus->buswidth) { 666 if (sbus->addr % sbus->buswidth) {
@@ -761,63 +675,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
761 * Make largest possible LLIs until less than one bus 675 * Make largest possible LLIs until less than one bus
762 * width left 676 * width left
763 */ 677 */
764 while (remainder > (mbus->buswidth - 1)) { 678 while (bd.remainder > (mbus->buswidth - 1)) {
765 int lli_len, target_len; 679 size_t lli_len, target_len, tsize, odd_bytes;
766 int tsize;
767 int odd_bytes;
768 680
769 /* 681 /*
770 * If enough left try to send max possible, 682 * If enough left try to send max possible,
771 * otherwise try to send the remainder 683 * otherwise try to send the remainder
772 */ 684 */
773 target_len = remainder; 685 target_len = min(bd.remainder, max_bytes_per_lli);
774 if (remainder > max_bytes_per_lli)
775 target_len = max_bytes_per_lli;
776 686
777 /* 687 /*
778 * Set bus lengths for incrementing busses 688 * Set bus lengths for incrementing buses to the
779 * to number of bytes which fill to next memory 689 * number of bytes which fill to next memory boundary,
780 * boundary 690 * limiting on the target length calculated above.
781 */ 691 */
782 if (cctl & PL080_CONTROL_SRC_INCR) 692 if (cctl & PL080_CONTROL_SRC_INCR)
783 txd->srcbus.fill_bytes = 693 bd.srcbus.fill_bytes =
784 pl08x_pre_boundary( 694 pl08x_pre_boundary(bd.srcbus.addr,
785 txd->srcbus.addr, 695 target_len);
786 remainder);
787 else 696 else
788 txd->srcbus.fill_bytes = 697 bd.srcbus.fill_bytes = target_len;
789 max_bytes_per_lli;
790 698
791 if (cctl & PL080_CONTROL_DST_INCR) 699 if (cctl & PL080_CONTROL_DST_INCR)
792 txd->dstbus.fill_bytes = 700 bd.dstbus.fill_bytes =
793 pl08x_pre_boundary( 701 pl08x_pre_boundary(bd.dstbus.addr,
794 txd->dstbus.addr, 702 target_len);
795 remainder);
796 else 703 else
797 txd->dstbus.fill_bytes = 704 bd.dstbus.fill_bytes = target_len;
798 max_bytes_per_lli;
799 705
800 /* 706 /* Find the nearest */
801 * Find the nearest 707 lli_len = min(bd.srcbus.fill_bytes,
802 */ 708 bd.dstbus.fill_bytes);
803 lli_len = min(txd->srcbus.fill_bytes,
804 txd->dstbus.fill_bytes);
805 709
806 BUG_ON(lli_len > remainder); 710 BUG_ON(lli_len > bd.remainder);
807 711
808 if (lli_len <= 0) { 712 if (lli_len <= 0) {
809 dev_err(&pl08x->adev->dev, 713 dev_err(&pl08x->adev->dev,
810 "%s lli_len is %d, <= 0\n", 714 "%s lli_len is %zu, <= 0\n",
811 __func__, lli_len); 715 __func__, lli_len);
812 return 0; 716 return 0;
813 } 717 }
814 718
815 if (lli_len == target_len) { 719 if (lli_len == target_len) {
816 /* 720 /*
817 * Can send what we wanted 721 * Can send what we wanted.
818 */ 722 * Maintain alignment
819 /*
820 * Maintain alignment
821 */ 723 */
822 lli_len = (lli_len/mbus->buswidth) * 724 lli_len = (lli_len/mbus->buswidth) *
823 mbus->buswidth; 725 mbus->buswidth;
@@ -825,17 +727,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
825 } else { 727 } else {
826 /* 728 /*
827 * So now we know how many bytes to transfer 729 * So now we know how many bytes to transfer
828 * to get to the nearest boundary 730 * to get to the nearest boundary. The next
829 * The next lli will past the boundary 731 * LLI will pass the boundary. However, we
830 * - however we may be working to a boundary 732 * may be working to a boundary on the slave
831 * on the slave bus 733 * bus. We need to ensure the master stays
832 * We need to ensure the master stays aligned 734 * aligned, and that we are working in
735 * multiples of the bus widths.
833 */ 736 */
834 odd_bytes = lli_len % mbus->buswidth; 737 odd_bytes = lli_len % mbus->buswidth;
835 /*
836 * - and that we are working in multiples
837 * of the bus widths
838 */
839 lli_len -= odd_bytes; 738 lli_len -= odd_bytes;
840 739
841 } 740 }
@@ -855,41 +754,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
855 754
856 if (target_len != lli_len) { 755 if (target_len != lli_len) {
857 dev_vdbg(&pl08x->adev->dev, 756 dev_vdbg(&pl08x->adev->dev,
858 "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n", 757 "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
859 __func__, target_len, lli_len, txd->len); 758 __func__, target_len, lli_len, txd->len);
860 } 759 }
861 760
862 cctl = pl08x_cctl_bits(cctl, 761 cctl = pl08x_cctl_bits(cctl,
863 txd->srcbus.buswidth, 762 bd.srcbus.buswidth,
864 txd->dstbus.buswidth, 763 bd.dstbus.buswidth,
865 tsize); 764 tsize);
866 765
867 dev_vdbg(&pl08x->adev->dev, 766 dev_vdbg(&pl08x->adev->dev,
868 "%s fill lli with single lli chunk of size %08x (remainder %08x)\n", 767 "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
869 __func__, lli_len, remainder); 768 __func__, lli_len, bd.remainder);
870 num_llis = pl08x_fill_lli_for_desc(pl08x, txd, 769 pl08x_fill_lli_for_desc(&bd, num_llis++,
871 num_llis, lli_len, cctl, 770 lli_len, cctl);
872 &remainder);
873 total_bytes += lli_len; 771 total_bytes += lli_len;
874 } 772 }
875 773
876 774
877 if (odd_bytes) { 775 if (odd_bytes) {
878 /* 776 /*
879 * Creep past the boundary, 777 * Creep past the boundary, maintaining
880 * maintaining master alignment 778 * master alignment
881 */ 779 */
882 int j; 780 int j;
883 for (j = 0; (j < mbus->buswidth) 781 for (j = 0; (j < mbus->buswidth)
884 && (remainder); j++) { 782 && (bd.remainder); j++) {
885 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 783 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
886 dev_vdbg(&pl08x->adev->dev, 784 dev_vdbg(&pl08x->adev->dev,
887 "%s align with boundardy, single byte (remain %08x)\n", 785 "%s align with boundary, single byte (remain 0x%08zx)\n",
888 __func__, remainder); 786 __func__, bd.remainder);
889 num_llis = 787 pl08x_fill_lli_for_desc(&bd,
890 pl08x_fill_lli_for_desc(pl08x, 788 num_llis++, 1, cctl);
891 txd, num_llis, 1,
892 cctl, &remainder);
893 total_bytes++; 789 total_bytes++;
894 } 790 }
895 } 791 }
@@ -898,25 +794,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
898 /* 794 /*
899 * Send any odd bytes 795 * Send any odd bytes
900 */ 796 */
901 if (remainder < 0) { 797 while (bd.remainder) {
902 dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
903 __func__, remainder);
904 return 0;
905 }
906
907 while (remainder) {
908 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 798 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
909 dev_vdbg(&pl08x->adev->dev, 799 dev_vdbg(&pl08x->adev->dev,
910 "%s align with boundardy, single odd byte (remain %d)\n", 800 "%s align with boundary, single odd byte (remain %zu)\n",
911 __func__, remainder); 801 __func__, bd.remainder);
912 num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 802 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
913 1, cctl, &remainder);
914 total_bytes++; 803 total_bytes++;
915 } 804 }
916 } 805 }
917 if (total_bytes != txd->len) { 806 if (total_bytes != txd->len) {
918 dev_err(&pl08x->adev->dev, 807 dev_err(&pl08x->adev->dev,
919 "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n", 808 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
920 __func__, total_bytes, txd->len); 809 __func__, total_bytes, txd->len);
921 return 0; 810 return 0;
922 } 811 }
@@ -927,41 +816,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
927 __func__, (u32) MAX_NUM_TSFR_LLIS); 816 __func__, (u32) MAX_NUM_TSFR_LLIS);
928 return 0; 817 return 0;
929 } 818 }
930 /*
931 * Decide whether this is a loop or a terminated transfer
932 */
933 llis_va = txd->llis_va;
934 llis_bus = (struct lli *) txd->llis_bus;
935 819
936 if (cd->circular_buffer) { 820 llis_va = txd->llis_va;
937 /* 821 /* The final LLI terminates the LLI. */
938 * Loop the circular buffer so that the next element 822 llis_va[num_llis - 1].lli = 0;
939 * points back to the beginning of the LLI. 823 /* The final LLI element shall also fire an interrupt. */
940 */ 824 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
941 llis_va[num_llis - 1].next =
942 (dma_addr_t)((unsigned int)&(llis_bus[0]));
943 } else {
944 /*
945 * On non-circular buffers, the final LLI terminates
946 * the LLI.
947 */
948 llis_va[num_llis - 1].next = 0;
949 /*
950 * The final LLI element shall also fire an interrupt
951 */
952 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
953 }
954
955 /* Now store the channel register values */
956 txd->csrc = llis_va[0].src;
957 txd->cdst = llis_va[0].dst;
958 if (num_llis > 1)
959 txd->clli = llis_va[0].next;
960 else
961 txd->clli = 0;
962
963 txd->cctl = llis_va[0].cctl;
964 /* ccfg will be set at physical channel allocation time */
965 825
966#ifdef VERBOSE_DEBUG 826#ifdef VERBOSE_DEBUG
967 { 827 {
@@ -969,13 +829,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
969 829
970 for (i = 0; i < num_llis; i++) { 830 for (i = 0; i < num_llis; i++) {
971 dev_vdbg(&pl08x->adev->dev, 831 dev_vdbg(&pl08x->adev->dev,
972 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", 832 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
973 i, 833 i,
974 &llis_va[i], 834 &llis_va[i],
975 llis_va[i].src, 835 llis_va[i].src,
976 llis_va[i].dst, 836 llis_va[i].dst,
977 llis_va[i].cctl, 837 llis_va[i].cctl,
978 llis_va[i].next 838 llis_va[i].lli
979 ); 839 );
980 } 840 }
981 } 841 }
@@ -988,14 +848,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
988static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 848static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
989 struct pl08x_txd *txd) 849 struct pl08x_txd *txd)
990{ 850{
991 if (!txd)
992 dev_err(&pl08x->adev->dev,
993 "%s no descriptor to free\n",
994 __func__);
995
996 /* Free the LLI */ 851 /* Free the LLI */
997 dma_pool_free(pl08x->pool, txd->llis_va, 852 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
998 txd->llis_bus);
999 853
1000 pl08x->pool_ctr--; 854 pl08x->pool_ctr--;
1001 855
@@ -1008,13 +862,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1008 struct pl08x_txd *txdi = NULL; 862 struct pl08x_txd *txdi = NULL;
1009 struct pl08x_txd *next; 863 struct pl08x_txd *next;
1010 864
1011 if (!list_empty(&plchan->desc_list)) { 865 if (!list_empty(&plchan->pend_list)) {
1012 list_for_each_entry_safe(txdi, 866 list_for_each_entry_safe(txdi,
1013 next, &plchan->desc_list, node) { 867 next, &plchan->pend_list, node) {
1014 list_del(&txdi->node); 868 list_del(&txdi->node);
1015 pl08x_free_txd(pl08x, txdi); 869 pl08x_free_txd(pl08x, txdi);
1016 } 870 }
1017
1018 } 871 }
1019} 872}
1020 873
@@ -1069,6 +922,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1069 return -EBUSY; 922 return -EBUSY;
1070 } 923 }
1071 ch->signal = ret; 924 ch->signal = ret;
925
926 /* Assign the flow control signal to this channel */
927 if (txd->direction == DMA_TO_DEVICE)
928 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
929 else if (txd->direction == DMA_FROM_DEVICE)
930 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1072 } 931 }
1073 932
1074 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", 933 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +935,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1076 ch->signal, 935 ch->signal,
1077 plchan->name); 936 plchan->name);
1078 937
938 plchan->phychan_hold++;
1079 plchan->phychan = ch; 939 plchan->phychan = ch;
1080 940
1081 return 0; 941 return 0;
1082} 942}
1083 943
944static void release_phy_channel(struct pl08x_dma_chan *plchan)
945{
946 struct pl08x_driver_data *pl08x = plchan->host;
947
948 if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
949 pl08x->pd->put_signal(plchan);
950 plchan->phychan->signal = -1;
951 }
952 pl08x_put_phy_channel(pl08x, plchan->phychan);
953 plchan->phychan = NULL;
954}
955
1084static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) 956static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
1085{ 957{
1086 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 958 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
959 struct pl08x_txd *txd = to_pl08x_txd(tx);
960 unsigned long flags;
1087 961
1088 atomic_inc(&plchan->last_issued); 962 spin_lock_irqsave(&plchan->lock, flags);
1089 tx->cookie = atomic_read(&plchan->last_issued); 963
1090 /* This unlock follows the lock in the prep() function */ 964 plchan->chan.cookie += 1;
1091 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); 965 if (plchan->chan.cookie < 0)
966 plchan->chan.cookie = 1;
967 tx->cookie = plchan->chan.cookie;
968
969 /* Put this onto the pending list */
970 list_add_tail(&txd->node, &plchan->pend_list);
971
972 /*
973 * If there was no physical channel available for this memcpy,
974 * stack the request up and indicate that the channel is waiting
975 * for a free physical channel.
976 */
977 if (!plchan->slave && !plchan->phychan) {
978 /* Do this memcpy whenever there is a channel ready */
979 plchan->state = PL08X_CHAN_WAITING;
980 plchan->waiting = txd;
981 } else {
982 plchan->phychan_hold--;
983 }
984
985 spin_unlock_irqrestore(&plchan->lock, flags);
1092 986
1093 return tx->cookie; 987 return tx->cookie;
1094} 988}
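
The submit path now derives cookies from plchan->chan.cookie instead of an atomic counter. dma_cookie_t is a signed int and values below 1 are reserved, hence the wrap back to 1. A standalone sketch (helper name invented; written to avoid the signed overflow the in-place increment relies on):

#include <linux/kernel.h>

static dma_cookie_t next_cookie(dma_cookie_t c)
{
	/* Cookies < 1 are reserved for errors / "not assigned" */
	return (c >= INT_MAX) ? 1 : c + 1;
}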
@@ -1102,10 +996,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1102} 996}
1103 997
1104/* 998/*
1105 * Code accessing dma_async_is_complete() in a tight loop 999 * Code accessing dma_async_is_complete() in a tight loop may give problems.
1106 * may give problems - could schedule where indicated. 1000 * If slaves are relying on interrupts to signal completion this function
1107 * If slaves are relying on interrupts to signal completion this 1001 * must not be called with interrupts disabled.
1108 * function must not be called with interrupts disabled
1109 */ 1002 */
1110static enum dma_status 1003static enum dma_status
1111pl08x_dma_tx_status(struct dma_chan *chan, 1004pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1011,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
1118 enum dma_status ret; 1011 enum dma_status ret;
1119 u32 bytesleft = 0; 1012 u32 bytesleft = 0;
1120 1013
1121 last_used = atomic_read(&plchan->last_issued); 1014 last_used = plchan->chan.cookie;
1122 last_complete = plchan->lc; 1015 last_complete = plchan->lc;
1123 1016
1124 ret = dma_async_is_complete(cookie, last_complete, last_used); 1017 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1021,9 @@ pl08x_dma_tx_status(struct dma_chan *chan,
1128 } 1021 }
1129 1022
1130 /* 1023 /*
1131 * schedule(); could be inserted here
1132 */
1133
1134 /*
1135 * This cookie not complete yet 1024 * This cookie not complete yet
1136 */ 1025 */
1137 last_used = atomic_read(&plchan->last_issued); 1026 last_used = plchan->chan.cookie;
1138 last_complete = plchan->lc; 1027 last_complete = plchan->lc;
1139 1028
1140 /* Get number of bytes left in the active transactions and queue */ 1029 /* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1088,35 @@ static const struct burst_table burst_sizes[] = {
1199 }, 1088 },
1200}; 1089};
1201 1090
1202static void dma_set_runtime_config(struct dma_chan *chan, 1091static int dma_set_runtime_config(struct dma_chan *chan,
1203 struct dma_slave_config *config) 1092 struct dma_slave_config *config)
1204{ 1093{
1205 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1094 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1206 struct pl08x_driver_data *pl08x = plchan->host; 1095 struct pl08x_driver_data *pl08x = plchan->host;
1207 struct pl08x_channel_data *cd = plchan->cd; 1096 struct pl08x_channel_data *cd = plchan->cd;
1208 enum dma_slave_buswidth addr_width; 1097 enum dma_slave_buswidth addr_width;
1098 dma_addr_t addr;
1209 u32 maxburst; 1099 u32 maxburst;
1210 u32 cctl = 0; 1100 u32 cctl = 0;
1211 /* Mask out all except src and dst channel */ 1101 int i;
1212 u32 ccfg = cd->ccfg & 0x000003DEU; 1102
1213 int i = 0; 1103 if (!plchan->slave)
1104 return -EINVAL;
1214 1105
1215 /* Transfer direction */ 1106 /* Transfer direction */
1216 plchan->runtime_direction = config->direction; 1107 plchan->runtime_direction = config->direction;
1217 if (config->direction == DMA_TO_DEVICE) { 1108 if (config->direction == DMA_TO_DEVICE) {
1218 plchan->runtime_addr = config->dst_addr; 1109 addr = config->dst_addr;
1219 cctl |= PL080_CONTROL_SRC_INCR;
1220 ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1221 addr_width = config->dst_addr_width; 1110 addr_width = config->dst_addr_width;
1222 maxburst = config->dst_maxburst; 1111 maxburst = config->dst_maxburst;
1223 } else if (config->direction == DMA_FROM_DEVICE) { 1112 } else if (config->direction == DMA_FROM_DEVICE) {
1224 plchan->runtime_addr = config->src_addr; 1113 addr = config->src_addr;
1225 cctl |= PL080_CONTROL_DST_INCR;
1226 ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1227 addr_width = config->src_addr_width; 1114 addr_width = config->src_addr_width;
1228 maxburst = config->src_maxburst; 1115 maxburst = config->src_maxburst;
1229 } else { 1116 } else {
1230 dev_err(&pl08x->adev->dev, 1117 dev_err(&pl08x->adev->dev,
1231 "bad runtime_config: alien transfer direction\n"); 1118 "bad runtime_config: alien transfer direction\n");
1232 return; 1119 return -EINVAL;
1233 } 1120 }
1234 1121
1235 switch (addr_width) { 1122 switch (addr_width) {
@@ -1248,42 +1135,40 @@ static void dma_set_runtime_config(struct dma_chan *chan,
1248 default: 1135 default:
1249 dev_err(&pl08x->adev->dev, 1136 dev_err(&pl08x->adev->dev,
1250 "bad runtime_config: alien address width\n"); 1137 "bad runtime_config: alien address width\n");
1251 return; 1138 return -EINVAL;
1252 } 1139 }
1253 1140
1254 /* 1141 /*
1255 * Now decide on a maxburst: 1142 * Now decide on a maxburst:
1256 * If this channel will only request single transfers, set 1143 * If this channel will only request single transfers, set this
1257 * this down to ONE element. 1144 * down to ONE element. Also select one element if no maxburst
1145 * is specified.
1258 */ 1146 */
1259 if (plchan->cd->single) { 1147 if (plchan->cd->single || maxburst == 0) {
1260 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1148 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
1261 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); 1149 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
1262 } else { 1150 } else {
1263 while (i < ARRAY_SIZE(burst_sizes)) { 1151 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1264 if (burst_sizes[i].burstwords <= maxburst) 1152 if (burst_sizes[i].burstwords <= maxburst)
1265 break; 1153 break;
1266 i++;
1267 }
1268 cctl |= burst_sizes[i].reg; 1154 cctl |= burst_sizes[i].reg;
1269 } 1155 }
1270 1156
1271 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1157 plchan->runtime_addr = addr;
1272 cctl &= ~PL080_CONTROL_PROT_MASK;
1273 cctl |= PL080_CONTROL_PROT_SYS;
1274 1158
1275 /* Modify the default channel data to fit PrimeCell request */ 1159 /* Modify the default channel data to fit PrimeCell request */
1276 cd->cctl = cctl; 1160 cd->cctl = cctl;
1277 cd->ccfg = ccfg;
1278 1161
1279 dev_dbg(&pl08x->adev->dev, 1162 dev_dbg(&pl08x->adev->dev,
1280 "configured channel %s (%s) for %s, data width %d, " 1163 "configured channel %s (%s) for %s, data width %d, "
1281 "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", 1164 "maxburst %d words, LE, CCTL=0x%08x\n",
1282 dma_chan_name(chan), plchan->name, 1165 dma_chan_name(chan), plchan->name,
1283 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 1166 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
1284 addr_width, 1167 addr_width,
1285 maxburst, 1168 maxburst,
1286 cctl, ccfg); 1169 cctl);
1170
1171 return 0;
1287} 1172}
1288 1173
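
For context, the configuration above arrives from the client side through the generic DMA_SLAVE_CONFIG control operation. A sketch of a peripheral driver handing in its parameters (the function name, FIFO address and burst values are placeholders):

static int configure_tx_channel(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}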
1289/* 1174/*
@@ -1293,35 +1178,26 @@ static void dma_set_runtime_config(struct dma_chan *chan,
1293static void pl08x_issue_pending(struct dma_chan *chan) 1178static void pl08x_issue_pending(struct dma_chan *chan)
1294{ 1179{
1295 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1180 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1296 struct pl08x_driver_data *pl08x = plchan->host;
1297 unsigned long flags; 1181 unsigned long flags;
1298 1182
1299 spin_lock_irqsave(&plchan->lock, flags); 1183 spin_lock_irqsave(&plchan->lock, flags);
1300 /* Something is already active */ 1184 /* Something is already active, or we're waiting for a channel... */
1301 if (plchan->at) { 1185 if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
1302 spin_unlock_irqrestore(&plchan->lock, flags); 1186 spin_unlock_irqrestore(&plchan->lock, flags);
1303 return;
1304 }
1305
1306 /* Didn't get a physical channel so waiting for it ... */
1307 if (plchan->state == PL08X_CHAN_WAITING)
1308 return; 1187 return;
1188 }
1309 1189
1310 /* Take the first element in the queue and execute it */ 1190 /* Take the first element in the queue and execute it */
1311 if (!list_empty(&plchan->desc_list)) { 1191 if (!list_empty(&plchan->pend_list)) {
1312 struct pl08x_txd *next; 1192 struct pl08x_txd *next;
1313 1193
1314 next = list_first_entry(&plchan->desc_list, 1194 next = list_first_entry(&plchan->pend_list,
1315 struct pl08x_txd, 1195 struct pl08x_txd,
1316 node); 1196 node);
1317 list_del(&next->node); 1197 list_del(&next->node);
1318 plchan->at = next;
1319 plchan->state = PL08X_CHAN_RUNNING; 1198 plchan->state = PL08X_CHAN_RUNNING;
1320 1199
1321 /* Configure the physical channel for the active txd */ 1200 pl08x_start_txd(plchan, next);
1322 pl08x_config_phychan_for_txd(plchan);
1323 pl08x_set_cregs(pl08x, plchan->phychan);
1324 pl08x_enable_phy_chan(pl08x, plchan->phychan);
1325 } 1201 }
1326 1202
1327 spin_unlock_irqrestore(&plchan->lock, flags); 1203 spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1206,17 @@ static void pl08x_issue_pending(struct dma_chan *chan)
1330static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, 1206static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1331 struct pl08x_txd *txd) 1207 struct pl08x_txd *txd)
1332{ 1208{
1333 int num_llis;
1334 struct pl08x_driver_data *pl08x = plchan->host; 1209 struct pl08x_driver_data *pl08x = plchan->host;
1335 int ret; 1210 unsigned long flags;
1211 int num_llis, ret;
1336 1212
1337 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1213 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1338 1214 if (!num_llis) {
1339 if (!num_llis) 1215 kfree(txd);
1340 return -EINVAL; 1216 return -EINVAL;
1217 }
1341 1218
1342 spin_lock_irqsave(&plchan->lock, plchan->lockflags); 1219 spin_lock_irqsave(&plchan->lock, flags);
1343
1344 /*
1345 * If this device is not using a circular buffer then
1346 * queue this new descriptor for transfer.
1347 * The descriptor for a circular buffer continues
1348 * to be used until the channel is freed.
1349 */
1350 if (txd->cd->circular_buffer)
1351 dev_err(&pl08x->adev->dev,
1352 "%s attempting to queue a circular buffer\n",
1353 __func__);
1354 else
1355 list_add_tail(&txd->node,
1356 &plchan->desc_list);
1357 1220
1358 /* 1221 /*
1359 * See if we already have a physical channel allocated, 1222 * See if we already have a physical channel allocated,
@@ -1362,45 +1225,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1362 ret = prep_phy_channel(plchan, txd); 1225 ret = prep_phy_channel(plchan, txd);
1363 if (ret) { 1226 if (ret) {
1364 /* 1227 /*
1365 * No physical channel available, we will 1228 * No physical channel was available.
1366 * stack up the memcpy channels until there is a channel 1229 *
1367 * available to handle it whereas slave transfers may 1230 * memcpy transfers can be sorted out at submission time.
1368 * have been denied due to platform channel muxing restrictions 1231 *
1369 * and since there is no guarantee that this will ever be 1232 * Slave transfers may have been denied due to platform
1370 * resolved, and since the signal must be aquired AFTER 1233 * channel muxing restrictions. Since there is no guarantee
1371 * aquiring the physical channel, we will let them be NACK:ed 1234 * that this will ever be resolved, and the signal must be
1372 * with -EBUSY here. The drivers can alway retry the prep() 1235 * acquired AFTER acquiring the physical channel, we will let
1373 * call if they are eager on doing this using DMA. 1236 * them be NACK:ed with -EBUSY here. The drivers can retry
1237 * the prep() call if they are eager on doing this using DMA.
1374 */ 1238 */
1375 if (plchan->slave) { 1239 if (plchan->slave) {
1376 pl08x_free_txd_list(pl08x, plchan); 1240 pl08x_free_txd_list(pl08x, plchan);
1377 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); 1241 pl08x_free_txd(pl08x, txd);
1242 spin_unlock_irqrestore(&plchan->lock, flags);
1378 return -EBUSY; 1243 return -EBUSY;
1379 } 1244 }
1380 /* Do this memcpy whenever there is a channel ready */
1381 plchan->state = PL08X_CHAN_WAITING;
1382 plchan->waiting = txd;
1383 } else 1245 } else
1384 /* 1246 /*
1385 * Else we're all set, paused and ready to roll, 1247 * Else we're all set, paused and ready to roll, status
1386 * status will switch to PL08X_CHAN_RUNNING when 1248 * will switch to PL08X_CHAN_RUNNING when we call
1387 * we call issue_pending(). If there is something 1249 * issue_pending(). If there is something running on the
1388 * running on the channel already we don't change 1250 * channel already we don't change its state.
1389 * its state.
1390 */ 1251 */
1391 if (plchan->state == PL08X_CHAN_IDLE) 1252 if (plchan->state == PL08X_CHAN_IDLE)
1392 plchan->state = PL08X_CHAN_PAUSED; 1253 plchan->state = PL08X_CHAN_PAUSED;
1393 1254
1394 /* 1255 spin_unlock_irqrestore(&plchan->lock, flags);
1395 * Notice that we leave plchan->lock locked on purpose:
1396 * it will be unlocked in the subsequent tx_submit()
1397 * call. This is a consequence of the current API.
1398 */
1399 1256
1400 return 0; 1257 return 0;
1401} 1258}
1402 1259
1403/* 1260/*
1261 * Given the source and destination available bus masks, select which
1262 * will be routed to each port. We try to have source and destination
1263 * on separate ports, but always respect the allowable settings.
1264 */
1265static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
1266{
1267 u32 cctl = 0;
1268
1269 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1270 cctl |= PL080_CONTROL_DST_AHB2;
1271 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1272 cctl |= PL080_CONTROL_SRC_AHB2;
1273
1274 return cctl;
1275}
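
For reference, the routing rule above can be exercised on its own. A minimal user-space sketch follows; the PL08X_AHB* masks and the CCTL master-select bits are redefined locally as illustrative stand-ins, not the authoritative register layout:

#include <stdio.h>

#define PL08X_AHB1	(1 << 0)
#define PL08X_AHB2	(1 << 1)
#define CTRL_DST_AHB2	(1 << 25)	/* stand-in for PL080_CONTROL_DST_AHB2 */
#define CTRL_SRC_AHB2	(1 << 24)	/* stand-in for PL080_CONTROL_SRC_AHB2 */

/* Same selection logic as pl08x_select_bus() above */
static unsigned int select_bus(unsigned int src, unsigned int dst)
{
	unsigned int cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= CTRL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= CTRL_SRC_AHB2;

	return cctl;
}

int main(void)
{
	/* Both ends may use either master: src stays on AHB1, dst moves
	 * to AHB2, i.e. the "separate ports" case the comment describes. */
	printf("%#x\n", select_bus(PL08X_AHB1 | PL08X_AHB2,
				   PL08X_AHB1 | PL08X_AHB2));
	return 0;
}
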
1276
1277static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1278 unsigned long flags)
1279{
1280 struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1281
1282 if (txd) {
1283 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1284 txd->tx.flags = flags;
1285 txd->tx.tx_submit = pl08x_tx_submit;
1286 INIT_LIST_HEAD(&txd->node);
1287
1288 /* Always enable error and terminal interrupts */
1289 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1290 PL080_CONFIG_TC_IRQ_MASK;
1291 }
1292 return txd;
1293}
1294
1295/*
1404 * Initialize a descriptor to be used by memcpy submit 1296 * Initialize a descriptor to be used by memcpy submit
1405 */ 1297 */
1406static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1298static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1304,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1412 struct pl08x_txd *txd; 1304 struct pl08x_txd *txd;
1413 int ret; 1305 int ret;
1414 1306
1415 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1307 txd = pl08x_get_txd(plchan, flags);
1416 if (!txd) { 1308 if (!txd) {
1417 dev_err(&pl08x->adev->dev, 1309 dev_err(&pl08x->adev->dev,
1418 "%s no memory for descriptor\n", __func__); 1310 "%s no memory for descriptor\n", __func__);
1419 return NULL; 1311 return NULL;
1420 } 1312 }
1421 1313
1422 dma_async_tx_descriptor_init(&txd->tx, chan);
1423 txd->direction = DMA_NONE; 1314 txd->direction = DMA_NONE;
1424 txd->srcbus.addr = src; 1315 txd->src_addr = src;
1425 txd->dstbus.addr = dest; 1316 txd->dst_addr = dest;
1317 txd->len = len;
1426 1318
1427 /* Set platform data for m2m */ 1319 /* Set platform data for m2m */
1428 txd->cd = &pl08x->pd->memcpy_channel; 1320 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1321 txd->cctl = pl08x->pd->memcpy_channel.cctl &
1322 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1323
1429 /* Both to be incremented or the code will break */ 1324 /* Both to be incremented or the code will break */
1430 txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1325 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1431 txd->tx.tx_submit = pl08x_tx_submit; 1326
1432 txd->tx.callback = NULL; 1327 if (pl08x->vd->dualmaster)
1433 txd->tx.callback_param = NULL; 1328 txd->cctl |= pl08x_select_bus(pl08x,
1434 txd->len = len; 1329 pl08x->mem_buses, pl08x->mem_buses);
1435 1330
1436 INIT_LIST_HEAD(&txd->node);
1437 ret = pl08x_prep_channel_resources(plchan, txd); 1331 ret = pl08x_prep_channel_resources(plchan, txd);
1438 if (ret) 1332 if (ret)
1439 return NULL; 1333 return NULL;
1440 /*
1441 * NB: the channel lock is held at this point so tx_submit()
1442 * must be called in direct succession.
1443 */
1444 1334
1445 return &txd->tx; 1335 return &txd->tx;
1446} 1336}
1447 1337
1448struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1338static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1449 struct dma_chan *chan, struct scatterlist *sgl, 1339 struct dma_chan *chan, struct scatterlist *sgl,
1450 unsigned int sg_len, enum dma_data_direction direction, 1340 unsigned int sg_len, enum dma_data_direction direction,
1451 unsigned long flags) 1341 unsigned long flags)
@@ -1453,6 +1343,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1453 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1343 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1454 struct pl08x_driver_data *pl08x = plchan->host; 1344 struct pl08x_driver_data *pl08x = plchan->host;
1455 struct pl08x_txd *txd; 1345 struct pl08x_txd *txd;
1346 u8 src_buses, dst_buses;
1456 int ret; 1347 int ret;
1457 1348
1458 /* 1349 /*
@@ -1467,14 +1358,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1467 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1358 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1468 __func__, sgl->length, plchan->name); 1359 __func__, sgl->length, plchan->name);
1469 1360
1470 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1361 txd = pl08x_get_txd(plchan, flags);
1471 if (!txd) { 1362 if (!txd) {
1472 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1363 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1473 return NULL; 1364 return NULL;
1474 } 1365 }
1475 1366
1476 dma_async_tx_descriptor_init(&txd->tx, chan);
1477
1478 if (direction != plchan->runtime_direction) 1367 if (direction != plchan->runtime_direction)
1479 dev_err(&pl08x->adev->dev, "%s DMA setup does not match " 1368 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1480 "the direction configured for the PrimeCell\n", 1369 "the direction configured for the PrimeCell\n",
@@ -1486,37 +1375,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1486 * channel target address dynamically at runtime. 1375 * channel target address dynamically at runtime.
1487 */ 1376 */
1488 txd->direction = direction; 1377 txd->direction = direction;
1378 txd->len = sgl->length;
1379
1380 txd->cctl = plchan->cd->cctl &
1381 ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1382 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1383 PL080_CONTROL_PROT_MASK);
1384
1385 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1386 txd->cctl |= PL080_CONTROL_PROT_SYS;
1387
1489 if (direction == DMA_TO_DEVICE) { 1388 if (direction == DMA_TO_DEVICE) {
1490 txd->srcbus.addr = sgl->dma_address; 1389 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1390 txd->cctl |= PL080_CONTROL_SRC_INCR;
1391 txd->src_addr = sgl->dma_address;
1491 if (plchan->runtime_addr) 1392 if (plchan->runtime_addr)
1492 txd->dstbus.addr = plchan->runtime_addr; 1393 txd->dst_addr = plchan->runtime_addr;
1493 else 1394 else
1494 txd->dstbus.addr = plchan->cd->addr; 1395 txd->dst_addr = plchan->cd->addr;
1396 src_buses = pl08x->mem_buses;
1397 dst_buses = plchan->cd->periph_buses;
1495 } else if (direction == DMA_FROM_DEVICE) { 1398 } else if (direction == DMA_FROM_DEVICE) {
1399 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1400 txd->cctl |= PL080_CONTROL_DST_INCR;
1496 if (plchan->runtime_addr) 1401 if (plchan->runtime_addr)
1497 txd->srcbus.addr = plchan->runtime_addr; 1402 txd->src_addr = plchan->runtime_addr;
1498 else 1403 else
1499 txd->srcbus.addr = plchan->cd->addr; 1404 txd->src_addr = plchan->cd->addr;
1500 txd->dstbus.addr = sgl->dma_address; 1405 txd->dst_addr = sgl->dma_address;
1406 src_buses = plchan->cd->periph_buses;
1407 dst_buses = pl08x->mem_buses;
1501 } else { 1408 } else {
1502 dev_err(&pl08x->adev->dev, 1409 dev_err(&pl08x->adev->dev,
1503 "%s direction unsupported\n", __func__); 1410 "%s direction unsupported\n", __func__);
1504 return NULL; 1411 return NULL;
1505 } 1412 }
1506 txd->cd = plchan->cd; 1413
1507 txd->tx.tx_submit = pl08x_tx_submit; 1414 txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
1508 txd->tx.callback = NULL;
1509 txd->tx.callback_param = NULL;
1510 txd->len = sgl->length;
1511 INIT_LIST_HEAD(&txd->node);
1512 1415
1513 ret = pl08x_prep_channel_resources(plchan, txd); 1416 ret = pl08x_prep_channel_resources(plchan, txd);
1514 if (ret) 1417 if (ret)
1515 return NULL; 1418 return NULL;
1516 /*
1517 * NB: the channel lock is held at this point so tx_submit()
1518 * must be called in direct succession.
1519 */
1520 1419
1521 return &txd->tx; 1420 return &txd->tx;
1522} 1421}
@@ -1531,10 +1430,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1531 1430
1532 /* Controls applicable to inactive channels */ 1431 /* Controls applicable to inactive channels */
1533 if (cmd == DMA_SLAVE_CONFIG) { 1432 if (cmd == DMA_SLAVE_CONFIG) {
1534 dma_set_runtime_config(chan, 1433 return dma_set_runtime_config(chan,
1535 (struct dma_slave_config *) 1434 (struct dma_slave_config *)arg);
1536 arg);
1537 return 0;
1538 } 1435 }
1539 1436
1540 /* 1437 /*
@@ -1558,16 +1455,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1558 * Mark physical channel as free and free any slave 1455 * Mark physical channel as free and free any slave
1559 * signal 1456 * signal
1560 */ 1457 */
1561 if ((plchan->phychan->signal >= 0) && 1458 release_phy_channel(plchan);
1562 pl08x->pd->put_signal) {
1563 pl08x->pd->put_signal(plchan);
1564 plchan->phychan->signal = -1;
1565 }
1566 pl08x_put_phy_channel(pl08x, plchan->phychan);
1567 plchan->phychan = NULL;
1568 } 1459 }
1569 /* Stop any pending tasklet */
1570 tasklet_disable(&plchan->tasklet);
1571 /* Dequeue jobs and free LLIs */ 1460 /* Dequeue jobs and free LLIs */
1572 if (plchan->at) { 1461 if (plchan->at) {
1573 pl08x_free_txd(pl08x, plchan->at); 1462 pl08x_free_txd(pl08x, plchan->at);
@@ -1609,10 +1498,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1609 1498
1610/* 1499/*
1611 * Just check that the device is there and active 1500 * Just check that the device is there and active
1612 * TODO: turn this bit on/off depending on the number of 1501 * TODO: turn this bit on/off depending on the number of physical channels
1613 * physical channels actually used, if it is zero... well 1502 * actually used, if it is zero... well shut it off. That will save some
1614 * shut it off. That will save some power. Cut the clock 1503 * power. Cut the clock at the same time.
1615 * at the same time.
1616 */ 1504 */
1617static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1505static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1618{ 1506{
@@ -1620,78 +1508,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1620 1508
1621 val = readl(pl08x->base + PL080_CONFIG); 1509 val = readl(pl08x->base + PL080_CONFIG);
1622 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1510 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1623 /* We implictly clear bit 1 and that means little-endian mode */ 1511 /* We implicitly clear bit 1 and that means little-endian mode */
1624 val |= PL080_CONFIG_ENABLE; 1512 val |= PL080_CONFIG_ENABLE;
1625 writel(val, pl08x->base + PL080_CONFIG); 1513 writel(val, pl08x->base + PL080_CONFIG);
1626} 1514}
1627 1515
1516static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1517{
1518 struct device *dev = txd->tx.chan->device->dev;
1519
1520 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1521 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1522 dma_unmap_single(dev, txd->src_addr, txd->len,
1523 DMA_TO_DEVICE);
1524 else
1525 dma_unmap_page(dev, txd->src_addr, txd->len,
1526 DMA_TO_DEVICE);
1527 }
1528 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1529 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1530 dma_unmap_single(dev, txd->dst_addr, txd->len,
1531 DMA_FROM_DEVICE);
1532 else
1533 dma_unmap_page(dev, txd->dst_addr, txd->len,
1534 DMA_FROM_DEVICE);
1535 }
1536}
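
For context, a hedged sketch of the memcpy client side that these flag tests serve; submit_copy() is a hypothetical helper, while the flags and calls are the dmaengine API of this era. A client that manages its own mappings sets the skip flags so the completion path above leaves the buffers mapped:

#include <linux/dmaengine.h>

/* Hypothetical client helper: dst_dma/src_dma are assumed to be
 * DMA-mapped by the caller, hence the skip-unmap flags. */
static dma_cookie_t submit_copy(struct dma_chan *chan, dma_addr_t dst_dma,
				dma_addr_t src_dma, size_t len)
{
	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
						  len, flags);
	if (!tx)
		return -EBUSY;

	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);
	return cookie;
}
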
1537
1628static void pl08x_tasklet(unsigned long data) 1538static void pl08x_tasklet(unsigned long data)
1629{ 1539{
1630 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; 1540 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1631 struct pl08x_phy_chan *phychan = plchan->phychan;
1632 struct pl08x_driver_data *pl08x = plchan->host; 1541 struct pl08x_driver_data *pl08x = plchan->host;
1542 struct pl08x_txd *txd;
1543 unsigned long flags;
1633 1544
1634 if (!plchan) 1545 spin_lock_irqsave(&plchan->lock, flags);
1635 BUG();
1636
1637 spin_lock(&plchan->lock);
1638
1639 if (plchan->at) {
1640 dma_async_tx_callback callback =
1641 plchan->at->tx.callback;
1642 void *callback_param =
1643 plchan->at->tx.callback_param;
1644
1645 /*
1646 * Update last completed
1647 */
1648 plchan->lc =
1649 (plchan->at->tx.cookie);
1650
1651 /*
1652 * Callback to signal completion
1653 */
1654 if (callback)
1655 callback(callback_param);
1656 1546
1657 /* 1547 txd = plchan->at;
1658 * Device callbacks should NOT clear 1548 plchan->at = NULL;
1659 * the current transaction on the channel
1660 * Linus: sometimes they should?
1661 */
1662 if (!plchan->at)
1663 BUG();
1664 1549
1665 /* 1550 if (txd) {
1666 * Free the descriptor if it's not for a device 1551 /* Update last completed */
1667 * using a circular buffer 1552 plchan->lc = txd->tx.cookie;
1668 */
1669 if (!plchan->at->cd->circular_buffer) {
1670 pl08x_free_txd(pl08x, plchan->at);
1671 plchan->at = NULL;
1672 }
1673 /*
1674 * else descriptor for circular
1675 * buffers only freed when
1676 * client has disabled dma
1677 */
1678 } 1553 }
1679 /* 1554
1680 * If a new descriptor is queued, set it up 1555 /* If a new descriptor is queued, set it up plchan->at is NULL here */
1681 * plchan->at is NULL here 1556 if (!list_empty(&plchan->pend_list)) {
1682 */
1683 if (!list_empty(&plchan->desc_list)) {
1684 struct pl08x_txd *next; 1557 struct pl08x_txd *next;
1685 1558
1686 next = list_first_entry(&plchan->desc_list, 1559 next = list_first_entry(&plchan->pend_list,
1687 struct pl08x_txd, 1560 struct pl08x_txd,
1688 node); 1561 node);
1689 list_del(&next->node); 1562 list_del(&next->node);
1690 plchan->at = next; 1563
1691 /* Configure the physical channel for the next txd */ 1564 pl08x_start_txd(plchan, next);
1692 pl08x_config_phychan_for_txd(plchan); 1565 } else if (plchan->phychan_hold) {
1693 pl08x_set_cregs(pl08x, plchan->phychan); 1566 /*
1694 pl08x_enable_phy_chan(pl08x, plchan->phychan); 1567 * This channel is still in use - we have a new txd being
1568 * prepared and will soon be queued. Don't give up the
1569 * physical channel.
1570 */
1695 } else { 1571 } else {
1696 struct pl08x_dma_chan *waiting = NULL; 1572 struct pl08x_dma_chan *waiting = NULL;
1697 1573
@@ -1699,20 +1575,14 @@ static void pl08x_tasklet(unsigned long data)
1699 * No more jobs, so free up the physical channel 1575 * No more jobs, so free up the physical channel
1700 * Free any allocated signal on slave transfers too 1576 * Free any allocated signal on slave transfers too
1701 */ 1577 */
1702 if ((phychan->signal >= 0) && pl08x->pd->put_signal) { 1578 release_phy_channel(plchan);
1703 pl08x->pd->put_signal(plchan);
1704 phychan->signal = -1;
1705 }
1706 pl08x_put_phy_channel(pl08x, phychan);
1707 plchan->phychan = NULL;
1708 plchan->state = PL08X_CHAN_IDLE; 1579 plchan->state = PL08X_CHAN_IDLE;
1709 1580
1710 /* 1581 /*
1711 * And NOW before anyone else can grab that free:d 1582 * And NOW before anyone else can grab that free:d up
1712 * up physical channel, see if there is some memcpy 1583 * physical channel, see if there is some memcpy pending
1713 * pending that seriously needs to start because of 1584 * that seriously needs to start because of being stacked
1714 * being stacked up while we were choking the 1585 * up while we were choking the physical channels with data.
1715 * physical channels with data.
1716 */ 1586 */
1717 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1587 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1718 chan.device_node) { 1588 chan.device_node) {
@@ -1724,6 +1594,7 @@ static void pl08x_tasklet(unsigned long data)
1724 ret = prep_phy_channel(waiting, 1594 ret = prep_phy_channel(waiting,
1725 waiting->waiting); 1595 waiting->waiting);
1726 BUG_ON(ret); 1596 BUG_ON(ret);
1597 waiting->phychan_hold--;
1727 waiting->state = PL08X_CHAN_RUNNING; 1598 waiting->state = PL08X_CHAN_RUNNING;
1728 waiting->waiting = NULL; 1599 waiting->waiting = NULL;
1729 pl08x_issue_pending(&waiting->chan); 1600 pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1603,25 @@ static void pl08x_tasklet(unsigned long data)
1732 } 1603 }
1733 } 1604 }
1734 1605
1735 spin_unlock(&plchan->lock); 1606 spin_unlock_irqrestore(&plchan->lock, flags);
1607
1608 if (txd) {
1609 dma_async_tx_callback callback = txd->tx.callback;
1610 void *callback_param = txd->tx.callback_param;
1611
1612 /* Don't try to unmap buffers on slave channels */
1613 if (!plchan->slave)
1614 pl08x_unmap_buffers(txd);
1615
1616 /* Free the descriptor */
1617 spin_lock_irqsave(&plchan->lock, flags);
1618 pl08x_free_txd(pl08x, txd);
1619 spin_unlock_irqrestore(&plchan->lock, flags);
1620
1621 /* Callback to signal completion */
1622 if (callback)
1623 callback(callback_param);
1624 }
1736} 1625}
1737 1626
1738static irqreturn_t pl08x_irq(int irq, void *dev) 1627static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1633,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1744 1633
1745 val = readl(pl08x->base + PL080_ERR_STATUS); 1634 val = readl(pl08x->base + PL080_ERR_STATUS);
1746 if (val) { 1635 if (val) {
1747 /* 1636 /* An error interrupt (on one or more channels) */
1748 * An error interrupt (on one or more channels)
1749 */
1750 dev_err(&pl08x->adev->dev, 1637 dev_err(&pl08x->adev->dev,
1751 "%s error interrupt, register value 0x%08x\n", 1638 "%s error interrupt, register value 0x%08x\n",
1752 __func__, val); 1639 __func__, val);
@@ -1770,9 +1657,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1770 mask |= (1 << i); 1657 mask |= (1 << i);
1771 } 1658 }
1772 } 1659 }
1773 /* 1660 /* Clear only the terminal interrupts on channels we processed */
1774 * Clear only the terminal interrupts on channels we processed
1775 */
1776 writel(mask, pl08x->base + PL080_TC_CLEAR); 1661 writel(mask, pl08x->base + PL080_TC_CLEAR);
1777 1662
1778 return mask ? IRQ_HANDLED : IRQ_NONE; 1663 return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1676,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1791 int i; 1676 int i;
1792 1677
1793 INIT_LIST_HEAD(&dmadev->channels); 1678 INIT_LIST_HEAD(&dmadev->channels);
1679
1794 /* 1680 /*
 1795 * Register as many memcpy channels as we have physical channels, 1681 * Register as many memcpy channels as we have physical channels,
1796 * we won't always be able to use all but the code will have 1682 * we won't always be able to use all but the code will have
@@ -1819,16 +1705,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1819 return -ENOMEM; 1705 return -ENOMEM;
1820 } 1706 }
1821 } 1707 }
1708 if (chan->cd->circular_buffer) {
1709 dev_err(&pl08x->adev->dev,
1710 "channel %s: circular buffers not supported\n",
1711 chan->name);
1712 kfree(chan);
1713 continue;
1714 }
1822 dev_info(&pl08x->adev->dev, 1715 dev_info(&pl08x->adev->dev,
1823 "initialize virtual channel \"%s\"\n", 1716 "initialize virtual channel \"%s\"\n",
1824 chan->name); 1717 chan->name);
1825 1718
1826 chan->chan.device = dmadev; 1719 chan->chan.device = dmadev;
1827 atomic_set(&chan->last_issued, 0); 1720 chan->chan.cookie = 0;
1828 chan->lc = atomic_read(&chan->last_issued); 1721 chan->lc = 0;
1829 1722
1830 spin_lock_init(&chan->lock); 1723 spin_lock_init(&chan->lock);
1831 INIT_LIST_HEAD(&chan->desc_list); 1724 INIT_LIST_HEAD(&chan->pend_list);
1832 tasklet_init(&chan->tasklet, pl08x_tasklet, 1725 tasklet_init(&chan->tasklet, pl08x_tasklet,
1833 (unsigned long) chan); 1726 (unsigned long) chan);
1834 1727
@@ -1898,7 +1791,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1898 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1791 seq_printf(s, "CHANNEL:\tSTATE:\n");
1899 seq_printf(s, "--------\t------\n"); 1792 seq_printf(s, "--------\t------\n");
1900 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1793 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1901 seq_printf(s, "%s\t\t\%s\n", chan->name, 1794 seq_printf(s, "%s\t\t%s\n", chan->name,
1902 pl08x_state_str(chan->state)); 1795 pl08x_state_str(chan->state));
1903 } 1796 }
1904 1797
@@ -1906,7 +1799,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1906 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1799 seq_printf(s, "CHANNEL:\tSTATE:\n");
1907 seq_printf(s, "--------\t------\n"); 1800 seq_printf(s, "--------\t------\n");
1908 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1801 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1909 seq_printf(s, "%s\t\t\%s\n", chan->name, 1802 seq_printf(s, "%s\t\t%s\n", chan->name,
1910 pl08x_state_str(chan->state)); 1803 pl08x_state_str(chan->state));
1911 } 1804 }
1912 1805
@@ -1942,7 +1835,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1942static int pl08x_probe(struct amba_device *adev, struct amba_id *id) 1835static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1943{ 1836{
1944 struct pl08x_driver_data *pl08x; 1837 struct pl08x_driver_data *pl08x;
1945 struct vendor_data *vd = id->data; 1838 const struct vendor_data *vd = id->data;
1946 int ret = 0; 1839 int ret = 0;
1947 int i; 1840 int i;
1948 1841
@@ -1990,6 +1883,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1990 pl08x->adev = adev; 1883 pl08x->adev = adev;
1991 pl08x->vd = vd; 1884 pl08x->vd = vd;
1992 1885
1886 /* By default, AHB1 only. If dualmaster, from platform */
1887 pl08x->lli_buses = PL08X_AHB1;
1888 pl08x->mem_buses = PL08X_AHB1;
1889 if (pl08x->vd->dualmaster) {
1890 pl08x->lli_buses = pl08x->pd->lli_buses;
1891 pl08x->mem_buses = pl08x->pd->mem_buses;
1892 }
1893
1993 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 1894 /* A DMA memory pool for LLIs, align on 1-byte boundary */
1994 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 1895 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1995 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 1896 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1910,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
2009 /* Turn on the PL08x */ 1910 /* Turn on the PL08x */
2010 pl08x_ensure_on(pl08x); 1911 pl08x_ensure_on(pl08x);
2011 1912
2012 /* 1913 /* Attach the interrupt handler */
2013 * Attach the interrupt handler
2014 */
2015 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1914 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2016 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 1915 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2017 1916
2018 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 1917 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
2019 vd->name, pl08x); 1918 DRIVER_NAME, pl08x);
2020 if (ret) { 1919 if (ret) {
2021 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 1920 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2022 __func__, adev->irq[0]); 1921 __func__, adev->irq[0]);
@@ -2087,8 +1986,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
2087 1986
2088 amba_set_drvdata(adev, pl08x); 1987 amba_set_drvdata(adev, pl08x);
2089 init_pl08x_debugfs(pl08x); 1988 init_pl08x_debugfs(pl08x);
2090 dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", 1989 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2091 vd->name, adev->res.start); 1990 amba_part(adev), amba_rev(adev),
1991 (unsigned long long)adev->res.start, adev->irq[0]);
2092 return 0; 1992 return 0;
2093 1993
2094out_no_slave_reg: 1994out_no_slave_reg:
@@ -2115,13 +2015,11 @@ out_no_pl08x:
2115 2015
2116/* PL080 has 8 channels and the PL081 has just 2 */ 2016
2117static struct vendor_data vendor_pl080 = { 2017static struct vendor_data vendor_pl080 = {
2118 .name = "PL080",
2119 .channels = 8, 2018 .channels = 8,
2120 .dualmaster = true, 2019 .dualmaster = true,
2121}; 2020};
2122 2021
2123static struct vendor_data vendor_pl081 = { 2022static struct vendor_data vendor_pl081 = {
2124 .name = "PL081",
2125 .channels = 2, 2023 .channels = 2,
2126 .dualmaster = false, 2024 .dualmaster = false,
2127}; 2025};
@@ -2160,7 +2058,7 @@ static int __init pl08x_init(void)
2160 retval = amba_driver_register(&pl08x_amba_driver); 2058 retval = amba_driver_register(&pl08x_amba_driver);
2161 if (retval) 2059 if (retval)
2162 printk(KERN_WARNING DRIVER_NAME 2060 printk(KERN_WARNING DRIVER_NAME
2163 "failed to register as an amba device (%d)\n", 2061 "failed to register as an AMBA device (%d)\n",
2164 retval); 2062 retval);
2165 return retval; 2063 return retval;
2166} 2064}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ea0ee81cff53..3d7d705f026f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
253 /* move myself to free_list */ 253 /* move myself to free_list */
254 list_move(&desc->desc_node, &atchan->free_list); 254 list_move(&desc->desc_node, &atchan->free_list);
255 255
256 /* unmap dma addresses */ 256 /* unmap dma addresses (not on slave channels) */
257 if (!atchan->chan_common.private) { 257 if (!atchan->chan_common.private) {
258 struct device *parent = chan2parent(&atchan->chan_common); 258 struct device *parent = chan2parent(&atchan->chan_common);
259 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 259 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
583 desc->lli.ctrlb = ctrlb; 583 desc->lli.ctrlb = ctrlb;
584 584
585 desc->txd.cookie = 0; 585 desc->txd.cookie = 0;
586 async_tx_ack(&desc->txd);
587 586
588 if (!first) { 587 if (!first) {
589 first = desc; 588 first = desc;
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
604 /* set end-of-link to the last link descriptor of list*/ 603 /* set end-of-link to the last link descriptor of list*/
605 set_desc_eol(desc); 604 set_desc_eol(desc);
606 605
607 desc->txd.flags = flags; /* client is in control of this ack */ 606 first->txd.flags = flags; /* client is in control of this ack */
608 607
609 return &first->txd; 608 return &first->txd;
610 609
@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
670 if (!desc) 669 if (!desc)
671 goto err_desc_get; 670 goto err_desc_get;
672 671
673 mem = sg_phys(sg); 672 mem = sg_dma_address(sg);
674 len = sg_dma_len(sg); 673 len = sg_dma_len(sg);
675 mem_width = 2; 674 mem_width = 2;
676 if (unlikely(mem & 3 || len & 3)) 675 if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
712 if (!desc) 711 if (!desc)
713 goto err_desc_get; 712 goto err_desc_get;
714 713
715 mem = sg_phys(sg); 714 mem = sg_dma_address(sg);
716 len = sg_dma_len(sg); 715 len = sg_dma_len(sg);
717 mem_width = 2; 716 mem_width = 2;
718 if (unlikely(mem & 3 || len & 3)) 717 if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
749 first->txd.cookie = -EBUSY; 748 first->txd.cookie = -EBUSY;
750 first->len = total_len; 749 first->len = total_len;
751 750
752 /* last link descriptor of list is responsible of flags */ 751 /* first link descriptor of list is responsible of flags */
753 prev->txd.flags = flags; /* client is in control of this ack */ 752 first->txd.flags = flags; /* client is in control of this ack */
754 753
755 return &first->txd; 754 return &first->txd;
756 755
@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan)
854 853
855 dev_vdbg(chan2dev(chan), "issue_pending\n"); 854 dev_vdbg(chan2dev(chan), "issue_pending\n");
856 855
856 spin_lock_bh(&atchan->lock);
857 if (!atc_chan_is_enabled(atchan)) { 857 if (!atc_chan_is_enabled(atchan)) {
858 spin_lock_bh(&atchan->lock);
859 atc_advance_work(atchan); 858 atc_advance_work(atchan);
860 spin_unlock_bh(&atchan->lock);
861 } 859 }
860 spin_unlock_bh(&atchan->lock);
862} 861}
863 862
864/** 863/**
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void)
1210{ 1209{
1211 return platform_driver_probe(&at_dma_driver, at_dma_probe); 1210 return platform_driver_probe(&at_dma_driver, at_dma_probe);
1212} 1211}
1213module_init(at_dma_init); 1212subsys_initcall(at_dma_init);
1214 1213
1215static void __exit at_dma_exit(void) 1214static void __exit at_dma_exit(void)
1216{ 1215{
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e5e172d21692..4de947a450fc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale MPC85xx, MPC83xx DMA Engine support 2 * Freescale MPC85xx, MPC83xx DMA Engine support
3 * 3 *
4 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
5 * 5 *
6 * Author: 6 * Author:
7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
1324 fdev->common.device_control = fsl_dma_device_control; 1324 fdev->common.device_control = fsl_dma_device_control;
1325 fdev->common.dev = &op->dev; 1325 fdev->common.dev = &op->dev;
1326 1326
1327 dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1328
1327 dev_set_drvdata(&op->dev, fdev); 1329 dev_set_drvdata(&op->dev, fdev);
1328 1330
1329 /* 1331 /*
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 78266382797e..798f46a4590d 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
664 /*calculate CTL_LO*/ 664 /*calculate CTL_LO*/
665 ctl_lo.ctl_lo = 0; 665 ctl_lo.ctl_lo = 0;
666 ctl_lo.ctlx.int_en = 1; 666 ctl_lo.ctlx.int_en = 1;
667 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
668 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
669 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 667 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
670 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 668 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
671 669
670 /*
671 * Here we need some translation from "enum dma_slave_buswidth"
672 * to the format for our dma controller
 673 * standard -> intel_mid_dmac's format
 674 * 1 Byte -> 0b000
 675 * 2 Bytes -> 0b001
 676 * 4 Bytes -> 0b010
677 */
678 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
679 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
680
672 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 681 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
673 ctl_lo.ctlx.tt_fc = 0; 682 ctl_lo.ctlx.tt_fc = 0;
674 ctl_lo.ctlx.sinc = 0; 683 ctl_lo.ctlx.sinc = 0;
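
The division by two above works because enum dma_slave_buswidth encodes widths in bytes (1, 2 or 4). A standalone sketch of the translation, assuming only those three widths are ever configured:

#include <stdio.h>

/* 1 byte -> 0b000, 2 bytes -> 0b001, 4 bytes -> 0b010,
 * exactly the ctlx.src/dst_tr_width encoding used above */
static unsigned int tr_width(unsigned int bytes)
{
	return bytes / 2;
}

int main(void)
{
	printf("%u %u %u\n", tr_width(1), tr_width(2), tr_width(4)); /* 0 1 2 */
	return 0;
}
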
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
746 BUG_ON(!mids); 755 BUG_ON(!mids);
747 756
748 if (!midc->dma->pimr_mask) { 757 if (!midc->dma->pimr_mask) {
749 pr_debug("MDMA: SG list is not supported by this controller\n"); 758 /* We can still handle sg list with only one item */
750 return NULL; 759 if (sg_len == 1) {
760 txd = intel_mid_dma_prep_memcpy(chan,
761 mids->dma_slave.dst_addr,
762 mids->dma_slave.src_addr,
763 sgl->length,
764 flags);
765 return txd;
766 } else {
767 pr_warn("MDMA: SG list is not supported by this controller\n");
768 return NULL;
769 }
751 } 770 }
752 771
753 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 772 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
758 pr_err("MDMA: Prep memcpy failed\n"); 777 pr_err("MDMA: Prep memcpy failed\n");
759 return NULL; 778 return NULL;
760 } 779 }
780
761 desc = to_intel_mid_dma_desc(txd); 781 desc = to_intel_mid_dma_desc(txd);
762 desc->dirn = direction; 782 desc->dirn = direction;
763 ctl_lo.ctl_lo = desc->ctl_lo; 783 ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
1021 1041
1022 /*DMA Interrupt*/ 1042 /*DMA Interrupt*/
1023 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1043 pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
1024 if (!mid) {
1025 pr_err("ERR_MDMA:null pointer mid\n");
1026 return -EINVAL;
1027 }
1028
1029 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1044 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
1030 tfr_status &= mid->intr_mask; 1045 tfr_status &= mid->intr_mask;
1031 if (tfr_status) { 1046 if (tfr_status) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 161c452923b8..c6b01f535b29 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1261,7 +1261,7 @@ out:
1261 return err; 1261 return err;
1262} 1262}
1263 1263
1264#ifdef CONFIG_MD_RAID6_PQ 1264#ifdef CONFIG_RAID6_PQ
1265static int __devinit 1265static int __devinit
1266iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) 1266iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1267{ 1267{
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1584 1584
1585 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && 1585 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1586 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { 1586 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1587 #ifdef CONFIG_MD_RAID6_PQ 1587 #ifdef CONFIG_RAID6_PQ
1588 ret = iop_adma_pq_zero_sum_self_test(adev); 1588 ret = iop_adma_pq_zero_sum_self_test(adev);
1589 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); 1589 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1590 #else 1590 #else
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c064c89420d0..1c38418ae61f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Topcliff PCH DMA controller driver 2 * Topcliff PCH DMA controller driver
3 * Copyright (c) 2010 Intel Corporation 3 * Copyright (c) 2010 Intel Corporation
4 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
921} 922}
922 923
923/* PCI Device ID of DMA device */ 924/* PCI Device ID of DMA device */
924#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 925#define PCI_VENDOR_ID_ROHM 0x10DB
925#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 926#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
927#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
928#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
929#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
930#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
926 931
927static const struct pci_device_id pch_dma_id_table[] = { 932static const struct pci_device_id pch_dma_id_table[] = {
928 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, 933 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
929 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, 934 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
935 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
936 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
937 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
930 { 0, }, 938 { 0, },
931}; 939};
932 940
@@ -954,6 +962,7 @@ static void __exit pch_dma_exit(void)
954module_init(pch_dma_init); 962module_init(pch_dma_init);
955module_exit(pch_dma_exit); 963module_exit(pch_dma_exit);
956 964
957MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); 965MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
966 "DMA controller driver");
958MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); 967MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
959MODULE_LICENSE("GPL v2"); 968MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a553205..6e1d46a65d0e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson SA 2007-2010 2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
3 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson 4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson 5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
554 return d; 555 return d;
555} 556}
556 557
557/* Support functions for logical channels */ 558static int d40_psize_2_burst_size(bool is_log, int psize)
559{
560 if (is_log) {
561 if (psize == STEDMA40_PSIZE_LOG_1)
562 return 1;
563 } else {
564 if (psize == STEDMA40_PSIZE_PHY_1)
565 return 1;
566 }
567
568 return 2 << psize;
569}
570
571/*
 572 * The DMA only supports transmitting packets up to
573 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
574 * dma elements required to send the entire sg list
575 */
576static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
577{
578 int dmalen;
579 u32 max_w = max(data_width1, data_width2);
580 u32 min_w = min(data_width1, data_width2);
581 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
582
583 if (seg_max > STEDMA40_MAX_SEG_SIZE)
584 seg_max -= (1 << max_w);
585
586 if (!IS_ALIGNED(size, 1 << max_w))
587 return -EINVAL;
588
589 if (size <= seg_max)
590 dmalen = 1;
591 else {
592 dmalen = size / seg_max;
593 if (dmalen * seg_max < size)
594 dmalen++;
595 }
596 return dmalen;
597}
598
599static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
600 u32 data_width1, u32 data_width2)
601{
602 struct scatterlist *sg;
603 int i;
604 int len = 0;
605 int ret;
606
607 for_each_sg(sgl, sg, sg_len, i) {
608 ret = d40_size_2_dmalen(sg_dma_len(sg),
609 data_width1, data_width2);
610 if (ret < 0)
611 return ret;
612 len += ret;
613 }
614 return len;
615}
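
To make the arithmetic concrete, a user-space sketch of one d40_size_2_dmalen() case, assuming STEDMA40_MAX_SEG_SIZE is 0xffff (a 16-bit element counter) and data widths expressed as log2(bytes):

#include <stdio.h>

#define MAX_SEG 0xffffu			/* assumed STEDMA40_MAX_SEG_SIZE */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int size = 0x50000;		/* 320 KiB, word aligned */
	unsigned int min_w = 1, max_w = 2;	/* halfword one side, word the other */
	unsigned int seg_max = ALIGN_UP(MAX_SEG << min_w, 1u << max_w);

	if (seg_max > MAX_SEG)			/* same trim as the driver */
		seg_max -= 1u << max_w;		/* -> 0x1fffc bytes per link item */

	printf("dmalen = %u\n", (size + seg_max - 1) / seg_max);	/* 3 */
	return 0;
}
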
558 616
617/* Support functions for logical channels */
559 618
560static int d40_channel_execute_command(struct d40_chan *d40c, 619static int d40_channel_execute_command(struct d40_chan *d40c,
561 enum d40_command command) 620 enum d40_command command)
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
1241 res = -EINVAL; 1300 res = -EINVAL;
1242 } 1301 }
1243 1302
1303 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1304 (1 << conf->src_info.data_width) !=
1305 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1306 (1 << conf->dst_info.data_width)) {
1307 /*
1308 * The DMAC hardware only supports
1309 * src (burst x width) == dst (burst x width)
1310 */
1311
1312 dev_err(&d40c->chan.dev->device,
1313 "[%s] src (burst x width) != dst (burst x width)\n",
1314 __func__);
1315 res = -EINVAL;
1316 }
1317
1244 return res; 1318 return res;
1245} 1319}
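
A concrete instance of the (burst x width) rule enforced above, with widths as log2(bytes) as in the stedma40 configuration enums; the pairs below are illustrative, not taken from any real platform data:

#include <stdio.h>

/* Mirrors the check: both sides must move the same bytes per burst */
static int bursts_match(int src_burst, int src_width,
			int dst_burst, int dst_width)
{
	return src_burst * (1 << src_width) == dst_burst * (1 << dst_width);
}

int main(void)
{
	/* 4 x 32-bit = 16 bytes vs 8 x 16-bit = 16 bytes -> accepted */
	printf("%d\n", bursts_match(4, 2, 8, 1));	/* 1 */
	/* 4 x 32-bit = 16 bytes vs 4 x 16-bit = 8 bytes -> -EINVAL */
	printf("%d\n", bursts_match(4, 2, 4, 1));	/* 0 */
	return 0;
}
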
1246 1320
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1638 if (d40d == NULL) 1712 if (d40d == NULL)
1639 goto err; 1713 goto err;
1640 1714
1641 d40d->lli_len = sgl_len; 1715 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
1716 d40c->dma_cfg.src_info.data_width,
1717 d40c->dma_cfg.dst_info.data_width);
1718 if (d40d->lli_len < 0) {
1719 dev_err(&d40c->chan.dev->device,
1720 "[%s] Unaligned size\n", __func__);
1721 goto err;
1722 }
1723
1642 d40d->lli_current = 0; 1724 d40d->lli_current = 0;
1643 d40d->txd.flags = dma_flags; 1725 d40d->txd.flags = dma_flags;
1644 1726
1645 if (d40c->log_num != D40_PHY_CHAN) { 1727 if (d40c->log_num != D40_PHY_CHAN) {
1646 1728
1647 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1729 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1648 dev_err(&d40c->chan.dev->device, 1730 dev_err(&d40c->chan.dev->device,
1649 "[%s] Out of memory\n", __func__); 1731 "[%s] Out of memory\n", __func__);
1650 goto err; 1732 goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1654 sgl_len, 1736 sgl_len,
1655 d40d->lli_log.src, 1737 d40d->lli_log.src,
1656 d40c->log_def.lcsp1, 1738 d40c->log_def.lcsp1,
1657 d40c->dma_cfg.src_info.data_width); 1739 d40c->dma_cfg.src_info.data_width,
1740 d40c->dma_cfg.dst_info.data_width);
1658 1741
1659 (void) d40_log_sg_to_lli(sgl_dst, 1742 (void) d40_log_sg_to_lli(sgl_dst,
1660 sgl_len, 1743 sgl_len,
1661 d40d->lli_log.dst, 1744 d40d->lli_log.dst,
1662 d40c->log_def.lcsp3, 1745 d40c->log_def.lcsp3,
1663 d40c->dma_cfg.dst_info.data_width); 1746 d40c->dma_cfg.dst_info.data_width,
1747 d40c->dma_cfg.src_info.data_width);
1664 } else { 1748 } else {
1665 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1749 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1666 dev_err(&d40c->chan.dev->device, 1750 dev_err(&d40c->chan.dev->device,
1667 "[%s] Out of memory\n", __func__); 1751 "[%s] Out of memory\n", __func__);
1668 goto err; 1752 goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1675 virt_to_phys(d40d->lli_phy.src), 1759 virt_to_phys(d40d->lli_phy.src),
1676 d40c->src_def_cfg, 1760 d40c->src_def_cfg,
1677 d40c->dma_cfg.src_info.data_width, 1761 d40c->dma_cfg.src_info.data_width,
1762 d40c->dma_cfg.dst_info.data_width,
1678 d40c->dma_cfg.src_info.psize); 1763 d40c->dma_cfg.src_info.psize);
1679 1764
1680 if (res < 0) 1765 if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1687 virt_to_phys(d40d->lli_phy.dst), 1772 virt_to_phys(d40d->lli_phy.dst),
1688 d40c->dst_def_cfg, 1773 d40c->dst_def_cfg,
1689 d40c->dma_cfg.dst_info.data_width, 1774 d40c->dma_cfg.dst_info.data_width,
1775 d40c->dma_cfg.src_info.data_width,
1690 d40c->dma_cfg.dst_info.psize); 1776 d40c->dma_cfg.dst_info.psize);
1691 1777
1692 if (res < 0) 1778 if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1826 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1912 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1827 chan); 1913 chan);
1828 unsigned long flags; 1914 unsigned long flags;
1829 int err = 0;
1830 1915
1831 if (d40c->phy_chan == NULL) { 1916 if (d40c->phy_chan == NULL) {
1832 dev_err(&d40c->chan.dev->device, 1917 dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1844 } 1929 }
1845 1930
1846 d40d->txd.flags = dma_flags; 1931 d40d->txd.flags = dma_flags;
1932 d40d->lli_len = d40_size_2_dmalen(size,
1933 d40c->dma_cfg.src_info.data_width,
1934 d40c->dma_cfg.dst_info.data_width);
1935 if (d40d->lli_len < 0) {
1936 dev_err(&d40c->chan.dev->device,
1937 "[%s] Unaligned size\n", __func__);
1938 goto err;
1939 }
1940
1847 1941
1848 dma_async_tx_descriptor_init(&d40d->txd, chan); 1942 dma_async_tx_descriptor_init(&d40d->txd, chan);
1849 1943
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1851 1945
1852 if (d40c->log_num != D40_PHY_CHAN) { 1946 if (d40c->log_num != D40_PHY_CHAN) {
1853 1947
1854 if (d40_pool_lli_alloc(d40d, 1, true) < 0) { 1948 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1855 dev_err(&d40c->chan.dev->device, 1949 dev_err(&d40c->chan.dev->device,
1856 "[%s] Out of memory\n", __func__); 1950 "[%s] Out of memory\n", __func__);
1857 goto err; 1951 goto err;
1858 } 1952 }
1859 d40d->lli_len = 1;
1860 d40d->lli_current = 0; 1953 d40d->lli_current = 0;
1861 1954
1862 d40_log_fill_lli(d40d->lli_log.src, 1955 if (d40_log_buf_to_lli(d40d->lli_log.src,
1863 src, 1956 src,
1864 size, 1957 size,
1865 d40c->log_def.lcsp1, 1958 d40c->log_def.lcsp1,
1866 d40c->dma_cfg.src_info.data_width, 1959 d40c->dma_cfg.src_info.data_width,
1867 true); 1960 d40c->dma_cfg.dst_info.data_width,
1961 true) == NULL)
1962 goto err;
1868 1963
1869 d40_log_fill_lli(d40d->lli_log.dst, 1964 if (d40_log_buf_to_lli(d40d->lli_log.dst,
1870 dst, 1965 dst,
1871 size, 1966 size,
1872 d40c->log_def.lcsp3, 1967 d40c->log_def.lcsp3,
1873 d40c->dma_cfg.dst_info.data_width, 1968 d40c->dma_cfg.dst_info.data_width,
1874 true); 1969 d40c->dma_cfg.src_info.data_width,
1970 true) == NULL)
1971 goto err;
1875 1972
1876 } else { 1973 } else {
1877 1974
1878 if (d40_pool_lli_alloc(d40d, 1, false) < 0) { 1975 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1879 dev_err(&d40c->chan.dev->device, 1976 dev_err(&d40c->chan.dev->device,
1880 "[%s] Out of memory\n", __func__); 1977 "[%s] Out of memory\n", __func__);
1881 goto err; 1978 goto err;
1882 } 1979 }
1883 1980
1884 err = d40_phy_fill_lli(d40d->lli_phy.src, 1981 if (d40_phy_buf_to_lli(d40d->lli_phy.src,
1885 src, 1982 src,
1886 size, 1983 size,
1887 d40c->dma_cfg.src_info.psize, 1984 d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1889 d40c->src_def_cfg, 1986 d40c->src_def_cfg,
1890 true, 1987 true,
1891 d40c->dma_cfg.src_info.data_width, 1988 d40c->dma_cfg.src_info.data_width,
1892 false); 1989 d40c->dma_cfg.dst_info.data_width,
1893 if (err) 1990 false) == NULL)
1894 goto err_fill_lli; 1991 goto err;
1895 1992
1896 err = d40_phy_fill_lli(d40d->lli_phy.dst, 1993 if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
1897 dst, 1994 dst,
1898 size, 1995 size,
1899 d40c->dma_cfg.dst_info.psize, 1996 d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1901 d40c->dst_def_cfg, 1998 d40c->dst_def_cfg,
1902 true, 1999 true,
1903 d40c->dma_cfg.dst_info.data_width, 2000 d40c->dma_cfg.dst_info.data_width,
1904 false); 2001 d40c->dma_cfg.src_info.data_width,
1905 2002 false) == NULL)
1906 if (err) 2003 goto err;
1907 goto err_fill_lli;
1908 2004
1909 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 2005 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1910 d40d->lli_pool.size, DMA_TO_DEVICE); 2006 d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1913 spin_unlock_irqrestore(&d40c->lock, flags); 2009 spin_unlock_irqrestore(&d40c->lock, flags);
1914 return &d40d->txd; 2010 return &d40d->txd;
1915 2011
1916err_fill_lli:
1917 dev_err(&d40c->chan.dev->device,
1918 "[%s] Failed filling in PHY LLI\n", __func__);
1919err: 2012err:
1920 if (d40d) 2013 if (d40d)
1921 d40_desc_free(d40c, d40d); 2014 d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1945 dma_addr_t dev_addr = 0; 2038 dma_addr_t dev_addr = 0;
1946 int total_size; 2039 int total_size;
1947 2040
1948 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { 2041 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
2042 d40c->dma_cfg.src_info.data_width,
2043 d40c->dma_cfg.dst_info.data_width);
2044 if (d40d->lli_len < 0) {
2045 dev_err(&d40c->chan.dev->device,
2046 "[%s] Unaligned size\n", __func__);
2047 return -EINVAL;
2048 }
2049
2050 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1949 dev_err(&d40c->chan.dev->device, 2051 dev_err(&d40c->chan.dev->device,
1950 "[%s] Out of memory\n", __func__); 2052 "[%s] Out of memory\n", __func__);
1951 return -ENOMEM; 2053 return -ENOMEM;
1952 } 2054 }
1953 2055
1954 d40d->lli_len = sg_len;
1955 d40d->lli_current = 0; 2056 d40d->lli_current = 0;
1956 2057
1957 if (direction == DMA_FROM_DEVICE) 2058 if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1993 dma_addr_t dst_dev_addr; 2094 dma_addr_t dst_dev_addr;
1994 int res; 2095 int res;
1995 2096
1996 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 2097 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
2098 d40c->dma_cfg.src_info.data_width,
2099 d40c->dma_cfg.dst_info.data_width);
2100 if (d40d->lli_len < 0) {
2101 dev_err(&d40c->chan.dev->device,
2102 "[%s] Unaligned size\n", __func__);
2103 return -EINVAL;
2104 }
2105
2106 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1997 dev_err(&d40c->chan.dev->device, 2107 dev_err(&d40c->chan.dev->device,
1998 "[%s] Out of memory\n", __func__); 2108 "[%s] Out of memory\n", __func__);
1999 return -ENOMEM; 2109 return -ENOMEM;
2000 } 2110 }
2001 2111
2002 d40d->lli_len = sgl_len;
2003 d40d->lli_current = 0; 2112 d40d->lli_current = 0;
2004 2113
2005 if (direction == DMA_FROM_DEVICE) { 2114 if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2024 virt_to_phys(d40d->lli_phy.src), 2133 virt_to_phys(d40d->lli_phy.src),
2025 d40c->src_def_cfg, 2134 d40c->src_def_cfg,
2026 d40c->dma_cfg.src_info.data_width, 2135 d40c->dma_cfg.src_info.data_width,
2136 d40c->dma_cfg.dst_info.data_width,
2027 d40c->dma_cfg.src_info.psize); 2137 d40c->dma_cfg.src_info.psize);
2028 if (res < 0) 2138 if (res < 0)
2029 return res; 2139 return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2035 virt_to_phys(d40d->lli_phy.dst), 2145 virt_to_phys(d40d->lli_phy.dst),
2036 d40c->dst_def_cfg, 2146 d40c->dst_def_cfg,
2037 d40c->dma_cfg.dst_info.data_width, 2147 d40c->dma_cfg.dst_info.data_width,
2148 d40c->dma_cfg.src_info.data_width,
2038 d40c->dma_cfg.dst_info.psize); 2149 d40c->dma_cfg.dst_info.psize);
2039 if (res < 0) 2150 if (res < 0)
2040 return res; 2151 return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2244 psize = STEDMA40_PSIZE_PHY_8; 2355 psize = STEDMA40_PSIZE_PHY_8;
2245 else if (config_maxburst >= 4) 2356 else if (config_maxburst >= 4)
2246 psize = STEDMA40_PSIZE_PHY_4; 2357 psize = STEDMA40_PSIZE_PHY_4;
2358 else if (config_maxburst >= 2)
2359 psize = STEDMA40_PSIZE_PHY_2;
2247 else 2360 else
2248 psize = STEDMA40_PSIZE_PHY_1; 2361 psize = STEDMA40_PSIZE_PHY_1;
2249 } 2362 }
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb88b255..0b096a38322d 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson SA 2007-2010 2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson 3 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson 4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 */ 6 */
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
122 *dst_cfg = dst; 122 *dst_cfg = dst;
123} 123}
124 124
125int d40_phy_fill_lli(struct d40_phy_lli *lli, 125static int d40_phy_fill_lli(struct d40_phy_lli *lli,
126 dma_addr_t data, 126 dma_addr_t data,
127 u32 data_size, 127 u32 data_size,
128 int psize, 128 int psize,
129 dma_addr_t next_lli, 129 dma_addr_t next_lli,
130 u32 reg_cfg, 130 u32 reg_cfg,
131 bool term_int, 131 bool term_int,
132 u32 data_width, 132 u32 data_width,
133 bool is_device) 133 bool is_device)
134{ 134{
135 int num_elems; 135 int num_elems;
136 136
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
139 else 139 else
140 num_elems = 2 << psize; 140 num_elems = 2 << psize;
141 141
142 /*
143 * Size is 16bit. data_width is 8, 16, 32 or 64 bit
144 * Block large than 64 KiB must be split.
145 */
146 if (data_size > (0xffff << data_width))
147 return -EINVAL;
148
149 /* Must be aligned */ 142 /* Must be aligned */
150 if (!IS_ALIGNED(data, 0x1 << data_width)) 143 if (!IS_ALIGNED(data, 0x1 << data_width))
151 return -EINVAL; 144 return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
187 return 0; 180 return 0;
188} 181}
189 182
183static int d40_seg_size(int size, int data_width1, int data_width2)
184{
185 u32 max_w = max(data_width1, data_width2);
186 u32 min_w = min(data_width1, data_width2);
187 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
188
189 if (seg_max > STEDMA40_MAX_SEG_SIZE)
190 seg_max -= (1 << max_w);
191
192 if (size <= seg_max)
193 return size;
194
195 if (size <= 2 * seg_max)
196 return ALIGN(size / 2, 1 << max_w);
197
198 return seg_max;
199}
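
Tracing the splitting that d40_phy_buf_to_lli() below drives through d40_seg_size(), under the same assumptions as the earlier dmalen example (seg_max = 0x1fffc once min_w = 1 and max_w = 2); a user-space sketch:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Same shape as d40_seg_size(), with seg_max precomputed */
static unsigned int seg_size(unsigned int size, unsigned int seg_max,
			     unsigned int max_w)
{
	if (size <= seg_max)
		return size;
	if (size <= 2 * seg_max)
		return ALIGN_UP(size / 2, 1u << max_w);
	return seg_max;
}

int main(void)
{
	unsigned int rest = 0x30000, seg_max = 0x1fffc;

	while (rest) {		/* mirrors the do/while loop in the LLI builders */
		unsigned int seg = seg_size(rest, seg_max, 2);

		printf("segment: 0x%x\n", seg);	/* prints 0x18000 twice */
		rest -= seg;
	}
	return 0;
}
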
200
201struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
202 dma_addr_t addr,
203 u32 size,
204 int psize,
205 dma_addr_t lli_phys,
206 u32 reg_cfg,
207 bool term_int,
208 u32 data_width1,
209 u32 data_width2,
210 bool is_device)
211{
212 int err;
213 dma_addr_t next = lli_phys;
214 int size_rest = size;
215 int size_seg = 0;
216
217 do {
218 size_seg = d40_seg_size(size_rest, data_width1, data_width2);
219 size_rest -= size_seg;
220
221 if (term_int && size_rest == 0)
222 next = 0;
223 else
224 next = ALIGN(next + sizeof(struct d40_phy_lli),
225 D40_LLI_ALIGN);
226
227 err = d40_phy_fill_lli(lli,
228 addr,
229 size_seg,
230 psize,
231 next,
232 reg_cfg,
233 !next,
234 data_width1,
235 is_device);
236
237 if (err)
238 goto err;
239
240 lli++;
241 if (!is_device)
242 addr += size_seg;
243 } while (size_rest);
244
245 return lli;
246
247 err:
248 return NULL;
249}
250
190int d40_phy_sg_to_lli(struct scatterlist *sg, 251int d40_phy_sg_to_lli(struct scatterlist *sg,
191 int sg_len, 252 int sg_len,
192 dma_addr_t target, 253 dma_addr_t target,
193 struct d40_phy_lli *lli, 254 struct d40_phy_lli *lli_sg,
194 dma_addr_t lli_phys, 255 dma_addr_t lli_phys,
195 u32 reg_cfg, 256 u32 reg_cfg,
196 u32 data_width, 257 u32 data_width1,
258 u32 data_width2,
197 int psize) 259 int psize)
198{ 260{
199 int total_size = 0; 261 int total_size = 0;
200 int i; 262 int i;
201 struct scatterlist *current_sg = sg; 263 struct scatterlist *current_sg = sg;
202 dma_addr_t next_lli_phys;
203 dma_addr_t dst; 264 dma_addr_t dst;
204 int err = 0; 265 struct d40_phy_lli *lli = lli_sg;
266 dma_addr_t l_phys = lli_phys;
205 267
206 for_each_sg(sg, current_sg, sg_len, i) { 268 for_each_sg(sg, current_sg, sg_len, i) {
207 269
208 total_size += sg_dma_len(current_sg); 270 total_size += sg_dma_len(current_sg);
209 271
210 /* If this scatter list entry is the last one, no next link */
211 if (sg_len - 1 == i)
212 next_lli_phys = 0;
213 else
214 next_lli_phys = ALIGN(lli_phys + (i + 1) *
215 sizeof(struct d40_phy_lli),
216 D40_LLI_ALIGN);
217
218 if (target) 272 if (target)
219 dst = target; 273 dst = target;
220 else 274 else
221 dst = sg_phys(current_sg); 275 dst = sg_phys(current_sg);
222 276
223 err = d40_phy_fill_lli(&lli[i], 277 l_phys = ALIGN(lli_phys + (lli - lli_sg) *
224 dst, 278 sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
225 sg_dma_len(current_sg), 279
226 psize, 280 lli = d40_phy_buf_to_lli(lli,
227 next_lli_phys, 281 dst,
228 reg_cfg, 282 sg_dma_len(current_sg),
229 !next_lli_phys, 283 psize,
230 data_width, 284 l_phys,
231 target == dst); 285 reg_cfg,
232 if (err) 286 sg_len - 1 == i,
233 goto err; 287 data_width1,
288 data_width2,
289 target == dst);
290 if (lli == NULL)
291 return -EINVAL;
234 } 292 }
235 293
236 return total_size; 294 return total_size;
237err:
238 return err;
239} 295}
240 296
241 297
@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
315 writel(lli_dst->lcsp13, &lcla[1].lcsp13); 371 writel(lli_dst->lcsp13, &lcla[1].lcsp13);
316} 372}
317 373
318void d40_log_fill_lli(struct d40_log_lli *lli, 374static void d40_log_fill_lli(struct d40_log_lli *lli,
319 dma_addr_t data, u32 data_size, 375 dma_addr_t data, u32 data_size,
320 u32 reg_cfg, 376 u32 reg_cfg,
321 u32 data_width, 377 u32 data_width,
322 bool addr_inc) 378 bool addr_inc)
323{ 379{
324 lli->lcsp13 = reg_cfg; 380 lli->lcsp13 = reg_cfg;
325 381
326 /* The number of elements to transfer */ 382 /* The number of elements to transfer */
327 lli->lcsp02 = ((data_size >> data_width) << 383 lli->lcsp02 = ((data_size >> data_width) <<
328 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; 384 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
385
386 BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
387
329 /* 16 LSBs address of the current element */ 388 /* 16 LSBs address of the current element */
330 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; 389 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
331 /* 16 MSBs address of the current element */ 390 /* 16 MSBs address of the current element */
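
The new BUG_ON guards exactly the field packed two lines earlier: the element count must fit in the 16-bit ECNT field. A worked example of the packing, with register-layout values assumed from ste_dma40_ll.h (ECNT in bits 31:16, low address half in bits 15:0):

#include <stdio.h>
#include <stdint.h>

#define D40_MEM_LCSP0_ECNT_POS  16
#define D40_MEM_LCSP0_ECNT_MASK 0xFFFF0000u
#define D40_MEM_LCSP0_SPTR_MASK 0x0000FFFFu

int main(void)
{
        uint32_t data = 0x20001000;     /* element address */
        uint32_t size = 4096;           /* bytes */
        uint32_t width = 2;             /* log2(4): 32-bit elements */
        uint32_t lcsp02;

        lcsp02 = ((size >> width) << D40_MEM_LCSP0_ECNT_POS)
                 & D40_MEM_LCSP0_ECNT_MASK;     /* 1024 elements */
        lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;

        printf("lcsp02 = 0x%08x\n", lcsp02);    /* 0x04001000 */
        return 0;
}
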
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
348 int total_size = 0; 407 int total_size = 0;
349 struct scatterlist *current_sg = sg; 408 struct scatterlist *current_sg = sg;
350 int i; 409 int i;
410 struct d40_log_lli *lli_src = lli->src;
411 struct d40_log_lli *lli_dst = lli->dst;
351 412
352 for_each_sg(sg, current_sg, sg_len, i) { 413 for_each_sg(sg, current_sg, sg_len, i) {
353 total_size += sg_dma_len(current_sg); 414 total_size += sg_dma_len(current_sg);
354 415
355 if (direction == DMA_TO_DEVICE) { 416 if (direction == DMA_TO_DEVICE) {
356 d40_log_fill_lli(&lli->src[i], 417 lli_src =
357 sg_phys(current_sg), 418 d40_log_buf_to_lli(lli_src,
358 sg_dma_len(current_sg), 419 sg_phys(current_sg),
359 lcsp->lcsp1, src_data_width, 420 sg_dma_len(current_sg),
360 true); 421 lcsp->lcsp1, src_data_width,
361 d40_log_fill_lli(&lli->dst[i], 422 dst_data_width,
362 dev_addr, 423 true);
363 sg_dma_len(current_sg), 424 lli_dst =
364 lcsp->lcsp3, dst_data_width, 425 d40_log_buf_to_lli(lli_dst,
365 false); 426 dev_addr,
427 sg_dma_len(current_sg),
428 lcsp->lcsp3, dst_data_width,
429 src_data_width,
430 false);
366 } else { 431 } else {
367 d40_log_fill_lli(&lli->dst[i], 432 lli_dst =
368 sg_phys(current_sg), 433 d40_log_buf_to_lli(lli_dst,
369 sg_dma_len(current_sg), 434 sg_phys(current_sg),
370 lcsp->lcsp3, dst_data_width, 435 sg_dma_len(current_sg),
371 true); 436 lcsp->lcsp3, dst_data_width,
372 d40_log_fill_lli(&lli->src[i], 437 src_data_width,
373 dev_addr, 438 true);
374 sg_dma_len(current_sg), 439 lli_src =
375 lcsp->lcsp1, src_data_width, 440 d40_log_buf_to_lli(lli_src,
376 false); 441 dev_addr,
442 sg_dma_len(current_sg),
443 lcsp->lcsp1, src_data_width,
444 dst_data_width,
445 false);
377 } 446 }
378 } 447 }
379 return total_size; 448 return total_size;
380} 449}
381 450
451struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
452 dma_addr_t addr,
453 int size,
454 u32 lcsp13, /* src or dst*/
455 u32 data_width1,
456 u32 data_width2,
457 bool addr_inc)
458{
459 struct d40_log_lli *lli = lli_sg;
460 int size_rest = size;
461 int size_seg = 0;
462
463 do {
464 size_seg = d40_seg_size(size_rest, data_width1, data_width2);
465 size_rest -= size_seg;
466
467 d40_log_fill_lli(lli,
468 addr,
469 size_seg,
470 lcsp13, data_width1,
471 addr_inc);
472 if (addr_inc)
473 addr += size_seg;
474 lli++;
475 } while (size_rest);
476
477 return lli;
478}
479
382int d40_log_sg_to_lli(struct scatterlist *sg, 480int d40_log_sg_to_lli(struct scatterlist *sg,
383 int sg_len, 481 int sg_len,
384 struct d40_log_lli *lli_sg, 482 struct d40_log_lli *lli_sg,
385 u32 lcsp13, /* src or dst*/ 483 u32 lcsp13, /* src or dst*/
386 u32 data_width) 484 u32 data_width1, u32 data_width2)
387{ 485{
388 int total_size = 0; 486 int total_size = 0;
389 struct scatterlist *current_sg = sg; 487 struct scatterlist *current_sg = sg;
390 int i; 488 int i;
489 struct d40_log_lli *lli = lli_sg;
391 490
392 for_each_sg(sg, current_sg, sg_len, i) { 491 for_each_sg(sg, current_sg, sg_len, i) {
393 total_size += sg_dma_len(current_sg); 492 total_size += sg_dma_len(current_sg);
394 493 lli = d40_log_buf_to_lli(lli,
395 d40_log_fill_lli(&lli_sg[i], 494 sg_phys(current_sg),
396 sg_phys(current_sg), 495 sg_dma_len(current_sg),
397 sg_dma_len(current_sg), 496 lcsp13,
398 lcsp13, data_width, 497 data_width1, data_width2, true);
399 true);
400 } 498 }
401 return total_size; 499 return total_size;
402} 500}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b907544..9cc43495bea2 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
292 struct d40_phy_lli *lli, 292 struct d40_phy_lli *lli,
293 dma_addr_t lli_phys, 293 dma_addr_t lli_phys,
294 u32 reg_cfg, 294 u32 reg_cfg,
295 u32 data_width, 295 u32 data_width1,
296 u32 data_width2,
296 int psize); 297 int psize);
297 298
298int d40_phy_fill_lli(struct d40_phy_lli *lli, 299struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
299 dma_addr_t data, 300 dma_addr_t data,
300 u32 data_size, 301 u32 data_size,
301 int psize, 302 int psize,
302 dma_addr_t next_lli, 303 dma_addr_t next_lli,
303 u32 reg_cfg, 304 u32 reg_cfg,
304 bool term_int, 305 bool term_int,
305 u32 data_width, 306 u32 data_width1,
306 bool is_device); 307 u32 data_width2,
308 bool is_device);
307 309
308void d40_phy_lli_write(void __iomem *virtbase, 310void d40_phy_lli_write(void __iomem *virtbase,
309 u32 phy_chan_num, 311 u32 phy_chan_num,
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase,
312 314
313/* Logical channels */ 315/* Logical channels */
314 316
315void d40_log_fill_lli(struct d40_log_lli *lli, 317struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
316 dma_addr_t data, 318 dma_addr_t addr,
317 u32 data_size, 319 int size,
318 u32 reg_cfg, 320 u32 lcsp13, /* src or dst*/
319 u32 data_width, 321 u32 data_width1, u32 data_width2,
320 bool addr_inc); 322 bool addr_inc);
321 323
322int d40_log_sg_to_dev(struct scatterlist *sg, 324int d40_log_sg_to_dev(struct scatterlist *sg,
323 int sg_len, 325 int sg_len,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
332 int sg_len, 334 int sg_len,
333 struct d40_log_lli *lli_sg, 335 struct d40_log_lli *lli_sg,
334 u32 lcsp13, /* src or dst*/ 336 u32 lcsp13, /* src or dst*/
335 u32 data_width); 337 u32 data_width1, u32 data_width2);
336 338
337void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 339void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
338 struct d40_log_lli *lli_dst, 340 struct d40_log_lli *lli_dst,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e32573b3a3..01bffc4412d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@ enum nouveau_flags {
160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
161#define NVOBJ_FLAG_ZERO_FREE (1 << 2) 161#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
162#define NVOBJ_FLAG_VM (1 << 3) 162#define NVOBJ_FLAG_VM (1 << 3)
163#define NVOBJ_FLAG_VM_USER (1 << 4)
163 164
164#define NVOBJ_CINST_GLOBAL 0xdeadbeef 165#define NVOBJ_CINST_GLOBAL 0xdeadbeef
165 166
@@ -1576,6 +1577,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
1576 dev->pdev->subsystem_device == sub_device; 1577 dev->pdev->subsystem_device == sub_device;
1577} 1578}
1578 1579
1580/* returns 1 if device is one of the nv4x using the 0x4497 object class,
1581 * helpful to determine a number of other hardware features
1582 */
1583static inline int
1584nv44_graph_class(struct drm_device *dev)
1585{
1586 struct drm_nouveau_private *dev_priv = dev->dev_private;
1587
1588 if ((dev_priv->chipset & 0xf0) == 0x60)
1589 return 1;
1590
1591 return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
1592}
1593
1579/* memory type/access flags, do not match hardware values */ 1594/* memory type/access flags, do not match hardware values */
1580#define NV_MEM_ACCESS_RO 1 1595#define NV_MEM_ACCESS_RO 1
1581#define NV_MEM_ACCESS_WO 2 1596#define NV_MEM_ACCESS_WO 2
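
The bitmask in nv44_graph_class() is compact enough to be easy to misread: for a 0x4x chipset, a clear bit (chipset & 0x0f) in 0x0baf selects the 0x4497 class. Since this is pure arithmetic, it can be checked standalone:

#include <stdio.h>

static int nv44_graph_class(unsigned chipset)
{
        if ((chipset & 0xf0) == 0x60)
                return 1;
        return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
        unsigned c;

        for (c = 0x40; c <= 0x4f; c++)
                printf("0x%02x -> %s\n", c,
                       nv44_graph_class(c) ? "0x4497" : "0x4097");
        return 0;
}

Note that the converted call sites flip their sense accordingly: the old nv40_graph_4097() in nv40_grctx.c answered the opposite question, so every `if (nv40_graph_4097(...))` below becomes `if (!nv44_graph_class(...))` and vice versa.
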
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6d56a54b6e2e..60769d2f9a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,8 +352,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
352 FBINFO_HWACCEL_IMAGEBLIT; 352 FBINFO_HWACCEL_IMAGEBLIT;
353 info->flags |= FBINFO_CAN_FORCE_OUTPUT; 353 info->flags |= FBINFO_CAN_FORCE_OUTPUT;
354 info->fbops = &nouveau_fbcon_sw_ops; 354 info->fbops = &nouveau_fbcon_sw_ops;
355 info->fix.smem_start = dev->mode_config.fb_base + 355 info->fix.smem_start = nvbo->bo.mem.bus.base +
356 (nvbo->bo.mem.start << PAGE_SHIFT); 356 nvbo->bo.mem.bus.offset;
357 info->fix.smem_len = size; 357 info->fix.smem_len = size;
358 358
359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); 359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb104bb..26347b7cd872 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
742{ 742{
743 struct nouveau_mm *mm = man->priv; 743 struct nouveau_mm *mm = man->priv;
744 struct nouveau_mm_node *r; 744 struct nouveau_mm_node *r;
745 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; 745 u32 total = 0, free = 0;
746 int i;
747 746
748 mutex_lock(&mm->mutex); 747 mutex_lock(&mm->mutex);
749 list_for_each_entry(r, &mm->nodes, nl_entry) { 748 list_for_each_entry(r, &mm->nodes, nl_entry) {
750 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", 749 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
751 prefix, r->free ? "free" : "used", r->type, 750 prefix, r->type, ((u64)r->offset << 12),
752 ((u64)r->offset << 12),
753 (((u64)r->offset + r->length) << 12)); 751 (((u64)r->offset + r->length) << 12));
752
754 total += r->length; 753 total += r->length;
755 ttotal[r->type] += r->length; 754 if (!r->type)
756 if (r->free) 755 free += r->length;
757 tfree[r->type] += r->length;
758 else
759 tused[r->type] += r->length;
760 } 756 }
761 mutex_unlock(&mm->mutex); 757 mutex_unlock(&mm->mutex);
762 758
763 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); 759 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
764 for (i = 0; i < 3; i++) { 760 prefix, (u64)total << 12, (u64)free << 12);
765 printk(KERN_DEBUG "%s type %d: 0x%010llx, " 761 printk(KERN_DEBUG "%s block: 0x%08x\n",
766 "used 0x%010llx, free 0x%010llx\n", prefix, 762 prefix, mm->block_size << 12);
767 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
768 }
769} 763}
770 764
771const struct ttm_mem_type_manager_func nouveau_vram_manager = { 765const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11eb701b..8844b50c3e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
48 48
49 b->offset = a->offset; 49 b->offset = a->offset;
50 b->length = size; 50 b->length = size;
51 b->free = a->free;
52 b->type = a->type; 51 b->type = a->type;
53 a->offset += size; 52 a->offset += size;
54 a->length -= size; 53 a->length -= size;
55 list_add_tail(&b->nl_entry, &a->nl_entry); 54 list_add_tail(&b->nl_entry, &a->nl_entry);
56 if (b->free) 55 if (b->type == 0)
57 list_add_tail(&b->fl_entry, &a->fl_entry); 56 list_add_tail(&b->fl_entry, &a->fl_entry);
58 return b; 57 return b;
59} 58}
60 59
61static struct nouveau_mm_node * 60#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
62nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 61 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
63{
64 struct nouveau_mm_node *prev, *next;
65
66 /* try to merge with free adjacent entries of same type */
67 prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
68 if (this->nl_entry.prev != &rmm->nodes) {
69 if (prev->free && prev->type == this->type) {
70 prev->length += this->length;
71 region_put(rmm, this);
72 this = prev;
73 }
74 }
75
76 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
77 if (this->nl_entry.next != &rmm->nodes) {
78 if (next->free && next->type == this->type) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(rmm, this);
82 this = next;
83 }
84 }
85
86 return this;
87}
88 62
89void 63void
90nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 64nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
91{ 65{
92 u32 block_s, block_l; 66 struct nouveau_mm_node *prev = node(this, prev);
67 struct nouveau_mm_node *next = node(this, next);
93 68
94 this->free = true;
95 list_add(&this->fl_entry, &rmm->free); 69 list_add(&this->fl_entry, &rmm->free);
96 this = nouveau_mm_merge(rmm, this); 70 this->type = 0;
97
98 /* any entirely free blocks now? we'll want to remove typing
99 * on them now so they can be use for any memory allocation
100 */
101 block_s = roundup(this->offset, rmm->block_size);
102 if (block_s + rmm->block_size > this->offset + this->length)
103 return;
104 71
105 /* split off any still-typed region at the start */ 72 if (prev && prev->type == 0) {
106 if (block_s != this->offset) { 73 prev->length += this->length;
107 if (!region_split(rmm, this, block_s - this->offset)) 74 region_put(rmm, this);
108 return; 75 this = prev;
109 } 76 }
110 77
111 /* split off the soon-to-be-untyped block(s) */ 78 if (next && next->type == 0) {
112 block_l = rounddown(this->length, rmm->block_size); 79 next->offset = this->offset;
113 if (block_l != this->length) { 80 next->length += this->length;
114 this = region_split(rmm, this, block_l); 81 region_put(rmm, this);
115 if (!this)
116 return;
117 } 82 }
118
119 /* mark as having no type, and retry merge with any adjacent
120 * untyped blocks
121 */
122 this->type = 0;
123 nouveau_mm_merge(rmm, this);
124} 83}
125 84
126int 85int
127nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, 86nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
128 u32 align, struct nouveau_mm_node **pnode) 87 u32 align, struct nouveau_mm_node **pnode)
129{ 88{
130 struct nouveau_mm_node *this, *tmp, *next; 89 struct nouveau_mm_node *prev, *this, *next;
131 u32 splitoff, avail, alloc; 90 u32 min = size_nc ? size_nc : size;
132 91 u32 align_mask = align - 1;
133 list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { 92 u32 splitoff;
134 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); 93 u32 s, e;
135 if (this->nl_entry.next == &rmm->nodes) 94
136 next = NULL; 95 list_for_each_entry(this, &rmm->free, fl_entry) {
137 96 e = this->offset + this->length;
138 /* skip wrongly typed blocks */ 97 s = this->offset;
139 if (this->type && this->type != type) 98
99 prev = node(this, prev);
100 if (prev && prev->type != type)
101 s = roundup(s, rmm->block_size);
102
103 next = node(this, next);
104 if (next && next->type != type)
105 e = rounddown(e, rmm->block_size);
106
107 s = (s + align_mask) & ~align_mask;
108 e &= ~align_mask;
109 if (s > e || e - s < min)
140 continue; 110 continue;
141 111
142 /* account for alignment */ 112 splitoff = s - this->offset;
143 splitoff = this->offset & (align - 1); 113 if (splitoff && !region_split(rmm, this, splitoff))
144 if (splitoff) 114 return -ENOMEM;
145 splitoff = align - splitoff;
146
147 if (this->length <= splitoff)
148 continue;
149
150 /* determine total memory available from this, and
151 * the next block (if appropriate)
152 */
153 avail = this->length;
154 if (next && next->free && (!next->type || next->type == type))
155 avail += next->length;
156
157 avail -= splitoff;
158
159 /* determine allocation size */
160 if (size_nc) {
161 alloc = min(avail, size);
162 alloc = rounddown(alloc, size_nc);
163 if (alloc == 0)
164 continue;
165 } else {
166 alloc = size;
167 if (avail < alloc)
168 continue;
169 }
170
171 /* untyped block, split off a chunk that's a multiple
172 * of block_size and type it
173 */
174 if (!this->type) {
175 u32 block = roundup(alloc + splitoff, rmm->block_size);
176 if (this->length < block)
177 continue;
178
179 this = region_split(rmm, this, block);
180 if (!this)
181 return -ENOMEM;
182
183 this->type = type;
184 }
185
186 /* stealing memory from adjacent block */
187 if (alloc > this->length) {
188 u32 amount = alloc - (this->length - splitoff);
189
190 if (!next->type) {
191 amount = roundup(amount, rmm->block_size);
192
193 next = region_split(rmm, next, amount);
194 if (!next)
195 return -ENOMEM;
196
197 next->type = type;
198 }
199
200 this->length += amount;
201 next->offset += amount;
202 next->length -= amount;
203 if (!next->length) {
204 list_del(&next->nl_entry);
205 list_del(&next->fl_entry);
206 kfree(next);
207 }
208 }
209
210 if (splitoff) {
211 if (!region_split(rmm, this, splitoff))
212 return -ENOMEM;
213 }
214 115
215 this = region_split(rmm, this, alloc); 116 this = region_split(rmm, this, min(size, e - s));
216 if (this == NULL) 117 if (!this)
217 return -ENOMEM; 118 return -ENOMEM;
218 119
219 this->free = false; 120 this->type = type;
220 list_del(&this->fl_entry); 121 list_del(&this->fl_entry);
221 *pnode = this; 122 *pnode = this;
222 return 0; 123 return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
234 heap = kzalloc(sizeof(*heap), GFP_KERNEL); 135 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
235 if (!heap) 136 if (!heap)
236 return -ENOMEM; 137 return -ENOMEM;
237 heap->free = true;
238 heap->offset = roundup(offset, block); 138 heap->offset = roundup(offset, block);
239 heap->length = rounddown(offset + length, block) - heap->offset; 139 heap->length = rounddown(offset + length, block) - heap->offset;
240 140
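
The rewritten allocator above reduces the old split/merge/steal logic to one pass: shrink the candidate free range so it does not share a block_size block with differently-typed neighbours, then apply the allocation alignment and check the minimum. A standalone sketch of that range carving, with illustrative numbers and roundup/rounddown matching the kernel macros for positive values:

#include <stdio.h>
#include <stdint.h>

#define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))

int main(void)
{
        uint32_t offset = 0x2300, length = 0x1d00;      /* free node */
        uint32_t block_size = 0x1000, align = 0x100, min = 0x800;
        int prev_diff_type = 1, next_diff_type = 1;     /* neighbours */
        uint32_t s = offset, e = offset + length;

        if (prev_diff_type)
                s = roundup(s, block_size);     /* 0x3000 */
        if (next_diff_type)
                e = rounddown(e, block_size);   /* 0x4000 */

        s = (s + align - 1) & ~(align - 1);
        e &= ~(align - 1);

        if (s > e || e - s < min)
                puts("skip this node");
        else
                printf("carve [0x%x, 0x%x)\n", s, e);   /* [0x3000, 0x4000) */
        return 0;
}
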
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af3844933036..798eaf39691c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
30 struct list_head fl_entry; 30 struct list_head fl_entry;
31 struct list_head rl_entry; 31 struct list_head rl_entry;
32 32
33 bool free; 33 u8 type;
34 int type;
35
36 u32 offset; 34 u32 offset;
37 u32 length; 35 u32 length;
38}; 36};
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a0375a..8870d72388c8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ 451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
452 452
453 /* curie */ 453 /* curie */
454 if (dev_priv->chipset >= 0x60 || 454 if (nv44_graph_class(dev))
455 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
456 NVOBJ_CLASS(dev, 0x4497, GR); 455 NVOBJ_CLASS(dev, 0x4497, GR);
457 else 456 else
458 NVOBJ_CLASS(dev, 0x4097, GR); 457 NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce585093264e..f70447d131d7 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
118 */ 118 */
119 119
120static int 120static int
121nv40_graph_4097(struct drm_device *dev)
122{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124
125 if ((dev_priv->chipset & 0xf0) == 0x60)
126 return 0;
127
128 return !!(0x0baf & (1 << dev_priv->chipset));
129}
130
131static int
132nv40_graph_vs_count(struct drm_device *dev) 121nv40_graph_vs_count(struct drm_device *dev)
133{ 122{
134 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
219 gr_def(ctx, 0x4009dc, 0x80000000); 208 gr_def(ctx, 0x4009dc, 0x80000000);
220 } else { 209 } else {
221 cp_ctx(ctx, 0x400840, 20); 210 cp_ctx(ctx, 0x400840, 20);
222 if (!nv40_graph_4097(ctx->dev)) { 211 if (nv44_graph_class(ctx->dev)) {
223 for (i = 0; i < 8; i++) 212 for (i = 0; i < 8; i++)
224 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); 213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
225 } 214 }
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
228 gr_def(ctx, 0x400888, 0x00000040); 217 gr_def(ctx, 0x400888, 0x00000040);
229 cp_ctx(ctx, 0x400894, 11); 218 cp_ctx(ctx, 0x400894, 11);
230 gr_def(ctx, 0x400894, 0x00000040); 219 gr_def(ctx, 0x400894, 0x00000040);
231 if (nv40_graph_4097(ctx->dev)) { 220 if (!nv44_graph_class(ctx->dev)) {
232 for (i = 0; i < 8; i++) 221 for (i = 0; i < 8; i++)
233 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); 222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
234 } 223 }
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
546static void 535static void
547nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) 536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
548{ 537{
549 int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084; 538 int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
550 539
551 cp_out (ctx, 0x300000); 540 cp_out (ctx, 0x300000);
552 cp_lsr (ctx, len - 4); 541 cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
582 } else { 571 } else {
583 b0_offset = 0x1d40/4; /* 2200 */ 572 b0_offset = 0x1d40/4; /* 2200 */
584 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ 573 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
585 vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4; 574 vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
586 } 575 }
587 576
588 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); 577 cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
589 cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029); 578 cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
590 579
591 offset = ctx->ctxvals_pos; 580 offset = ctx->ctxvals_pos;
592 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); 581 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c12ab6a..03c0d4c3f355 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
6int 6int
7nv40_mc_init(struct drm_device *dev) 7nv40_mc_init(struct drm_device *dev)
8{ 8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t tmp;
11
12 /* Power up everything, resetting each individual unit will 9 /* Power up everything, resetting each individual unit will
13 * be done later if needed. 10 * be done later if needed.
14 */ 11 */
15 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); 12 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
16 13
17 switch (dev_priv->chipset) { 14 if (nv44_graph_class(dev)) {
18 case 0x44: 15 u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
19 case 0x46: /* G72 */
20 case 0x4e:
21 case 0x4c: /* C51_G7X */
22 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
23 nv_wr32(dev, NV40_PMC_1700, tmp); 16 nv_wr32(dev, NV40_PMC_1700, tmp);
24 nv_wr32(dev, NV40_PMC_1704, 0); 17 nv_wr32(dev, NV40_PMC_1704, 0);
25 nv_wr32(dev, NV40_PMC_1708, 0); 18 nv_wr32(dev, NV40_PMC_1708, 0);
26 nv_wr32(dev, NV40_PMC_170C, tmp); 19 nv_wr32(dev, NV40_PMC_170C, tmp);
27 break;
28 default:
29 break;
30 } 20 }
31 21
32 return 0; 22 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd19a4b..ea0041810ae3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
332 gpuobj->vinst = node->vram->offset; 332 gpuobj->vinst = node->vram->offset;
333 333
334 if (gpuobj->flags & NVOBJ_FLAG_VM) { 334 if (gpuobj->flags & NVOBJ_FLAG_VM) {
335 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, 335 u32 flags = NV_MEM_ACCESS_RW;
336 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
337 flags |= NV_MEM_ACCESS_SYS;
338
339 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
337 &node->chan_vma); 340 &node->chan_vma);
338 if (ret) { 341 if (ret) {
339 vram->put(dev, &node->vram); 342 vram->put(dev, &node->vram);
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5d5fa4..e6ea7d83187f 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -105,7 +105,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
105 if (ret) 105 if (ret)
106 return ret; 106 return ret;
107 107
108 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM, 108 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
109 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
109 &grch->unk418810); 110 &grch->unk418810);
110 if (ret) 111 if (ret)
111 return ret; 112 return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251bb0ff4..e4e83c2caf5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
48 phys >>= 8; 48 phys >>= 8;
49 49
50 phys |= 0x00000001; /* present */ 50 phys |= 0x00000001; /* present */
51// if (vma->access & NV_MEM_ACCESS_SYS) 51 if (vma->access & NV_MEM_ACCESS_SYS)
52// phys |= 0x00000002; 52 phys |= 0x00000002;
53 53
54 phys |= ((u64)target << 32); 54 phys |= ((u64)target << 32);
55 phys |= ((u64)memtype << 36); 55 phys |= ((u64)memtype << 36);
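
Together with the NVOBJ_FLAG_VM_USER hunks above, this change makes bit 1 of the PTE meaningful: user-visible objects no longer get NV_MEM_ACCESS_SYS, so the bit stays clear for them. A sketch of the PTE word assembled in this hunk; the NV_MEM_ACCESS_SYS value is assumed (the header hunk above only shows RO=1, WO=2):

#include <stdio.h>
#include <stdint.h>

#define NV_MEM_ACCESS_SYS       4       /* assumed flag value */

static uint64_t nvc0_pte(uint64_t phys, uint32_t memtype, uint32_t target,
                         uint32_t access)
{
        phys >>= 8;

        phys |= 0x00000001;             /* present */
        if (access & NV_MEM_ACCESS_SYS)
                phys |= 0x00000002;     /* system-memory access allowed */

        phys |= (uint64_t)target << 32;
        phys |= (uint64_t)memtype << 36;
        return phys;
}

int main(void)
{
        printf("0x%016llx\n",
               (unsigned long long)nvc0_pte(0x40000000ull, 0xfe, 5,
                                            NV_MEM_ACCESS_SYS));
        return 0;       /* prints 0x00000fe500400003 */
}
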
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fe8ebdcdc0e..a8973acb3987 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3002,31 +3002,6 @@ int evergreen_copy_blit(struct radeon_device *rdev,
3002 return 0; 3002 return 0;
3003} 3003}
3004 3004
3005static bool evergreen_card_posted(struct radeon_device *rdev)
3006{
3007 u32 reg;
3008
3009 /* first check CRTCs */
3010 if (rdev->flags & RADEON_IS_IGP)
3011 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
3012 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3013 else
3014 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
3015 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
3016 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
3017 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
3018 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
3019 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3020 if (reg & EVERGREEN_CRTC_MASTER_EN)
3021 return true;
3022
3023 /* then check MEM_SIZE, in case the crtcs are off */
3024 if (RREG32(CONFIG_MEMSIZE))
3025 return true;
3026
3027 return false;
3028}
3029
3030/* Plan is to move initialization in that function and use 3005/* Plan is to move initialization in that function and use
3031 * helper function so that radeon_device_init pretty much 3006 * helper function so that radeon_device_init pretty much
3032 * do nothing more than calling asic specific function. This 3007 * do nothing more than calling asic specific function. This
@@ -3063,7 +3038,7 @@ int evergreen_init(struct radeon_device *rdev)
3063 if (radeon_asic_reset(rdev)) 3038 if (radeon_asic_reset(rdev))
3064 dev_warn(rdev->dev, "GPU reset failed !\n"); 3039 dev_warn(rdev->dev, "GPU reset failed !\n");
3065 /* Post card if necessary */ 3040 /* Post card if necessary */
3066 if (!evergreen_card_posted(rdev)) { 3041 if (!radeon_card_posted(rdev)) {
3067 if (!rdev->bios) { 3042 if (!rdev->bios) {
3068 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3043 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3069 return -EINVAL; 3044 return -EINVAL;
@@ -3158,6 +3133,9 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3158{ 3133{
3159 u32 link_width_cntl, speed_cntl; 3134 u32 link_width_cntl, speed_cntl;
3160 3135
3136 if (radeon_pcie_gen2 == 0)
3137 return;
3138
3161 if (rdev->flags & RADEON_IS_IGP) 3139 if (rdev->flags & RADEON_IS_IGP)
3162 return; 3140 return;
3163 3141
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f637595b14e1..46da5142b131 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2086,12 +2086,13 @@ int r100_asic_reset(struct radeon_device *rdev)
2086{ 2086{
2087 struct r100_mc_save save; 2087 struct r100_mc_save save;
2088 u32 status, tmp; 2088 u32 status, tmp;
2089 int ret = 0;
2089 2090
2090 r100_mc_stop(rdev, &save);
2091 status = RREG32(R_000E40_RBBM_STATUS); 2091 status = RREG32(R_000E40_RBBM_STATUS);
2092 if (!G_000E40_GUI_ACTIVE(status)) { 2092 if (!G_000E40_GUI_ACTIVE(status)) {
2093 return 0; 2093 return 0;
2094 } 2094 }
2095 r100_mc_stop(rdev, &save);
2095 status = RREG32(R_000E40_RBBM_STATUS); 2096 status = RREG32(R_000E40_RBBM_STATUS);
2096 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2097 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2097 /* stop CP */ 2098 /* stop CP */
@@ -2131,11 +2132,11 @@ int r100_asic_reset(struct radeon_device *rdev)
2131 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2132 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2132 dev_err(rdev->dev, "failed to reset GPU\n"); 2133 dev_err(rdev->dev, "failed to reset GPU\n");
2133 rdev->gpu_lockup = true; 2134 rdev->gpu_lockup = true;
2134 return -1; 2135 ret = -1;
2135 } 2136 } else
2137 dev_info(rdev->dev, "GPU reset succeed\n");
2136 r100_mc_resume(rdev, &save); 2138 r100_mc_resume(rdev, &save);
2137 dev_info(rdev->dev, "GPU reset succeed\n"); 2139 return ret;
2138 return 0;
2139} 2140}
2140 2141
2141void r100_set_common_regs(struct radeon_device *rdev) 2142void r100_set_common_regs(struct radeon_device *rdev)
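
The r100, r300 and rs600 hunks make the same two fixes: the cheap GUI-idle early-out now runs before the MC clients are stopped, and a failed reset falls through so the resume call is never skipped. The control-flow shape, with stand-in helpers:

#include <stdio.h>

static int gui_active(void) { return 1; }       /* pretend it is busy */
static int still_busy(void) { return 0; }       /* pretend reset worked */
static void stop(void)   { puts("mc stop"); }
static void resume(void) { puts("mc resume"); }

static int asic_reset(void)
{
        int ret = 0;

        if (!gui_active())
                return 0;       /* nothing to do, nothing was stopped */

        stop();
        /* ... actual reset sequence elided ... */

        if (still_busy())
                ret = -1;       /* failed, but fall through */
        else
                puts("GPU reset succeed");

        resume();               /* always undo stop() */
        return ret;
}

int main(void) { return asic_reset() ? 1 : 0; }
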
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fae5e709f270..cf862ca580bf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -405,12 +405,13 @@ int r300_asic_reset(struct radeon_device *rdev)
405{ 405{
406 struct r100_mc_save save; 406 struct r100_mc_save save;
407 u32 status, tmp; 407 u32 status, tmp;
408 int ret = 0;
408 409
409 r100_mc_stop(rdev, &save);
410 status = RREG32(R_000E40_RBBM_STATUS); 410 status = RREG32(R_000E40_RBBM_STATUS);
411 if (!G_000E40_GUI_ACTIVE(status)) { 411 if (!G_000E40_GUI_ACTIVE(status)) {
412 return 0; 412 return 0;
413 } 413 }
414 r100_mc_stop(rdev, &save);
414 status = RREG32(R_000E40_RBBM_STATUS); 415 status = RREG32(R_000E40_RBBM_STATUS);
415 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 416 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
416 /* stop CP */ 417 /* stop CP */
@@ -451,11 +452,11 @@ int r300_asic_reset(struct radeon_device *rdev)
451 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 452 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
452 dev_err(rdev->dev, "failed to reset GPU\n"); 453 dev_err(rdev->dev, "failed to reset GPU\n");
453 rdev->gpu_lockup = true; 454 rdev->gpu_lockup = true;
454 return -1; 455 ret = -1;
455 } 456 } else
457 dev_info(rdev->dev, "GPU reset succeed\n");
456 r100_mc_resume(rdev, &save); 458 r100_mc_resume(rdev, &save);
457 dev_info(rdev->dev, "GPU reset succeed\n"); 459 return ret;
458 return 0;
459} 460}
460 461
461/* 462/*
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6b50716267c0..aca2236268fa 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2358,24 +2358,6 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2358 /* FIXME: implement */ 2358 /* FIXME: implement */
2359} 2359}
2360 2360
2361
2362bool r600_card_posted(struct radeon_device *rdev)
2363{
2364 uint32_t reg;
2365
2366 /* first check CRTCs */
2367 reg = RREG32(D1CRTC_CONTROL) |
2368 RREG32(D2CRTC_CONTROL);
2369 if (reg & CRTC_EN)
2370 return true;
2371
2372 /* then check MEM_SIZE, in case the crtcs are off */
2373 if (RREG32(CONFIG_MEMSIZE))
2374 return true;
2375
2376 return false;
2377}
2378
2379int r600_startup(struct radeon_device *rdev) 2361int r600_startup(struct radeon_device *rdev)
2380{ 2362{
2381 int r; 2363 int r;
@@ -2536,7 +2518,7 @@ int r600_init(struct radeon_device *rdev)
2536 if (r) 2518 if (r)
2537 return r; 2519 return r;
2538 /* Post card if necessary */ 2520 /* Post card if necessary */
2539 if (!r600_card_posted(rdev)) { 2521 if (!radeon_card_posted(rdev)) {
2540 if (!rdev->bios) { 2522 if (!rdev->bios) {
2541 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 2523 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2542 return -EINVAL; 2524 return -EINVAL;
@@ -3658,6 +3640,9 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3658 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 3640 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3659 u16 link_cntl2; 3641 u16 link_cntl2;
3660 3642
3643 if (radeon_pcie_gen2 == 0)
3644 return;
3645
3661 if (rdev->flags & RADEON_IS_IGP) 3646 if (rdev->flags & RADEON_IS_IGP)
3662 return; 3647 return;
3663 3648
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e9486630a467..71d2a554bbe6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -92,6 +92,7 @@ extern int radeon_tv;
92extern int radeon_audio; 92extern int radeon_audio;
93extern int radeon_disp_priority; 93extern int radeon_disp_priority;
94extern int radeon_hw_i2c; 94extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2;
95 96
96/* 97/*
97 * Copy from radeon_drv.h so we don't have to include both and have conflicting 98 * Copy from radeon_drv.h so we don't have to include both and have conflicting
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index be5cb4f28c29..d5680a0c87af 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -104,6 +104,7 @@ int radeon_tv = 1;
104int radeon_audio = 1; 104int radeon_audio = 1;
105int radeon_disp_priority = 0; 105int radeon_disp_priority = 0;
106int radeon_hw_i2c = 0; 106int radeon_hw_i2c = 0;
107int radeon_pcie_gen2 = 0;
107 108
108MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 109MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
109module_param_named(no_wb, radeon_no_wb, int, 0444); 110module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
147MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); 148MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
148module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); 149module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
149 150
151MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
152module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
153
150static int radeon_suspend(struct drm_device *dev, pm_message_t state) 154static int radeon_suspend(struct drm_device *dev, pm_message_t state)
151{ 155{
152 drm_radeon_private_t *dev_priv = dev->dev_private; 156 drm_radeon_private_t *dev_priv = dev->dev_private;
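
The new knob follows the stock module-parameter pattern: a 0444 (read-only via sysfs) int that each *_pcie_gen2_enable() path checks before touching the link. A toy module with the same shape; all names here are made up for illustration:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_gen2;   /* 0 = leave the link alone (default) */
MODULE_PARM_DESC(gen2, "Gen2 mode (1 = enable)");
module_param_named(gen2, demo_gen2, int, 0444);

static void demo_gen2_enable(void)
{
        if (demo_gen2 == 0)
                return;         /* feature stays off unless requested */

        pr_info("demo: enabling gen2 link speed\n");
        /* ... hardware pokes would go here ... */
}

static int __init demo_init(void)
{
        demo_gen2_enable();
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void) { }
module_exit(demo_exit);

MODULE_LICENSE("GPL");
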
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd39d787..9177f9191837 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@ evergreen 0x9400
4390x000286EC SPI_COMPUTE_NUM_THREAD_X 4390x000286EC SPI_COMPUTE_NUM_THREAD_X
4400x000286F0 SPI_COMPUTE_NUM_THREAD_Y 4400x000286F0 SPI_COMPUTE_NUM_THREAD_Y
4410x000286F4 SPI_COMPUTE_NUM_THREAD_Z 4410x000286F4 SPI_COMPUTE_NUM_THREAD_Z
4420x000286F8 GDS_ADDR_SIZE 4420x00028724 GDS_ADDR_SIZE
4430x00028780 CB_BLEND0_CONTROL 4430x00028780 CB_BLEND0_CONTROL
4440x00028784 CB_BLEND1_CONTROL 4440x00028784 CB_BLEND1_CONTROL
4450x00028788 CB_BLEND2_CONTROL 4450x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b4192acaab5f..5afe294ed51f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
339 339
340int rs600_asic_reset(struct radeon_device *rdev) 340int rs600_asic_reset(struct radeon_device *rdev)
341{ 341{
342 u32 status, tmp;
343
344 struct rv515_mc_save save; 342 struct rv515_mc_save save;
343 u32 status, tmp;
344 int ret = 0;
345 345
346 /* Stops all mc clients */
347 rv515_mc_stop(rdev, &save);
348 status = RREG32(R_000E40_RBBM_STATUS); 346 status = RREG32(R_000E40_RBBM_STATUS);
349 if (!G_000E40_GUI_ACTIVE(status)) { 347 if (!G_000E40_GUI_ACTIVE(status)) {
350 return 0; 348 return 0;
351 } 349 }
350 /* Stops all mc clients */
351 rv515_mc_stop(rdev, &save);
352 status = RREG32(R_000E40_RBBM_STATUS); 352 status = RREG32(R_000E40_RBBM_STATUS);
353 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 353 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
354 /* stop CP */ 354 /* stop CP */
@@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
392 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 392 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
393 dev_err(rdev->dev, "failed to reset GPU\n"); 393 dev_err(rdev->dev, "failed to reset GPU\n");
394 rdev->gpu_lockup = true; 394 rdev->gpu_lockup = true;
395 return -1; 395 ret = -1;
396 } 396 } else
397 dev_info(rdev->dev, "GPU reset succeed\n");
397 rv515_mc_resume(rdev, &save); 398 rv515_mc_resume(rdev, &save);
398 dev_info(rdev->dev, "GPU reset succeed\n"); 399 return ret;
399 return 0;
400} 400}
401 401
402/* 402/*
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a264aa3a79a..491dc9000655 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1268,7 +1268,7 @@ int rv770_init(struct radeon_device *rdev)
1268 if (r) 1268 if (r)
1269 return r; 1269 return r;
1270 /* Post card if necessary */ 1270 /* Post card if necessary */
1271 if (!r600_card_posted(rdev)) { 1271 if (!radeon_card_posted(rdev)) {
1272 if (!rdev->bios) { 1272 if (!rdev->bios) {
1273 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 1273 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1274 return -EINVAL; 1274 return -EINVAL;
@@ -1372,6 +1372,9 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1372 u32 link_width_cntl, lanes, speed_cntl, tmp; 1372 u32 link_width_cntl, lanes, speed_cntl, tmp;
1373 u16 link_cntl2; 1373 u16 link_cntl2;
1374 1374
1375 if (radeon_pcie_gen2 == 0)
1376 return;
1377
1375 if (rdev->flags & RADEON_IS_IGP) 1378 if (rdev->flags & RADEON_IS_IGP)
1376 return; 1379 return;
1377 1380
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index b1f768917395..77414702cb00 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,9 +53,10 @@ config MTD_PARTITIONS
53 devices. Partitioning on NFTL 'devices' is a different - that's the 53 devices. Partitioning on NFTL 'devices' is a different - that's the
54 'normal' form of partitioning used on a block device. 54 'normal' form of partitioning used on a block device.
55 55
56if MTD_PARTITIONS
57
56config MTD_REDBOOT_PARTS 58config MTD_REDBOOT_PARTS
57 tristate "RedBoot partition table parsing" 59 tristate "RedBoot partition table parsing"
58 depends on MTD_PARTITIONS
59 ---help--- 60 ---help---
60 RedBoot is a ROM monitor and bootloader which deals with multiple 61 RedBoot is a ROM monitor and bootloader which deals with multiple
61 'images' in flash devices by putting a table one of the erase 62 'images' in flash devices by putting a table one of the erase
@@ -72,9 +73,10 @@ config MTD_REDBOOT_PARTS
72 SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for 73 SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
73 example. 74 example.
74 75
76if MTD_REDBOOT_PARTS
77
75config MTD_REDBOOT_DIRECTORY_BLOCK 78config MTD_REDBOOT_DIRECTORY_BLOCK
76 int "Location of RedBoot partition table" 79 int "Location of RedBoot partition table"
77 depends on MTD_REDBOOT_PARTS
78 default "-1" 80 default "-1"
79 ---help--- 81 ---help---
80 This option is the Linux counterpart to the 82 This option is the Linux counterpart to the
@@ -91,18 +93,18 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
91 93
92config MTD_REDBOOT_PARTS_UNALLOCATED 94config MTD_REDBOOT_PARTS_UNALLOCATED
93 bool "Include unallocated flash regions" 95 bool "Include unallocated flash regions"
94 depends on MTD_REDBOOT_PARTS
95 help 96 help
96 If you need to register each unallocated flash region as a MTD 97 If you need to register each unallocated flash region as a MTD
97 'partition', enable this option. 98 'partition', enable this option.
98 99
99config MTD_REDBOOT_PARTS_READONLY 100config MTD_REDBOOT_PARTS_READONLY
100 bool "Force read-only for RedBoot system images" 101 bool "Force read-only for RedBoot system images"
101 depends on MTD_REDBOOT_PARTS
102 help 102 help
103 If you need to force read-only for 'RedBoot', 'RedBoot Config' and 103 If you need to force read-only for 'RedBoot', 'RedBoot Config' and
104 'FIS directory' images, enable this option. 104 'FIS directory' images, enable this option.
105 105
106endif # MTD_REDBOOT_PARTS
107
106config MTD_CMDLINE_PARTS 108config MTD_CMDLINE_PARTS
107 bool "Command line partition table parsing" 109 bool "Command line partition table parsing"
108 depends on MTD_PARTITIONS = "y" && MTD = "y" 110 depends on MTD_PARTITIONS = "y" && MTD = "y"
@@ -142,7 +144,7 @@ config MTD_CMDLINE_PARTS
142 144
143config MTD_AFS_PARTS 145config MTD_AFS_PARTS
144 tristate "ARM Firmware Suite partition parsing" 146 tristate "ARM Firmware Suite partition parsing"
145 depends on ARM && MTD_PARTITIONS 147 depends on ARM
146 ---help--- 148 ---help---
147 The ARM Firmware Suite allows the user to divide flash devices into 149 The ARM Firmware Suite allows the user to divide flash devices into
148 multiple 'images'. Each such image has a header containing its name 150 multiple 'images'. Each such image has a header containing its name
@@ -158,8 +160,8 @@ config MTD_AFS_PARTS
158 example. 160 example.
159 161
160config MTD_OF_PARTS 162config MTD_OF_PARTS
161 tristate "Flash partition map based on OF description" 163 def_bool y
162 depends on OF && MTD_PARTITIONS 164 depends on OF
163 help 165 help
164 This provides a partition parsing function which derives 166 This provides a partition parsing function which derives
165 the partition map from the children of the flash node, 167 the partition map from the children of the flash node,
@@ -167,10 +169,11 @@ config MTD_OF_PARTS
167 169
168config MTD_AR7_PARTS 170config MTD_AR7_PARTS
169 tristate "TI AR7 partitioning support" 171 tristate "TI AR7 partitioning support"
170 depends on MTD_PARTITIONS
171 ---help--- 172 ---help---
172 TI AR7 partitioning support 173 TI AR7 partitioning support
173 174
175endif # MTD_PARTITIONS
176
174comment "User Modules And Translation Layers" 177comment "User Modules And Translation Layers"
175 178
176config MTD_CHAR 179config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 760abc533395..d4e7f25b1ebb 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,13 +6,13 @@
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o 7mtd-y := mtdcore.o mtdsuper.o
8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o 8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
9mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
9 10
10obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o 11obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 12obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
12obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 13obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
13obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 14obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
14obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o 15obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
15obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
16 16
17# 'Users' - code which presents functionality to userspace. 17# 'Users' - code which presents functionality to userspace.
18obj-$(CONFIG_MTD_CHAR) += mtdchar.o 18obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ad9268b44416..a8c3e1c9b02a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -162,7 +162,7 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
162#endif 162#endif
163 163
164/* Atmel chips don't use the same PRI format as Intel chips */ 164/* Atmel chips don't use the same PRI format as Intel chips */
165static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 165static void fixup_convert_atmel_pri(struct mtd_info *mtd)
166{ 166{
167 struct map_info *map = mtd->priv; 167 struct map_info *map = mtd->priv;
168 struct cfi_private *cfi = map->fldrv_priv; 168 struct cfi_private *cfi = map->fldrv_priv;
@@ -202,7 +202,7 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
202 cfi->cfiq->BufWriteTimeoutMax = 0; 202 cfi->cfiq->BufWriteTimeoutMax = 0;
203} 203}
204 204
205static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param) 205static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
206{ 206{
207 struct map_info *map = mtd->priv; 207 struct map_info *map = mtd->priv;
208 struct cfi_private *cfi = map->fldrv_priv; 208 struct cfi_private *cfi = map->fldrv_priv;
@@ -214,7 +214,7 @@ static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
214 214
215#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 215#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
216/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 216/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
217static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 217static void fixup_intel_strataflash(struct mtd_info *mtd)
218{ 218{
219 struct map_info *map = mtd->priv; 219 struct map_info *map = mtd->priv;
220 struct cfi_private *cfi = map->fldrv_priv; 220 struct cfi_private *cfi = map->fldrv_priv;
@@ -227,7 +227,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
227#endif 227#endif
228 228
229#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND 229#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
230static void fixup_no_write_suspend(struct mtd_info *mtd, void* param) 230static void fixup_no_write_suspend(struct mtd_info *mtd)
231{ 231{
232 struct map_info *map = mtd->priv; 232 struct map_info *map = mtd->priv;
233 struct cfi_private *cfi = map->fldrv_priv; 233 struct cfi_private *cfi = map->fldrv_priv;
@@ -240,7 +240,7 @@ static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
240} 240}
241#endif 241#endif
242 242
243static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param) 243static void fixup_st_m28w320ct(struct mtd_info *mtd)
244{ 244{
245 struct map_info *map = mtd->priv; 245 struct map_info *map = mtd->priv;
246 struct cfi_private *cfi = map->fldrv_priv; 246 struct cfi_private *cfi = map->fldrv_priv;
@@ -249,7 +249,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
249 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */ 249 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
250} 250}
251 251
252static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param) 252static void fixup_st_m28w320cb(struct mtd_info *mtd)
253{ 253{
254 struct map_info *map = mtd->priv; 254 struct map_info *map = mtd->priv;
255 struct cfi_private *cfi = map->fldrv_priv; 255 struct cfi_private *cfi = map->fldrv_priv;
@@ -259,7 +259,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
259 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 259 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
260}; 260};
261 261
262static void fixup_use_point(struct mtd_info *mtd, void *param) 262static void fixup_use_point(struct mtd_info *mtd)
263{ 263{
264 struct map_info *map = mtd->priv; 264 struct map_info *map = mtd->priv;
265 if (!mtd->point && map_is_linear(map)) { 265 if (!mtd->point && map_is_linear(map)) {
@@ -268,7 +268,7 @@ static void fixup_use_point(struct mtd_info *mtd, void *param)
268 } 268 }
269} 269}
270 270
271static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 271static void fixup_use_write_buffers(struct mtd_info *mtd)
272{ 272{
273 struct map_info *map = mtd->priv; 273 struct map_info *map = mtd->priv;
274 struct cfi_private *cfi = map->fldrv_priv; 274 struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
282/* 282/*
283 * Some chips power-up with all sectors locked by default. 283 * Some chips power-up with all sectors locked by default.
284 */ 284 */
285static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param) 285static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
286{ 286{
287 struct map_info *map = mtd->priv; 287 struct map_info *map = mtd->priv;
288 struct cfi_private *cfi = map->fldrv_priv; 288 struct cfi_private *cfi = map->fldrv_priv;
@@ -295,31 +295,31 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
295} 295}
296 296
297static struct cfi_fixup cfi_fixup_table[] = { 297static struct cfi_fixup cfi_fixup_table[] = {
298 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 298 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
299 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL }, 299 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
300 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL }, 300 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
301#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 301#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
302 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 302 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
303#endif 303#endif
304#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND 304#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
305 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL }, 305 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
306#endif 306#endif
307#if !FORCE_WORD_WRITE 307#if !FORCE_WORD_WRITE
308 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL }, 308 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
309#endif 309#endif
310 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 310 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
311 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 311 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
312 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, 312 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
313 { 0, 0, NULL, NULL } 313 { 0, 0, NULL }
314}; 314};
315 315
316static struct cfi_fixup jedec_fixup_table[] = { 316static struct cfi_fixup jedec_fixup_table[] = {
317 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, 317 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
318 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, 318 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
319 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, 319 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
320 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, 320 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
321 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, 321 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
322 { 0, 0, NULL, NULL } 322 { 0, 0, NULL }
323}; 323};
324static struct cfi_fixup fixup_table[] = { 324static struct cfi_fixup fixup_table[] = {
325 /* The CFI vendor ids and the JEDEC vendor IDs appear 325 /* The CFI vendor ids and the JEDEC vendor IDs appear
@@ -327,8 +327,8 @@ static struct cfi_fixup fixup_table[] = {
327 * well. This table is to pick all cases where 327 * well. This table is to pick all cases where
328 * we know that is the case. 328 * we know that is the case.
329 */ 329 */
330 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL }, 330 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
331 { 0, 0, NULL, NULL } 331 { 0, 0, NULL }
332}; 332};
333 333
334static void cfi_fixup_major_minor(struct cfi_private *cfi, 334static void cfi_fixup_major_minor(struct cfi_private *cfi,
@@ -455,6 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
455 mtd->flags = MTD_CAP_NORFLASH; 455 mtd->flags = MTD_CAP_NORFLASH;
456 mtd->name = map->name; 456 mtd->name = map->name;
457 mtd->writesize = 1; 457 mtd->writesize = 1;
458 mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
458 459
459 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; 460 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
460 461
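
These hunks drop the never-used void *param from every fixup hook, so the tables shrink to three-field initializers (and the terminator loses its fourth NULL). A sketch of the dispatch such a table feeds; the struct layout, wildcard values and the walk are illustrative, not the driver's:

#include <stdio.h>

#define CFI_MFR_ANY     0xFFFF  /* assumed wildcard values */
#define CFI_ID_ANY      0xFFFF

struct mtd_info { const char *name; };

struct cfi_fixup {
        unsigned mfr;
        unsigned id;
        void (*fixup)(struct mtd_info *mtd);    /* no more param */
};

static void fixup_demo(struct mtd_info *mtd)
{
        printf("fixup applied to %s\n", mtd->name);
}

static const struct cfi_fixup table[] = {
        { 0x0089, 0x00ba, fixup_demo },                 /* one chip */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_demo },        /* catch-all */
        { 0, 0, NULL }          /* terminator, now three fields */
};

static void run_fixups(struct mtd_info *mtd, unsigned mfr, unsigned id)
{
        const struct cfi_fixup *f;

        for (f = table; f->fixup; f++)
                if ((f->mfr == CFI_MFR_ANY || f->mfr == mfr) &&
                    (f->id == CFI_ID_ANY || f->id == id))
                        f->fixup(mtd);
}

int main(void)
{
        struct mtd_info mtd = { "demo-flash" };

        run_fixups(&mtd, 0x0089, 0x00ba);       /* applies both entries */
        return 0;
}
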
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3b8e32d87977..f072fcfde04e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -134,7 +134,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
134 134
135#ifdef AMD_BOOTLOC_BUG 135#ifdef AMD_BOOTLOC_BUG
136/* Wheee. Bring me the head of someone at AMD. */ 136/* Wheee. Bring me the head of someone at AMD. */
137static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) 137static void fixup_amd_bootblock(struct mtd_info *mtd)
138{ 138{
139 struct map_info *map = mtd->priv; 139 struct map_info *map = mtd->priv;
140 struct cfi_private *cfi = map->fldrv_priv; 140 struct cfi_private *cfi = map->fldrv_priv;
@@ -186,7 +186,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
186} 186}
187#endif 187#endif
188 188
189static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 189static void fixup_use_write_buffers(struct mtd_info *mtd)
190{ 190{
191 struct map_info *map = mtd->priv; 191 struct map_info *map = mtd->priv;
192 struct cfi_private *cfi = map->fldrv_priv; 192 struct cfi_private *cfi = map->fldrv_priv;
@@ -197,7 +197,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
197} 197}
198 198
199/* Atmel chips don't use the same PRI format as AMD chips */ 199/* Atmel chips don't use the same PRI format as AMD chips */
200static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 200static void fixup_convert_atmel_pri(struct mtd_info *mtd)
201{ 201{
202 struct map_info *map = mtd->priv; 202 struct map_info *map = mtd->priv;
203 struct cfi_private *cfi = map->fldrv_priv; 203 struct cfi_private *cfi = map->fldrv_priv;
@@ -228,14 +228,14 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
228 cfi->cfiq->BufWriteTimeoutMax = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0;
229} 229}
230 230
231static void fixup_use_secsi(struct mtd_info *mtd, void *param) 231static void fixup_use_secsi(struct mtd_info *mtd)
232{ 232{
233 /* Setup for chips with a secsi area */ 233 /* Setup for chips with a secsi area */
234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
236} 236}
237 237
238static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) 238static void fixup_use_erase_chip(struct mtd_info *mtd)
239{ 239{
240 struct map_info *map = mtd->priv; 240 struct map_info *map = mtd->priv;
241 struct cfi_private *cfi = map->fldrv_priv; 241 struct cfi_private *cfi = map->fldrv_priv;
@@ -250,7 +250,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
251 * locked by default. 251 * locked by default.
252 */ 252 */
253static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param) 253static void fixup_use_atmel_lock(struct mtd_info *mtd)
254{ 254{
255 mtd->lock = cfi_atmel_lock; 255 mtd->lock = cfi_atmel_lock;
256 mtd->unlock = cfi_atmel_unlock; 256 mtd->unlock = cfi_atmel_unlock;
@@ -271,7 +271,7 @@ static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
271 cfi->cfiq->NumEraseRegions = 1; 271 cfi->cfiq->NumEraseRegions = 1;
272} 272}
273 273
274static void fixup_sst39vf(struct mtd_info *mtd, void *param) 274static void fixup_sst39vf(struct mtd_info *mtd)
275{ 275{
276 struct map_info *map = mtd->priv; 276 struct map_info *map = mtd->priv;
277 struct cfi_private *cfi = map->fldrv_priv; 277 struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_sst39vf(struct mtd_info *mtd, void *param)
282 cfi->addr_unlock2 = 0x2AAA; 282 cfi->addr_unlock2 = 0x2AAA;
283} 283}
284 284
285static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param) 285static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
286{ 286{
287 struct map_info *map = mtd->priv; 287 struct map_info *map = mtd->priv;
288 struct cfi_private *cfi = map->fldrv_priv; 288 struct cfi_private *cfi = map->fldrv_priv;
@@ -295,12 +295,12 @@ static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
295 cfi->sector_erase_cmd = CMD(0x50); 295 cfi->sector_erase_cmd = CMD(0x50);
296} 296}
297 297
298static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param) 298static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
299{ 299{
300 struct map_info *map = mtd->priv; 300 struct map_info *map = mtd->priv;
301 struct cfi_private *cfi = map->fldrv_priv; 301 struct cfi_private *cfi = map->fldrv_priv;
302 302
303 fixup_sst39vf_rev_b(mtd, param); 303 fixup_sst39vf_rev_b(mtd);
304 304
305 /* 305 /*
306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
@@ -310,7 +310,7 @@ static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
311} 311}
312 312
313static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param) 313static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
314{ 314{
315 struct map_info *map = mtd->priv; 315 struct map_info *map = mtd->priv;
316 struct cfi_private *cfi = map->fldrv_priv; 316 struct cfi_private *cfi = map->fldrv_priv;
@@ -321,7 +321,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
321 } 321 }
322} 322}
323 323
324static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param) 324static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
325{ 325{
326 struct map_info *map = mtd->priv; 326 struct map_info *map = mtd->priv;
327 struct cfi_private *cfi = map->fldrv_priv; 327 struct cfi_private *cfi = map->fldrv_priv;
@@ -334,47 +334,47 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
334 334
335/* Used to fix CFI-Tables of chips without Extended Query Tables */ 335/* Used to fix CFI-Tables of chips without Extended Query Tables */
336static struct cfi_fixup cfi_nopri_fixup_table[] = { 336static struct cfi_fixup cfi_nopri_fixup_table[] = {
337 { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */ 337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
338 { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */ 338 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
339 { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */ 339 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
340 { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */ 340 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
341 { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */ 341 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
342 { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */ 342 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
343 { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */ 343 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
344 { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */ 344 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
345 { 0, 0, NULL, NULL } 345 { 0, 0, NULL }
346}; 346};
347 347
348static struct cfi_fixup cfi_fixup_table[] = { 348static struct cfi_fixup cfi_fixup_table[] = {
349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
350#ifdef AMD_BOOTLOC_BUG 350#ifdef AMD_BOOTLOC_BUG
351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
352 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 352 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
353#endif 353#endif
354 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, 354 { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
355 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, 355 { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
356 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, }, 356 { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
357 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, }, 357 { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
358 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, }, 358 { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
359 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, }, 359 { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
360 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, }, 360 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
361 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, }, 361 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
362 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, }, 362 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
363 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, }, 363 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
364 { CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */ 364 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
365 { CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */ 365 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
366 { CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */ 366 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
367 { CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */ 367 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
368#if !FORCE_WORD_WRITE 368#if !FORCE_WORD_WRITE
369 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 369 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
370#endif 370#endif
371 { 0, 0, NULL, NULL } 371 { 0, 0, NULL }
372}; 372};
373static struct cfi_fixup jedec_fixup_table[] = { 373static struct cfi_fixup jedec_fixup_table[] = {
374 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, 374 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
375 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, 375 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
376 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, 376 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
377 { 0, 0, NULL, NULL } 377 { 0, 0, NULL }
378}; 378};
379 379
380static struct cfi_fixup fixup_table[] = { 380static struct cfi_fixup fixup_table[] = {
@@ -383,18 +383,30 @@ static struct cfi_fixup fixup_table[] = {
383 * well. This table is to pick all cases where 383 * well. This table is to pick all cases where
384 * we know that is the case. 384 * we know that is the case.
385 */ 385 */
386 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL }, 386 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
387 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL }, 387 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
388 { 0, 0, NULL, NULL } 388 { 0, 0, NULL }
389}; 389};
390 390
391 391
392static void cfi_fixup_major_minor(struct cfi_private *cfi, 392static void cfi_fixup_major_minor(struct cfi_private *cfi,
393 struct cfi_pri_amdstd *extp) 393 struct cfi_pri_amdstd *extp)
394{ 394{
395 if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e && 395 if (cfi->mfr == CFI_MFR_SAMSUNG) {
396 extp->MajorVersion == '0') 396 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
397 extp->MajorVersion = '1'; 397 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
398 /*
399 * Samsung K8P2815UQB and K8D6x16UxM chips
400 * report major=0 / minor=0.
401 * K8D3x16UxC chips report major=3 / minor=3.
402 */
403 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
404 " Extended Query version to 1.%c\n",
405 extp->MinorVersion);
406 extp->MajorVersion = '1';
407 }
408 }
409
398 /* 410 /*
399 * SST 38VF640x chips report major=0xFF / minor=0xFF. 411 * SST 38VF640x chips report major=0xFF / minor=0xFF.
400 */ 412 */
@@ -428,6 +440,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
428 mtd->flags = MTD_CAP_NORFLASH; 440 mtd->flags = MTD_CAP_NORFLASH;
429 mtd->name = map->name; 441 mtd->name = map->name;
430 mtd->writesize = 1; 442 mtd->writesize = 1;
443 mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
444
445 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
446 __func__, mtd->writebufsize);
431 447
432 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 448 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
433 449
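
Annotation: besides dropping the unused fixup parameter, the hunk above introduces mtd->writebufsize, derived from the CFI query structure where MaxBufWriteSize is stored as a log2 value. A minimal standalone C sketch of that derivation follows; the value 6 is a hypothetical CFI field chosen for illustration, not taken from this commit.

#include <stdio.h>

/* CFI stores the maximum buffer-write size as a power of two, so the
 * new mtd->writebufsize is simply 1 << MaxBufWriteSize. Consumers such
 * as UBI can then size their write-back buffers from this field. */
int main(void)
{
	unsigned char max_buf_write_size = 6;	/* hypothetical CFI field: 2^6 */
	unsigned int writebufsize = 1u << max_buf_write_size;

	printf("writebufsize = %u bytes\n", writebufsize);	/* prints 64 */
	return 0;
}
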
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 314af1f5a370..c04b7658abe9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,6 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
238 mtd->resume = cfi_staa_resume; 238 mtd->resume = cfi_staa_resume;
239 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; 239 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
240 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ 240 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
241 mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
241 map->fldrv = &cfi_staa_chipdrv; 242 map->fldrv = &cfi_staa_chipdrv;
242 __module_get(THIS_MODULE); 243 __module_get(THIS_MODULE);
243 mtd->name = map->name; 244 mtd->name = map->name;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 360525c637d2..6ae3d111e1e7 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -156,7 +156,7 @@ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
156 for (f=fixups; f->fixup; f++) { 156 for (f=fixups; f->fixup; f++) {
157 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && 157 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
158 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { 158 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
159 f->fixup(mtd, f->param); 159 f->fixup(mtd);
160 } 160 }
161 } 161 }
162} 162}
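
Annotation: the cfi_util.c hunk above shows the dispatch side of the signature change, where f->fixup(mtd) replaces f->fixup(mtd, f->param). A self-contained userspace sketch of the table-driven pattern follows; the struct layout mirrors the kernel's cfi_fixup after the param member is removed, and the manufacturer/device ids are hypothetical.

#include <stdio.h>

struct mtd_info { const char *name; };

/* Each entry matches a (manufacturer, device) pair; the callback now
 * takes only the device pointer, since no fixup ever used "param". */
struct cfi_fixup {
	unsigned mfr;
	unsigned id;
	void (*fixup)(struct mtd_info *mtd);
};

#define CFI_MFR_ANY	0xffff
#define CFI_ID_ANY	0xffff

static void fixup_example(struct mtd_info *mtd)
{
	printf("fixing up %s\n", mtd->name);
}

static struct cfi_fixup table[] = {
	{ 0x00bf, 0x234a, fixup_example },	/* hypothetical ids */
	{ 0, 0, NULL }				/* sentinel, as in the kernel tables */
};

static void run_fixups(struct mtd_info *mtd, unsigned mfr, unsigned id)
{
	struct cfi_fixup *f;

	for (f = table; f->fixup; f++)
		if ((f->mfr == CFI_MFR_ANY || f->mfr == mfr) &&
		    (f->id == CFI_ID_ANY || f->id == id))
			f->fixup(mtd);
}

int main(void)
{
	struct mtd_info mtd = { "demo-chip" };

	run_fixups(&mtd, 0x00bf, 0x234a);
	return 0;
}
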
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index d18064977192..5e3cc80128aa 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -98,7 +98,7 @@ static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
98 return ret; 98 return ret;
99} 99}
100 100
101static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param) 101static void fixup_use_fwh_lock(struct mtd_info *mtd)
102{ 102{
103 printk(KERN_NOTICE "using fwh lock/unlock method\n"); 103 printk(KERN_NOTICE "using fwh lock/unlock method\n");
104 /* Setup for the chips with the fwh lock method */ 104 /* Setup for the chips with the fwh lock method */
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index bf5a002209bd..e4eba6cc1b2e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -51,6 +51,10 @@
51#define OPCODE_WRDI 0x04 /* Write disable */ 51#define OPCODE_WRDI 0x04 /* Write disable */
52#define OPCODE_AAI_WP 0xad /* Auto address increment word program */ 52#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
53 53
54/* Used for Macronix flashes only. */
55#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
56#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
57
54/* Status Register bits. */ 58/* Status Register bits. */
55#define SR_WIP 1 /* Write in progress */ 59#define SR_WIP 1 /* Write in progress */
56#define SR_WEL 2 /* Write enable latch */ 60#define SR_WEL 2 /* Write enable latch */
@@ -62,7 +66,7 @@
62 66
63/* Define max times to check status register before we give up. */ 67/* Define max times to check status register before we give up. */
64#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 68#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
65#define MAX_CMD_SIZE 4 69#define MAX_CMD_SIZE 5
66 70
67#ifdef CONFIG_M25PXX_USE_FAST_READ 71#ifdef CONFIG_M25PXX_USE_FAST_READ
68#define OPCODE_READ OPCODE_FAST_READ 72#define OPCODE_READ OPCODE_FAST_READ
@@ -152,6 +156,16 @@ static inline int write_disable(struct m25p *flash)
152} 156}
153 157
154/* 158/*
159 * Enable/disable 4-byte addressing mode.
160 */
161static inline int set_4byte(struct m25p *flash, int enable)
162{
163 u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
164
165 return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
166}
167
168/*
155 * Service routine to read status register until ready, or timeout occurs. 169 * Service routine to read status register until ready, or timeout occurs.
156 * Returns non-zero if error. 170 * Returns non-zero if error.
157 */ 171 */
@@ -207,6 +221,7 @@ static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
207 cmd[1] = addr >> (flash->addr_width * 8 - 8); 221 cmd[1] = addr >> (flash->addr_width * 8 - 8);
208 cmd[2] = addr >> (flash->addr_width * 8 - 16); 222 cmd[2] = addr >> (flash->addr_width * 8 - 16);
209 cmd[3] = addr >> (flash->addr_width * 8 - 24); 223 cmd[3] = addr >> (flash->addr_width * 8 - 24);
224 cmd[4] = addr >> (flash->addr_width * 8 - 32);
210} 225}
211 226
212static int m25p_cmdsz(struct m25p *flash) 227static int m25p_cmdsz(struct m25p *flash)
@@ -482,6 +497,10 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
482 size_t actual; 497 size_t actual;
483 int cmd_sz, ret; 498 int cmd_sz, ret;
484 499
500 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
501 dev_name(&flash->spi->dev), __func__, "to",
502 (u32)to, len);
503
485 *retlen = 0; 504 *retlen = 0;
486 505
487 /* sanity checks */ 506 /* sanity checks */
@@ -607,7 +626,6 @@ struct flash_info {
607 .sector_size = (_sector_size), \ 626 .sector_size = (_sector_size), \
608 .n_sectors = (_n_sectors), \ 627 .n_sectors = (_n_sectors), \
609 .page_size = 256, \ 628 .page_size = 256, \
610 .addr_width = 3, \
611 .flags = (_flags), \ 629 .flags = (_flags), \
612 }) 630 })
613 631
@@ -635,7 +653,7 @@ static const struct spi_device_id m25p_ids[] = {
635 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, 653 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
636 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, 654 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
637 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, 655 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
638 { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, 656 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
639 657
640 /* EON -- en25pxx */ 658 /* EON -- en25pxx */
641 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, 659 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
@@ -653,6 +671,8 @@ static const struct spi_device_id m25p_ids[] = {
653 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, 671 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
654 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, 672 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
655 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, 673 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
674 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
675 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
656 676
657 /* Spansion -- single (large) sector size only, at least 677 /* Spansion -- single (large) sector size only, at least
658 * for the chips listed here (without boot sectors). 678 * for the chips listed here (without boot sectors).
@@ -764,6 +784,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
764 return &m25p_ids[tmp]; 784 return &m25p_ids[tmp];
765 } 785 }
766 } 786 }
787 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
767 return ERR_PTR(-ENODEV); 788 return ERR_PTR(-ENODEV);
768} 789}
769 790
@@ -883,7 +904,17 @@ static int __devinit m25p_probe(struct spi_device *spi)
883 904
884 flash->mtd.dev.parent = &spi->dev; 905 flash->mtd.dev.parent = &spi->dev;
885 flash->page_size = info->page_size; 906 flash->page_size = info->page_size;
886 flash->addr_width = info->addr_width; 907
908 if (info->addr_width)
909 flash->addr_width = info->addr_width;
910 else {
911 /* enable 4-byte addressing if the device exceeds 16MiB */
912 if (flash->mtd.size > 0x1000000) {
913 flash->addr_width = 4;
914 set_4byte(flash, 1);
915 } else
916 flash->addr_width = 3;
917 }
887 918
888 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, 919 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
889 (long long)flash->mtd.size >> 10); 920 (long long)flash->mtd.size >> 10);
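
Annotation: the m25p80.c changes above enable 4-byte addressing on Macronix parts larger than 16MiB (OPCODE_EN4B) and grow the command buffer to 5 bytes. A standalone sketch of the address packing follows; it is a simplified stand-in for m25p_addr2cmd, and in the driver only 1 + addr_width bytes are actually transmitted, so the fifth byte is used in 4-byte mode only.

#include <stdio.h>
#include <stdint.h>

static void addr2cmd(unsigned addr_width, uint32_t addr, uint8_t *cmd)
{
	/* opcode sits in cmd[0]; address bytes follow, MSB first */
	cmd[1] = addr >> (addr_width * 8 - 8);
	cmd[2] = addr >> (addr_width * 8 - 16);
	cmd[3] = addr >> (addr_width * 8 - 24);
	if (addr_width == 4)
		cmd[4] = addr;	/* lowest byte, only exists in 4-byte mode */
}

int main(void)
{
	uint8_t cmd[5] = { 0x03 };	/* classic READ opcode */

	addr2cmd(4, 0x01234567, cmd);	/* an offset beyond 16MiB needs 4 bytes */
	printf("%02x %02x %02x %02x %02x\n",
	       cmd[0], cmd[1], cmd[2], cmd[3], cmd[4]);
	return 0;
}
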
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 684247a8a5ed..c163e619abc9 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -335,7 +335,7 @@ out:
335 return ret; 335 return ret;
336} 336}
337 337
338static struct flash_info *__init sst25l_match_device(struct spi_device *spi) 338static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
339{ 339{
340 struct flash_info *flash_info = NULL; 340 struct flash_info *flash_info = NULL;
341 struct spi_message m; 341 struct spi_message m;
@@ -375,7 +375,7 @@ static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
375 return flash_info; 375 return flash_info;
376} 376}
377 377
378static int __init sst25l_probe(struct spi_device *spi) 378static int __devinit sst25l_probe(struct spi_device *spi)
379{ 379{
380 struct flash_info *flash_info; 380 struct flash_info *flash_info;
381 struct sst25l_flash *flash; 381 struct sst25l_flash *flash;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 19fe92db0c46..77d64ce19e9f 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -149,11 +149,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
149 if (request_resource(&iomem_resource, &window->rsrc)) { 149 if (request_resource(&iomem_resource, &window->rsrc)) {
150 window->rsrc.parent = NULL; 150 window->rsrc.parent = NULL;
151 printk(KERN_ERR MOD_NAME 151 printk(KERN_ERR MOD_NAME
152 " %s(): Unable to register resource" 152 " %s(): Unable to register resource %pR - kernel bug?\n",
153 " 0x%.16llx-0x%.16llx - kernel bug?\n", 153 __func__, &window->rsrc);
154 __func__,
155 (unsigned long long)window->rsrc.start,
156 (unsigned long long)window->rsrc.end);
157 } 154 }
158 155
159 156
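
Annotation: this and the following map-driver hunks all make the same substitution, replacing hand-rolled 0x%.16llx-0x%.16llx pairs with the %pR extension. A kernel-context sketch of the resulting call follows; it assumes a struct resource is in scope and is not compilable standalone.

#include <linux/kernel.h>
#include <linux/ioport.h>

/* %pR formats a struct resource (range plus flags) with a width
 * appropriate to the resource type, so callers no longer cast
 * start/end to unsigned long long by hand. */
static void report_conflict(struct resource *res)
{
	printk(KERN_ERR "demo: unable to register resource %pR - kernel bug?\n",
	       res);
}
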
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index d175c120ee84..1f3049590d9e 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -196,10 +196,15 @@ static int bcm963xx_probe(struct platform_device *pdev)
196 bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map); 196 bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
197 if (!bcm963xx_mtd_info) { 197 if (!bcm963xx_mtd_info) {
198 dev_err(&pdev->dev, "failed to probe using CFI\n"); 198 dev_err(&pdev->dev, "failed to probe using CFI\n");
199 bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
200 if (bcm963xx_mtd_info)
201 goto probe_ok;
202 dev_err(&pdev->dev, "failed to probe using JEDEC\n");
199 err = -EIO; 203 err = -EIO;
200 goto err_probe; 204 goto err_probe;
201 } 205 }
202 206
207probe_ok:
203 bcm963xx_mtd_info->owner = THIS_MODULE; 208 bcm963xx_mtd_info->owner = THIS_MODULE;
204 209
205 /* This is mutually exclusive */ 210 /* This is mutually exclusive */
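
Annotation: the bcm963xx hunk above adds a JEDEC fallback when CFI probing fails. A kernel-context sketch of the same fallback, written without the goto, follows; probe_flash is a hypothetical helper name, and the map_info is assumed to be fully set up by the caller.

#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

/* Try the CFI prober first; older or non-CFI parts may still answer
 * the JEDEC prober, so fall back before giving up. */
static struct mtd_info *probe_flash(struct map_info *map)
{
	struct mtd_info *mtd = do_map_probe("cfi_probe", map);

	if (!mtd)
		mtd = do_map_probe("jedec_probe", map);
	return mtd;	/* NULL if both probes failed */
}
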
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index ddb462bea9b5..5fdb7b26cea3 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -178,11 +178,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
178 if (request_resource(&iomem_resource, &window->rsrc)) { 178 if (request_resource(&iomem_resource, &window->rsrc)) {
179 window->rsrc.parent = NULL; 179 window->rsrc.parent = NULL;
180 printk(KERN_ERR MOD_NAME 180 printk(KERN_ERR MOD_NAME
181 " %s(): Unable to register resource" 181 " %s(): Unable to register resource %pR - kernel bug?\n",
182 " 0x%.016llx-0x%.016llx - kernel bug?\n", 182 __func__, &window->rsrc);
183 __func__,
184 (unsigned long long)window->rsrc.start,
185 (unsigned long long)window->rsrc.end);
186 } 183 }
187 184
188 185
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index d12c93dc1aad..4feb7507ab7c 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -242,12 +242,9 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
242 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 242 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
243 if (request_resource(&iomem_resource, &window->rsrc)) { 243 if (request_resource(&iomem_resource, &window->rsrc)) {
244 window->rsrc.parent = NULL; 244 window->rsrc.parent = NULL;
245 printk(KERN_DEBUG MOD_NAME 245 printk(KERN_DEBUG MOD_NAME ": "
246 ": %s(): Unable to register resource" 246 "%s(): Unable to register resource %pR - kernel bug?\n",
247 " 0x%.08llx-0x%.08llx - kernel bug?\n", 247 __func__, &window->rsrc);
248 __func__,
249 (unsigned long long)window->rsrc.start,
250 (unsigned long long)window->rsrc.end);
251 } 248 }
252 249
253 /* Map the firmware hub into my address space. */ 250 /* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index f102bf243a74..1337a4191a0c 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -175,12 +175,9 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
175 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 175 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
176 if (request_resource(&iomem_resource, &window->rsrc)) { 176 if (request_resource(&iomem_resource, &window->rsrc)) {
177 window->rsrc.parent = NULL; 177 window->rsrc.parent = NULL;
178 printk(KERN_DEBUG MOD_NAME 178 printk(KERN_DEBUG MOD_NAME ": "
179 ": %s(): Unable to register resource" 179 "%s(): Unable to register resource %pR - kernel bug?\n",
180 " 0x%.16llx-0x%.16llx - kernel bug?\n", 180 __func__, &window->rsrc);
181 __func__,
182 (unsigned long long)window->rsrc.start,
183 (unsigned long long)window->rsrc.end);
184 } 181 }
185 182
186 /* Map the firmware hub into my address space. */ 183 /* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 9861814aa027..8506578e6a35 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -274,9 +274,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
274 continue; 274 continue;
275 } 275 }
276 276
277 dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n", 277 dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
278 (unsigned long long)res.start,
279 (unsigned long long)res.end);
280 278
281 err = -EBUSY; 279 err = -EBUSY;
282 res_size = resource_size(&res); 280 res_size = resource_size(&res);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index b5391ebb736e..027e628a4f1d 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -166,9 +166,8 @@ static int __init init_scx200_docflash(void)
166 outl(pmr, scx200_cb_base + SCx200_PMR); 166 outl(pmr, scx200_cb_base + SCx200_PMR);
167 } 167 }
168 168
169 printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n", 169 printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
170 (unsigned long long)docmem.start, 170 &docmem, width);
171 (unsigned long long)docmem.end, width);
172 171
173 scx200_docflash_map.size = size; 172 scx200_docflash_map.size = size;
174 if (width == 8) 173 if (width == 8)
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 60146984f4be..c08e140d40ed 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -139,7 +139,7 @@ static int __init init_tqm_mtd(void)
139 goto error_mem; 139 goto error_mem;
140 } 140 }
141 141
142 map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL); 142 map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
143 143
144 if (!map_banks[idx]->name) { 144 if (!map_banks[idx]->name) {
145 ret = -ENOMEM; 145 ret = -ENOMEM;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 98240575a18d..145b3d0dc0db 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -522,10 +522,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
522 if (!capable(CAP_SYS_ADMIN)) 522 if (!capable(CAP_SYS_ADMIN))
523 return -EPERM; 523 return -EPERM;
524 524
525 /* Only master mtd device must be used to control partitions */
526 if (!mtd_is_master(mtd))
527 return -EINVAL;
528
529 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) 525 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
530 return -EFAULT; 526 return -EFAULT;
531 527
@@ -535,6 +531,10 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
535 switch (a.op) { 531 switch (a.op) {
536 case BLKPG_ADD_PARTITION: 532 case BLKPG_ADD_PARTITION:
537 533
534 /* Only master mtd device must be used to add partitions */
535 if (mtd_is_partition(mtd))
536 return -EINVAL;
537
538 return mtd_add_partition(mtd, p.devname, p.start, p.length); 538 return mtd_add_partition(mtd, p.devname, p.start, p.length);
539 539
540 case BLKPG_DEL_PARTITION: 540 case BLKPG_DEL_PARTITION:
@@ -601,6 +601,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
601 } 601 }
602 602
603 case MEMGETINFO: 603 case MEMGETINFO:
604 memset(&info, 0, sizeof(info));
604 info.type = mtd->type; 605 info.type = mtd->type;
605 info.flags = mtd->flags; 606 info.flags = mtd->flags;
606 info.size = mtd->size; 607 info.size = mtd->size;
@@ -609,7 +610,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
609 info.oobsize = mtd->oobsize; 610 info.oobsize = mtd->oobsize;
610 /* The below fields are obsolete */ 611 /* The below fields are obsolete */
611 info.ecctype = -1; 612 info.ecctype = -1;
612 info.eccsize = 0;
613 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) 613 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
614 return -EFAULT; 614 return -EFAULT;
615 break; 615 break;
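
Annotation: the MEMGETINFO change above is an information-leak fix. A kernel-context sketch of the pattern follows; it condenses the ioctl handler and assumes mtd, argp and struct mtd_info_user are in scope.

/* A struct copied to user space must be zeroed first: otherwise
 * uninitialized stack bytes in padding and in fields that are no
 * longer set individually (such as eccsize) leak to the caller. */
struct mtd_info_user info;

memset(&info, 0, sizeof(info));
info.type = mtd->type;
/* ...fill only the meaningful fields; eccsize stays 0 from the memset... */
if (copy_to_user(argp, &info, sizeof(info)))
	return -EFAULT;
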
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index bf8de0943103..5f5777bd3f75 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -776,6 +776,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
776 concat->mtd.size = subdev[0]->size; 776 concat->mtd.size = subdev[0]->size;
777 concat->mtd.erasesize = subdev[0]->erasesize; 777 concat->mtd.erasesize = subdev[0]->erasesize;
778 concat->mtd.writesize = subdev[0]->writesize; 778 concat->mtd.writesize = subdev[0]->writesize;
779 concat->mtd.writebufsize = subdev[0]->writebufsize;
779 concat->mtd.subpage_sft = subdev[0]->subpage_sft; 780 concat->mtd.subpage_sft = subdev[0]->subpage_sft;
780 concat->mtd.oobsize = subdev[0]->oobsize; 781 concat->mtd.oobsize = subdev[0]->oobsize;
781 concat->mtd.oobavail = subdev[0]->oobavail; 782 concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index c948150079be..e3e40f440323 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -401,7 +401,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
401 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); 401 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
402 402
403 cxt->mtd = NULL; 403 cxt->mtd = NULL;
404 flush_scheduled_work(); 404 flush_work_sync(&cxt->work_erase);
405 flush_work_sync(&cxt->work_write);
405} 406}
406 407
407 408
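
Annotation: the mtdoops change above narrows the teardown synchronization. A kernel-context sketch follows, assuming the 2.6.38-era flush_work_sync() API (later folded into flush_work()); flushing only the works the driver owns avoids stalling on unrelated items in the global workqueue, which flush_scheduled_work() would have drained.

#include <linux/workqueue.h>

static void demo_teardown(struct work_struct *erase_work,
			  struct work_struct *write_work)
{
	flush_work_sync(erase_work);	/* wait out the last erase pass */
	flush_work_sync(write_work);	/* then the last write pass */
}
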
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 79e3689f1e16..0a4760174782 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -120,8 +120,25 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
120 return -EINVAL; 120 return -EINVAL;
121 if (ops->datbuf && from + ops->len > mtd->size) 121 if (ops->datbuf && from + ops->len > mtd->size)
122 return -EINVAL; 122 return -EINVAL;
123 res = part->master->read_oob(part->master, from + part->offset, ops);
124 123
124 /*
125 * If OOB is also requested, make sure that we do not read past the end
126 * of this partition.
127 */
128 if (ops->oobbuf) {
129 size_t len, pages;
130
131 if (ops->mode == MTD_OOB_AUTO)
132 len = mtd->oobavail;
133 else
134 len = mtd->oobsize;
135 pages = mtd_div_by_ws(mtd->size, mtd);
136 pages -= mtd_div_by_ws(from, mtd);
137 if (ops->ooboffs + ops->ooblen > pages * len)
138 return -EINVAL;
139 }
140
141 res = part->master->read_oob(part->master, from + part->offset, ops);
125 if (unlikely(res)) { 142 if (unlikely(res)) {
126 if (res == -EUCLEAN) 143 if (res == -EUCLEAN)
127 mtd->ecc_stats.corrected++; 144 mtd->ecc_stats.corrected++;
@@ -384,6 +401,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
384 slave->mtd.flags = master->flags & ~part->mask_flags; 401 slave->mtd.flags = master->flags & ~part->mask_flags;
385 slave->mtd.size = part->size; 402 slave->mtd.size = part->size;
386 slave->mtd.writesize = master->writesize; 403 slave->mtd.writesize = master->writesize;
404 slave->mtd.writebufsize = master->writebufsize;
387 slave->mtd.oobsize = master->oobsize; 405 slave->mtd.oobsize = master->oobsize;
388 slave->mtd.oobavail = master->oobavail; 406 slave->mtd.oobavail = master->oobavail;
389 slave->mtd.subpage_sft = master->subpage_sft; 407 slave->mtd.subpage_sft = master->subpage_sft;
@@ -720,19 +738,19 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
720} 738}
721EXPORT_SYMBOL_GPL(parse_mtd_partitions); 739EXPORT_SYMBOL_GPL(parse_mtd_partitions);
722 740
723int mtd_is_master(struct mtd_info *mtd) 741int mtd_is_partition(struct mtd_info *mtd)
724{ 742{
725 struct mtd_part *part; 743 struct mtd_part *part;
726 int nopart = 0; 744 int ispart = 0;
727 745
728 mutex_lock(&mtd_partitions_mutex); 746 mutex_lock(&mtd_partitions_mutex);
729 list_for_each_entry(part, &mtd_partitions, list) 747 list_for_each_entry(part, &mtd_partitions, list)
730 if (&part->mtd == mtd) { 748 if (&part->mtd == mtd) {
731 nopart = 1; 749 ispart = 1;
732 break; 750 break;
733 } 751 }
734 mutex_unlock(&mtd_partitions_mutex); 752 mutex_unlock(&mtd_partitions_mutex);
735 753
736 return nopart; 754 return ispart;
737} 755}
738EXPORT_SYMBOL_GPL(mtd_is_master); 756EXPORT_SYMBOL_GPL(mtd_is_partition);
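
Annotation: the part_read_oob hunk above rejects OOB reads that would run past the end of a partition. A standalone sketch of the bounds arithmetic follows, with hypothetical geometry; in the driver the page counts come from mtd_div_by_ws and the per-page budget is oobavail in MTD_OOB_AUTO mode or oobsize otherwise.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t part_size = 4 * 1024 * 1024;	/* hypothetical 4MiB partition */
	uint32_t writesize = 2048;		/* page size */
	uint32_t oob_per_page = 64;		/* OOB bytes usable per page */
	uint64_t from = part_size - 2 * writesize;	/* two pages remain */

	/* OOB bytes still available from "from" to the partition end */
	uint64_t pages = part_size / writesize - from / writesize;
	uint64_t max_oob = pages * oob_per_page;

	printf("at most %llu OOB bytes readable\n",
	       (unsigned long long)max_oob);	/* prints 128 */
	return 0;
}
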
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8229802b4346..c89592239bc7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,6 +96,7 @@ config MTD_NAND_SPIA
96config MTD_NAND_AMS_DELTA 96config MTD_NAND_AMS_DELTA
97 tristate "NAND Flash device on Amstrad E3" 97 tristate "NAND Flash device on Amstrad E3"
98 depends on MACH_AMS_DELTA 98 depends on MACH_AMS_DELTA
99 default y
99 help 100 help
100 Support for NAND flash on Amstrad E3 (Delta). 101 Support for NAND flash on Amstrad E3 (Delta).
101 102
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2548e1065bf8..a067d090cb31 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li> 4 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
5 * 5 *
6 * Derived from drivers/mtd/toto.c 6 * Derived from drivers/mtd/toto.c
7 * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
8 * Partially stolen from drivers/mtd/nand/plat_nand.c
7 * 9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -62,9 +64,10 @@ static struct mtd_partition partition_info[] = {
62static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte) 64static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
63{ 65{
64 struct nand_chip *this = mtd->priv; 66 struct nand_chip *this = mtd->priv;
67 void __iomem *io_base = this->priv;
65 68
66 omap_writew(0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL)); 69 writew(0, io_base + OMAP_MPUIO_IO_CNTL);
67 omap_writew(byte, this->IO_ADDR_W); 70 writew(byte, this->IO_ADDR_W);
68 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0); 71 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
69 ndelay(40); 72 ndelay(40);
70 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 73 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
@@ -75,11 +78,12 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
75{ 78{
76 u_char res; 79 u_char res;
77 struct nand_chip *this = mtd->priv; 80 struct nand_chip *this = mtd->priv;
81 void __iomem *io_base = this->priv;
78 82
79 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0); 83 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
80 ndelay(40); 84 ndelay(40);
81 omap_writew(~0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL)); 85 writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
82 res = omap_readw(this->IO_ADDR_R); 86 res = readw(this->IO_ADDR_R);
83 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 87 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
84 AMS_DELTA_LATCH2_NAND_NRE); 88 AMS_DELTA_LATCH2_NAND_NRE);
85 89
@@ -151,11 +155,16 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
151/* 155/*
152 * Main initialization routine 156 * Main initialization routine
153 */ 157 */
154static int __init ams_delta_init(void) 158static int __devinit ams_delta_init(struct platform_device *pdev)
155{ 159{
156 struct nand_chip *this; 160 struct nand_chip *this;
161 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
162 void __iomem *io_base;
157 int err = 0; 163 int err = 0;
158 164
165 if (!res)
166 return -ENXIO;
167
159 /* Allocate memory for MTD device structure and private data */ 168 /* Allocate memory for MTD device structure and private data */
160 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) + 169 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
161 sizeof(struct nand_chip), GFP_KERNEL); 170 sizeof(struct nand_chip), GFP_KERNEL);
@@ -177,9 +186,25 @@ static int __init ams_delta_init(void)
177 /* Link the private data with the MTD structure */ 186 /* Link the private data with the MTD structure */
178 ams_delta_mtd->priv = this; 187 ams_delta_mtd->priv = this;
179 188
189 if (!request_mem_region(res->start, resource_size(res),
190 dev_name(&pdev->dev))) {
191 dev_err(&pdev->dev, "request_mem_region failed\n");
192 err = -EBUSY;
193 goto out_free;
194 }
195
196 io_base = ioremap(res->start, resource_size(res));
197 if (io_base == NULL) {
198 dev_err(&pdev->dev, "ioremap failed\n");
199 err = -EIO;
200 goto out_release_io;
201 }
202
203 this->priv = io_base;
204
180 /* Set address of NAND IO lines */ 205 /* Set address of NAND IO lines */
181 this->IO_ADDR_R = (OMAP1_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH); 206 this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
182 this->IO_ADDR_W = (OMAP1_MPUIO_BASE + OMAP_MPUIO_OUTPUT); 207 this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
183 this->read_byte = ams_delta_read_byte; 208 this->read_byte = ams_delta_read_byte;
184 this->write_buf = ams_delta_write_buf; 209 this->write_buf = ams_delta_write_buf;
185 this->read_buf = ams_delta_read_buf; 210 this->read_buf = ams_delta_read_buf;
@@ -195,6 +220,8 @@ static int __init ams_delta_init(void)
195 this->chip_delay = 30; 220 this->chip_delay = 30;
196 this->ecc.mode = NAND_ECC_SOFT; 221 this->ecc.mode = NAND_ECC_SOFT;
197 222
223 platform_set_drvdata(pdev, io_base);
224
198 /* Set chip enabled, but */ 225 /* Set chip enabled, but */
199 ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE | 226 ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
200 AMS_DELTA_LATCH2_NAND_NWE | 227 AMS_DELTA_LATCH2_NAND_NWE |
@@ -214,25 +241,56 @@ static int __init ams_delta_init(void)
214 goto out; 241 goto out;
215 242
216 out_mtd: 243 out_mtd:
244 platform_set_drvdata(pdev, NULL);
245 iounmap(io_base);
246out_release_io:
247 release_mem_region(res->start, resource_size(res));
248out_free:
217 kfree(ams_delta_mtd); 249 kfree(ams_delta_mtd);
218 out: 250 out:
219 return err; 251 return err;
220} 252}
221 253
222module_init(ams_delta_init);
223
224/* 254/*
225 * Clean up routine 255 * Clean up routine
226 */ 256 */
227static void __exit ams_delta_cleanup(void) 257static int __devexit ams_delta_cleanup(struct platform_device *pdev)
228{ 258{
259 void __iomem *io_base = platform_get_drvdata(pdev);
260 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
261
229 /* Release resources, unregister device */ 262 /* Release resources, unregister device */
230 nand_release(ams_delta_mtd); 263 nand_release(ams_delta_mtd);
231 264
265 iounmap(io_base);
266 release_mem_region(res->start, resource_size(res));
267
232 /* Free the MTD device structure */ 268 /* Free the MTD device structure */
233 kfree(ams_delta_mtd); 269 kfree(ams_delta_mtd);
270
271 return 0;
272}
273
274static struct platform_driver ams_delta_nand_driver = {
275 .probe = ams_delta_init,
276 .remove = __devexit_p(ams_delta_cleanup),
277 .driver = {
278 .name = "ams-delta-nand",
279 .owner = THIS_MODULE,
280 },
281};
282
283static int __init ams_delta_nand_init(void)
284{
285 return platform_driver_register(&ams_delta_nand_driver);
286}
287module_init(ams_delta_nand_init);
288
289static void __exit ams_delta_nand_exit(void)
290{
291 platform_driver_unregister(&ams_delta_nand_driver);
234} 292}
235module_exit(ams_delta_cleanup); 293module_exit(ams_delta_nand_exit);
236 294
237MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
238MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>"); 296MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
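
Annotation: the ams-delta conversion above replaces omap_writew() on a hardcoded physical base with readw()/writew() on an ioremap'ed cookie obtained in probe. A kernel-context sketch of that mapping step follows; map_mpuio is a hypothetical helper, and error unwinding (releasing the region if ioremap fails) is omitted for brevity.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *map_mpuio(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res || !request_mem_region(res->start, resource_size(res),
					dev_name(&pdev->dev)))
		return NULL;
	/* accessors then use writew(val, base + reg) on this cookie */
	return ioremap(res->start, resource_size(res));
}
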
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c141b07b25d1..7a13d42cbabd 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -388,6 +388,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
388 "page_addr: 0x%x, column: 0x%x.\n", 388 "page_addr: 0x%x, column: 0x%x.\n",
389 page_addr, column); 389 page_addr, column);
390 390
391 elbc_fcm_ctrl->column = column;
392 elbc_fcm_ctrl->oob = 0;
391 elbc_fcm_ctrl->use_mdr = 1; 393 elbc_fcm_ctrl->use_mdr = 1;
392 394
393 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | 395 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 02edfba25b0c..205b10b9f9b9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -31,6 +31,7 @@
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/mtd/fsmc.h> 33#include <linux/mtd/fsmc.h>
34#include <linux/amba/bus.h>
34#include <mtd/mtd-abi.h> 35#include <mtd/mtd-abi.h>
35 36
36static struct nand_ecclayout fsmc_ecc1_layout = { 37static struct nand_ecclayout fsmc_ecc1_layout = {
@@ -119,21 +120,36 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
119 } 120 }
120}; 121};
121 122
122/*
123 * Default partition tables to be used if the partition information not
124 * provided through platform data
125 */
126#define PARTITION(n, off, sz) {.name = n, .offset = off, .size = sz}
127 123
124#ifdef CONFIG_MTD_PARTITIONS
128/* 125/*
126 * Default partition tables to be used if the partition information not
127 * provided through platform data.
128 *
129 * Default partition layout for small page(= 512 bytes) devices 129 * Default partition layout for small page(= 512 bytes) devices
130 * Size for "Root file system" is updated in driver based on actual device size 130 * Size for "Root file system" is updated in driver based on actual device size
131 */ 131 */
132static struct mtd_partition partition_info_16KB_blk[] = { 132static struct mtd_partition partition_info_16KB_blk[] = {
133 PARTITION("X-loader", 0, 4 * 0x4000), 133 {
134 PARTITION("U-Boot", 0x10000, 20 * 0x4000), 134 .name = "X-loader",
135 PARTITION("Kernel", 0x60000, 256 * 0x4000), 135 .offset = 0,
136 PARTITION("Root File System", 0x460000, 0), 136 .size = 4*0x4000,
137 },
138 {
139 .name = "U-Boot",
140 .offset = 0x10000,
141 .size = 20*0x4000,
142 },
143 {
144 .name = "Kernel",
145 .offset = 0x60000,
146 .size = 256*0x4000,
147 },
148 {
149 .name = "Root File System",
150 .offset = 0x460000,
151 .size = 0,
152 },
137}; 153};
138 154
139/* 155/*
@@ -141,19 +157,37 @@ static struct mtd_partition partition_info_16KB_blk[] = {
141 * Size for "Root file system" is updated in driver based on actual device size 157 * Size for "Root file system" is updated in driver based on actual device size
142 */ 158 */
143static struct mtd_partition partition_info_128KB_blk[] = { 159static struct mtd_partition partition_info_128KB_blk[] = {
144 PARTITION("X-loader", 0, 4 * 0x20000), 160 {
145 PARTITION("U-Boot", 0x80000, 12 * 0x20000), 161 .name = "X-loader",
146 PARTITION("Kernel", 0x200000, 48 * 0x20000), 162 .offset = 0,
147 PARTITION("Root File System", 0x800000, 0), 163 .size = 4*0x20000,
164 },
165 {
166 .name = "U-Boot",
167 .offset = 0x80000,
168 .size = 12*0x20000,
169 },
170 {
171 .name = "Kernel",
172 .offset = 0x200000,
173 .size = 48*0x20000,
174 },
175 {
176 .name = "Root File System",
177 .offset = 0x800000,
178 .size = 0,
179 },
148}; 180};
149 181
150#ifdef CONFIG_MTD_CMDLINE_PARTS 182#ifdef CONFIG_MTD_CMDLINE_PARTS
151const char *part_probes[] = { "cmdlinepart", NULL }; 183const char *part_probes[] = { "cmdlinepart", NULL };
152#endif 184#endif
185#endif
153 186
154/** 187/**
155 * struct fsmc_nand_data - atructure for FSMC NAND device state 188 * struct fsmc_nand_data - structure for FSMC NAND device state
156 * 189 *
190 * @pid: Part ID on the AMBA PrimeCell format
157 * @mtd: MTD info for a NAND flash. 191 * @mtd: MTD info for a NAND flash.
158 * @nand: Chip related info for a NAND flash. 192 * @nand: Chip related info for a NAND flash.
159 * @partitions: Partition info for a NAND Flash. 193 * @partitions: Partition info for a NAND Flash.
@@ -169,6 +203,7 @@ const char *part_probes[] = { "cmdlinepart", NULL };
169 * @regs_va: FSMC regs base address. 203 * @regs_va: FSMC regs base address.
170 */ 204 */
171struct fsmc_nand_data { 205struct fsmc_nand_data {
206 u32 pid;
172 struct mtd_info mtd; 207 struct mtd_info mtd;
173 struct nand_chip nand; 208 struct nand_chip nand;
174 struct mtd_partition *partitions; 209 struct mtd_partition *partitions;
@@ -508,7 +543,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
508 struct nand_chip *nand; 543 struct nand_chip *nand;
509 struct fsmc_regs *regs; 544 struct fsmc_regs *regs;
510 struct resource *res; 545 struct resource *res;
511 int nr_parts, ret = 0; 546 int ret = 0;
547 u32 pid;
548 int i;
512 549
513 if (!pdata) { 550 if (!pdata) {
514 dev_err(&pdev->dev, "platform data is NULL\n"); 551 dev_err(&pdev->dev, "platform data is NULL\n");
@@ -598,6 +635,18 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
598 if (ret) 635 if (ret)
599 goto err_probe1; 636 goto err_probe1;
600 637
638 /*
639 * This device ID is actually a common AMBA ID as used on the
640 * AMBA PrimeCell bus. However it is not a PrimeCell.
641 */
642 for (pid = 0, i = 0; i < 4; i++)
643 pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
644 host->pid = pid;
645 dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
646 "revision %02x, config %02x\n",
647 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
648 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
649
601 host->bank = pdata->bank; 650 host->bank = pdata->bank;
602 host->select_chip = pdata->select_bank; 651 host->select_chip = pdata->select_bank;
603 regs = host->regs_va; 652 regs = host->regs_va;
@@ -625,7 +674,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
625 674
626 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16); 675 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
627 676
628 if (get_fsmc_version(host->regs_va) == FSMC_VER8) { 677 if (AMBA_REV_BITS(host->pid) >= 8) {
629 nand->ecc.read_page = fsmc_read_page_hwecc; 678 nand->ecc.read_page = fsmc_read_page_hwecc;
630 nand->ecc.calculate = fsmc_read_hwecc_ecc4; 679 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
631 nand->ecc.correct = fsmc_correct_data; 680 nand->ecc.correct = fsmc_correct_data;
@@ -645,7 +694,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
645 goto err_probe; 694 goto err_probe;
646 } 695 }
647 696
648 if (get_fsmc_version(host->regs_va) == FSMC_VER8) { 697 if (AMBA_REV_BITS(host->pid) >= 8) {
649 if (host->mtd.writesize == 512) { 698 if (host->mtd.writesize == 512) {
650 nand->ecc.layout = &fsmc_ecc4_sp_layout; 699 nand->ecc.layout = &fsmc_ecc4_sp_layout;
651 host->ecc_place = &fsmc_ecc4_sp_place; 700 host->ecc_place = &fsmc_ecc4_sp_place;
@@ -676,11 +725,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
676 * Check if partition info passed via command line 725 * Check if partition info passed via command line
677 */ 726 */
678 host->mtd.name = "nand"; 727 host->mtd.name = "nand";
679 nr_parts = parse_mtd_partitions(&host->mtd, part_probes, 728 host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
680 &host->partitions, 0); 729 &host->partitions, 0);
681 if (nr_parts > 0) { 730 if (host->nr_partitions <= 0) {
682 host->nr_partitions = nr_parts;
683 } else {
684#endif 731#endif
685 /* 732 /*
686 * Check if partition info passed via command line 733 * Check if partition info passed via command line
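
Annotation: the fsmc_nand hunk above reads an AMBA-style peripheral ID and keys the ECC setup off its revision field instead of a driver-private version register. A standalone sketch of the byte-assembly loop follows; the register values are hypothetical, and the field positions assume the standard PrimeCell layout behind the AMBA_*_BITS macros.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t id_regs[4] = { 0x40, 0x08, 0x08, 0x00 };	/* hypothetical */
	uint32_t pid = 0;
	int i;

	/* one ID byte per 32-bit register at the top of the block,
	 * assembled least-significant byte first */
	for (i = 0; i < 4; i++)
		pid |= (id_regs[i] & 255) << (i * 8);

	printf("part %03x, manufacturer %02x, revision %02x, config %02x\n",
	       (unsigned)(pid & 0xfff), (unsigned)((pid >> 12) & 0xff),
	       (unsigned)((pid >> 20) & 0xf), (unsigned)((pid >> 24) & 0xff));
	return 0;
}
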
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 67343fc31bd5..cea38a5d4ac5 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,58 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
251 return 0; 251 return 0;
252} 252}
253 253
254
255/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos
256 * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit
257 * into the eccpos array. */
258static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
259 struct nand_chip *chip, uint8_t *buf, int page)
260{
261 int i, eccsize = chip->ecc.size;
262 int eccbytes = chip->ecc.bytes;
263 int eccsteps = chip->ecc.steps;
264 uint8_t *p = buf;
265 unsigned int ecc_offset = chip->page_shift;
266
267 /* Read the OOB area first */
268 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
269 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
270 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
271
272 for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
273 int stat;
274
275 chip->ecc.hwctl(mtd, NAND_ECC_READ);
276 chip->read_buf(mtd, p, eccsize);
277
278 stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
279 if (stat < 0)
280 mtd->ecc_stats.failed++;
281 else
282 mtd->ecc_stats.corrected += stat;
283 }
284 return 0;
285}
286
287/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
288static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
289 struct nand_chip *chip, const uint8_t *buf)
290{
291 int i, eccsize = chip->ecc.size;
292 int eccbytes = chip->ecc.bytes;
293 int eccsteps = chip->ecc.steps;
294 const uint8_t *p = buf;
295 unsigned int ecc_offset = chip->page_shift;
296
297 for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
298 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
299 chip->write_buf(mtd, p, eccsize);
300 chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
301 }
302
303 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
304}
305
306#ifdef CONFIG_MTD_CMDLINE_PARTS 254#ifdef CONFIG_MTD_CMDLINE_PARTS
307static const char *part_probes[] = {"cmdline", NULL}; 255static const char *part_probes[] = {"cmdline", NULL};
308#endif 256#endif
@@ -393,9 +341,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
393 chip->ecc.size = 512; 341 chip->ecc.size = 512;
394 chip->ecc.bytes = 9; 342 chip->ecc.bytes = 9;
395 343
396 chip->ecc.read_page = jz_nand_read_page_hwecc_oob_first;
397 chip->ecc.write_page = jz_nand_write_page_hwecc;
398
399 if (pdata) 344 if (pdata)
400 chip->ecc.layout = pdata->ecc_layout; 345 chip->ecc.layout = pdata->ecc_layout;
401 346
@@ -489,7 +434,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
489 return 0; 434 return 0;
490} 435}
491 436
492struct platform_driver jz_nand_driver = { 437static struct platform_driver jz_nand_driver = {
493 .probe = jz_nand_probe, 438 .probe = jz_nand_probe,
494 .remove = __devexit_p(jz_nand_remove), 439 .remove = __devexit_p(jz_nand_remove),
495 .driver = { 440 .driver = {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 214b03afdd48..ef932ba55a0b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1009,7 +1009,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1009 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; 1009 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1010 struct mxc_nand_host *host; 1010 struct mxc_nand_host *host;
1011 struct resource *res; 1011 struct resource *res;
1012 int err = 0, nr_parts = 0; 1012 int err = 0, __maybe_unused nr_parts = 0;
1013 struct nand_ecclayout *oob_smallpage, *oob_largepage; 1013 struct nand_ecclayout *oob_smallpage, *oob_largepage;
1014 1014
1015 /* Allocate memory for MTD device structure and private data */ 1015 /* Allocate memory for MTD device structure and private data */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 31bf376b82a0..a9c6ce745767 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2865,20 +2865,24 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2865 2865
2866 /* check version */ 2866 /* check version */
2867 val = le16_to_cpu(p->revision); 2867 val = le16_to_cpu(p->revision);
2868 if (val == 1 || val > (1 << 4)) { 2868 if (val & (1 << 5))
2869 printk(KERN_INFO "%s: unsupported ONFI version: %d\n", 2869 chip->onfi_version = 23;
2870 __func__, val); 2870 else if (val & (1 << 4))
2871 return 0;
2872 }
2873
2874 if (val & (1 << 4))
2875 chip->onfi_version = 22; 2871 chip->onfi_version = 22;
2876 else if (val & (1 << 3)) 2872 else if (val & (1 << 3))
2877 chip->onfi_version = 21; 2873 chip->onfi_version = 21;
2878 else if (val & (1 << 2)) 2874 else if (val & (1 << 2))
2879 chip->onfi_version = 20; 2875 chip->onfi_version = 20;
2880 else 2876 else if (val & (1 << 1))
2881 chip->onfi_version = 10; 2877 chip->onfi_version = 10;
2878 else
2879 chip->onfi_version = 0;
2880
2881 if (!chip->onfi_version) {
2882 printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
2883 __func__, val);
2884 return 0;
2885 }
2882 2886
2883 sanitize_string(p->manufacturer, sizeof(p->manufacturer)); 2887 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
2884 sanitize_string(p->model, sizeof(p->model)); 2888 sanitize_string(p->model, sizeof(p->model));
@@ -2887,7 +2891,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2887 mtd->writesize = le32_to_cpu(p->byte_per_page); 2891 mtd->writesize = le32_to_cpu(p->byte_per_page);
2888 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2892 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2889 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2893 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2890 chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; 2894 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
2891 busw = 0; 2895 busw = 0;
2892 if (le16_to_cpu(p->features) & 1) 2896 if (le16_to_cpu(p->features) & 1)
2893 busw = NAND_BUSWIDTH_16; 2897 busw = NAND_BUSWIDTH_16;
@@ -3157,7 +3161,7 @@ ident_done:
3157 printk(KERN_INFO "NAND device: Manufacturer ID:" 3161 printk(KERN_INFO "NAND device: Manufacturer ID:"
3158 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3162 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
3159 nand_manuf_ids[maf_idx].name, 3163 nand_manuf_ids[maf_idx].name,
3160 chip->onfi_version ? type->name : chip->onfi_params.model); 3164 chip->onfi_version ? chip->onfi_params.model : type->name);
3161 3165
3162 return type; 3166 return type;
3163} 3167}
@@ -3435,6 +3439,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3435 mtd->resume = nand_resume; 3439 mtd->resume = nand_resume;
3436 mtd->block_isbad = nand_block_isbad; 3440 mtd->block_isbad = nand_block_isbad;
3437 mtd->block_markbad = nand_block_markbad; 3441 mtd->block_markbad = nand_block_markbad;
3442 mtd->writebufsize = mtd->writesize;
3438 3443
3439 /* propagate ecc.layout to mtd_info */ 3444 /* propagate ecc.layout to mtd_info */
3440 mtd->ecclayout = chip->ecc.layout; 3445 mtd->ecclayout = chip->ecc.layout;
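
Annotation: the nand_base.c hunks above rework the ONFI revision decode (the field is a bitmask of all supported revisions, so the highest set bit wins and 0 means unsupported rather than defaulting to 1.0) and widen the chipsize multiplication to 64 bits so chips over 4GiB do not overflow. A standalone sketch of the decode follows.

#include <stdio.h>
#include <stdint.h>

static int onfi_version(uint16_t rev)
{
	if (rev & (1 << 5))
		return 23;	/* ONFI 2.3 */
	if (rev & (1 << 4))
		return 22;	/* ONFI 2.2 */
	if (rev & (1 << 3))
		return 21;	/* ONFI 2.1 */
	if (rev & (1 << 2))
		return 20;	/* ONFI 2.0 */
	if (rev & (1 << 1))
		return 10;	/* ONFI 1.0 */
	return 0;		/* unknown: caller prints a notice and bails */
}

int main(void)
{
	printf("rev 0x%04x -> %d\n", 0x0002, onfi_version(0x0002));	/* 10 */
	printf("rev 0x%04x -> %d\n", 0x0020, onfi_version(0x0020));	/* 23 */
	return 0;
}
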
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 586b981f0e61..6ebd869993aa 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1092,7 +1092,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1092 1092
1093/** 1093/**
1094 * verify_bbt_descr - verify the bad block description 1094 * verify_bbt_descr - verify the bad block description
1095 * @bd: the table to verify 1095 * @mtd: MTD device structure
1096 * @bd: the table to verify
1096 * 1097 *
1097 * This functions performs a few sanity checks on the bad block description 1098 * This functions performs a few sanity checks on the bad block description
1098 * table. 1099 * table.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a6a73aab1253..a5aa99f014ba 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,12 +210,12 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
 #define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
 #define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
 #define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
-#define STATE_CMD_PAGEPROG     0x00000004 /* start page programm */
+#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
 #define STATE_CMD_READOOB      0x00000005 /* read OOB area */
 #define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
 #define STATE_CMD_STATUS       0x00000007 /* read status */
 #define STATE_CMD_STATUS_M     0x00000008 /* read multi-plane status (isn't implemented) */
-#define STATE_CMD_SEQIN        0x00000009 /* sequential data imput */
+#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
 #define STATE_CMD_READID       0x0000000A /* read ID */
 #define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
 #define STATE_CMD_RESET        0x0000000C /* reset */
@@ -230,7 +230,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
 #define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
 #define STATE_ADDR_MASK        0x00000070 /* address states mask */
 
-/* Durind data input/output the simulator is in these states */
+/* During data input/output the simulator is in these states */
 #define STATE_DATAIN           0x00000100 /* waiting for data input */
 #define STATE_DATAIN_MASK      0x00000100 /* data input states mask */
 
@@ -248,7 +248,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
 
 /* Simulator's actions bit masks */
 #define ACTION_CPY             0x00100000 /* copy page/OOB to the internal buffer */
-#define ACTION_PRGPAGE         0x00200000 /* programm the internal buffer to flash */
+#define ACTION_PRGPAGE         0x00200000 /* program the internal buffer to flash */
 #define ACTION_SECERASE        0x00300000 /* erase sector */
 #define ACTION_ZEROOFF         0x00400000 /* don't add any offset to address */
 #define ACTION_HALFOFF         0x00500000 /* add to address half of page */
@@ -263,18 +263,18 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
 #define OPT_PAGE512            0x00000002 /* 512-byte page chips */
 #define OPT_PAGE2048           0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA         0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR           0x00000020 /* page number auto inctimentation is possible */
+#define OPT_AUTOINCR           0x00000020 /* page number auto incrementation is possible */
 #define OPT_PAGE512_8BIT       0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096           0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE          (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
 #define OPT_SMALLPAGE          (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
 
-/* Remove action bits ftom state */
+/* Remove action bits from state */
 #define NS_STATE(x) ((x) & ~ACTION_MASK)
 
 /*
  * Maximum previous states which need to be saved. Currently saving is
- * only needed for page programm operation with preceeded read command
+ * only needed for page program operation with preceded read command
  * (which is only valid for 512-byte pages).
  */
 #define NS_MAX_PREVSTATES 1
@@ -380,16 +380,16 @@ static struct nandsim_operations {
     /* Read OOB */
     {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
             STATE_DATAOUT, STATE_READY}},
-    /* Programm page starting from the beginning */
+    /* Program page starting from the beginning */
     {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
             STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-    /* Programm page starting from the beginning */
+    /* Program page starting from the beginning */
     {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
             STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-    /* Programm page starting from the second half */
+    /* Program page starting from the second half */
     {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
             STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-    /* Programm OOB */
+    /* Program OOB */
     {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
             STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
     /* Erase sector */
@@ -470,7 +470,7 @@ static int alloc_device(struct nandsim *ns)
             err = -EINVAL;
             goto err_close;
         }
-        ns->pages_written = vmalloc(ns->geom.pgnum);
+        ns->pages_written = vzalloc(ns->geom.pgnum);
         if (!ns->pages_written) {
             NS_ERR("alloc_device: unable to allocate pages written array\n");
             err = -ENOMEM;
@@ -483,7 +483,6 @@ static int alloc_device(struct nandsim *ns)
             goto err_free;
         }
         ns->cfile = cfile;
-        memset(ns->pages_written, 0, ns->geom.pgnum);
         return 0;
     }
 
@@ -1171,9 +1170,9 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
  * of supported operations.
  *
  * Operation can be unknown because of the following.
- * 1. New command was accepted and this is the firs call to find the
+ * 1. New command was accepted and this is the first call to find the
  *    correspondent states chain. In this case ns->npstates = 0;
- * 2. There is several operations which begin with the same command(s)
+ * 2. There are several operations which begin with the same command(s)
  *    (for example program from the second half and read from the
  *    second half operations both begin with the READ1 command). In this
  *    case the ns->pstates[] array contains previous states.
@@ -1186,7 +1185,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
  * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
  * zeroed).
 *
- * If there are several maches, the current state is pushed to the
+ * If there are several matches, the current state is pushed to the
  * ns->pstates.
 *
 * The operation can be unknown only while commands are input to the chip.
@@ -1195,10 +1194,10 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
  * operation is searched using the following pattern:
  *     ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
  *
- * It is supposed that this pattern must either match one operation on
+ * It is supposed that this pattern must either match one operation or
  * none. There can't be ambiguity in that case.
 *
- * If no matches found, the functions does the following:
+ * If no matches found, the function does the following:
  * 1. if there are saved states present, try to ignore them and search
  *    again only using the last command. If nothing was found, switch
  *    to the STATE_READY state.
@@ -1668,7 +1667,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
 
     case ACTION_PRGPAGE:
         /*
-         * Programm page - move internal buffer data to the page.
+         * Program page - move internal buffer data to the page.
          */
 
         if (ns->lines.wp) {
@@ -1933,7 +1932,7 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
     NS_DBG("read_byte: all bytes were read\n");
 
     /*
-     * The OPT_AUTOINCR allows to read next conseqitive pages without
+     * The OPT_AUTOINCR allows to read next consecutive pages without
      * new read operation cycle.
      */
     if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 6ddb2461d740..bb277a54986f 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
     if (pasemi_nand_mtd)
         return -ENODEV;
 
-    pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);
+    pr_debug("pasemi_nand at %pR\n", &res);
 
     /* Allocate memory for MTD device structure and private data */
     pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
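Note on the pr_debug change: %pR is the printk extension that formats a struct resource (range plus flags) in one conversion, which also sidesteps the 32/64-bit format mismatch the old %llx pair could hit on resource_size_t. A minimal sketch of the idiom, with a hypothetical device name and a resource assumed to be already populated:

    struct resource res;    /* assume filled in, e.g. by of_address_to_resource() */

    pr_debug("mydev at %pR\n", &res);    /* prints like "[mem 0xfc000000-0xfc003fff]" */
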
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 17f8518cc5eb..ea2c288df3f6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -885,6 +885,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
     /* set info fields needed to __readid */
     info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
     info->reg_ndcr = ndcr;
+    info->cmdset = &default_cmdset;
 
     if (__readid(info, &id))
         return -ENODEV;
@@ -915,7 +916,6 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
 
     info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
     info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-    info->cmdset = &default_cmdset;
 
     return 0;
 }
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 054a41c0ef4a..ca270a4881a4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -277,8 +277,9 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
     ret = nand_scan_ident(mtd, 1, NULL);
     if (!ret) {
         if (mtd->writesize >= 512) {
-            chip->ecc.size = mtd->writesize;
-            chip->ecc.bytes = 3 * (mtd->writesize / 256);
+            /* Hardware ECC 6 byte ECC per 512 Byte data */
+            chip->ecc.size = 512;
+            chip->ecc.bytes = 6;
         }
         ret = nand_scan_tail(mtd);
     }
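Both layouts spend the same number of ECC bytes per page; the fix only changes the step size so that nand_scan_tail() lays the codes out per 512-byte chunk, matching how the TXx9 hardware generates them. A standalone check of the arithmetic, assuming a 2048-byte page for illustration:

    #include <stdio.h>

    int main(void)
    {
        int writesize = 2048;                   /* assumed page size */

        /* old layout: one ECC step covering the whole page */
        int old_size = writesize;
        int old_bytes = 3 * (writesize / 256);  /* 24 bytes total */

        /* new layout: 6 ECC bytes for each 512-byte step */
        int new_size = 512;
        int new_bytes = 6;
        int steps = writesize / new_size;       /* 4 steps */

        printf("old: %d bytes over one %d-byte step\n", old_bytes, old_size);
        printf("new: %d x %d = %d bytes total\n",
               steps, new_bytes, steps * new_bytes);  /* also 24 */
        return 0;
    }
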
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d0894ca7798b..ac31f461cc1c 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,6 +35,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/mach/flash.h>
 #include <plat/gpmc.h>
@@ -63,8 +64,13 @@ struct omap2_onenand {
     int dma_channel;
     int freq;
     int (*setup)(void __iomem *base, int freq);
+    struct regulator *regulator;
 };
 
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
 static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
 {
     struct omap2_onenand *c = data;
@@ -108,8 +114,9 @@ static void wait_warn(char *msg, int state, unsigned int ctrl,
 static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 {
     struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+    struct onenand_chip *this = mtd->priv;
     unsigned int intr = 0;
-    unsigned int ctrl;
+    unsigned int ctrl, ctrl_mask;
     unsigned long timeout;
     u32 syscfg;
 
@@ -180,7 +187,8 @@ retry:
     if (result == 0) {
         /* Timeout after 20ms */
         ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
-        if (ctrl & ONENAND_CTRL_ONGO) {
+        if (ctrl & ONENAND_CTRL_ONGO &&
+            !this->ongoing) {
             /*
              * The operation seems to be still going
              * so give it some more time.
@@ -269,7 +277,11 @@ retry:
         return -EIO;
     }
 
-    if (ctrl & 0xFE9F)
+    ctrl_mask = 0xFE9F;
+    if (this->ongoing)
+        ctrl_mask &= ~0x8000;
+
+    if (ctrl & ctrl_mask)
         wait_warn("unexpected controller status", state, ctrl, intr);
 
     return 0;
@@ -591,6 +603,30 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
     memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
 }
 
+static int omap2_onenand_enable(struct mtd_info *mtd)
+{
+    int ret;
+    struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+    ret = regulator_enable(c->regulator);
+    if (ret != 0)
+        dev_err(&c->pdev->dev, "cant enable regulator\n");
+
+    return ret;
+}
+
+static int omap2_onenand_disable(struct mtd_info *mtd)
+{
+    int ret;
+    struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+    ret = regulator_disable(c->regulator);
+    if (ret != 0)
+        dev_err(&c->pdev->dev, "cant disable regulator\n");
+
+    return ret;
+}
+
 static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 {
     struct omap_onenand_platform_data *pdata;
@@ -705,8 +741,18 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
         }
     }
 
+    if (pdata->regulator_can_sleep) {
+        c->regulator = regulator_get(&pdev->dev, "vonenand");
+        if (IS_ERR(c->regulator)) {
+            dev_err(&pdev->dev, "Failed to get regulator\n");
+            goto err_release_dma;
+        }
+        c->onenand.enable = omap2_onenand_enable;
+        c->onenand.disable = omap2_onenand_disable;
+    }
+
     if ((r = onenand_scan(&c->mtd, 1)) < 0)
-        goto err_release_dma;
+        goto err_release_regulator;
 
     switch ((c->onenand.version_id >> 4) & 0xf) {
     case 0:
@@ -727,13 +773,15 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
     }
 
 #ifdef CONFIG_MTD_PARTITIONS
-    if (pdata->parts != NULL)
-        r = add_mtd_partitions(&c->mtd, pdata->parts,
-                pdata->nr_parts);
+    r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
+    if (r > 0)
+        r = add_mtd_partitions(&c->mtd, c->parts, r);
+    else if (pdata->parts != NULL)
+        r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
     else
 #endif
         r = add_mtd_device(&c->mtd);
-    if (r < 0)
+    if (r)
         goto err_release_onenand;
 
     platform_set_drvdata(pdev, c);
@@ -742,6 +790,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 
 err_release_onenand:
     onenand_release(&c->mtd);
+err_release_regulator:
+    regulator_put(c->regulator);
 err_release_dma:
     if (c->dma_channel != -1)
         omap_free_dma(c->dma_channel);
@@ -757,6 +807,7 @@ err_release_mem_region:
 err_free_cs:
     gpmc_cs_free(c->gpmc_cs);
 err_kfree:
+    kfree(c->parts);
     kfree(c);
 
     return r;
@@ -766,18 +817,8 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
 {
     struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
 
-    BUG_ON(c == NULL);
-
-#ifdef CONFIG_MTD_PARTITIONS
-    if (c->parts)
-        del_mtd_partitions(&c->mtd);
-    else
-        del_mtd_device(&c->mtd);
-#else
-    del_mtd_device(&c->mtd);
-#endif
-
     onenand_release(&c->mtd);
+    regulator_put(c->regulator);
     if (c->dma_channel != -1)
         omap_free_dma(c->dma_channel);
     omap2_onenand_shutdown(pdev);
@@ -789,6 +830,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
     iounmap(c->onenand.base);
     release_mem_region(c->phys_base, ONENAND_IO_SIZE);
     gpmc_cs_free(c->gpmc_cs);
+    kfree(c->parts);
     kfree(c);
 
     return 0;
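Note on the regulator hunks: they follow the standard consumer lifecycle — regulator_get() in probe, enable/disable bracketing the periods the chip is actually in use, regulator_put() on remove and on every error path reached after the get. A minimal sketch of that pairing (supply name as in the patch, error handling trimmed):

    struct regulator *reg;
    int ret;

    reg = regulator_get(dev, "vonenand");    /* probe */
    if (IS_ERR(reg))
        return PTR_ERR(reg);

    ret = regulator_enable(reg);             /* before touching the chip; check ret */
    /* ... device I/O ... */
    regulator_disable(reg);                  /* chip idle again */

    regulator_put(reg);                      /* remove, or probe error path */
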
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6b3a875647c9..bac41caa8df7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -400,8 +400,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
         value = onenand_bufferram_address(this, block);
         this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
 
-        if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
-            ONENAND_IS_4KB_PAGE(this))
+        if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
             /* It is always BufferRAM0 */
             ONENAND_SET_BUFFERRAM0(this);
         else
@@ -430,7 +429,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
     case FLEXONENAND_CMD_RECOVER_LSB:
     case ONENAND_CMD_READ:
     case ONENAND_CMD_READOOB:
-        if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+        if (ONENAND_IS_4KB_PAGE(this))
             /* It is always BufferRAM0 */
             dataram = ONENAND_SET_BUFFERRAM0(this);
         else
@@ -949,6 +948,8 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state)
         if (this->state == FL_READY) {
             this->state = new_state;
             spin_unlock(&this->chip_lock);
+            if (new_state != FL_PM_SUSPENDED && this->enable)
+                this->enable(mtd);
             break;
         }
         if (new_state == FL_PM_SUSPENDED) {
@@ -975,6 +976,8 @@ static void onenand_release_device(struct mtd_info *mtd)
 {
     struct onenand_chip *this = mtd->priv;
 
+    if (this->state != FL_PM_SUSPENDED && this->disable)
+        this->disable(mtd);
     /* Release the chip */
     spin_lock(&this->chip_lock);
     this->state = FL_READY;
@@ -1353,7 +1356,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 
     stats = mtd->ecc_stats;
 
-    readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+    readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
     while (read < len) {
         cond_resched();
@@ -1429,7 +1432,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
     int ret;
 
     onenand_get_device(mtd, FL_READING);
-    ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+    ret = ONENAND_IS_4KB_PAGE(this) ?
         onenand_mlc_read_ops_nolock(mtd, from, &ops) :
         onenand_read_ops_nolock(mtd, from, &ops);
     onenand_release_device(mtd);
@@ -1464,7 +1467,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
 
     onenand_get_device(mtd, FL_READING);
     if (ops->datbuf)
-        ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+        ret = ONENAND_IS_4KB_PAGE(this) ?
             onenand_mlc_read_ops_nolock(mtd, from, ops) :
             onenand_read_ops_nolock(mtd, from, ops);
     else
@@ -1485,8 +1488,7 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
 {
     struct onenand_chip *this = mtd->priv;
     unsigned long timeout;
-    unsigned int interrupt;
-    unsigned int ctrl;
+    unsigned int interrupt, ctrl, ecc, addr1, addr8;
 
     /* The 20 msec is enough */
     timeout = jiffies + msecs_to_jiffies(20);
@@ -1498,25 +1500,28 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
     /* To get correct interrupt status in timeout case */
     interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
     ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+    addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+    addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
 
     if (interrupt & ONENAND_INT_READ) {
-        int ecc = onenand_read_ecc(this);
+        ecc = onenand_read_ecc(this);
         if (ecc & ONENAND_ECC_2BIT_ALL) {
-            printk(KERN_WARNING "%s: ecc error = 0x%04x, "
-                "controller error 0x%04x\n",
-                __func__, ecc, ctrl);
+            printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+                "intr 0x%04x addr1 %#x addr8 %#x\n",
+                __func__, ecc, ctrl, interrupt, addr1, addr8);
             return ONENAND_BBT_READ_ECC_ERROR;
         }
     } else {
-        printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
-            __func__, ctrl, interrupt);
+        printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+            "intr 0x%04x addr1 %#x addr8 %#x\n",
+            __func__, ctrl, interrupt, addr1, addr8);
         return ONENAND_BBT_READ_FATAL_ERROR;
     }
 
     /* Initial bad block case: 0x2400 or 0x0400 */
     if (ctrl & ONENAND_CTRL_ERROR) {
-        printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
-            __func__, ctrl);
+        printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+            "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
         return ONENAND_BBT_READ_ERROR;
     }
 
@@ -1558,7 +1563,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
 
     column = from & (mtd->oobsize - 1);
 
-    readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+    readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
     while (read < len) {
         cond_resched();
@@ -1612,7 +1617,7 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
     u_char *oob_buf = this->oob_buf;
     int status, i, readcmd;
 
-    readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+    readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
 
     this->command(mtd, readcmd, to, mtd->oobsize);
     onenand_update_bufferram(mtd, to, 0);
@@ -1845,7 +1850,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
     const u_char *buf = ops->datbuf;
     const u_char *oob = ops->oobbuf;
     u_char *oobbuf;
-    int ret = 0;
+    int ret = 0, cmd;
 
     DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
         __func__, (unsigned int) to, (int) len);
@@ -1954,7 +1959,19 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
             ONENAND_SET_NEXT_BUFFERRAM(this);
         }
 
-        this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+        this->ongoing = 0;
+        cmd = ONENAND_CMD_PROG;
+
+        /* Exclude 1st OTP and OTP blocks for cache program feature */
+        if (ONENAND_IS_CACHE_PROGRAM(this) &&
+            likely(onenand_block(this, to) != 0) &&
+            ONENAND_IS_4KB_PAGE(this) &&
+            ((written + thislen) < len)) {
+            cmd = ONENAND_CMD_2X_CACHE_PROG;
+            this->ongoing = 1;
+        }
+
+        this->command(mtd, cmd, to, mtd->writesize);
 
         /*
          * 2 PLANE, MLC, and Flex-OneNAND wait here
@@ -2067,7 +2084,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
     oobbuf = this->oob_buf;
 
-    oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+    oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
 
     /* Loop until all data write */
     while (written < len) {
@@ -2086,7 +2103,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
         memcpy(oobbuf + column, buf, thislen);
         this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
 
-        if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
+        if (ONENAND_IS_4KB_PAGE(this)) {
             /* Set main area of DataRAM to 0xff*/
             memset(this->page_buf, 0xff, mtd->writesize);
             this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -2481,7 +2498,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
     /* Grab the lock and see if the device is available */
     onenand_get_device(mtd, FL_ERASING);
 
-    if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+    if (ONENAND_IS_4KB_PAGE(this) || region ||
+        instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
         /* region is set for Flex-OneNAND (no mb erase) */
         ret = onenand_block_by_block_erase(mtd, instr,
                                            region, block_size);
@@ -3029,7 +3047,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
     this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
     this->wait(mtd, FL_OTPING);
 
-    ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+    ret = ONENAND_IS_4KB_PAGE(this) ?
         onenand_mlc_read_ops_nolock(mtd, from, &ops) :
         onenand_read_ops_nolock(mtd, from, &ops);
 
@@ -3377,8 +3395,10 @@ static void onenand_check_features(struct mtd_info *mtd)
     case ONENAND_DEVICE_DENSITY_4Gb:
         if (ONENAND_IS_DDP(this))
             this->options |= ONENAND_HAS_2PLANE;
-        else if (numbufs == 1)
+        else if (numbufs == 1) {
             this->options |= ONENAND_HAS_4KB_PAGE;
+            this->options |= ONENAND_HAS_CACHE_PROGRAM;
+        }
 
     case ONENAND_DEVICE_DENSITY_2Gb:
         /* 2Gb DDP does not have 2 plane */
@@ -3399,7 +3419,11 @@ static void onenand_check_features(struct mtd_info *mtd)
         break;
     }
 
-    if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+    /* The MLC has 4KiB pagesize. */
+    if (ONENAND_IS_MLC(this))
+        this->options |= ONENAND_HAS_4KB_PAGE;
+
+    if (ONENAND_IS_4KB_PAGE(this))
         this->options &= ~ONENAND_HAS_2PLANE;
 
     if (FLEXONENAND(this)) {
@@ -3415,6 +3439,8 @@ static void onenand_check_features(struct mtd_info *mtd)
         printk(KERN_DEBUG "Chip has 2 plane\n");
     if (this->options & ONENAND_HAS_4KB_PAGE)
         printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+    if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+        printk(KERN_DEBUG "Chip has cache program feature\n");
 }
 
 /**
@@ -3831,7 +3857,7 @@ static int onenand_probe(struct mtd_info *mtd)
     /* The data buffer size is equal to page size */
     mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
     /* We use the full BufferRAM */
-    if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+    if (ONENAND_IS_4KB_PAGE(this))
         mtd->writesize <<= 1;
 
     mtd->oobsize = mtd->writesize >> 5;
@@ -4054,6 +4080,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
     mtd->block_isbad = onenand_block_isbad;
     mtd->block_markbad = onenand_block_markbad;
     mtd->owner = THIS_MODULE;
+    mtd->writebufsize = mtd->writesize;
 
     /* Unlock whole block */
     this->unlock_all(mtd);
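Note on the new enable/disable hooks: onenand_get_device() and onenand_release_device() only invoke them when a board driver (such as the OMAP2 one above) has filled them in, so every other user keeps its old behavior. The pattern is an ordinary guarded function-pointer call; a self-contained sketch with hypothetical names:

    #include <stdio.h>

    struct chip {
        int (*enable)(struct chip *c);    /* optional hook, may be NULL */
        int (*disable)(struct chip *c);
    };

    static int power_up(struct chip *c)
    {
        puts("regulator on");
        return 0;
    }

    static void get_device(struct chip *c)
    {
        if (c->enable)                    /* call only if the board set it */
            c->enable(c);
    }

    int main(void)
    {
        struct chip omap = { .enable = power_up };
        struct chip plain = { 0 };        /* no hooks: unchanged behavior */

        get_device(&omap);
        get_device(&plain);
        return 0;
    }
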
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 01ab5b3c453b..fc2c16a0fd1c 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -91,16 +91,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
         for (j = 0; j < len; j++) {
             /* No need to read pages fully,
              * just read required OOB bytes */
-            ret = onenand_bbt_read_oob(mtd, from + j * mtd->writesize + bd->offs, &ops);
+            ret = onenand_bbt_read_oob(mtd,
+                from + j * this->writesize + bd->offs, &ops);
 
             /* If it is a initial bad block, just ignore it */
             if (ret == ONENAND_BBT_READ_FATAL_ERROR)
                 return -EIO;
 
-            if (ret || check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+            if (ret || check_short_pattern(&buf[j * scanlen],
+                    scanlen, this->writesize, bd)) {
                 bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
-                printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
-                    i >> 1, (unsigned int) from);
+                printk(KERN_INFO "OneNAND eraseblock %d is an "
+                    "initial bad block\n", i >> 1);
                 mtd->ecc_stats.badblocks++;
                 break;
             }
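The bbm->bbt bookkeeping here packs two bits per block, four blocks per byte: the loop counter i advances by 2 per block, so i >> 3 picks the byte, i & 0x6 picks the bit pair, and i >> 1 recovers the block number for the message. A standalone demo of the encoding:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char bbt[4];             /* covers 16 blocks, 2 bits each */
        int i, block = 5;

        memset(bbt, 0, sizeof(bbt));

        i = block << 1;                   /* mirror the 2-per-block counter */
        bbt[i >> 3] |= 0x03 << (i & 0x6); /* mark the block bad */

        printf("block %d bits: %d\n",
               i >> 1, (bbt[i >> 3] >> (i & 0x6)) & 0x03);   /* prints 3 */
        return 0;
    }
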
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 0de7a05e6de0..a4c74a9ba430 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -651,7 +651,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
     void __iomem *p;
     void *buf = (void *) buffer;
     dma_addr_t dma_src, dma_dst;
-    int err, page_dma = 0;
+    int err, ofs, page_dma = 0;
     struct device *dev = &onenand->pdev->dev;
 
     p = this->base + area;
@@ -677,10 +677,13 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
         if (!page)
             goto normal;
 
+        /* Page offset */
+        ofs = ((size_t) buf & ~PAGE_MASK);
         page_dma = 1;
+
         /* DMA routine */
         dma_src = onenand->phys_base + (p - this->base);
-        dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
+        dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
     } else {
         /* DMA routine */
         dma_src = onenand->phys_base + (p - this->base);
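The bug being fixed: dma_map_page() was always given offset 0, so for a buffer that is page-backed but not page-aligned the device would DMA into the wrong bytes of the page. The offset recovered from the low bits of the pointer is plain masking; a standalone illustration, assuming 4 KiB pages and a hypothetical address:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t buf = 0x12345678;           /* hypothetical buffer address */
        uintptr_t ofs = buf & ~PAGE_MASK;     /* offset within its page */

        printf("page base 0x%lx, offset 0x%lx\n",
               (unsigned long)(buf & PAGE_MASK), (unsigned long)ofs);
        /* driver then does: dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE) */
        return 0;
    }
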
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 5ebe280225d6..f49e49dc5928 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,7 +672,33 @@ static int io_init(struct ubi_device *ubi)
         ubi->nor_flash = 1;
     }
 
-    ubi->min_io_size = ubi->mtd->writesize;
+    /*
+     * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
+     * for these purposes, not @mtd->writesize. At the moment this does not
+     * matter for NAND, because currently @mtd->writebufsize is equivalent to
+     * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
+     * have @mtd->writebufsize which is multiple of @mtd->writesize.
+     *
+     * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
+     * UBI and UBIFS recovery algorithms rely on the fact that if there was
+     * an unclean power cut, then we can find offset of the last corrupted
+     * node, align the offset to @ubi->min_io_size, read the rest of the
+     * eraseblock starting from this offset, and check whether there are
+     * only 0xFF bytes. If yes, then we are probably dealing with a
+     * corruption caused by a power cut, if not, then this is probably some
+     * severe corruption.
+     *
+     * Thus, we have to use the maximum write unit size of the flash, which
+     * is @mtd->writebufsize, because @mtd->writesize is the minimum write
+     * size, not the maximum.
+     */
+    if (ubi->mtd->type == MTD_NANDFLASH)
+        ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
+    else if (ubi->mtd->type == MTD_NORFLASH)
+        ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
+
+    ubi->min_io_size = ubi->mtd->writebufsize;
+
     ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
 
     /*
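The recovery procedure the new comment describes boils down to: round the offset of the last good node down to min_io_size, then verify the rest of the eraseblock is still erased (all 0xFF). A standalone sketch of that check, with assumed sizes:

    #include <stdio.h>
    #include <string.h>

    /* Returns 1 if buf[from..len) is all 0xFF, i.e. looks power-cut clean. */
    static int all_ff(const unsigned char *buf, size_t from, size_t len)
    {
        size_t i;
        for (i = from; i < len; i++)
            if (buf[i] != 0xFF)
                return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char eb[4096];
        size_t min_io_size = 512;                       /* assumed, from writebufsize */
        size_t corrupt_at = 1000;                       /* hypothetical corruption offset */
        size_t from = corrupt_at & ~(min_io_size - 1);  /* align down -> 512 */

        memset(eb, 0xFF, sizeof(eb));
        memcpy(eb, "old data", 8);                      /* valid nodes at the start */

        printf("scan from %zu: %s\n", from,
               all_ff(eb, from, sizeof(eb)) ? "power cut" : "severe corruption");
        return 0;
    }
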
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fcdb7f65fe0b..0b8141fc5c26 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -425,12 +425,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
 
     /* Read both LEB 0 and LEB 1 into memory */
     ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
-        leb[seb->lnum] = vmalloc(ubi->vtbl_size);
+        leb[seb->lnum] = vzalloc(ubi->vtbl_size);
         if (!leb[seb->lnum]) {
             err = -ENOMEM;
             goto out_free;
         }
-        memset(leb[seb->lnum], 0, ubi->vtbl_size);
 
         err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
                                ubi->vtbl_size);
@@ -516,10 +515,9 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
     int i;
     struct ubi_vtbl_record *vtbl;
 
-    vtbl = vmalloc(ubi->vtbl_size);
+    vtbl = vzalloc(ubi->vtbl_size);
     if (!vtbl)
         return ERR_PTR(-ENOMEM);
-    memset(vtbl, 0, ubi->vtbl_size);
 
     for (i = 0; i < ubi->vtbl_slots; i++)
         memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
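Both vtbl.c hunks (and the nandsim one earlier) are the same mechanical conversion: vzalloc() is vmalloc() plus a zeroing memset, so the separate memset can go. A sketch of the equivalence, not tied to any particular caller:

    /* before */
    p = vmalloc(size);
    if (p)
        memset(p, 0, size);

    /* after: identical result, one call, no window with uninitialized memory */
    p = vzalloc(size);
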
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c787c3d95c60..af824e7e0367 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -692,12 +692,6 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
     return 1;
 }
 
-static void *__init early_device_tree_alloc(u64 size, u64 align)
-{
-    unsigned long mem = early_init_dt_alloc_memory_arch(size, align);
-    return __va(mem);
-}
-
 /**
  * unflatten_device_tree - create tree of device_nodes from flat blob
  *
@@ -709,7 +703,7 @@ static void *__init early_device_tree_alloc(u64 size, u64 align)
 void __init unflatten_device_tree(void)
 {
     __unflatten_device_tree(initial_boot_params, &allnodes,
-                early_device_tree_alloc);
+                early_init_dt_alloc_memory_arch);
 
     /* Get pointer to OF "/chosen" node for use everywhere */
     of_chosen = of_find_node_by_path("/chosen");
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index d93b66743ba7..56f60c8ea0ab 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -635,7 +635,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
     ret = spi_bitbang_stop(&p->bitbang);
     if (!ret) {
         pm_runtime_disable(&pdev->dev);
-        free_irq(platform_get_irq(pdev, 0), sh_msiof_spi_irq);
+        free_irq(platform_get_irq(pdev, 0), p);
         iounmap(p->mapbase);
         clk_put(p->clk);
         spi_master_put(p->bitbang.master);
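free_irq()'s second argument must be the same dev_id cookie that was passed to request_irq(); it is how the kernel finds the right action on a (possibly shared) line. Passing the handler, as the old code did, makes the lookup fail and the IRQ is never released. A sketch of the matched pair (flags and name are illustrative):

    ret = request_irq(irq, sh_msiof_spi_irq, 0, dev_name(&pdev->dev), p);
    ...
    free_irq(irq, p);    /* same cookie 'p', not the handler */
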
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index 87a3a9bd5842..f204d33910ec 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -283,7 +283,7 @@ static int smb_compare_dentry(const struct dentry *,
             unsigned int, const char *, const struct qstr *);
 static int smb_delete_dentry(const struct dentry *);
 
-static const struct dentry_operations smbfs_dentry_operations =
+const struct dentry_operations smbfs_dentry_operations =
 {
     .d_revalidate = smb_lookup_validate,
     .d_hash = smb_hash_dentry,
@@ -291,7 +291,7 @@ static const struct dentry_operations smbfs_dentry_operations =
     .d_delete = smb_delete_dentry,
 };
 
-static const struct dentry_operations smbfs_dentry_operations_case =
+const struct dentry_operations smbfs_dentry_operations_case =
 {
     .d_revalidate = smb_lookup_validate,
     .d_delete = smb_delete_dentry,
diff --git a/fs/Kconfig b/fs/Kconfig
index 771f457402d4..9a7921ae4763 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -30,15 +30,6 @@ config FS_MBCACHE
30source "fs/reiserfs/Kconfig" 30source "fs/reiserfs/Kconfig"
31source "fs/jfs/Kconfig" 31source "fs/jfs/Kconfig"
32 32
33config FS_POSIX_ACL
34# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs/nfs4)
35#
36# NOTE: you can implement Posix ACLs without these helpers (XFS does).
37# Never use this symbol for ifdefs.
38#
39 bool
40 default n
41
42source "fs/xfs/Kconfig" 33source "fs/xfs/Kconfig"
43source "fs/gfs2/Kconfig" 34source "fs/gfs2/Kconfig"
44source "fs/ocfs2/Kconfig" 35source "fs/ocfs2/Kconfig"
@@ -47,6 +38,14 @@ source "fs/nilfs2/Kconfig"
47 38
48endif # BLOCK 39endif # BLOCK
49 40
41# Posix ACL utility routines
42#
43# Note: Posix ACLs can be implemented without these helpers. Never use
44# this symbol for ifdefs in core code.
45#
46config FS_POSIX_ACL
47 def_bool n
48
50config EXPORTFS 49config EXPORTFS
51 tristate 50 tristate
52 51
diff --git a/fs/aio.c b/fs/aio.c
index 5e00f15c54aa..fc557a3be0a9 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -87,7 +87,7 @@ static int __init aio_setup(void)
 
 	aio_wq = create_workqueue("aio");
 	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
-	BUG_ON(!abe_pool);
+	BUG_ON(!aio_wq || !abe_pool);
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 66836d85763b..a9e0a4eaf3d9 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
+#include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/statfs.h>
@@ -1237,6 +1238,117 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
     return 0;
 }
 
+static long btrfs_fallocate(struct file *file, int mode,
+                loff_t offset, loff_t len)
+{
+    struct inode *inode = file->f_path.dentry->d_inode;
+    struct extent_state *cached_state = NULL;
+    u64 cur_offset;
+    u64 last_byte;
+    u64 alloc_start;
+    u64 alloc_end;
+    u64 alloc_hint = 0;
+    u64 locked_end;
+    u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+    struct extent_map *em;
+    int ret;
+
+    alloc_start = offset & ~mask;
+    alloc_end = (offset + len + mask) & ~mask;
+
+    /* We only support the FALLOC_FL_KEEP_SIZE mode */
+    if (mode & ~FALLOC_FL_KEEP_SIZE)
+        return -EOPNOTSUPP;
+
+    /*
+     * wait for ordered IO before we have any locks.  We'll loop again
+     * below with the locks held.
+     */
+    btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+
+    mutex_lock(&inode->i_mutex);
+    ret = inode_newsize_ok(inode, alloc_end);
+    if (ret)
+        goto out;
+
+    if (alloc_start > inode->i_size) {
+        ret = btrfs_cont_expand(inode, alloc_start);
+        if (ret)
+            goto out;
+    }
+
+    ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
+    if (ret)
+        goto out;
+
+    locked_end = alloc_end - 1;
+    while (1) {
+        struct btrfs_ordered_extent *ordered;
+
+        /* the extent lock is ordered inside the running
+         * transaction
+         */
+        lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
+                 locked_end, 0, &cached_state, GFP_NOFS);
+        ordered = btrfs_lookup_first_ordered_extent(inode,
+                                alloc_end - 1);
+        if (ordered &&
+            ordered->file_offset + ordered->len > alloc_start &&
+            ordered->file_offset < alloc_end) {
+            btrfs_put_ordered_extent(ordered);
+            unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                         alloc_start, locked_end,
+                         &cached_state, GFP_NOFS);
+            /*
+             * we can't wait on the range with the transaction
+             * running or with the extent lock held
+             */
+            btrfs_wait_ordered_range(inode, alloc_start,
+                         alloc_end - alloc_start);
+        } else {
+            if (ordered)
+                btrfs_put_ordered_extent(ordered);
+            break;
+        }
+    }
+
+    cur_offset = alloc_start;
+    while (1) {
+        em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+                      alloc_end - cur_offset, 0);
+        BUG_ON(IS_ERR(em) || !em);
+        last_byte = min(extent_map_end(em), alloc_end);
+        last_byte = (last_byte + mask) & ~mask;
+        if (em->block_start == EXTENT_MAP_HOLE ||
+            (cur_offset >= inode->i_size &&
+             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+            ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
+                            last_byte - cur_offset,
+                            1 << inode->i_blkbits,
+                            offset + len,
+                            &alloc_hint);
+            if (ret < 0) {
+                free_extent_map(em);
+                break;
+            }
+        }
+        free_extent_map(em);
+
+        cur_offset = last_byte;
+        if (cur_offset >= alloc_end) {
+            ret = 0;
+            break;
+        }
+    }
+    unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+                 &cached_state, GFP_NOFS);
+
+    btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
+out:
+    mutex_unlock(&inode->i_mutex);
+    return ret;
+}
+
 const struct file_operations btrfs_file_operations = {
     .llseek = generic_file_llseek,
     .read = do_sync_read,
@@ -1248,6 +1360,7 @@ const struct file_operations btrfs_file_operations = {
     .open = generic_file_open,
     .release = btrfs_release_file,
     .fsync = btrfs_sync_file,
+    .fallocate = btrfs_fallocate,
     .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
     .compat_ioctl = btrfs_ioctl,
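Worth noting in the moved function: the mode check was rewritten from `mode && (mode != FALLOC_FL_KEEP_SIZE)` to `mode & ~FALLOC_FL_KEEP_SIZE`. The two accept exactly the same values today, but the mask form extends naturally once more FALLOC_FL_* flags exist and combinations become legal. A standalone comparison, with a stand-in value for a hypothetical future flag:

    #include <stdio.h>

    #define FALLOC_FL_KEEP_SIZE    0x01
    #define FALLOC_FL_HYPOTHETICAL 0x02    /* stand-in for a future flag */

    int main(void)
    {
        int modes[] = { 0, FALLOC_FL_KEEP_SIZE,
                FALLOC_FL_HYPOTHETICAL,
                FALLOC_FL_KEEP_SIZE | FALLOC_FL_HYPOTHETICAL };
        int i;

        for (i = 0; i < 4; i++) {
            int mode = modes[i];
            int old_rej = mode && (mode != FALLOC_FL_KEEP_SIZE);
            int new_rej = mode & ~FALLOC_FL_KEEP_SIZE;
            printf("mode %#x: old %s, mask %s\n", mode,
                   old_rej ? "reject" : "accept",
                   new_rej ? "reject" : "accept");
        }
        /* identical results; the mask form simply grows to
         * (mode & ~(FLAG_A | FLAG_B)) once more flags are supported */
        return 0;
    }
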
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a3798a3aa0d2..902afbf50811 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7098,116 +7098,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
                          min_size, actual_len, alloc_hint, trans);
 }
 
-static long btrfs_fallocate(struct inode *inode, int mode,
-                loff_t offset, loff_t len)
-{
-    struct extent_state *cached_state = NULL;
-    u64 cur_offset;
-    u64 last_byte;
-    u64 alloc_start;
-    u64 alloc_end;
-    u64 alloc_hint = 0;
-    u64 locked_end;
-    u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
-    struct extent_map *em;
-    int ret;
-
-    alloc_start = offset & ~mask;
-    alloc_end = (offset + len + mask) & ~mask;
-
-    /* We only support the FALLOC_FL_KEEP_SIZE mode */
-    if (mode && (mode != FALLOC_FL_KEEP_SIZE))
-        return -EOPNOTSUPP;
-
-    /*
-     * wait for ordered IO before we have any locks.  We'll loop again
-     * below with the locks held.
-     */
-    btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
-
-    mutex_lock(&inode->i_mutex);
-    ret = inode_newsize_ok(inode, alloc_end);
-    if (ret)
-        goto out;
-
-    if (alloc_start > inode->i_size) {
-        ret = btrfs_cont_expand(inode, alloc_start);
-        if (ret)
-            goto out;
-    }
-
-    ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
-    if (ret)
-        goto out;
-
-    locked_end = alloc_end - 1;
-    while (1) {
-        struct btrfs_ordered_extent *ordered;
-
-        /* the extent lock is ordered inside the running
-         * transaction
-         */
-        lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-                 locked_end, 0, &cached_state, GFP_NOFS);
-        ordered = btrfs_lookup_first_ordered_extent(inode,
-                                alloc_end - 1);
-        if (ordered &&
-            ordered->file_offset + ordered->len > alloc_start &&
-            ordered->file_offset < alloc_end) {
-            btrfs_put_ordered_extent(ordered);
-            unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-                         alloc_start, locked_end,
-                         &cached_state, GFP_NOFS);
-            /*
-             * we can't wait on the range with the transaction
-             * running or with the extent lock held
-             */
-            btrfs_wait_ordered_range(inode, alloc_start,
-                         alloc_end - alloc_start);
-        } else {
-            if (ordered)
-                btrfs_put_ordered_extent(ordered);
-            break;
-        }
-    }
-
-    cur_offset = alloc_start;
-    while (1) {
-        em = btrfs_get_extent(inode, NULL, 0, cur_offset,
-                      alloc_end - cur_offset, 0);
-        BUG_ON(IS_ERR(em) || !em);
-        last_byte = min(extent_map_end(em), alloc_end);
-        last_byte = (last_byte + mask) & ~mask;
-        if (em->block_start == EXTENT_MAP_HOLE ||
-            (cur_offset >= inode->i_size &&
-             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-            ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
-                            last_byte - cur_offset,
-                            1 << inode->i_blkbits,
-                            offset + len,
-                            &alloc_hint);
-            if (ret < 0) {
-                free_extent_map(em);
-                break;
-            }
-        }
-        free_extent_map(em);
-
-        cur_offset = last_byte;
-        if (cur_offset >= alloc_end) {
-            ret = 0;
-            break;
-        }
-    }
-    unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                 &cached_state, GFP_NOFS);
-
-    btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
-out:
-    mutex_unlock(&inode->i_mutex);
-    return ret;
-}
-
 static int btrfs_set_page_dirty(struct page *page)
 {
     return __set_page_dirty_nobuffers(page);
@@ -7310,7 +7200,6 @@ static const struct inode_operations btrfs_file_inode_operations = {
     .listxattr = btrfs_listxattr,
     .removexattr = btrfs_removexattr,
     .permission = btrfs_permission,
-    .fallocate = btrfs_fallocate,
     .fiemap = btrfs_fiemap,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a65d311d163a..9f59887badd2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,6 +1113,8 @@ cifs_parse_mount_options(char *options, const char *devname,
         } else if (!strnicmp(data, "uid", 3) && value && *value) {
             vol->linux_uid = simple_strtoul(value, &value, 0);
             uid_specified = true;
+        } else if (!strnicmp(data, "cruid", 5) && value && *value) {
+            vol->cred_uid = simple_strtoul(value, &value, 0);
         } else if (!strnicmp(data, "forceuid", 8)) {
             override_uid = 1;
         } else if (!strnicmp(data, "noforceuid", 10)) {
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 9aad47a2d62f..6783ce6cdc89 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -899,8 +899,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
     }
     /* else ERRHRD class errors or junk - return EIO */
 
-    cFYI(1, "Mapping smb error code %d to POSIX err %d",
-        smberrcode, rc);
+    cFYI(1, "Mapping smb error code 0x%x to POSIX err %d",
+        le32_to_cpu(smb->Status.CifsError), rc);
 
     /* generic corrective action e.g. reconnect SMB session on
      * ERRbaduid could be added */
diff --git a/fs/compat.c b/fs/compat.c
index eb1740ac8c0a..f6fd0a00e6cc 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -257,7 +257,7 @@ static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *
 }
 
 /*
- * The following statfs calls are copies of code from fs/open.c and
+ * The following statfs calls are copies of code from fs/statfs.c and
  * should be checked against those from time to time
  */
 asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf)
@@ -320,7 +320,9 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat
         __put_user(kbuf->f_namelen, &ubuf->f_namelen) ||
         __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) ||
         __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) ||
-        __put_user(kbuf->f_frsize, &ubuf->f_frsize))
+        __put_user(kbuf->f_frsize, &ubuf->f_frsize) ||
+        __put_user(kbuf->f_flags, &ubuf->f_flags) ||
+        __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare)))
         return -EFAULT;
     return 0;
 }
@@ -597,10 +599,8 @@ ssize_t compat_rw_copy_check_uvector(int type,
     if (nr_segs > fast_segs) {
         ret = -ENOMEM;
         iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
-        if (iov == NULL) {
-            *ret_pointer = fast_pointer;
+        if (iov == NULL)
             goto out;
-        }
     }
     *ret_pointer = iov;
 
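The statfs64 fix is an information-leak class of bug: f_flags was never copied out and f_spare was left holding whatever was on the kernel stack. When copying a struct to userspace, every byte must be either a real field or explicitly zeroed — the kernel uses __clear_user() for the latter. A standalone illustration of the rule (plain memset stands in for __clear_user):

    #include <stdio.h>
    #include <string.h>

    struct compat_statfs_like {     /* simplified stand-in layout */
        long f_frsize;
        long f_flags;
        long f_spare[4];            /* padding visible to userspace */
    };

    int main(void)
    {
        struct compat_statfs_like out;

        out.f_frsize = 4096;
        out.f_flags = 1;
        memset(out.f_spare, 0, sizeof(out.f_spare));  /* no stale bytes escape */

        printf("f_spare[0] = %ld\n", out.f_spare[0]);
        return 0;
    }
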
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 1de65f572033..0c8d97b56f34 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2065,7 +2065,7 @@ extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 extern void ext4_ext_truncate(struct inode *);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
-extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
               loff_t len);
 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
               ssize_t len);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c4068f6abf03..63a75810b7c3 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3627,14 +3627,15 @@ static void ext4_falloc_update_inode(struct inode *inode,
3627} 3627}
3628 3628
3629/* 3629/*
3630 * preallocate space for a file. This implements ext4's fallocate inode 3630 * preallocate space for a file. This implements ext4's fallocate file
3631 * operation, which gets called from sys_fallocate system call. 3631 * operation, which gets called from sys_fallocate system call.
3632 * For block-mapped files, posix_fallocate should fall back to the method 3632 * For block-mapped files, posix_fallocate should fall back to the method
3633 * of writing zeroes to the required new blocks (the same behavior which is 3633 * of writing zeroes to the required new blocks (the same behavior which is
3634 * expected for file systems which do not support fallocate() system call). 3634 * expected for file systems which do not support fallocate() system call).
3635 */ 3635 */
3636long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) 3636long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3637{ 3637{
3638 struct inode *inode = file->f_path.dentry->d_inode;
3638 handle_t *handle; 3639 handle_t *handle;
3639 loff_t new_size; 3640 loff_t new_size;
3640 unsigned int max_blocks; 3641 unsigned int max_blocks;
@@ -3645,7 +3646,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3645 unsigned int credits, blkbits = inode->i_blkbits; 3646 unsigned int credits, blkbits = inode->i_blkbits;
3646 3647
3647 /* We only support the FALLOC_FL_KEEP_SIZE mode */ 3648 /* We only support the FALLOC_FL_KEEP_SIZE mode */
3648 if (mode && (mode != FALLOC_FL_KEEP_SIZE)) 3649 if (mode & ~FALLOC_FL_KEEP_SIZE)
3649 return -EOPNOTSUPP; 3650 return -EOPNOTSUPP;
3650 3651
3651 /* 3652 /*
@@ -3655,10 +3656,6 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3655 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3656 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3656 return -EOPNOTSUPP; 3657 return -EOPNOTSUPP;
3657 3658
3658 /* preallocation to directories is currently not supported */
3659 if (S_ISDIR(inode->i_mode))
3660 return -ENODEV;
3661
3662 map.m_lblk = offset >> blkbits; 3659 map.m_lblk = offset >> blkbits;
3663 /* 3660 /*
3664 * We can't just convert len to max_blocks because 3661 * We can't just convert len to max_blocks because
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index bb003dc9ffff..2e8322c8aa88 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -210,6 +210,7 @@ const struct file_operations ext4_file_operations = {
210 .fsync = ext4_sync_file, 210 .fsync = ext4_sync_file,
211 .splice_read = generic_file_splice_read, 211 .splice_read = generic_file_splice_read,
212 .splice_write = generic_file_splice_write, 212 .splice_write = generic_file_splice_write,
213 .fallocate = ext4_fallocate,
213}; 214};
214 215
215const struct inode_operations ext4_file_inode_operations = { 216const struct inode_operations ext4_file_inode_operations = {
@@ -223,7 +224,6 @@ const struct inode_operations ext4_file_inode_operations = {
223 .removexattr = generic_removexattr, 224 .removexattr = generic_removexattr,
224#endif 225#endif
225 .check_acl = ext4_check_acl, 226 .check_acl = ext4_check_acl,
226 .fallocate = ext4_fallocate,
227 .fiemap = ext4_fiemap, 227 .fiemap = ext4_fiemap,
228}; 228};
229 229
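
This is the pattern the whole series applies to ext4 here and to gfs2, ocfs2 and xfs below: ->fallocate moves from struct inode_operations to struct file_operations and receives the open file. The per-filesystem S_ISDIR checks can be dropped because the syscall path requires a file opened for writing, and a directory cannot be opened that way. A minimal sketch of the resulting hook shape for a hypothetical filesystem (the examplefs names, including the examplefs_do_prealloc helper, are stand-ins):

    static long examplefs_fallocate(struct file *file, int mode,
                                    loff_t offset, loff_t len)
    {
            /* The inode is now derived from the open file. */
            struct inode *inode = file->f_path.dentry->d_inode;

            /* The bitmask test rejects every flag we do not handle and,
             * unlike "mode && (mode != FALLOC_FL_KEEP_SIZE)", keeps
             * working as new FALLOC_FL_* flags are added. */
            if (mode & ~FALLOC_FL_KEEP_SIZE)
                    return -EOPNOTSUPP;

            return examplefs_do_prealloc(inode, mode, offset, len);
    }

    const struct file_operations examplefs_file_operations = {
            .fallocate      = examplefs_fallocate,  /* was in inode_operations */
    };
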
diff --git a/fs/file_table.c b/fs/file_table.c
index c3dee381f1b4..c3e89adf53c0 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -311,7 +311,7 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
311 struct files_struct *files = current->files; 311 struct files_struct *files = current->files;
312 312
313 *fput_needed = 0; 313 *fput_needed = 0;
314 if (likely((atomic_read(&files->count) == 1))) { 314 if (atomic_read(&files->count) == 1) {
315 file = fcheck_files(files, fd); 315 file = fcheck_files(files, fd);
316 } else { 316 } else {
317 rcu_read_lock(); 317 rcu_read_lock();
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index fca6689e12e6..7cfdcb913363 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -19,6 +19,8 @@
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
21#include <linux/ext2_fs.h> 21#include <linux/ext2_fs.h>
22#include <linux/falloc.h>
23#include <linux/swap.h>
22#include <linux/crc32.h> 24#include <linux/crc32.h>
23#include <linux/writeback.h> 25#include <linux/writeback.h>
24#include <asm/uaccess.h> 26#include <asm/uaccess.h>
@@ -610,6 +612,260 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
610 return generic_file_aio_write(iocb, iov, nr_segs, pos); 612 return generic_file_aio_write(iocb, iov, nr_segs, pos);
611} 613}
612 614
615static void empty_write_end(struct page *page, unsigned from,
616 unsigned to)
617{
618 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
619
620 page_zero_new_buffers(page, from, to);
621 flush_dcache_page(page);
622 mark_page_accessed(page);
623
624 if (!gfs2_is_writeback(ip))
625 gfs2_page_add_databufs(ip, page, from, to);
626
627 block_commit_write(page, from, to);
628}
629
630static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
631{
632 unsigned start, end, next;
633 struct buffer_head *bh, *head;
634 int error;
635
636 if (!page_has_buffers(page)) {
637 error = __block_write_begin(page, from, to - from, gfs2_block_map);
638 if (unlikely(error))
639 return error;
640
641 empty_write_end(page, from, to);
642 return 0;
643 }
644
645 bh = head = page_buffers(page);
646 next = end = 0;
647 while (next < from) {
648 next += bh->b_size;
649 bh = bh->b_this_page;
650 }
651 start = next;
652 do {
653 next += bh->b_size;
654 if (buffer_mapped(bh)) {
655 if (end) {
656 error = __block_write_begin(page, start, end - start,
657 gfs2_block_map);
658 if (unlikely(error))
659 return error;
660 empty_write_end(page, start, end);
661 end = 0;
662 }
663 start = next;
664 }
665 else
666 end = next;
667 bh = bh->b_this_page;
668 } while (next < to);
669
670 if (end) {
671 error = __block_write_begin(page, start, end - start, gfs2_block_map);
672 if (unlikely(error))
673 return error;
674 empty_write_end(page, start, end);
675 }
676
677 return 0;
678}
679
680static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
681 int mode)
682{
683 struct gfs2_inode *ip = GFS2_I(inode);
684 struct buffer_head *dibh;
685 int error;
686 u64 start = offset >> PAGE_CACHE_SHIFT;
687 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
688 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
689 pgoff_t curr;
690 struct page *page;
691 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
692 unsigned int from, to;
693
694 if (!end_offset)
695 end_offset = PAGE_CACHE_SIZE;
696
697 error = gfs2_meta_inode_buffer(ip, &dibh);
698 if (unlikely(error))
699 goto out;
700
701 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
702
703 if (gfs2_is_stuffed(ip)) {
704 error = gfs2_unstuff_dinode(ip, NULL);
705 if (unlikely(error))
706 goto out;
707 }
708
709 curr = start;
710 offset = start << PAGE_CACHE_SHIFT;
711 from = start_offset;
712 to = PAGE_CACHE_SIZE;
713 while (curr <= end) {
714 page = grab_cache_page_write_begin(inode->i_mapping, curr,
715 AOP_FLAG_NOFS);
716 if (unlikely(!page)) {
717 error = -ENOMEM;
718 goto out;
719 }
720
721 if (curr == end)
722 to = end_offset;
723 error = write_empty_blocks(page, from, to);
724 if (!error && offset + to > inode->i_size &&
725 !(mode & FALLOC_FL_KEEP_SIZE)) {
726 i_size_write(inode, offset + to);
727 }
728 unlock_page(page);
729 page_cache_release(page);
730 if (error)
731 goto out;
732 curr++;
733 offset += PAGE_CACHE_SIZE;
734 from = 0;
735 }
736
737 gfs2_dinode_out(ip, dibh->b_data);
738 mark_inode_dirty(inode);
739
740 brelse(dibh);
741
742out:
743 return error;
744}
745
746static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
747 unsigned int *data_blocks, unsigned int *ind_blocks)
748{
749 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
750 unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
751 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
752
753 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
754 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
755 max_data -= tmp;
756 }
757 /* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
758 so it might end up with fewer data blocks */
759 if (max_data <= *data_blocks)
760 return;
761 *data_blocks = max_data;
762 *ind_blocks = max_blocks - max_data;
763 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
764 if (*len > max) {
765 *len = max;
766 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
767 }
768}
769
770static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
771 loff_t len)
772{
773 struct inode *inode = file->f_path.dentry->d_inode;
774 struct gfs2_sbd *sdp = GFS2_SB(inode);
775 struct gfs2_inode *ip = GFS2_I(inode);
776 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
777 loff_t bytes, max_bytes;
778 struct gfs2_alloc *al;
779 int error;
780 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
781 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
782
783 /* We only support the FALLOC_FL_KEEP_SIZE mode */
784 if (mode & ~FALLOC_FL_KEEP_SIZE)
785 return -EOPNOTSUPP;
786
787 offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
788 sdp->sd_sb.sb_bsize_shift;
789
790 len = next - offset;
791 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
792 if (!bytes)
793 bytes = UINT_MAX;
794
795 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
796 error = gfs2_glock_nq(&ip->i_gh);
797 if (unlikely(error))
798 goto out_uninit;
799
800 if (!gfs2_write_alloc_required(ip, offset, len))
801 goto out_unlock;
802
803 while (len > 0) {
804 if (len < bytes)
805 bytes = len;
806 al = gfs2_alloc_get(ip);
807 if (!al) {
808 error = -ENOMEM;
809 goto out_unlock;
810 }
811
812 error = gfs2_quota_lock_check(ip);
813 if (error)
814 goto out_alloc_put;
815
816retry:
817 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
818
819 al->al_requested = data_blocks + ind_blocks;
820 error = gfs2_inplace_reserve(ip);
821 if (error) {
822 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
823 bytes >>= 1;
824 goto retry;
825 }
826 goto out_qunlock;
827 }
828 max_bytes = bytes;
829 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
830 al->al_requested = data_blocks + ind_blocks;
831
832 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
833 RES_RG_HDR + gfs2_rg_blocks(al);
834 if (gfs2_is_jdata(ip))
835 rblocks += data_blocks ? data_blocks : 1;
836
837 error = gfs2_trans_begin(sdp, rblocks,
838 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
839 if (error)
840 goto out_trans_fail;
841
842 error = fallocate_chunk(inode, offset, max_bytes, mode);
843 gfs2_trans_end(sdp);
844
845 if (error)
846 goto out_trans_fail;
847
848 len -= max_bytes;
849 offset += max_bytes;
850 gfs2_inplace_release(ip);
851 gfs2_quota_unlock(ip);
852 gfs2_alloc_put(ip);
853 }
854 goto out_unlock;
855
856out_trans_fail:
857 gfs2_inplace_release(ip);
858out_qunlock:
859 gfs2_quota_unlock(ip);
860out_alloc_put:
861 gfs2_alloc_put(ip);
862out_unlock:
863 gfs2_glock_dq(&ip->i_gh);
864out_uninit:
865 gfs2_holder_uninit(&ip->i_gh);
866 return error;
867}
868
613#ifdef CONFIG_GFS2_FS_LOCKING_DLM 869#ifdef CONFIG_GFS2_FS_LOCKING_DLM
614 870
615/** 871/**
@@ -765,6 +1021,7 @@ const struct file_operations gfs2_file_fops = {
765 .splice_read = generic_file_splice_read, 1021 .splice_read = generic_file_splice_read,
766 .splice_write = generic_file_splice_write, 1022 .splice_write = generic_file_splice_write,
767 .setlease = gfs2_setlease, 1023 .setlease = gfs2_setlease,
1024 .fallocate = gfs2_fallocate,
768}; 1025};
769 1026
770const struct file_operations gfs2_dir_fops = { 1027const struct file_operations gfs2_dir_fops = {
@@ -794,6 +1051,7 @@ const struct file_operations gfs2_file_fops_nolock = {
794 .splice_read = generic_file_splice_read, 1051 .splice_read = generic_file_splice_read,
795 .splice_write = generic_file_splice_write, 1052 .splice_write = generic_file_splice_write,
796 .setlease = generic_setlease, 1053 .setlease = generic_setlease,
1054 .fallocate = gfs2_fallocate,
797}; 1055};
798 1056
799const struct file_operations gfs2_dir_fops_nolock = { 1057const struct file_operations gfs2_dir_fops_nolock = {
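
gfs2_fallocate() above (moved here almost verbatim from ops_inode.c, shown next) works through the range chunk by chunk; when a reservation fails with ENOSPC it halves the chunk and retries, down to a single filesystem block. A compressed sketch of that loop shape, with placeholder helpers rather than the real gfs2 calls:

    loff_t len = range_len, offset = range_start;   /* illustrative */
    size_t bytes = max_chunk, block_size = fs_block_size;
    int err = 0;

    while (len > 0) {
            if (len < bytes)
                    bytes = len;
    retry:
            err = reserve_space(ip, bytes);         /* placeholder */
            if (err == -ENOSPC && bytes > block_size) {
                    bytes >>= 1;                    /* halve and retry */
                    goto retry;
            }
            if (err)
                    break;
            err = allocate_chunk(ip, offset, bytes); /* placeholder */
            if (err)
                    break;
            len -= bytes;
            offset += bytes;
    }

Halving rather than failing outright lets a nearly-full filesystem still satisfy as much of the request as it can within one resource group reservation.
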
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 040b5a2e6556..d8b26ac2e20b 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,8 +18,6 @@
18#include <linux/gfs2_ondisk.h> 18#include <linux/gfs2_ondisk.h>
19#include <linux/crc32.h> 19#include <linux/crc32.h>
20#include <linux/fiemap.h> 20#include <linux/fiemap.h>
21#include <linux/swap.h>
22#include <linux/falloc.h>
23#include <asm/uaccess.h> 21#include <asm/uaccess.h>
24 22
25#include "gfs2.h" 23#include "gfs2.h"
@@ -1257,261 +1255,6 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
1257 return ret; 1255 return ret;
1258} 1256}
1259 1257
1260static void empty_write_end(struct page *page, unsigned from,
1261 unsigned to)
1262{
1263 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
1264
1265 page_zero_new_buffers(page, from, to);
1266 flush_dcache_page(page);
1267 mark_page_accessed(page);
1268
1269 if (!gfs2_is_writeback(ip))
1270 gfs2_page_add_databufs(ip, page, from, to);
1271
1272 block_commit_write(page, from, to);
1273}
1274
1275
1276static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
1277{
1278 unsigned start, end, next;
1279 struct buffer_head *bh, *head;
1280 int error;
1281
1282 if (!page_has_buffers(page)) {
1283 error = __block_write_begin(page, from, to - from, gfs2_block_map);
1284 if (unlikely(error))
1285 return error;
1286
1287 empty_write_end(page, from, to);
1288 return 0;
1289 }
1290
1291 bh = head = page_buffers(page);
1292 next = end = 0;
1293 while (next < from) {
1294 next += bh->b_size;
1295 bh = bh->b_this_page;
1296 }
1297 start = next;
1298 do {
1299 next += bh->b_size;
1300 if (buffer_mapped(bh)) {
1301 if (end) {
1302 error = __block_write_begin(page, start, end - start,
1303 gfs2_block_map);
1304 if (unlikely(error))
1305 return error;
1306 empty_write_end(page, start, end);
1307 end = 0;
1308 }
1309 start = next;
1310 }
1311 else
1312 end = next;
1313 bh = bh->b_this_page;
1314 } while (next < to);
1315
1316 if (end) {
1317 error = __block_write_begin(page, start, end - start, gfs2_block_map);
1318 if (unlikely(error))
1319 return error;
1320 empty_write_end(page, start, end);
1321 }
1322
1323 return 0;
1324}
1325
1326static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
1327 int mode)
1328{
1329 struct gfs2_inode *ip = GFS2_I(inode);
1330 struct buffer_head *dibh;
1331 int error;
1332 u64 start = offset >> PAGE_CACHE_SHIFT;
1333 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
1334 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
1335 pgoff_t curr;
1336 struct page *page;
1337 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
1338 unsigned int from, to;
1339
1340 if (!end_offset)
1341 end_offset = PAGE_CACHE_SIZE;
1342
1343 error = gfs2_meta_inode_buffer(ip, &dibh);
1344 if (unlikely(error))
1345 goto out;
1346
1347 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1348
1349 if (gfs2_is_stuffed(ip)) {
1350 error = gfs2_unstuff_dinode(ip, NULL);
1351 if (unlikely(error))
1352 goto out;
1353 }
1354
1355 curr = start;
1356 offset = start << PAGE_CACHE_SHIFT;
1357 from = start_offset;
1358 to = PAGE_CACHE_SIZE;
1359 while (curr <= end) {
1360 page = grab_cache_page_write_begin(inode->i_mapping, curr,
1361 AOP_FLAG_NOFS);
1362 if (unlikely(!page)) {
1363 error = -ENOMEM;
1364 goto out;
1365 }
1366
1367 if (curr == end)
1368 to = end_offset;
1369 error = write_empty_blocks(page, from, to);
1370 if (!error && offset + to > inode->i_size &&
1371 !(mode & FALLOC_FL_KEEP_SIZE)) {
1372 i_size_write(inode, offset + to);
1373 }
1374 unlock_page(page);
1375 page_cache_release(page);
1376 if (error)
1377 goto out;
1378 curr++;
1379 offset += PAGE_CACHE_SIZE;
1380 from = 0;
1381 }
1382
1383 gfs2_dinode_out(ip, dibh->b_data);
1384 mark_inode_dirty(inode);
1385
1386 brelse(dibh);
1387
1388out:
1389 return error;
1390}
1391
1392static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
1393 unsigned int *data_blocks, unsigned int *ind_blocks)
1394{
1395 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1396 unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
1397 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
1398
1399 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
1400 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
1401 max_data -= tmp;
1402 }
1403 /* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
1404 so it might end up with fewer data blocks */
1405 if (max_data <= *data_blocks)
1406 return;
1407 *data_blocks = max_data;
1408 *ind_blocks = max_blocks - max_data;
1409 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
1410 if (*len > max) {
1411 *len = max;
1412 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
1413 }
1414}
1415
1416static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1417 loff_t len)
1418{
1419 struct gfs2_sbd *sdp = GFS2_SB(inode);
1420 struct gfs2_inode *ip = GFS2_I(inode);
1421 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
1422 loff_t bytes, max_bytes;
1423 struct gfs2_alloc *al;
1424 int error;
1425 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
1426 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
1427
1428 /* We only support the FALLOC_FL_KEEP_SIZE mode */
1429 if (mode && (mode != FALLOC_FL_KEEP_SIZE))
1430 return -EOPNOTSUPP;
1431
1432 offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
1433 sdp->sd_sb.sb_bsize_shift;
1434
1435 len = next - offset;
1436 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
1437 if (!bytes)
1438 bytes = UINT_MAX;
1439
1440 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
1441 error = gfs2_glock_nq(&ip->i_gh);
1442 if (unlikely(error))
1443 goto out_uninit;
1444
1445 if (!gfs2_write_alloc_required(ip, offset, len))
1446 goto out_unlock;
1447
1448 while (len > 0) {
1449 if (len < bytes)
1450 bytes = len;
1451 al = gfs2_alloc_get(ip);
1452 if (!al) {
1453 error = -ENOMEM;
1454 goto out_unlock;
1455 }
1456
1457 error = gfs2_quota_lock_check(ip);
1458 if (error)
1459 goto out_alloc_put;
1460
1461retry:
1462 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
1463
1464 al->al_requested = data_blocks + ind_blocks;
1465 error = gfs2_inplace_reserve(ip);
1466 if (error) {
1467 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
1468 bytes >>= 1;
1469 goto retry;
1470 }
1471 goto out_qunlock;
1472 }
1473 max_bytes = bytes;
1474 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
1475 al->al_requested = data_blocks + ind_blocks;
1476
1477 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
1478 RES_RG_HDR + gfs2_rg_blocks(al);
1479 if (gfs2_is_jdata(ip))
1480 rblocks += data_blocks ? data_blocks : 1;
1481
1482 error = gfs2_trans_begin(sdp, rblocks,
1483 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
1484 if (error)
1485 goto out_trans_fail;
1486
1487 error = fallocate_chunk(inode, offset, max_bytes, mode);
1488 gfs2_trans_end(sdp);
1489
1490 if (error)
1491 goto out_trans_fail;
1492
1493 len -= max_bytes;
1494 offset += max_bytes;
1495 gfs2_inplace_release(ip);
1496 gfs2_quota_unlock(ip);
1497 gfs2_alloc_put(ip);
1498 }
1499 goto out_unlock;
1500
1501out_trans_fail:
1502 gfs2_inplace_release(ip);
1503out_qunlock:
1504 gfs2_quota_unlock(ip);
1505out_alloc_put:
1506 gfs2_alloc_put(ip);
1507out_unlock:
1508 gfs2_glock_dq(&ip->i_gh);
1509out_uninit:
1510 gfs2_holder_uninit(&ip->i_gh);
1511 return error;
1512}
1513
1514
1515static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1258static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1516 u64 start, u64 len) 1259 u64 start, u64 len)
1517{ 1260{
@@ -1562,7 +1305,6 @@ const struct inode_operations gfs2_file_iops = {
1562 .getxattr = gfs2_getxattr, 1305 .getxattr = gfs2_getxattr,
1563 .listxattr = gfs2_listxattr, 1306 .listxattr = gfs2_listxattr,
1564 .removexattr = gfs2_removexattr, 1307 .removexattr = gfs2_removexattr,
1565 .fallocate = gfs2_fallocate,
1566 .fiemap = gfs2_fiemap, 1308 .fiemap = gfs2_fiemap,
1567}; 1309};
1568 1310
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 56f0da1cfd10..1ae35baa539e 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -281,7 +281,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
281 attr->ia_size != i_size_read(inode)) { 281 attr->ia_size != i_size_read(inode)) {
282 error = vmtruncate(inode, attr->ia_size); 282 error = vmtruncate(inode, attr->ia_size);
283 if (error) 283 if (error)
284 return error; 284 goto out_unlock;
285 } 285 }
286 286
287 setattr_copy(inode, attr); 287 setattr_copy(inode, attr);
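
The hpfs change is a lock-balance fix: hpfs_setattr() takes a filesystem lock before this block, so the early return on vmtruncate() failure left it held. A minimal sketch of the invariant being restored, with placeholder lock helpers rather than the hpfs ones:

    int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = dentry->d_inode;
            int error;

            take_fs_lock(inode->i_sb);              /* placeholder */
            error = inode_change_ok(inode, attr);
            if (error)
                    goto out_unlock;                /* never a bare return */
            /* ... truncate, setattr_copy(), mark_inode_dirty() ... */
    out_unlock:
            release_fs_lock(inode->i_sb);           /* placeholder */
            return error;
    }
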
diff --git a/fs/internal.h b/fs/internal.h
index 12ccb86edef7..0663568b1247 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -70,8 +70,7 @@ extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
70extern void release_mounts(struct list_head *); 70extern void release_mounts(struct list_head *);
71extern void umount_tree(struct vfsmount *, int, struct list_head *); 71extern void umount_tree(struct vfsmount *, int, struct list_head *);
72extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); 72extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
73extern int do_add_mount(struct vfsmount *, struct path *, int); 73extern int finish_automount(struct vfsmount *, struct path *);
74extern void mnt_clear_expiry(struct vfsmount *);
75 74
76extern void mnt_make_longterm(struct vfsmount *); 75extern void mnt_make_longterm(struct vfsmount *);
77extern void mnt_make_shortterm(struct vfsmount *); 76extern void mnt_make_shortterm(struct vfsmount *);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index d6cc16476620..a59635e295fa 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -86,7 +86,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
86 u64 phys, u64 len, u32 flags) 86 u64 phys, u64 len, u32 flags)
87{ 87{
88 struct fiemap_extent extent; 88 struct fiemap_extent extent;
89 struct fiemap_extent *dest = fieinfo->fi_extents_start; 89 struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
90 90
91 /* only count the extents */ 91 /* only count the extents */
92 if (fieinfo->fi_extents_max == 0) { 92 if (fieinfo->fi_extents_max == 0) {
@@ -173,6 +173,7 @@ static int fiemap_check_ranges(struct super_block *sb,
173static int ioctl_fiemap(struct file *filp, unsigned long arg) 173static int ioctl_fiemap(struct file *filp, unsigned long arg)
174{ 174{
175 struct fiemap fiemap; 175 struct fiemap fiemap;
176 struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
176 struct fiemap_extent_info fieinfo = { 0, }; 177 struct fiemap_extent_info fieinfo = { 0, };
177 struct inode *inode = filp->f_path.dentry->d_inode; 178 struct inode *inode = filp->f_path.dentry->d_inode;
178 struct super_block *sb = inode->i_sb; 179 struct super_block *sb = inode->i_sb;
@@ -182,8 +183,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
182 if (!inode->i_op->fiemap) 183 if (!inode->i_op->fiemap)
183 return -EOPNOTSUPP; 184 return -EOPNOTSUPP;
184 185
185 if (copy_from_user(&fiemap, (struct fiemap __user *)arg, 186 if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
186 sizeof(struct fiemap)))
187 return -EFAULT; 187 return -EFAULT;
188 188
189 if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS) 189 if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
@@ -196,7 +196,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
196 196
197 fieinfo.fi_flags = fiemap.fm_flags; 197 fieinfo.fi_flags = fiemap.fm_flags;
198 fieinfo.fi_extents_max = fiemap.fm_extent_count; 198 fieinfo.fi_extents_max = fiemap.fm_extent_count;
199 fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap)); 199 fieinfo.fi_extents_start = ufiemap->fm_extents;
200 200
201 if (fiemap.fm_extent_count != 0 && 201 if (fiemap.fm_extent_count != 0 &&
202 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start, 202 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
@@ -209,7 +209,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
209 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len); 209 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
210 fiemap.fm_flags = fieinfo.fi_flags; 210 fiemap.fm_flags = fieinfo.fi_flags;
211 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped; 211 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
212 if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap))) 212 if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
213 error = -EFAULT; 213 error = -EFAULT;
214 214
215 return error; 215 return error;
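
The fs/ioctl.c hunks are annotation and cast cleanups: the fiemap user pointers are now typed __user so sparse can flag any direct dereference, and the extent-array address comes from the struct field instead of pointer arithmetic on arg. The pattern, excerpted (fieinfo as in the hunk above):

    struct fiemap __user *ufiemap = (struct fiemap __user *)arg;
    struct fiemap fiemap;

    if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
            return -EFAULT;
    /* Taking the address of an array member dereferences nothing,
     * so this is safe on a __user pointer. */
    fieinfo.fi_extents_start = ufiemap->fm_extents;
    /* ... */
    if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
            error = -EFAULT;
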
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 85c6be2db02f..3005ec4520ad 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -336,14 +336,13 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
336 size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; 336 size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
337#ifndef __ECOS 337#ifndef __ECOS
338 if (jffs2_blocks_use_vmalloc(c)) 338 if (jffs2_blocks_use_vmalloc(c))
339 c->blocks = vmalloc(size); 339 c->blocks = vzalloc(size);
340 else 340 else
341#endif 341#endif
342 c->blocks = kmalloc(size, GFP_KERNEL); 342 c->blocks = kzalloc(size, GFP_KERNEL);
343 if (!c->blocks) 343 if (!c->blocks)
344 return -ENOMEM; 344 return -ENOMEM;
345 345
346 memset(c->blocks, 0, size);
347 for (i=0; i<c->nr_blocks; i++) { 346 for (i=0; i<c->nr_blocks; i++) {
348 INIT_LIST_HEAD(&c->blocks[i].list); 347 INIT_LIST_HEAD(&c->blocks[i].list);
349 c->blocks[i].offset = i * c->sector_size; 348 c->blocks[i].offset = i * c->sector_size;
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index f864005de64c..0bc6a6c80a56 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -144,4 +144,4 @@ struct jffs2_sb_info {
144 void *os_priv; 144 void *os_priv;
145}; 145};
146 146
147#endif /* _JFFS2_FB_SB */ 147#endif /* _JFFS2_FS_SB */
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 9b572ca40a49..4f9cc0482949 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -151,7 +151,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
151 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 151 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
152 offset, je32_to_cpu(rx.hdr_crc), crc); 152 offset, je32_to_cpu(rx.hdr_crc), crc);
153 xd->flags |= JFFS2_XFLAGS_INVALID; 153 xd->flags |= JFFS2_XFLAGS_INVALID;
154 return EIO; 154 return -EIO;
155 } 155 }
156 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); 156 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
157 if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK 157 if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -167,7 +167,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
167 je32_to_cpu(rx.xid), xd->xid, 167 je32_to_cpu(rx.xid), xd->xid,
168 je32_to_cpu(rx.version), xd->version); 168 je32_to_cpu(rx.version), xd->version);
169 xd->flags |= JFFS2_XFLAGS_INVALID; 169 xd->flags |= JFFS2_XFLAGS_INVALID;
170 return EIO; 170 return -EIO;
171 } 171 }
172 xd->xprefix = rx.xprefix; 172 xd->xprefix = rx.xprefix;
173 xd->name_len = rx.name_len; 173 xd->name_len = rx.name_len;
@@ -230,7 +230,7 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
230 ref_offset(xd->node), xd->data_crc, crc); 230 ref_offset(xd->node), xd->data_crc, crc);
231 kfree(data); 231 kfree(data);
232 xd->flags |= JFFS2_XFLAGS_INVALID; 232 xd->flags |= JFFS2_XFLAGS_INVALID;
233 return EIO; 233 return -EIO;
234 } 234 }
235 235
236 xd->flags |= JFFS2_XFLAGS_HOT; 236 xd->flags |= JFFS2_XFLAGS_HOT;
@@ -268,7 +268,7 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
268 if (xd->xname) 268 if (xd->xname)
269 return 0; 269 return 0;
270 if (xd->flags & JFFS2_XFLAGS_INVALID) 270 if (xd->flags & JFFS2_XFLAGS_INVALID)
271 return EIO; 271 return -EIO;
272 if (unlikely(is_xattr_datum_unchecked(c, xd))) 272 if (unlikely(is_xattr_datum_unchecked(c, xd)))
273 rc = do_verify_xattr_datum(c, xd); 273 rc = do_verify_xattr_datum(c, xd);
274 if (!rc) 274 if (!rc)
@@ -460,7 +460,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
460 if (crc != je32_to_cpu(rr.node_crc)) { 460 if (crc != je32_to_cpu(rr.node_crc)) {
461 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 461 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
462 offset, je32_to_cpu(rr.node_crc), crc); 462 offset, je32_to_cpu(rr.node_crc), crc);
463 return EIO; 463 return -EIO;
464 } 464 }
465 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK 465 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
466 || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF 466 || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -470,7 +470,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
470 offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, 470 offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
471 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, 471 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
472 je32_to_cpu(rr.totlen), PAD(sizeof(rr))); 472 je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
473 return EIO; 473 return -EIO;
474 } 474 }
475 ref->ino = je32_to_cpu(rr.ino); 475 ref->ino = je32_to_cpu(rr.ino);
476 ref->xid = je32_to_cpu(rr.xid); 476 ref->xid = je32_to_cpu(rr.xid);
diff --git a/fs/namei.c b/fs/namei.c
index 8f7b41a14882..b753192d8c3f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -923,37 +923,13 @@ static int follow_automount(struct path *path, unsigned flags,
923 if (!mnt) /* mount collision */ 923 if (!mnt) /* mount collision */
924 return 0; 924 return 0;
925 925
926 /* The new mount record should have at least 2 refs to prevent it being 926 err = finish_automount(mnt, path);
927 * expired before we get a chance to add it
928 */
929 BUG_ON(mnt_get_count(mnt) < 2);
930
931 if (mnt->mnt_sb == path->mnt->mnt_sb &&
932 mnt->mnt_root == path->dentry) {
933 mnt_clear_expiry(mnt);
934 mntput(mnt);
935 mntput(mnt);
936 return -ELOOP;
937 }
938 927
939 /* We need to add the mountpoint to the parent. The filesystem may
940 * have placed it on an expiry list, and so we need to make sure it
941 * won't be expired under us if do_add_mount() fails (do_add_mount()
942 * will eat a reference unconditionally).
943 */
944 mntget(mnt);
945 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
946 switch (err) { 928 switch (err) {
947 case -EBUSY: 929 case -EBUSY:
948 /* Someone else made a mount here whilst we were busy */ 930 /* Someone else made a mount here whilst we were busy */
949 err = 0; 931 return 0;
950 default:
951 mnt_clear_expiry(mnt);
952 mntput(mnt);
953 mntput(mnt);
954 return err;
955 case 0: 932 case 0:
956 mntput(mnt);
957 dput(path->dentry); 933 dput(path->dentry);
958 if (*need_mntput) 934 if (*need_mntput)
959 mntput(path->mnt); 935 mntput(path->mnt);
@@ -961,7 +937,10 @@ static int follow_automount(struct path *path, unsigned flags,
961 path->dentry = dget(mnt->mnt_root); 937 path->dentry = dget(mnt->mnt_root);
962 *need_mntput = true; 938 *need_mntput = true;
963 return 0; 939 return 0;
940 default:
941 return err;
964 } 942 }
943
965} 944}
966 945
967/* 946/*
diff --git a/fs/namespace.c b/fs/namespace.c
index 9f544f35ed34..7b0b95371696 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1872,6 +1872,8 @@ out:
1872 return err; 1872 return err;
1873} 1873}
1874 1874
1875static int do_add_mount(struct vfsmount *, struct path *, int);
1876
1875/* 1877/*
1876 * create a new mount for userspace and request it to be added into the 1878 * create a new mount for userspace and request it to be added into the
1877 * namespace's tree 1879 * namespace's tree
@@ -1880,6 +1882,7 @@ static int do_new_mount(struct path *path, char *type, int flags,
1880 int mnt_flags, char *name, void *data) 1882 int mnt_flags, char *name, void *data)
1881{ 1883{
1882 struct vfsmount *mnt; 1884 struct vfsmount *mnt;
1885 int err;
1883 1886
1884 if (!type) 1887 if (!type)
1885 return -EINVAL; 1888 return -EINVAL;
@@ -1892,14 +1895,47 @@ static int do_new_mount(struct path *path, char *type, int flags,
1892 if (IS_ERR(mnt)) 1895 if (IS_ERR(mnt))
1893 return PTR_ERR(mnt); 1896 return PTR_ERR(mnt);
1894 1897
1895 return do_add_mount(mnt, path, mnt_flags); 1898 err = do_add_mount(mnt, path, mnt_flags);
1899 if (err)
1900 mntput(mnt);
1901 return err;
1902}
1903
1904int finish_automount(struct vfsmount *m, struct path *path)
1905{
1906 int err;
1907 /* The new mount record should have at least 2 refs to prevent it being
1908 * expired before we get a chance to add it
1909 */
1910 BUG_ON(mnt_get_count(m) < 2);
1911
1912 if (m->mnt_sb == path->mnt->mnt_sb &&
1913 m->mnt_root == path->dentry) {
1914 err = -ELOOP;
1915 goto fail;
1916 }
1917
1918 err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
1919 if (!err)
1920 return 0;
1921fail:
1922 /* remove m from any expiration list it may be on */
1923 if (!list_empty(&m->mnt_expire)) {
1924 down_write(&namespace_sem);
1925 br_write_lock(vfsmount_lock);
1926 list_del_init(&m->mnt_expire);
1927 br_write_unlock(vfsmount_lock);
1928 up_write(&namespace_sem);
1929 }
1930 mntput(m);
1931 mntput(m);
1932 return err;
1896} 1933}
1897 1934
1898/* 1935/*
1899 * add a mount into a namespace's mount tree 1936 * add a mount into a namespace's mount tree
1900 * - this unconditionally eats one of the caller's references to newmnt.
1901 */ 1937 */
1902int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) 1938static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
1903{ 1939{
1904 int err; 1940 int err;
1905 1941
@@ -1926,15 +1962,10 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
1926 goto unlock; 1962 goto unlock;
1927 1963
1928 newmnt->mnt_flags = mnt_flags; 1964 newmnt->mnt_flags = mnt_flags;
1929 if ((err = graft_tree(newmnt, path))) 1965 err = graft_tree(newmnt, path);
1930 goto unlock;
1931
1932 up_write(&namespace_sem);
1933 return 0;
1934 1966
1935unlock: 1967unlock:
1936 up_write(&namespace_sem); 1968 up_write(&namespace_sem);
1937 mntput(newmnt);
1938 return err; 1969 return err;
1939} 1970}
1940 1971
@@ -1956,20 +1987,6 @@ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
1956EXPORT_SYMBOL(mnt_set_expiry); 1987EXPORT_SYMBOL(mnt_set_expiry);
1957 1988
1958/* 1989/*
1959 * Remove a vfsmount from any expiration list it may be on
1960 */
1961void mnt_clear_expiry(struct vfsmount *mnt)
1962{
1963 if (!list_empty(&mnt->mnt_expire)) {
1964 down_write(&namespace_sem);
1965 br_write_lock(vfsmount_lock);
1966 list_del_init(&mnt->mnt_expire);
1967 br_write_unlock(vfsmount_lock);
1968 up_write(&namespace_sem);
1969 }
1970}
1971
1972/*
1973 * process a list of expirable mountpoints with the intent of discarding any 1990 * process a list of expirable mountpoints with the intent of discarding any
1974 * mountpoints that aren't in use and haven't been touched since last we came 1991 * mountpoints that aren't in use and haven't been touched since last we came
1975 * here 1992 * here
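
The namespace.c rework changes who owns the reference: do_add_mount() becomes static and no longer eats the caller's reference on failure, while the expiry-list cleanup (the old mnt_clear_expiry()) moves into the new finish_automount(), which also performs the double mntput() for the two references an automount result carries. The caller pattern after the change, as in do_new_mount() above:

    err = do_add_mount(mnt, path, mnt_flags);
    if (err)
            mntput(mnt);    /* caller still owns mnt on failure */
    return err;

Making the ownership explicit at each call site is what lets fs/namei.c shed the careful mntget()/mntput() dance it needed before.
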
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 63e3fca266e0..a6651956482e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1989,20 +1989,20 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1989 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); 1989 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1990} 1990}
1991 1991
1992static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, 1992static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
1993 loff_t len) 1993 loff_t len)
1994{ 1994{
1995 struct inode *inode = file->f_path.dentry->d_inode;
1995 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1996 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1996 struct ocfs2_space_resv sr; 1997 struct ocfs2_space_resv sr;
1997 int change_size = 1; 1998 int change_size = 1;
1998 int cmd = OCFS2_IOC_RESVSP64; 1999 int cmd = OCFS2_IOC_RESVSP64;
1999 2000
2001 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2002 return -EOPNOTSUPP;
2000 if (!ocfs2_writes_unwritten_extents(osb)) 2003 if (!ocfs2_writes_unwritten_extents(osb))
2001 return -EOPNOTSUPP; 2004 return -EOPNOTSUPP;
2002 2005
2003 if (S_ISDIR(inode->i_mode))
2004 return -ENODEV;
2005
2006 if (mode & FALLOC_FL_KEEP_SIZE) 2006 if (mode & FALLOC_FL_KEEP_SIZE)
2007 change_size = 0; 2007 change_size = 0;
2008 2008
@@ -2610,7 +2610,6 @@ const struct inode_operations ocfs2_file_iops = {
2610 .getxattr = generic_getxattr, 2610 .getxattr = generic_getxattr,
2611 .listxattr = ocfs2_listxattr, 2611 .listxattr = ocfs2_listxattr,
2612 .removexattr = generic_removexattr, 2612 .removexattr = generic_removexattr,
2613 .fallocate = ocfs2_fallocate,
2614 .fiemap = ocfs2_fiemap, 2613 .fiemap = ocfs2_fiemap,
2615}; 2614};
2616 2615
@@ -2642,6 +2641,7 @@ const struct file_operations ocfs2_fops = {
2642 .flock = ocfs2_flock, 2641 .flock = ocfs2_flock,
2643 .splice_read = ocfs2_file_splice_read, 2642 .splice_read = ocfs2_file_splice_read,
2644 .splice_write = ocfs2_file_splice_write, 2643 .splice_write = ocfs2_file_splice_write,
2644 .fallocate = ocfs2_fallocate,
2645}; 2645};
2646 2646
2647const struct file_operations ocfs2_dops = { 2647const struct file_operations ocfs2_dops = {
diff --git a/fs/open.c b/fs/open.c
index 5b6ef7e2859e..e52389e1f05b 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -255,10 +255,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
255 if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) 255 if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
256 return -EFBIG; 256 return -EFBIG;
257 257
258 if (!inode->i_op->fallocate) 258 if (!file->f_op->fallocate)
259 return -EOPNOTSUPP; 259 return -EOPNOTSUPP;
260 260
261 return inode->i_op->fallocate(inode, mode, offset, len); 261 return file->f_op->fallocate(file, mode, offset, len);
262} 262}
263 263
264SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) 264SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
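
With do_fallocate() now dispatching through file->f_op->fallocate, the userspace contract is unchanged. For context, a minimal userspace view of the syscall these hooks serve:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("data.bin", O_RDWR | O_CREAT, 0644);

            /* Reserve 1 MiB up front without changing the visible size. */
            if (fd < 0 || fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
                    perror("fallocate");
            return 0;
    }
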
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index ef51eb43e137..a55c1b46b219 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -37,6 +37,7 @@
37#include "xfs_trace.h" 37#include "xfs_trace.h"
38 38
39#include <linux/dcache.h> 39#include <linux/dcache.h>
40#include <linux/falloc.h>
40 41
41static const struct vm_operations_struct xfs_file_vm_ops; 42static const struct vm_operations_struct xfs_file_vm_ops;
42 43
@@ -882,6 +883,60 @@ out_unlock:
882 return ret; 883 return ret;
883} 884}
884 885
886STATIC long
887xfs_file_fallocate(
888 struct file *file,
889 int mode,
890 loff_t offset,
891 loff_t len)
892{
893 struct inode *inode = file->f_path.dentry->d_inode;
894 long error;
895 loff_t new_size = 0;
896 xfs_flock64_t bf;
897 xfs_inode_t *ip = XFS_I(inode);
898 int cmd = XFS_IOC_RESVSP;
899
900 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
901 return -EOPNOTSUPP;
902
903 bf.l_whence = 0;
904 bf.l_start = offset;
905 bf.l_len = len;
906
907 xfs_ilock(ip, XFS_IOLOCK_EXCL);
908
909 if (mode & FALLOC_FL_PUNCH_HOLE)
910 cmd = XFS_IOC_UNRESVSP;
911
912 /* check the new inode size is valid before allocating */
913 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
914 offset + len > i_size_read(inode)) {
915 new_size = offset + len;
916 error = inode_newsize_ok(inode, new_size);
917 if (error)
918 goto out_unlock;
919 }
920
921 error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
922 if (error)
923 goto out_unlock;
924
925 /* Change file size if needed */
926 if (new_size) {
927 struct iattr iattr;
928
929 iattr.ia_valid = ATTR_SIZE;
930 iattr.ia_size = new_size;
931 error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
932 }
933
934out_unlock:
935 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
936 return error;
937}
938
939
885STATIC int 940STATIC int
886xfs_file_open( 941xfs_file_open(
887 struct inode *inode, 942 struct inode *inode,
@@ -1000,6 +1055,7 @@ const struct file_operations xfs_file_operations = {
1000 .open = xfs_file_open, 1055 .open = xfs_file_open,
1001 .release = xfs_file_release, 1056 .release = xfs_file_release,
1002 .fsync = xfs_file_fsync, 1057 .fsync = xfs_file_fsync,
1058 .fallocate = xfs_file_fallocate,
1003}; 1059};
1004 1060
1005const struct file_operations xfs_dir_file_operations = { 1061const struct file_operations xfs_dir_file_operations = {
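
xfs_file_fallocate() above is the old xfs_vn_fallocate() (removed from xfs_iops.c next) reshaped for the file-op signature; the directory check goes away for the reason noted earlier. Two XFS idioms worth calling out, in brief:

    int cmd = XFS_IOC_RESVSP;               /* allocate/reserve space */
    if (mode & FALLOC_FL_PUNCH_HOLE)
            cmd = XFS_IOC_UNRESVSP;         /* deallocate a hole instead */

    /* XFS internals return positive errnos; negate at the VFS edge. */
    error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
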
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index da54403633b6..bd5727852fd6 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -46,7 +46,6 @@
46#include <linux/namei.h> 46#include <linux/namei.h>
47#include <linux/posix_acl.h> 47#include <linux/posix_acl.h>
48#include <linux/security.h> 48#include <linux/security.h>
49#include <linux/falloc.h>
50#include <linux/fiemap.h> 49#include <linux/fiemap.h>
51#include <linux/slab.h> 50#include <linux/slab.h>
52 51
@@ -505,61 +504,6 @@ xfs_vn_setattr(
505 return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0); 504 return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
506} 505}
507 506
508STATIC long
509xfs_vn_fallocate(
510 struct inode *inode,
511 int mode,
512 loff_t offset,
513 loff_t len)
514{
515 long error;
516 loff_t new_size = 0;
517 xfs_flock64_t bf;
518 xfs_inode_t *ip = XFS_I(inode);
519 int cmd = XFS_IOC_RESVSP;
520
521 /* preallocation on directories not yet supported */
522 error = -ENODEV;
523 if (S_ISDIR(inode->i_mode))
524 goto out_error;
525
526 bf.l_whence = 0;
527 bf.l_start = offset;
528 bf.l_len = len;
529
530 xfs_ilock(ip, XFS_IOLOCK_EXCL);
531
532 if (mode & FALLOC_FL_PUNCH_HOLE)
533 cmd = XFS_IOC_UNRESVSP;
534
535 /* check the new inode size is valid before allocating */
536 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
537 offset + len > i_size_read(inode)) {
538 new_size = offset + len;
539 error = inode_newsize_ok(inode, new_size);
540 if (error)
541 goto out_unlock;
542 }
543
544 error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
545 if (error)
546 goto out_unlock;
547
548 /* Change file size if needed */
549 if (new_size) {
550 struct iattr iattr;
551
552 iattr.ia_valid = ATTR_SIZE;
553 iattr.ia_size = new_size;
554 error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
555 }
556
557out_unlock:
558 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
559out_error:
560 return error;
561}
562
563#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 507#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
564 508
565/* 509/*
@@ -653,7 +597,6 @@ static const struct inode_operations xfs_inode_operations = {
653 .getxattr = generic_getxattr, 597 .getxattr = generic_getxattr,
654 .removexattr = generic_removexattr, 598 .removexattr = generic_removexattr,
655 .listxattr = xfs_vn_listxattr, 599 .listxattr = xfs_vn_listxattr,
656 .fallocate = xfs_vn_fallocate,
657 .fiemap = xfs_vn_fiemap, 600 .fiemap = xfs_vn_fiemap,
658}; 601};
659 602
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index e6cf955ec0fc..0df88897ef84 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -75,11 +75,11 @@ xfs_cmn_err(
75{ 75{
76 struct va_format vaf; 76 struct va_format vaf;
77 va_list args; 77 va_list args;
78 int panic = 0; 78 int do_panic = 0;
79 79
80 if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { 80 if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
81 printk(KERN_ALERT "XFS: Transforming an alert into a BUG."); 81 printk(KERN_ALERT "XFS: Transforming an alert into a BUG.");
82 panic = 1; 82 do_panic = 1;
83 } 83 }
84 84
85 va_start(args, fmt); 85 va_start(args, fmt);
@@ -89,7 +89,7 @@ xfs_cmn_err(
89 printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf); 89 printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf);
90 va_end(args); 90 va_end(args);
91 91
92 BUG_ON(panic); 92 BUG_ON(do_panic);
93} 93}
94 94
95void 95void
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 521a0f8974ac..3111385b8ca7 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -12,7 +12,6 @@
12 * 12 *
13 * Please credit ARM.com 13 * Please credit ARM.com
14 * Documentation: ARM DDI 0196D 14 * Documentation: ARM DDI 0196D
15 *
16 */ 15 */
17 16
18#ifndef AMBA_PL08X_H 17#ifndef AMBA_PL08X_H
@@ -22,6 +21,15 @@
22#include <linux/dmaengine.h> 21#include <linux/dmaengine.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24 23
24struct pl08x_lli;
25struct pl08x_driver_data;
26
27/* Bitmasks for selecting AHB ports for DMA transfers */
28enum {
29 PL08X_AHB1 = (1 << 0),
30 PL08X_AHB2 = (1 << 1)
31};
32
25/** 33/**
26 * struct pl08x_channel_data - data structure to pass info between 34 * struct pl08x_channel_data - data structure to pass info between
27 * platform and PL08x driver regarding channel configuration 35 * platform and PL08x driver regarding channel configuration
@@ -46,8 +54,10 @@
46 * @circular_buffer: whether the buffer passed in is circular and 54 * @circular_buffer: whether the buffer passed in is circular and
47 * shall simply be looped round round (like a record baby round 55 * shall simply be looped round round (like a record baby round
48 * round round round) 56 * round round round)
49 * @single: the device connected to this channel will request single 57 * @single: the device connected to this channel will request single DMA
50 * DMA transfers, not bursts. (Bursts are default.) 58 * transfers, not bursts. (Bursts are default.)
59 * @periph_buses: the device connected to this channel is accessible via
60 * these buses (use PL08X_AHB1 | PL08X_AHB2).
51 */ 61 */
52struct pl08x_channel_data { 62struct pl08x_channel_data {
53 char *bus_id; 63 char *bus_id;
@@ -55,10 +65,10 @@ struct pl08x_channel_data {
55 int max_signal; 65 int max_signal;
56 u32 muxval; 66 u32 muxval;
57 u32 cctl; 67 u32 cctl;
58 u32 ccfg;
59 dma_addr_t addr; 68 dma_addr_t addr;
60 bool circular_buffer; 69 bool circular_buffer;
61 bool single; 70 bool single;
71 u8 periph_buses;
62}; 72};
63 73
64/** 74/**
@@ -67,24 +77,23 @@ struct pl08x_channel_data {
67 * @addr: current address 77 * @addr: current address
68 * @maxwidth: the maximum width of a transfer on this bus 78 * @maxwidth: the maximum width of a transfer on this bus
69 * @buswidth: the width of this bus in bytes: 1, 2 or 4 79 * @buswidth: the width of this bus in bytes: 1, 2 or 4
70 * @fill_bytes: bytes required to fill to the next bus memory 80 * @fill_bytes: bytes required to fill to the next bus memory boundary
71 * boundary
72 */ 81 */
73struct pl08x_bus_data { 82struct pl08x_bus_data {
74 dma_addr_t addr; 83 dma_addr_t addr;
75 u8 maxwidth; 84 u8 maxwidth;
76 u8 buswidth; 85 u8 buswidth;
77 u32 fill_bytes; 86 size_t fill_bytes;
78}; 87};
79 88
80/** 89/**
81 * struct pl08x_phy_chan - holder for the physical channels 90 * struct pl08x_phy_chan - holder for the physical channels
82 * @id: physical index to this channel 91 * @id: physical index to this channel
83 * @lock: a lock to use when altering an instance of this struct 92 * @lock: a lock to use when altering an instance of this struct
84 * @signal: the physical signal (aka channel) serving this 93 * @signal: the physical signal (aka channel) serving this physical channel
85 * physical channel right now 94 * right now
86 * @serving: the virtual channel currently being served by this 95 * @serving: the virtual channel currently being served by this physical
87 * physical channel 96 * channel
88 */ 97 */
89struct pl08x_phy_chan { 98struct pl08x_phy_chan {
90 unsigned int id; 99 unsigned int id;
@@ -92,11 +101,6 @@ struct pl08x_phy_chan {
92 spinlock_t lock; 101 spinlock_t lock;
93 int signal; 102 int signal;
94 struct pl08x_dma_chan *serving; 103 struct pl08x_dma_chan *serving;
95 u32 csrc;
96 u32 cdst;
97 u32 clli;
98 u32 cctl;
99 u32 ccfg;
100}; 104};
101 105
102/** 106/**
@@ -108,26 +112,23 @@ struct pl08x_txd {
108 struct dma_async_tx_descriptor tx; 112 struct dma_async_tx_descriptor tx;
109 struct list_head node; 113 struct list_head node;
110 enum dma_data_direction direction; 114 enum dma_data_direction direction;
111 struct pl08x_bus_data srcbus; 115 dma_addr_t src_addr;
112 struct pl08x_bus_data dstbus; 116 dma_addr_t dst_addr;
113 int len; 117 size_t len;
114 dma_addr_t llis_bus; 118 dma_addr_t llis_bus;
115 void *llis_va; 119 struct pl08x_lli *llis_va;
116 struct pl08x_channel_data *cd; 120 /* Default cctl value for LLIs */
117 bool active; 121 u32 cctl;
118 /* 122 /*
119 * Settings to be put into the physical channel when we 123 * Settings to be put into the physical channel when we
120 * trigger this txd 124 * trigger this txd. Other registers are in llis_va[0].
121 */ 125 */
122 u32 csrc; 126 u32 ccfg;
123 u32 cdst;
124 u32 clli;
125 u32 cctl;
126}; 127};
127 128
128/** 129/**
129 * struct pl08x_dma_chan_state - holds the PL08x specific virtual 130 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
130 * channel states 131 * states
131 * @PL08X_CHAN_IDLE: the channel is idle 132 * @PL08X_CHAN_IDLE: the channel is idle
132 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport 133 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
133 * channel and is running a transfer on it 134 * channel and is running a transfer on it
@@ -147,6 +148,8 @@ enum pl08x_dma_chan_state {
147 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel 148 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 148 * @chan: wrapped abstract channel 149 * @chan: wrapped abstract channel
149 * @phychan: the physical channel utilized by this channel, if there is one 150 * @phychan: the physical channel utilized by this channel, if there is one
151 * @phychan_hold: if non-zero, hold on to the physical channel even if we
152 * have no pending entries
150 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc 153 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
151 * @name: name of channel 154 * @name: name of channel
152 * @cd: channel platform data 155 * @cd: channel platform data
@@ -154,53 +157,49 @@ enum pl08x_dma_chan_state {
154 * @runtime_direction: current direction of this channel according to 157 * @runtime_direction: current direction of this channel according to
155 * runtime config 158 * runtime config
156 * @lc: last completed transaction on this channel 159 * @lc: last completed transaction on this channel
157 * @desc_list: queued transactions pending on this channel 160 * @pend_list: queued transactions pending on this channel
158 * @at: active transaction on this channel 161 * @at: active transaction on this channel
159 * @lockflags: sometimes we let a lock last between two function calls,
160 * especially prep/submit, and then we need to store the IRQ flags
161 * in the channel state, here
162 * @lock: a lock for this channel data 162 * @lock: a lock for this channel data
163 * @host: a pointer to the host (internal use) 163 * @host: a pointer to the host (internal use)
164 * @state: whether the channel is idle, paused, running etc 164 * @state: whether the channel is idle, paused, running etc
165 * @slave: whether this channel is a device (slave) or for memcpy 165 * @slave: whether this channel is a device (slave) or for memcpy
166 * @waiting: a TX descriptor on this channel which is waiting for 166 * @waiting: a TX descriptor on this channel which is waiting for a physical
167 * a physical channel to become available 167 * channel to become available
168 */ 168 */
169struct pl08x_dma_chan { 169struct pl08x_dma_chan {
170 struct dma_chan chan; 170 struct dma_chan chan;
171 struct pl08x_phy_chan *phychan; 171 struct pl08x_phy_chan *phychan;
172 int phychan_hold;
172 struct tasklet_struct tasklet; 173 struct tasklet_struct tasklet;
173 char *name; 174 char *name;
174 struct pl08x_channel_data *cd; 175 struct pl08x_channel_data *cd;
175 dma_addr_t runtime_addr; 176 dma_addr_t runtime_addr;
176 enum dma_data_direction runtime_direction; 177 enum dma_data_direction runtime_direction;
177 atomic_t last_issued;
178 dma_cookie_t lc; 178 dma_cookie_t lc;
179 struct list_head desc_list; 179 struct list_head pend_list;
180 struct pl08x_txd *at; 180 struct pl08x_txd *at;
181 unsigned long lockflags;
182 spinlock_t lock; 181 spinlock_t lock;
183 void *host; 182 struct pl08x_driver_data *host;
184 enum pl08x_dma_chan_state state; 183 enum pl08x_dma_chan_state state;
185 bool slave; 184 bool slave;
186 struct pl08x_txd *waiting; 185 struct pl08x_txd *waiting;
187}; 186};
188 187
189/** 188/**
190 * struct pl08x_platform_data - the platform configuration for the 189 * struct pl08x_platform_data - the platform configuration for the PL08x
191 * PL08x PrimeCells. 190 * PrimeCells.
192 * @slave_channels: the channels defined for the different devices on the 191 * @slave_channels: the channels defined for the different devices on the
193 * platform, all inclusive, including multiplexed channels. The available 192 * platform, all inclusive, including multiplexed channels. The available
194 * physical channels will be multiplexed around these signals as they 193 * physical channels will be multiplexed around these signals as they are
195 * are requested, just enumerate all possible channels. 194 * requested, just enumerate all possible channels.
196 * @get_signal: request a physical signal to be used for a DMA 195 * @get_signal: request a physical signal to be used for a DMA transfer
197 * transfer immediately: if there is some multiplexing or similar blocking 196 * immediately: if there is some multiplexing or similar blocking the use
198 * the use of the channel the transfer can be denied by returning 197 * of the channel the transfer can be denied by returning less than zero,
199 * less than zero, else it returns the allocated signal number 198 * else it returns the allocated signal number
200 * @put_signal: indicate to the platform that this physical signal is not 199 * @put_signal: indicate to the platform that this physical signal is not
201 * running any DMA transfer and multiplexing can be recycled 200 * running any DMA transfer and multiplexing can be recycled
202 * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the 201 * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
203 * LLI addresses are on 0/1 Master 1/2. 202 * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
204 */ 203 */
205struct pl08x_platform_data { 204struct pl08x_platform_data {
206 struct pl08x_channel_data *slave_channels; 205 struct pl08x_channel_data *slave_channels;
@@ -208,6 +207,8 @@ struct pl08x_platform_data {
208 struct pl08x_channel_data memcpy_channel; 207 struct pl08x_channel_data memcpy_channel;
209 int (*get_signal)(struct pl08x_dma_chan *); 208 int (*get_signal)(struct pl08x_dma_chan *);
210 void (*put_signal)(struct pl08x_dma_chan *); 209 void (*put_signal)(struct pl08x_dma_chan *);
210 u8 lli_buses;
211 u8 mem_buses;
211}; 212};
212 213
213#ifdef CONFIG_AMBA_PL08X 214#ifdef CONFIG_AMBA_PL08X
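
The pl08x.h rework replaces the old bus_bit_lli address trick with explicit AHB master selection: per-channel periph_buses plus platform-wide lli_buses and mem_buses, all built from the new PL08X_AHB1/PL08X_AHB2 bitmasks. A sketch of platform data using them; the board name and chosen values are illustrative only:

    static struct pl08x_platform_data board_pl08x_pdata = {
            /* ... slave_channels, memcpy_channel, get_signal, put_signal ... */
            .lli_buses      = PL08X_AHB1,                   /* fetch LLIs here */
            .mem_buses      = PL08X_AHB1 | PL08X_AHB2,      /* memory on both */
    };
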
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8cd00ad98d37..9bebd7f16ef1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -532,7 +532,7 @@ static inline int dmaengine_resume(struct dma_chan *chan)
532 return dmaengine_device_control(chan, DMA_RESUME, 0); 532 return dmaengine_device_control(chan, DMA_RESUME, 0);
533} 533}
534 534
535static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc) 535static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
536{ 536{
537 return desc->tx_submit(desc); 537 return desc->tx_submit(desc);
538} 538}
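
The dmaengine_submit() change makes the prototype match what tx_submit() actually returns: a dma_cookie_t rather than a bare int, so the cookie semantics stay explicit at the call site. Usage is unchanged:

    dma_cookie_t cookie = dmaengine_submit(desc);

    if (dma_submit_error(cookie))
            return -EIO;    /* illustrative error handling */
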
diff --git a/include/linux/file.h b/include/linux/file.h
index b1e12970f617..e85baebf6279 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -23,7 +23,7 @@ extern struct file *alloc_file(struct path *, fmode_t mode,
 
 static inline void fput_light(struct file *file, int fput_needed)
 {
-	if (unlikely(fput_needed))
+	if (fput_needed)
 		fput(file);
 }
 
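For context, the usual caller pairing (a sketch, not from the patch): fget_light() reports through fput_needed whether a reference was actually taken, and fput_light() drops it only in that case. Presumably the unlikely() hint goes because the branch is not reliably cold, e.g. for tasks with shared file tables.

#include <linux/file.h>

/* Hypothetical helper showing the fget_light()/fput_light() pairing. */
static int with_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);

	if (!file)
		return -EBADF;
	/* ... operate on file ... */
	fput_light(file, fput_needed);
	return 0;
}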
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 177b4ddea418..32b38cd829d3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1483,8 +1483,8 @@ struct fiemap_extent_info {
 	unsigned int fi_flags;		/* Flags as passed from user */
 	unsigned int fi_extents_mapped;	/* Number of mapped extents */
 	unsigned int fi_extents_max;	/* Size of fiemap_extent array */
-	struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
-						 * array */
+	struct fiemap_extent __user *fi_extents_start; /* Start of
+							fiemap_extent array */
 };
 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
 			    u64 phys, u64 len, u32 flags);
@@ -1552,6 +1552,8 @@ struct file_operations {
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
 	int (*setlease)(struct file *, long, struct file_lock **);
+	long (*fallocate)(struct file *file, int mode, loff_t offset,
+			  loff_t len);
 };
 
 #define IPERM_FLAG_RCU	0x0001
@@ -1582,8 +1584,6 @@ struct inode_operations {
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
-	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
-			  loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
 } ____cacheline_aligned;
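With fallocate moved from inode_operations to file_operations, a filesystem now wires it up roughly as follows. A sketch under the new layout; the myfs_* names are hypothetical.

#include <linux/fs.h>

static long myfs_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t len)
{
	/* ... allocate or punch blocks on file->f_mapping->host ... */
	return 0;
}

static const struct file_operations myfs_file_ops = {
	.fallocate	= myfs_fallocate,
	/* ... read/write/mmap etc. ... */
};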
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 4dd0c2cd7659..a9baee6864af 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -527,8 +527,7 @@ struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t s
 struct cfi_fixup {
 	uint16_t mfr;
 	uint16_t id;
-	void (*fixup)(struct mtd_info *mtd, void* param);
-	void* param;
+	void (*fixup)(struct mtd_info *mtd);
 };
 
 #define CFI_MFR_ANY		0xFFFF
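Fixup tables lose their param column accordingly. An illustrative entry under the new layout; fixup_example is hypothetical, while CFI_MFR_ANY and CFI_ID_ANY are the existing wildcards from this header.

#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

static void fixup_example(struct mtd_info *mtd)
{
	/* adjust mtd or its chip-private data in place */
}

static struct cfi_fixup fixup_table[] = {
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_example },
	{ 0, 0, NULL }	/* terminator matches the new 3-field struct */
};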
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index 5d2556700ec2..6987995ad3cf 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -16,6 +16,7 @@
 #ifndef __MTD_FSMC_H
 #define __MTD_FSMC_H
 
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
 #include <linux/types.h>
@@ -27,7 +28,7 @@
 
 /*
  * The placement of the Command Latch Enable (CLE) and
- * Address Latch Enable (ALE) is twised around in the
+ * Address Latch Enable (ALE) is twisted around in the
  * SPEAR310 implementation.
  */
 #if defined(CONFIG_MACH_SPEAR310)
@@ -62,7 +63,7 @@ struct fsmc_nor_bank_regs {
 
 /* ctrl_tim register definitions */
 
-struct fsms_nand_bank_regs {
+struct fsmc_nand_bank_regs {
 	uint32_t pc;
 	uint32_t sts;
 	uint32_t comm;
@@ -78,7 +79,7 @@ struct fsms_nand_bank_regs {
 struct fsmc_regs {
 	struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS];
 	uint8_t reserved_1[0x40 - 0x20];
-	struct fsms_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
+	struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS];
 	uint8_t reserved_2[0xfe0 - 0xc0];
 	uint32_t peripid0;			/* 0xfe0 */
 	uint32_t peripid1;			/* 0xfe4 */
@@ -114,25 +115,6 @@ struct fsmc_regs {
 #define FSMC_THOLD_4		(4 << 16)
 #define FSMC_THIZ_1		(1 << 24)
 
-/* peripid2 register definitions */
-#define FSMC_REVISION_MSK	(0xf)
-#define FSMC_REVISION_SHFT	(0x4)
-
-#define FSMC_VER1		1
-#define FSMC_VER2		2
-#define FSMC_VER3		3
-#define FSMC_VER4		4
-#define FSMC_VER5		5
-#define FSMC_VER6		6
-#define FSMC_VER7		7
-#define FSMC_VER8		8
-
-static inline uint32_t get_fsmc_version(struct fsmc_regs *regs)
-{
-	return (readl(&regs->peripid2) >> FSMC_REVISION_SHFT) &
-		FSMC_REVISION_MSK;
-}
-
 /*
  * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
  * and it has to be read consecutively and immediately after the 512
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index fe8d77ebec13..9d5306bad117 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -144,6 +144,17 @@ struct mtd_info {
 	 */
 	uint32_t writesize;
 
+	/*
+	 * Size of the write buffer used by the MTD. MTD devices having a write
+	 * buffer can write multiple writesize chunks at a time. E.g. while
+	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
+	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
+	 * operations, but not 4. Currently, all NANDs have writebufsize
+	 * equivalent to writesize (NAND page size). Some NOR flashes do have
+	 * writebufsize greater than writesize.
+	 */
+	uint32_t writebufsize;
+
 	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
 	uint32_t oobavail;  // Available OOB bytes per block
 
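The arithmetic in the new comment, as a sketch: the number of program operations a driver needs for a buffered write is bounded below by len / writebufsize. The helper name is hypothetical.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

/* Lower bound on program operations for a write of len bytes:
 * e.g. 4 * writesize with writebufsize == 2 * writesize gives 2. */
static unsigned int min_program_ops(struct mtd_info *mtd, size_t len)
{
	return DIV_ROUND_UP(len, mtd->writebufsize);
}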
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 63e17d01fde9..1f489b247a29 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -448,6 +448,8 @@ struct nand_buffers {
  *			See the defines for further explanation.
  * @badblockpos:	[INTERN] position of the bad block marker in the oob
  *			area.
+ * @badblockbits:	[INTERN] number of bits to left-shift the bad block
+ *			number
  * @cellinfo:		[INTERN] MLC/multichip data from chip ident
  * @numchips:		[INTERN] number of physical chips
  * @chipsize:		[INTERN] the size of one chip for multichip arrays
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 0c8815bfae1c..ae418e41d8f5 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -118,6 +118,8 @@ struct onenand_chip {
 	int (*chip_probe)(struct mtd_info *mtd);
 	int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
 	int (*scan_bbt)(struct mtd_info *mtd);
+	int (*enable)(struct mtd_info *mtd);
+	int (*disable)(struct mtd_info *mtd);
 
 	struct completion	complete;
 	int			irq;
@@ -137,6 +139,14 @@ struct onenand_chip {
 	void *bbm;
 
 	void *priv;
+
+	/*
+	 * Set when the current operation is composed of a sequence
+	 * of commands, for example cache program. For such sequences
+	 * the command status OnGo bit is checked at the end of the
+	 * sequence.
+	 */
+	unsigned int ongoing;
 };
 
 /*
@@ -171,6 +181,9 @@ struct onenand_chip {
 #define ONENAND_IS_2PLANE(this)		(0)
 #endif
 
+#define ONENAND_IS_CACHE_PROGRAM(this)					\
+	(this->options & ONENAND_HAS_CACHE_PROGRAM)
+
 /* Check byte access in OneNAND */
 #define ONENAND_CHECK_BYTE_ACCESS(addr)	(addr & 0x1)
 
@@ -181,6 +194,7 @@ struct onenand_chip {
 #define ONENAND_HAS_UNLOCK_ALL		(0x0002)
 #define ONENAND_HAS_2PLANE		(0x0004)
 #define ONENAND_HAS_4KB_PAGE		(0x0008)
+#define ONENAND_HAS_CACHE_PROGRAM	(0x0010)
 #define ONENAND_SKIP_UNLOCK_CHECK	(0x0100)
 #define ONENAND_PAGEBUF_ALLOC		(0x1000)
 #define ONENAND_OOBBUF_ALLOC		(0x2000)
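A hedged sketch of how a driver might gate a cache-program path on the new capability flag and the ongoing marker; start_cache_program() is hypothetical, the flag test and the ongoing field are from this patch.

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>

static int start_cache_program(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;

	if (!ONENAND_IS_CACHE_PROGRAM(this))
		return -EOPNOTSUPP;

	/* mark the multi-command sequence; the OnGo status bit is
	 * checked once, at the end of the sequence */
	this->ongoing = 1;
	/* ... issue the cache-program command sequence ... */
	return 0;
}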
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 2b54316591d2..4a0a8ba90a72 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -89,7 +89,7 @@ static inline int mtd_has_cmdlinepart(void) { return 1; }
 static inline int mtd_has_cmdlinepart(void) { return 0; }
 #endif
 
-int mtd_is_master(struct mtd_info *mtd);
+int mtd_is_partition(struct mtd_info *mtd);
 int mtd_add_partition(struct mtd_info *master, char *name,
 		      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
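Callers adjust to the inverted sense of the renamed predicate: the helper now answers "is this a partition" rather than "is this a master". A sketch; add_data_partition() and the "data" name are hypothetical.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int add_data_partition(struct mtd_info *master,
			      long long offset, long long length)
{
	/* refuse to partition something that is itself a partition */
	if (mtd_is_partition(master))
		return -EINVAL;
	return mtd_add_partition(master, "data", offset, length);
}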
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0ef22a1f129e..c84d900fbbb3 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -97,7 +97,7 @@ extern void early_init_dt_check_for_initrd(unsigned long node);
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern void early_init_dt_add_memory_arch(u64 base, u64 size);
-extern u64 early_init_dt_alloc_memory_arch(u64 size, u64 align);
+extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
 extern u64 dt_mem_next_cell(int s, __be32 **cellp);
 
 /*
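An arch implementation now returns a mapped pointer rather than a physical address. A sketch along the lines of the memblock-based conversions in this series (see the arch/*/kernel/prom.c changes above); whether __va() of an early allocation is valid this early is an assumption that holds on those architectures.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	/* allocate from memblock and hand back the linear-map address */
	return __va(memblock_alloc(size, align));
}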