path: root/drivers
author	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-26 19:54:27 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-26 19:54:27 -0500
commit	64d497f55379b1e320a08ec2426468d96f5642ec (patch)
tree	22b9ab3c5e69c5cc2728cbc2ca7fc7623beef8f1 /drivers
parent	37d4008484977f60d5d37499a2670c79b214dd46 (diff)
parent	b5f5fe80fa98a60daa0fa94512d1599b1e26674c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (187 commits)
  sh: remove dead LED code for migo-r and ms7724se
  sh: ecovec build fix for CONFIG_I2C=n
  sh: ecovec r-standby support
  sh: ms7724se r-standby support
  sh: SH-Mobile R-standby register save/restore
  clocksource: Fix up a registration/IRQ race in the sh drivers.
  sh: ms7724: modify scan_timing for KEYSC
  sh: ms7724: Add sh_sir support
  sh: mach-ecovec24: Add sh_sir support
  sh: wire up SET/GET_UNALIGN_CTL.
  sh: allow alignment fault mode to be configured at kernel boot.
  sh: sh7724: Update FSI/SPU2 clock
  sh: always enable sh7724 vpu_clk and set to 166MHz on Ecovec
  sh: add sh7724 kick callback to clk_div4_table
  sh: introduce struct clk_div4_table
  sh: clock-cpg div4 set_rate() shift fix
  sh: Turn on speculative return for SH7785 and SH7786
  sh: Merge legacy and dynamic PMB modes.
  sh: Use uncached I/O helpers in PMB setup.
  sh: Provide uncached I/O helpers.
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/clocksource/sh_cmt.c		|  32
-rw-r--r--	drivers/clocksource/sh_mtu2.c		|   6
-rw-r--r--	drivers/clocksource/sh_tmu.c		|   6
-rw-r--r--	drivers/dma/shdma.c			| 411
-rw-r--r--	drivers/dma/shdma.h			|   7
-rw-r--r--	drivers/mtd/nand/Kconfig		|   4
-rw-r--r--	drivers/mtd/nand/sh_flctl.c		|  69
-rw-r--r--	drivers/serial/sh-sci.h			| 220
-rw-r--r--	drivers/sh/intc.c			| 266
-rw-r--r--	drivers/sh/pfc.c			|  37
-rw-r--r--	drivers/video/pvr2fb.c			|   2
-rw-r--r--	drivers/video/sh_mobile_lcdcfb.c	|  88
12 files changed, 665 insertions(+), 483 deletions(-)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 6b3e0c2f33e2..6fe4f7701188 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -603,18 +603,13 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 	p->irqaction.handler = sh_cmt_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
-	ret = setup_irq(irq, &p->irqaction);
-	if (ret) {
-		pr_err("sh_cmt: failed to request irq %d\n", irq);
-		goto err1;
-	}
 
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, cfg->clk);
 	if (IS_ERR(p->clk)) {
 		pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
 		ret = PTR_ERR(p->clk);
-		goto err2;
+		goto err1;
 	}
 
 	if (resource_size(res) == 6) {
@@ -627,14 +622,25 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 		p->clear_bits = ~0xc000;
 	}
 
-	return sh_cmt_register(p, cfg->name,
-			       cfg->clockevent_rating,
-			       cfg->clocksource_rating);
-err2:
-	remove_irq(irq, &p->irqaction);
-err1:
+	ret = sh_cmt_register(p, cfg->name,
+			      cfg->clockevent_rating,
+			      cfg->clocksource_rating);
+	if (ret) {
+		pr_err("sh_cmt: registration failed\n");
+		goto err1;
+	}
+
+	ret = setup_irq(irq, &p->irqaction);
+	if (ret) {
+		pr_err("sh_cmt: failed to request irq %d\n", irq);
+		goto err1;
+	}
+
+	return 0;
+
+err1:
 	iounmap(p->mapbase);
 err0:
 	return ret;
 }
 
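The sh_cmt change above is the "registration/IRQ race" fix from the merge log: previously the timer IRQ was requested before sh_cmt_register(), so an interrupt could fire against a clockevent/clocksource that was not yet registered. The patch registers first and requests the IRQ last. A minimal sketch of that ordering (hypothetical driver with assumed names, not the literal sh_cmt code):

	static int timer_probe_sketch(struct my_timer *p, int irq)
	{
		int ret;

		/* acquire resources first; "timer_clk" is a placeholder name */
		p->clk = clk_get(&p->pdev->dev, "timer_clk");
		if (IS_ERR(p->clk))
			return PTR_ERR(p->clk);

		/* make the clockevent/clocksource known to the core */
		ret = my_timer_register(p);
		if (ret)
			goto err_put;

		/* only now can the handler safely run: request the IRQ last */
		ret = setup_irq(irq, &p->irqaction);
		if (ret)
			goto err_put;

		return 0;

	err_put:
		clk_put(p->clk);
		return ret;
	}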
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 973e714d6051..4c8a759e60cd 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -221,15 +221,15 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_mode = sh_mtu2_clock_event_mode;
 
+	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+	clockevents_register_device(ced);
+
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
 		pr_err("sh_mtu2: failed to request irq %d\n",
 		       p->irqaction.irq);
 		return;
 	}
-
-	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
-	clockevents_register_device(ced);
 }
 
 static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 93c2322feab7..961f5b5ef6a3 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -323,15 +323,15 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
 
+	pr_info("sh_tmu: %s used for clock events\n", ced->name);
+	clockevents_register_device(ced);
+
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
 		pr_err("sh_tmu: failed to request irq %d\n",
 		       p->irqaction.irq);
 		return;
 	}
-
-	pr_info("sh_tmu: %s used for clock events\n", ced->name);
-	clockevents_register_device(ced);
 }
 
 static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc899c460..b75ce8b84c46 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,23 +48,20 @@ enum sh_dmae_desc_status {
  */
 #define RS_DEFAULT	(RS_DUAL)
 
+/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
-}
-
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
-	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 /*
@@ -95,27 +92,30 @@ static int sh_dmae_rst(int id)
 	return 0;
 }
 
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	if (chcr & CHCR_DE) {
-		if (!(chcr & CHCR_TE))
-			return -EBUSY; /* working */
-	}
-	return 0; /* waiting */
+
+	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+		return true; /* working */
+
+	return false; /* waiting */
 }
 
-static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+static unsigned int ts_shift[] = TS_SHIFT;
+static inline unsigned int calc_xmit_shift(u32 chcr)
 {
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
+		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
+
+	return ts_shift[cnt];
 }
 
 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
 	sh_dmae_writel(sh_chan, hw->sar, SAR);
 	sh_dmae_writel(sh_chan, hw->dar, DAR);
-	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -123,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	chcr |= CHCR_DE | CHCR_IE;
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
 }
 
 static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -134,55 +134,50 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+	sh_chan->xmit_shift = calc_xmit_shift(chcr);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
-	if (ret)
-		return ret;
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
+	sh_chan->xmit_shift = calc_xmit_shift(val);
 	sh_dmae_writel(sh_chan, val, CHCR);
+
 	return 0;
 }
 
-#define DMARS1_ADDR	0x04
-#define DMARS2_ADDR	0x08
 #define DMARS_SHIFT	8
 #define DMARS_CHAN_MSK	0x01
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_busy(sh_chan);
-	if (ret)
-		return ret;
+
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
 	if (sh_chan->id & DMARS_CHAN_MSK)
 		shift = DMARS_SHIFT;
 
-	switch (sh_chan->id) {
-	/* DMARS0 */
-	case 0:
-	case 1:
-		addr = SH_DMARS_BASE;
-		break;
-	/* DMARS1 */
-	case 2:
-	case 3:
-		addr = (SH_DMARS_BASE + DMARS1_ADDR);
-		break;
-	/* DMARS2 */
-	case 4:
-	case 5:
-		addr = (SH_DMARS_BASE + DMARS2_ADDR);
-		break;
-	default:
+	if (sh_chan->id < 6)
+		/* DMA0RS0 - DMA0RS2 */
+		addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
+#ifdef SH_DMARS_BASE1
+	else if (sh_chan->id < 12)
+		/* DMA1RS0 - DMA1RS2 */
+		addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
+#endif
+	else
 		return -EINVAL;
-	}
 
-	ctrl_outw((val << shift) |
-		  (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
-		  addr);
+	ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
 
 	return 0;
 }
@@ -250,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 	return NULL;
 }
 
+static struct sh_dmae_slave_config *sh_dmae_find_slave(
+	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+{
+	struct dma_device *dma_dev = sh_chan->common.device;
+	struct sh_dmae_device *shdev = container_of(dma_dev,
+					struct sh_dmae_device, common);
+	struct sh_dmae_pdata *pdata = &shdev->pdata;
+	int i;
+
+	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0; i < pdata->config_num; i++)
+		if (pdata->config[i].slave_id == slave_id)
+			return pdata->config + i;
+
+	return NULL;
+}
+
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc;
+	struct sh_dmae_slave *param = chan->private;
+
+	/*
+	 * This relies on the guarantee from dmaengine that alloc_chan_resources
+	 * never runs concurrently with itself or free_chan_resources.
+	 */
+	if (param) {
+		struct sh_dmae_slave_config *cfg;
+
+		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+		if (!cfg)
+			return -EINVAL;
+
+		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
+			return -EBUSY;
+
+		param->config = cfg;
+
+		dmae_set_dmars(sh_chan, cfg->mid_rid);
+		dmae_set_chcr(sh_chan, cfg->chcr);
+	} else {
+		if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
+			dmae_set_chcr(sh_chan, RS_DEFAULT);
+	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -286,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	dmae_halt(sh_chan);
+
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
 		sh_dmae_chan_ld_cleanup(sh_chan, true);
 
+	if (chan->private) {
+		/* The caller is holding dma_list_mutex */
+		struct sh_dmae_slave *param = chan->private;
+		clear_bit(param->slave_id, sh_dmae_slave_used);
+	}
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -301,23 +347,97 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	kfree(desc);
 }
 
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+/**
+ * sh_dmae_add_desc - get, set up and return one transfer descriptor
+ * @sh_chan:	DMA channel
+ * @flags:	DMA transfer flags
+ * @dest:	destination DMA address, incremented when direction equals
+ *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * @src:	source DMA address, incremented when direction equals
+ *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * @len:	DMA transfer length
+ * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction:	needed for slave DMA to decide which address to keep constant,
+ *		equals DMA_BIDIRECTIONAL for MEMCPY
+ * Returns 0 or an error
+ * Locks: called with desc_lock held
+ */
+static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
+	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
+	struct sh_desc **first, enum dma_data_direction direction)
 {
-	struct sh_dmae_chan *sh_chan;
-	struct sh_desc *first = NULL, *prev = NULL, *new;
+	struct sh_desc *new;
 	size_t copy_size;
-	LIST_HEAD(tx_list);
-	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
-	if (!chan)
+	if (!*len)
 		return NULL;
 
-	if (!len)
+	/* Allocate the link descriptor from the free list */
+	new = sh_dmae_get_desc(sh_chan);
+	if (!new) {
+		dev_err(sh_chan->dev, "No free link descriptor available\n");
 		return NULL;
+	}
 
-	sh_chan = to_sh_chan(chan);
+	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
+
+	new->hw.sar = *src;
+	new->hw.dar = *dest;
+	new->hw.tcr = copy_size;
+
+	if (!*first) {
+		/* First desc */
+		new->async_tx.cookie = -EBUSY;
+		*first = new;
+	} else {
+		/* Other desc - invisible to the user */
+		new->async_tx.cookie = -EINVAL;
+	}
+
+	dev_dbg(sh_chan->dev,
+		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
+		copy_size, *len, *src, *dest, &new->async_tx,
+		new->async_tx.cookie, sh_chan->xmit_shift);
+
+	new->mark = DESC_PREPARED;
+	new->async_tx.flags = flags;
+	new->direction = direction;
+
+	*len -= copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+		*src += copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+		*dest += copy_size;
+
+	return new;
+}
+
+/*
+ * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * and the SG list contains only one element and points at the source buffer.
+ */
+static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
+	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct scatterlist *sg;
+	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
+	LIST_HEAD(tx_list);
+	int chunks = 0;
+	int i;
+
+	if (!sg_len)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i)
+		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +453,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	 * only during this function, then they are immediately spliced
 	 * back onto the free list in form of a chain
 	 */
-	do {
-		/* Allocate the link descriptor from the free list */
-		new = sh_dmae_get_desc(sh_chan);
-		if (!new) {
-			dev_err(sh_chan->dev,
-				"No free memory for link descriptor\n");
-			list_for_each_entry(new, &tx_list, node)
-				new->mark = DESC_IDLE;
-			list_splice(&tx_list, &sh_chan->ld_free);
-			spin_unlock_bh(&sh_chan->desc_lock);
-			return NULL;
-		}
-
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
-
-		new->hw.sar = dma_src;
-		new->hw.dar = dma_dest;
-		new->hw.tcr = copy_size;
-		if (!first) {
-			/* First desc */
-			new->async_tx.cookie = -EBUSY;
-			first = new;
-		} else {
-			/* Other desc - invisible to the user */
-			new->async_tx.cookie = -EINVAL;
-		}
-
-		dev_dbg(sh_chan->dev,
-			"chaining %u of %u with %p, dst %x, cookie %d\n",
-			copy_size, len, &new->async_tx, dma_dest,
-			new->async_tx.cookie);
-
-		new->mark = DESC_PREPARED;
-		new->async_tx.flags = flags;
-		new->chunks = chunks--;
-
-		prev = new;
-		len -= copy_size;
-		dma_src += copy_size;
-		dma_dest += copy_size;
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &tx_list);
-	} while (len);
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		size_t len = sg_dma_len(sg);
+
+		if (!len)
+			goto err_get_desc;
+
+		do {
+			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+				i, sg, len, (unsigned long long)sg_addr);
+
+			if (direction == DMA_FROM_DEVICE)
+				new = sh_dmae_add_desc(sh_chan, flags,
+						&sg_addr, addr, &len, &first,
+						direction);
+			else
+				new = sh_dmae_add_desc(sh_chan, flags,
+						addr, &sg_addr, &len, &first,
+						direction);
+			if (!new)
+				goto err_get_desc;
+
+			new->chunks = chunks--;
+			list_add_tail(&new->node, &tx_list);
+		} while (len);
+	}
 
 	if (new != first)
 		new->async_tx.cookie = -ENOSPC;
@@ -386,6 +489,77 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return &first->async_tx;
+
+err_get_desc:
+	list_for_each_entry(new, &tx_list, node)
+		new->mark = DESC_IDLE;
+	list_splice(&tx_list, &sh_chan->ld_free);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct scatterlist sg;
+
+	if (!chan || !len)
+		return NULL;
+
+	chan->private = NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sg) = dma_src;
+	sg_dma_len(&sg) = len;
+
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+			       flags);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct sh_dmae_slave *param;
+	struct sh_dmae_chan *sh_chan;
+
+	if (!chan)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+	param = chan->private;
+
+	/* Someone calling slave DMA on a public channel? */
+	if (!param || !sg_len) {
+		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
+			 __func__, param, sg_len, param ? param->slave_id : -1);
+		return NULL;
+	}
+
+	/*
+	 * if (param != NULL), this is a successfully requested slave channel,
+	 * therefore param->config != NULL too.
+	 */
+	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+			       direction, flags);
+}
+
+static void sh_dmae_terminate_all(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+
+	if (!chan)
+		return;
+
+	sh_dmae_chan_ld_cleanup(sh_chan, true);
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -419,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 	cookie = tx->cookie;
 
 	if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-		BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+		if (sh_chan->completed_cookie != desc->cookie - 1)
+			dev_dbg(sh_chan->dev,
+				"Completing cookie %d, expected %d\n",
+				desc->cookie,
+				sh_chan->completed_cookie + 1);
 		sh_chan->completed_cookie = desc->cookie;
 	}
 
@@ -492,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 	}
 
-	/* Find the first un-transfer desciptor */
+	/* Find the first not transferred desciptor */
 	list_for_each_entry(sd, &sh_chan->ld_queue, node)
 		if (sd->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
@@ -559,7 +737,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 
 	/* IRQ Multi */
 	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int cnt = 0;
+		int __maybe_unused cnt = 0;
 		switch (irq) {
 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
 		case DMTE6_IRQ:
@@ -596,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data)
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
 	spin_lock(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
-		    desc->mark == DESC_SUBMITTED) {
+		if (desc->mark == DESC_SUBMITTED &&
+		    ((desc->direction == DMA_FROM_DEVICE &&
+		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
+		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
 			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
 				desc->async_tx.cookie, &desc->async_tx,
 				desc->hw.dar);
@@ -673,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 	}
 
 	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-			"sh-dmae%d", new_sh_chan->id);
+		 "sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
 	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
@@ -684,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 		goto err_no_irq;
 	}
 
-	/* CHCR register control function */
-	new_sh_chan->set_chcr = dmae_set_chcr;
-	/* DMARS register control function */
-	new_sh_chan->set_dmars = dmae_set_dmars;
-
 	shdev->chan[id] = new_sh_chan;
 	return 0;
 
@@ -759,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+
 	shdev->common.device_alloc_chan_resources
 		= sh_dmae_alloc_chan_resources;
 	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
 	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
 	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+
+	/* Compulsory for DMA_SLAVE fields */
+	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
+	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
 	shdev->common.copy_align = 5;
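With the DMA_SLAVE capability and device_prep_slave_sg in place, peripheral drivers can claim a channel through the generic dmaengine API; sh_dmae_alloc_chan_resources() then picks the slave configuration up from chan->private. A sketch of the client side under the conventions this patch introduces (the filter function and the SHDMA_SLAVE_SCIF0_TX slave ID are illustrative assumptions, not part of the patch):

	#include <linux/dmaengine.h>

	static bool sh_dmae_filter_sketch(struct dma_chan *chan, void *arg)
	{
		struct sh_dmae_slave *param = arg;

		/* seen by sh_dmae_alloc_chan_resources() via chan->private */
		chan->private = param;
		return true;
	}

	static struct dma_chan *request_slave_chan_sketch(void)
	{
		static struct sh_dmae_slave param = {
			.slave_id = SHDMA_SLAVE_SCIF0_TX,	/* hypothetical ID */
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, sh_dmae_filter_sketch, &param);
	}

If the slave_id is unknown or already claimed, channel allocation fails with -EINVAL or -EBUSY respectively, as enforced by the sh_dmae_slave_used bitmask above.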
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 108f1cffb6f5..7e227f3c87c4 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -29,6 +29,7 @@ struct sh_desc {
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	enum dma_data_direction direction;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -45,13 +46,9 @@ struct sh_dmae_chan {
 	struct device *dev;		/* Channel device */
 	struct tasklet_struct tasklet;	/* Tasklet */
 	int descs_allocated;		/* desc count */
+	int xmit_shift;			/* log_2(bytes_per_xfer) */
 	int id;				/* Raw id of this channel */
 	char dev_id[16];		/* unique name per DMAC of channel */
-
-	/* Set chcr */
-	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
-	/* Set DMA resource */
-	int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
 };
 
 struct sh_dmae_device {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 677cd53f18c3..bb6465604235 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -457,10 +457,10 @@ config MTD_NAND_NOMADIK
 
 config MTD_NAND_SH_FLCTL
 	tristate "Support for NAND on Renesas SuperH FLCTL"
-	depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723
+	depends on MTD_NAND && SUPERH
 	help
 	  Several Renesas SuperH CPU has FLCTL. This option enables support
-	  for NAND Flash using FLCTL. This driver support SH7723.
+	  for NAND Flash using FLCTL.
 
 config MTD_NAND_DAVINCI
 	tristate "Support NAND on DaVinci SoC"
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 02bef21f2e4b..1842df8bdd93 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1,10 +1,10 @@
 /*
  * SuperH FLCTL nand controller
  *
- * Copyright © 2008 Renesas Solutions Corp.
- * Copyright © 2008 Atom Create Engineering Co., Ltd.
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
  *
- * Based on fsl_elbc_nand.c, Copyright © 2006-2007 Freescale Semiconductor
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -75,6 +75,11 @@ static void start_translation(struct sh_flctl *flctl)
 	writeb(TRSTRT, FLTRCR(flctl));
 }
 
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+	dev_err(&flctl->pdev->dev, "Timeout occured in %s\n", str);
+}
+
 static void wait_completion(struct sh_flctl *flctl)
 {
 	uint32_t timeout = LOOP_TIMEOUT_MAX;
@@ -87,7 +92,7 @@ static void wait_completion(struct sh_flctl *flctl)
 		udelay(1);
 	}
 
-	printk(KERN_ERR "wait_completion(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 	writeb(0x0, FLTRCR(flctl));
 }
 
@@ -100,6 +105,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
 		addr = page_addr;	/* ERASE1 */
 	} else if (page_addr != -1) {
 		/* SEQIN, READ0, etc.. */
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column >>= 1;
 		if (flctl->page_size) {
 			addr = column & 0x0FFF;
 			addr |= (page_addr & 0xff) << 16;
@@ -132,7 +139,7 @@ static void wait_rfifo_ready(struct sh_flctl *flctl)
 			return;
 		udelay(1);
 	}
-	printk(KERN_ERR "wait_rfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 }
 
 static void wait_wfifo_ready(struct sh_flctl *flctl)
@@ -146,7 +153,7 @@ static void wait_wfifo_ready(struct sh_flctl *flctl)
 			return;
 		udelay(1);
 	}
-	printk(KERN_ERR "wait_wfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 }
 
 static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
@@ -198,7 +205,7 @@ static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
 		writel(0, FL4ECCCR(flctl));
 	}
 
-	printk(KERN_ERR "wait_recfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 	return 1;	/* timeout */
 }
 
@@ -214,7 +221,7 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
 			return;
 		udelay(1);
 	}
-	printk(KERN_ERR "wait_wecfifo_ready(): Timeout occured \n");
+	timeout_error(flctl, __func__);
 }
 
 static void read_datareg(struct sh_flctl *flctl, int offset)
@@ -275,7 +282,7 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
 static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
 {
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t flcmncr_val = readl(FLCMNCR(flctl));
+	uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
 	uint32_t flcmdcr_val, addr_len_bytes = 0;
 
 	/* Set SNAND bit if page size is 2048byte */
@@ -297,6 +304,8 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
 	case NAND_CMD_READOOB:
 		addr_len_bytes = flctl->rw_ADRCNT;
 		flcmdcr_val |= CDSRC_E;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			flcmncr_val |= SEL_16BIT;
 		break;
 	case NAND_CMD_SEQIN:
 		/* This case is that cmd is READ0 or READ1 or READ00 */
@@ -305,6 +314,8 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
 	case NAND_CMD_PAGEPROG:
 		addr_len_bytes = flctl->rw_ADRCNT;
 		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			flcmncr_val |= SEL_16BIT;
 		break;
 	case NAND_CMD_READID:
 		flcmncr_val &= ~SNAND_E;
@@ -523,6 +534,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
 		set_addr(mtd, 0, page_addr);
 
 		flctl->read_bytes = mtd->writesize + mtd->oobsize;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column >>= 1;
 		flctl->index += column;
 		goto read_normal_exit;
 
@@ -686,6 +699,18 @@ static uint8_t flctl_read_byte(struct mtd_info *mtd)
 	return data;
 }
 
+static uint16_t flctl_read_word(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int index = flctl->index;
+	uint16_t data;
+	uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+
+	data = *buf;
+	flctl->index += 2;
+	return data;
+}
+
 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
 	int i;
@@ -769,38 +794,36 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
 	return 0;
 }
 
-static int __init flctl_probe(struct platform_device *pdev)
+static int __devinit flctl_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct sh_flctl *flctl;
 	struct mtd_info *flctl_mtd;
 	struct nand_chip *nand;
 	struct sh_flctl_platform_data *pdata;
-	int ret;
+	int ret = -ENXIO;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
-		printk(KERN_ERR "sh_flctl platform_data not found.\n");
-		return -ENODEV;
+		dev_err(&pdev->dev, "no platform data defined\n");
+		return -EINVAL;
 	}
 
 	flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
 	if (!flctl) {
-		printk(KERN_ERR "Unable to allocate NAND MTD dev structure.\n");
+		dev_err(&pdev->dev, "failed to allocate driver data\n");
 		return -ENOMEM;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
-		printk(KERN_ERR "%s: resource not found.\n", __func__);
-		ret = -ENODEV;
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
 		goto err;
 	}
 
-	flctl->reg = ioremap(res->start, res->end - res->start + 1);
+	flctl->reg = ioremap(res->start, resource_size(res));
 	if (flctl->reg == NULL) {
-		printk(KERN_ERR "%s: ioremap error.\n", __func__);
-		ret = -ENOMEM;
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
 		goto err;
 	}
 
@@ -808,6 +831,7 @@ static int __init flctl_probe(struct platform_device *pdev)
 	flctl_mtd = &flctl->mtd;
 	nand = &flctl->chip;
 	flctl_mtd->priv = nand;
+	flctl->pdev = pdev;
 	flctl->hwecc = pdata->has_hwecc;
 
 	flctl_register_init(flctl, pdata->flcmncr_val);
@@ -825,6 +849,11 @@ static int __init flctl_probe(struct platform_device *pdev)
 	nand->select_chip = flctl_select_chip;
 	nand->cmdfunc = flctl_cmdfunc;
 
+	if (pdata->flcmncr_val & SEL_16BIT) {
+		nand->options |= NAND_BUSWIDTH_16;
+		nand->read_word = flctl_read_word;
+	}
+
 	ret = nand_scan_ident(flctl_mtd, 1);
 	if (ret)
 		goto err;
@@ -846,7 +875,7 @@ err:
 	return ret;
 }
 
-static int __exit flctl_remove(struct platform_device *pdev)
+static int __devexit flctl_remove(struct platform_device *pdev)
 {
 	struct sh_flctl *flctl = platform_get_drvdata(pdev);
 
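The FLCTL half of this series teaches the driver about NAND chips on a 16-bit bus: when the board sets SEL_16BIT in flcmncr_val, probe switches the NAND core to NAND_BUSWIDTH_16 and hooks up flctl_read_word(), and the command path halves the column address. A hypothetical board-code fragment wiring this up (only SEL_16BIT and has_hwecc come from this driver; the remaining register bits are placeholders):

	static struct sh_flctl_platform_data flctl_pdata_sketch = {
		.flcmncr_val	= /* board-specific FLCMNCR bits | */ SEL_16BIT,
		.has_hwecc	= 1,
	};

	static struct platform_device flctl_device_sketch = {
		.name	= "sh_flctl",
		.dev	= {
			.platform_data = &flctl_pdata_sketch,
		},
	};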
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 0efcded59ae6..f7d2589926d2 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -518,34 +518,6 @@ static inline int sci_rxd_in(struct uart_port *port)
 {
 	if (port->mapbase == 0xfffffe80)
 		return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
-	if (port->mapbase == 0xa4000150)
-		return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xa4000140)
-		return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == SCIF0)
-		return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
-	if (port->mapbase == SCIF2)
-		return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	return sci_in(port,SCxSR)&0x0010 ? 1 : 0;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7721)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xa4430000)
-		return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
-	else if (port->mapbase == 0xa4438000)
-		return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
 	return 1;
 }
 #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
@@ -558,207 +530,17 @@ static inline int sci_rxd_in(struct uart_port *port)
 {
 	if (port->mapbase == 0xffe00000)
 		return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
-	if (port->mapbase == 0xffe80000)
-		return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe80000)
-		return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
 	return 1;
 }
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfe4b0000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
-	if (port->mapbase == 0xfe4c0000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0;
-	if (port->mapbase == 0xfe4d0000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfe600000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfe610000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfe620000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe30000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
-
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
-	if (port->mapbase == 0xa4e30000)
-		return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
-	if (port->mapbase == 0xa4e40000)
-		return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
-	if (port->mapbase == 0xa4e50000)
-		return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-# define SCFSR  0x0010
-# define SCASSR 0x0014
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->type == PORT_SCIF)
-		return __raw_readw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
-	if (port->type == PORT_SCIFA)
-		return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
-}
 #elif defined(__H8300H__) || defined(__H8300S__)
 static inline int sci_rxd_in(struct uart_port *port)
 {
 	int ch = (port->mapbase - SMR0) >> 3;
 	return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
 }
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe08000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
-
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xff923000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xff924000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xff925000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7786)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffea0000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffeb0000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffec0000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffed0000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffee0000)
-		return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffef0000)
-		return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7203) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7206) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7263)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfffe8000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe8800)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe9000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe9800)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-#if defined(CONFIG_CPU_SUBTYPE_SH7201)
-	if (port->mapbase == 0xfffeA000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeA800)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeB000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeB800)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-#endif
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xf8400000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xf8410000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xf8420000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
+#else /* default case for non-SCI processors */
 static inline int sci_rxd_in(struct uart_port *port)
 {
-	if (port->mapbase == 0xffc30000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc40000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc50000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc60000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
 	return 1;
 }
 #endif
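All the per-SoC sci_rxd_in() variants that merely sampled a port-data register collapse into one default that reports the RX line as high (idle). The helper feeds the driver's break detection, roughly like this sketch (illustrative, not the literal sh-sci.c logic):

	/* a break shows up as the RX line held low while no data arrives */
	static int rx_line_is_break_sketch(struct uart_port *port)
	{
		return sci_rxd_in(port) == 0;
	}

Returning 1 from the default therefore simply disables break detection on parts without a usable port register.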
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index d5d7f23c19a5..3a5a17db9474 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -259,6 +259,43 @@ static void intc_disable(unsigned int irq)
 	}
 }
 
+static void (*intc_enable_noprio_fns[])(unsigned long addr,
+					unsigned long handle,
+					void (*fn)(unsigned long,
+						   unsigned long,
+						   unsigned long),
+					unsigned int irq) = {
+	[MODE_ENABLE_REG] = intc_mode_field,
+	[MODE_MASK_REG] = intc_mode_zero,
+	[MODE_DUAL_REG] = intc_mode_field,
+	[MODE_PRIO_REG] = intc_mode_field,
+	[MODE_PCLR_REG] = intc_mode_field,
+};
+
+static void intc_enable_disable(struct intc_desc_int *d,
+				unsigned long handle, int do_enable)
+{
+	unsigned long addr;
+	unsigned int cpu;
+	void (*fn)(unsigned long, unsigned long,
+		   void (*)(unsigned long, unsigned long, unsigned long),
+		   unsigned int);
+
+	if (do_enable) {
+		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
+			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
+			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+		}
+	} else {
+		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
+			fn = intc_disable_fns[_INTC_MODE(handle)];
+			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+		}
+	}
+}
+
 static int intc_set_wake(unsigned int irq, unsigned int on)
 {
 	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
@@ -400,11 +437,11 @@ static unsigned int __init intc_get_reg(struct intc_desc_int *d,
 static intc_enum __init intc_grp_id(struct intc_desc *desc,
 				    intc_enum enum_id)
 {
-	struct intc_group *g = desc->groups;
+	struct intc_group *g = desc->hw.groups;
 	unsigned int i, j;
 
-	for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
-		g = desc->groups + i;
+	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
+		g = desc->hw.groups + i;
 
 		for (j = 0; g->enum_ids[j]; j++) {
 			if (g->enum_ids[j] != enum_id)
@@ -417,19 +454,21 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
 	return 0;
 }
 
-static unsigned int __init intc_mask_data(struct intc_desc *desc,
+static unsigned int __init _intc_mask_data(struct intc_desc *desc,
 					  struct intc_desc_int *d,
-					  intc_enum enum_id, int do_grps)
+					  intc_enum enum_id,
+					  unsigned int *reg_idx,
+					  unsigned int *fld_idx)
 {
-	struct intc_mask_reg *mr = desc->mask_regs;
-	unsigned int i, j, fn, mode;
+	struct intc_mask_reg *mr = desc->hw.mask_regs;
+	unsigned int fn, mode;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
-		mr = desc->mask_regs + i;
+	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
+		mr = desc->hw.mask_regs + *reg_idx;
 
-		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
-			if (mr->enum_ids[j] != enum_id)
+		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
+			if (mr->enum_ids[*fld_idx] != enum_id)
 				continue;
 
 			if (mr->set_reg && mr->clr_reg) {
@@ -455,29 +494,49 @@ static unsigned int __init intc_mask_data(struct intc_desc *desc,
 					   intc_get_reg(d, reg_e),
 					   intc_get_reg(d, reg_d),
 					   1,
-					   (mr->reg_width - 1) - j);
+					   (mr->reg_width - 1) - *fld_idx);
 		}
+
+		*fld_idx = 0;
+		(*reg_idx)++;
 	}
 
+	return 0;
+}
+
+static unsigned int __init intc_mask_data(struct intc_desc *desc,
+					  struct intc_desc_int *d,
+					  intc_enum enum_id, int do_grps)
+{
+	unsigned int i = 0;
+	unsigned int j = 0;
+	unsigned int ret;
+
+	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
+	if (ret)
+		return ret;
+
 	if (do_grps)
 		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
 
 	return 0;
 }
 
-static unsigned int __init intc_prio_data(struct intc_desc *desc,
+static unsigned int __init _intc_prio_data(struct intc_desc *desc,
 					  struct intc_desc_int *d,
-					  intc_enum enum_id, int do_grps)
+					  intc_enum enum_id,
+					  unsigned int *reg_idx,
+					  unsigned int *fld_idx)
 {
-	struct intc_prio_reg *pr = desc->prio_regs;
-	unsigned int i, j, fn, mode, bit;
+	struct intc_prio_reg *pr = desc->hw.prio_regs;
+	unsigned int fn, n, mode, bit;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
-		pr = desc->prio_regs + i;
+	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
+		pr = desc->hw.prio_regs + *reg_idx;
 
-		for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
-			if (pr->enum_ids[j] != enum_id)
+		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
+			if (pr->enum_ids[*fld_idx] != enum_id)
 				continue;
 
 			if (pr->set_reg && pr->clr_reg) {
@@ -495,34 +554,79 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc,
495 } 554 }
496 555
497 fn += (pr->reg_width >> 3) - 1; 556 fn += (pr->reg_width >> 3) - 1;
557 n = *fld_idx + 1;
498 558
499 BUG_ON((j + 1) * pr->field_width > pr->reg_width); 559 BUG_ON(n * pr->field_width > pr->reg_width);
500 560
501 bit = pr->reg_width - ((j + 1) * pr->field_width); 561 bit = pr->reg_width - (n * pr->field_width);
502 562
503 return _INTC_MK(fn, mode, 563 return _INTC_MK(fn, mode,
504 intc_get_reg(d, reg_e), 564 intc_get_reg(d, reg_e),
505 intc_get_reg(d, reg_d), 565 intc_get_reg(d, reg_d),
506 pr->field_width, bit); 566 pr->field_width, bit);
507 } 567 }
568
569 *fld_idx = 0;
570 (*reg_idx)++;
508 } 571 }
509 572
573 return 0;
574}
575
576static unsigned int __init intc_prio_data(struct intc_desc *desc,
577 struct intc_desc_int *d,
578 intc_enum enum_id, int do_grps)
579{
580 unsigned int i = 0;
581 unsigned int j = 0;
582 unsigned int ret;
583
584 ret = _intc_prio_data(desc, d, enum_id, &i, &j);
585 if (ret)
586 return ret;
587
510 if (do_grps) 588 if (do_grps)
511 return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); 589 return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
512 590
513 return 0; 591 return 0;
514} 592}
515 593
594static void __init intc_enable_disable_enum(struct intc_desc *desc,
595 struct intc_desc_int *d,
596 intc_enum enum_id, int enable)
597{
598 unsigned int i, j, data;
599
600 /* go through and enable/disable all mask bits */
601 i = j = 0;
602 do {
603 data = _intc_mask_data(desc, d, enum_id, &i, &j);
604 if (data)
605 intc_enable_disable(d, data, enable);
606 j++;
607 } while (data);
608
609 /* go through and enable/disable all priority fields */
610 i = j = 0;
611 do {
612 data = _intc_prio_data(desc, d, enum_id, &i, &j);
613 if (data)
614 intc_enable_disable(d, data, enable);
615
616 j++;
617 } while (data);
618}
619
516static unsigned int __init intc_ack_data(struct intc_desc *desc, 620static unsigned int __init intc_ack_data(struct intc_desc *desc,
517 struct intc_desc_int *d, 621 struct intc_desc_int *d,
518 intc_enum enum_id) 622 intc_enum enum_id)
519{ 623{
520 struct intc_mask_reg *mr = desc->ack_regs; 624 struct intc_mask_reg *mr = desc->hw.ack_regs;
521 unsigned int i, j, fn, mode; 625 unsigned int i, j, fn, mode;
522 unsigned long reg_e, reg_d; 626 unsigned long reg_e, reg_d;
523 627
524 for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) { 628 for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
525 mr = desc->ack_regs + i; 629 mr = desc->hw.ack_regs + i;
526 630
527 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { 631 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
528 if (mr->enum_ids[j] != enum_id) 632 if (mr->enum_ids[j] != enum_id)
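
The point of splitting intc_mask_data() and intc_prio_data() into _intc_*_data() helpers is to make the lookup resumable: the reg_idx/fld_idx pair acts as a cursor, the helper returns the encoded handle of the first match at or after the cursor (0 once the tables are exhausted), and the cursor is left on the hit so the caller decides when to step past it, which is exactly what the j++ in intc_enable_disable_enum() does. A minimal user-space sketch of the same contract, with illustrative names rather than the kernel API:

    #include <stdio.h>

    /*
     * Return a 1-based position of the next occurrence of "key" at or
     * after *idx, leaving *idx on the hit; 0 means exhausted.  This
     * mirrors the cursor contract of _intc_mask_data()/_intc_prio_data(),
     * where 0 doubles as the "no handle" value.
     */
    static unsigned int next_match(const unsigned int *tbl, unsigned int len,
                                   unsigned int key, unsigned int *idx)
    {
            for (; *idx < len; (*idx)++)
                    if (tbl[*idx] == key)
                            return *idx + 1;
            return 0;
    }

    int main(void)
    {
            unsigned int tbl[] = { 3, 7, 3, 9, 3 };
            unsigned int pos = 0, hit;

            /* drain every match, as intc_enable_disable_enum() does */
            do {
                    hit = next_match(tbl, 5, 3, &pos);
                    if (hit)
                            printf("match at index %u\n", hit - 1);
                    pos++;  /* step past the hit before resuming */
            } while (hit);

            return 0;
    }
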
@@ -549,11 +653,11 @@ static unsigned int __init intc_sense_data(struct intc_desc *desc,
 					   struct intc_desc_int *d,
 					   intc_enum enum_id)
 {
-	struct intc_sense_reg *sr = desc->sense_regs;
+	struct intc_sense_reg *sr = desc->hw.sense_regs;
 	unsigned int i, j, fn, bit;
 
-	for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
-		sr = desc->sense_regs + i;
+	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
+		sr = desc->hw.sense_regs + i;
 
 		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
 			if (sr->enum_ids[j] != enum_id)
@@ -656,7 +760,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
 	/* irq should be disabled by default */
 	d->chip.mask(irq);
 
-	if (desc->ack_regs)
+	if (desc->hw.ack_regs)
 		ack_handle[irq] = intc_ack_data(desc, d, enum_id);
 }
 
@@ -684,6 +788,7 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
 void __init register_intc_controller(struct intc_desc *desc)
 {
 	unsigned int i, k, smp;
+	struct intc_hw_desc *hw = &desc->hw;
 	struct intc_desc_int *d;
 
 	d = kzalloc(sizeof(*d), GFP_NOWAIT);
@@ -691,10 +796,10 @@ void __init register_intc_controller(struct intc_desc *desc)
 	INIT_LIST_HEAD(&d->list);
 	list_add(&d->list, &intc_list);
 
-	d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
-	d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
-	d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
-	d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
+	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
+	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
+	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
 
 	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
 #ifdef CONFIG_SMP
@@ -702,30 +807,31 @@ void __init register_intc_controller(struct intc_desc *desc)
 #endif
 	k = 0;
 
-	if (desc->mask_regs) {
-		for (i = 0; i < desc->nr_mask_regs; i++) {
-			smp = IS_SMP(desc->mask_regs[i]);
-			k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
-			k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
+	if (hw->mask_regs) {
+		for (i = 0; i < hw->nr_mask_regs; i++) {
+			smp = IS_SMP(hw->mask_regs[i]);
+			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
+			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
 		}
 	}
 
-	if (desc->prio_regs) {
-		d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);
+	if (hw->prio_regs) {
+		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
+				  GFP_NOWAIT);
 
-		for (i = 0; i < desc->nr_prio_regs; i++) {
-			smp = IS_SMP(desc->prio_regs[i]);
-			k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
-			k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
+		for (i = 0; i < hw->nr_prio_regs; i++) {
+			smp = IS_SMP(hw->prio_regs[i]);
+			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
+			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
 		}
 	}
 
-	if (desc->sense_regs) {
-		d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);
+	if (hw->sense_regs) {
+		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
+				   GFP_NOWAIT);
 
-		for (i = 0; i < desc->nr_sense_regs; i++) {
-			k += save_reg(d, k, desc->sense_regs[i].reg, 0);
-		}
+		for (i = 0; i < hw->nr_sense_regs; i++)
+			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
 	}
 
 	d->chip.name = desc->name;
@@ -738,18 +844,26 @@ void __init register_intc_controller(struct intc_desc *desc)
 	d->chip.set_type = intc_set_sense;
 	d->chip.set_wake = intc_set_wake;
 
-	if (desc->ack_regs) {
-		for (i = 0; i < desc->nr_ack_regs; i++)
-			k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);
+	if (hw->ack_regs) {
+		for (i = 0; i < hw->nr_ack_regs; i++)
+			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
 
 		d->chip.mask_ack = intc_mask_ack;
 	}
 
+	/* disable bits matching force_disable before registering irqs */
+	if (desc->force_disable)
+		intc_enable_disable_enum(desc, d, desc->force_disable, 0);
+
+	/* disable bits matching force_enable before registering irqs */
+	if (desc->force_enable)
+		intc_enable_disable_enum(desc, d, desc->force_enable, 0);
+
 	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
 
 	/* register the vectors one by one */
-	for (i = 0; i < desc->nr_vectors; i++) {
-		struct intc_vect *vect = desc->vectors + i;
+	for (i = 0; i < hw->nr_vectors; i++) {
+		struct intc_vect *vect = hw->vectors + i;
 		unsigned int irq = evt2irq(vect->vect);
 		struct irq_desc *irq_desc;
 
@@ -764,8 +878,8 @@ void __init register_intc_controller(struct intc_desc *desc)
 
 		intc_register_irq(desc, d, vect->enum_id, irq);
 
-		for (k = i + 1; k < desc->nr_vectors; k++) {
-			struct intc_vect *vect2 = desc->vectors + k;
+		for (k = i + 1; k < hw->nr_vectors; k++) {
+			struct intc_vect *vect2 = hw->vectors + k;
 			unsigned int irq2 = evt2irq(vect2->vect);
 
 			if (vect->enum_id != vect2->enum_id)
@@ -785,11 +899,15 @@ void __init register_intc_controller(struct intc_desc *desc)
 			vect2->enum_id = 0;
 
 			/* redirect this interrupts to the first one */
-			set_irq_chip_and_handler_name(irq2, &d->chip,
-						      intc_redirect_irq, "redirect");
+			set_irq_chip(irq2, &dummy_irq_chip);
+			set_irq_chained_handler(irq2, intc_redirect_irq);
 			set_irq_data(irq2, (void *)irq);
 		}
 	}
+
+	/* enable bits matching force_enable after registering irqs */
+	if (desc->force_enable)
+		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
 }
 
 static int intc_suspend(struct sys_device *dev, pm_message_t state)
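
Worth noting is the ordering in register_intc_controller() above: both force_disable and force_enable sources are masked before any vector is registered (registration masks each IRQ anyway), and the force_enable bits are only switched on once all vectors are in place, so nothing fires into a half-initialised table. In a CPU descriptor the new fields sit alongside the relocated hw block, roughly as in this sketch (the enum values and register tables are placeholders; INTC_HW_DESC is the initialiser this series introduces for struct intc_hw_desc):

    static struct intc_desc intc_desc __initdata = {
            .name = "sketch",
            .force_enable = ENABLED,        /* turned on after registration */
            .force_disable = DISABLED,      /* masked off up front */
            .hw = INTC_HW_DESC(vectors, groups, mask_registers,
                               prio_registers, sense_registers, ack_registers),
    };
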
@@ -872,7 +990,7 @@ device_initcall(register_intc_sysdevs);
 /*
  * Dynamic IRQ allocation and deallocation
  */
-static unsigned int create_irq_on_node(unsigned int irq_want, int node)
+unsigned int create_irq_nr(unsigned int irq_want, int node)
 {
 	unsigned int irq = 0, new;
 	unsigned long flags;
@@ -881,24 +999,28 @@ static unsigned int create_irq_on_node(unsigned int irq_want, int node)
 	spin_lock_irqsave(&vector_lock, flags);
 
 	/*
-	 * First try the wanted IRQ, then scan.
+	 * First try the wanted IRQ
 	 */
-	if (test_and_set_bit(irq_want, intc_irq_map)) {
+	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
+		new = irq_want;
+	} else {
+		/* .. then fall back to scanning. */
 		new = find_first_zero_bit(intc_irq_map, nr_irqs);
 		if (unlikely(new == nr_irqs))
 			goto out_unlock;
 
-		desc = irq_to_desc_alloc_node(new, node);
-		if (unlikely(!desc)) {
-			pr_info("can't get irq_desc for %d\n", new);
-			goto out_unlock;
-		}
-
-		desc = move_irq_desc(desc, node);
 		__set_bit(new, intc_irq_map);
-		irq = new;
 	}
 
+	desc = irq_to_desc_alloc_node(new, node);
+	if (unlikely(!desc)) {
+		pr_info("can't get irq_desc for %d\n", new);
+		goto out_unlock;
+	}
+
+	desc = move_irq_desc(desc, node);
+	irq = new;
+
 out_unlock:
 	spin_unlock_irqrestore(&vector_lock, flags);
 
@@ -913,7 +1035,7 @@ int create_irq(void)
 	int nid = cpu_to_node(smp_processor_id());
 	int irq;
 
-	irq = create_irq_on_node(NR_IRQS_LEGACY, nid);
+	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
 	if (irq == 0)
 		irq = -1;
 
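
Renamed from create_irq_on_node() and no longer static, create_irq_nr() claims the exact number it is asked for when free and only then falls back to scanning intc_irq_map; in both paths the irq_desc allocation now happens once, after the bit is reserved. It returns the reserved IRQ number, or 0 on failure. A hedged caller sketch (the preferred vector is made up):

    static int __init sketch_request_vector(void)
    {
            /* prefer IRQ 64, but accept any free slot */
            unsigned int irq = create_irq_nr(64, numa_node_id());

            if (!irq)
                    return -ENOSPC; /* map full, or no irq_desc */

            /* irq is now reserved with an irq_desc on this node */
            return irq;
    }
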
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index 082604edc4c2..cf0303acab8e 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -337,12 +337,39 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
 		if (!enum_id)
 			break;
 
+		/* first check if this is a function enum */
 		in_range = enum_in_range(enum_id, &gpioc->function);
-		if (!in_range && range) {
-			in_range = enum_in_range(enum_id, range);
-
-			if (in_range && enum_id == range->force)
-				continue;
+		if (!in_range) {
+			/* not a function enum */
+			if (range) {
+				/*
+				 * other range exists, so this pin is
+				 * a regular GPIO pin that now is being
+				 * bound to a specific direction.
+				 *
+				 * for this case we only allow function enums
+				 * and the enums that match the other range.
+				 */
+				in_range = enum_in_range(enum_id, range);
+
+				/*
+				 * special case pass through for fixed
+				 * input-only or output-only pins without
+				 * function enum register association.
+				 */
+				if (in_range && enum_id == range->force)
+					continue;
+			} else {
+				/*
+				 * no other range exists, so this pin
+				 * must then be of the function type.
+				 *
+				 * allow function type pins to select
+				 * any combination of function/in/out
+				 * in their MARK lists.
+				 */
+				in_range = 1;
+			}
 		}
 
 		if (!in_range)
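
Condensed, the reworked check in pinmux_config_gpio() sorts every enum in a pin's MARK list into one of these outcomes (a summary of the logic above, not new behaviour):

    function enum                           -> accept, program it
    direction range given, enum inside it   -> accept, program it
        ...and enum == range->force         -> fixed in/out pin, skip the write
    direction range given, enum outside it  -> reject
    no direction range (function-type pin)  -> accept any function/in/out enum
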
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 53f8f1100e81..f9975100d56d 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -831,7 +831,7 @@ static int __devinit pvr2fb_common_init(void)
 	printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
 
 	pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
-			      fb_info->fix.id, pgprot_val(PAGE_SHARED));
+			      fb_info->fix.id, PAGE_SHARED);
 
 	printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
 	       fb_info->node, pvr2fb_map);
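
This one-liner tracks an sq_remap() prototype change on the arch/sh side: the store-queue mapping helper now takes the page protection as a pgprot_t instead of its raw unsigned long value, so the call site drops the pgprot_val() unwrapping. The updated prototype would be roughly (a sketch inferred from this call site, not quoted from the header):

    unsigned long sq_remap(unsigned long phys, unsigned int size,
                           const char *name, pgprot_t prot);
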
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index a69830d26f7f..8d7653e56df5 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/vmalloc.h>
+#include <linux/ioctl.h>
 #include <video/sh_mobile_lcdc.h>
 #include <asm/atomic.h>
 
@@ -106,6 +107,7 @@ static unsigned long lcdc_offs_sublcd[NR_CH_REGS] = {
 #define LDRCNTR_SRC	0x00010000
 #define LDRCNTR_MRS	0x00000002
 #define LDRCNTR_MRC	0x00000001
+#define LDSR_MRS	0x00000100
 
 struct sh_mobile_lcdc_priv;
 struct sh_mobile_lcdc_chan {
@@ -122,8 +124,8 @@ struct sh_mobile_lcdc_chan {
 	struct scatterlist *sglist;
 	unsigned long frame_end;
 	unsigned long pan_offset;
-	unsigned long new_pan_offset;
 	wait_queue_head_t frame_end_wait;
+	struct completion vsync_completion;
 };
 
 struct sh_mobile_lcdc_priv {
@@ -366,19 +368,8 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data)
 		}
 
 		/* VSYNC End */
-		if (ldintr & LDINTR_VES) {
-			unsigned long ldrcntr = lcdc_read(priv, _LDRCNTR);
-			/* Set the source address for the next refresh */
-			lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle +
-					       ch->new_pan_offset);
-			if (lcdc_chan_is_sublcd(ch))
-				lcdc_write(ch->lcdc, _LDRCNTR,
-					   ldrcntr ^ LDRCNTR_SRS);
-			else
-				lcdc_write(ch->lcdc, _LDRCNTR,
-					   ldrcntr ^ LDRCNTR_MRS);
-			ch->pan_offset = ch->new_pan_offset;
-		}
+		if (ldintr & LDINTR_VES)
+			complete(&ch->vsync_completion);
 	}
 
 	return IRQ_HANDLED;
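
With the panning work moved out of the interrupt handler, the VSYNC-end interrupt is reduced to signalling ch->vsync_completion, and the sleeping side runs in process context with a bounded wait (sh_mobile_wait_for_vsync() below). The handshake in isolation, as a generic sketch of the completion pattern rather than driver code:

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(vsync_done);  /* the driver keeps one per
                                               channel, via init_completion() */

    static irqreturn_t sketch_irq(int irq, void *data)
    {
            complete(&vsync_done);          /* safe from hard IRQ context */
            return IRQ_HANDLED;
    }

    static int sketch_wait(void)
    {
            long ret;

            ret = wait_for_completion_interruptible_timeout(&vsync_done,
                                                    msecs_to_jiffies(100));
            if (!ret)
                    return -ETIMEDOUT;      /* 0 means the timeout elapsed */
            if (ret < 0)
                    return ret;             /* -ERESTARTSYS on a signal */
            return 0;                       /* positive: jiffies left over */
    }
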
@@ -767,25 +758,69 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
 				    struct fb_info *info)
 {
 	struct sh_mobile_lcdc_chan *ch = info->par;
+	struct sh_mobile_lcdc_priv *priv = ch->lcdc;
+	unsigned long ldrcntr;
+	unsigned long new_pan_offset;
+
+	new_pan_offset = (var->yoffset * info->fix.line_length) +
+		(var->xoffset * (info->var.bits_per_pixel / 8));
 
-	if (info->var.xoffset == var->xoffset &&
-	    info->var.yoffset == var->yoffset)
+	if (new_pan_offset == ch->pan_offset)
 		return 0;	/* No change, do nothing */
 
-	ch->new_pan_offset = (var->yoffset * info->fix.line_length) +
-		(var->xoffset * (info->var.bits_per_pixel / 8));
+	ldrcntr = lcdc_read(priv, _LDRCNTR);
 
-	if (ch->new_pan_offset != ch->pan_offset) {
-		unsigned long ldintr;
-		ldintr = lcdc_read(ch->lcdc, _LDINTR);
-		ldintr |= LDINTR_VEE;
-		lcdc_write(ch->lcdc, _LDINTR, ldintr);
-		sh_mobile_lcdc_deferred_io_touch(info);
-	}
+	/* Set the source address for the next refresh */
+	lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle + new_pan_offset);
+	if (lcdc_chan_is_sublcd(ch))
+		lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
+	else
+		lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS);
+
+	ch->pan_offset = new_pan_offset;
+
+	sh_mobile_lcdc_deferred_io_touch(info);
+
+	return 0;
+}
+
+static int sh_mobile_wait_for_vsync(struct fb_info *info)
+{
+	struct sh_mobile_lcdc_chan *ch = info->par;
+	unsigned long ldintr;
+	int ret;
+
+	/* Enable VSync End interrupt */
+	ldintr = lcdc_read(ch->lcdc, _LDINTR);
+	ldintr |= LDINTR_VEE;
+	lcdc_write(ch->lcdc, _LDINTR, ldintr);
+
+	ret = wait_for_completion_interruptible_timeout(&ch->vsync_completion,
+							msecs_to_jiffies(100));
+	if (!ret)
+		return -ETIMEDOUT;
 
 	return 0;
 }
 
+static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd,
+			   unsigned long arg)
+{
+	int retval;
+
+	switch (cmd) {
+	case FBIO_WAITFORVSYNC:
+		retval = sh_mobile_wait_for_vsync(info);
+		break;
+
+	default:
+		retval = -ENOIOCTLCMD;
+		break;
+	}
+	return retval;
+}
+
+
 static struct fb_ops sh_mobile_lcdc_ops = {
 	.owner		= THIS_MODULE,
 	.fb_setcolreg	= sh_mobile_lcdc_setcolreg,
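
From user space the new ioctl behaves like the stock fbdev vsync wait. A minimal sketch, assuming the FBIO_WAITFORVSYNC define is available locally (historically it lived in driver headers such as matroxfb.h rather than <linux/fb.h>):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fb.h>

    #ifndef FBIO_WAITFORVSYNC
    #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
    #endif

    int main(void)
    {
            __u32 screen = 0;
            int fd = open("/dev/fb0", O_RDWR);

            if (fd < 0)
                    return 1;

            /* blocks until VSYNC end; errno is ETIMEDOUT after ~100ms */
            if (ioctl(fd, FBIO_WAITFORVSYNC, &screen) < 0)
                    perror("FBIO_WAITFORVSYNC");

            return 0;
    }
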
@@ -795,6 +830,7 @@ static struct fb_ops sh_mobile_lcdc_ops = {
 	.fb_copyarea	= sh_mobile_lcdc_copyarea,
 	.fb_imageblit	= sh_mobile_lcdc_imageblit,
 	.fb_pan_display = sh_mobile_fb_pan_display,
+	.fb_ioctl	= sh_mobile_ioctl,
 };
 
 static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
@@ -962,8 +998,8 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
 			goto err1;
 		}
 		init_waitqueue_head(&priv->ch[i].frame_end_wait);
+		init_completion(&priv->ch[i].vsync_completion);
 		priv->ch[j].pan_offset = 0;
-		priv->ch[j].new_pan_offset = 0;
 
 		switch (pdata->ch[i].chan) {
 		case LCDC_CHAN_MAINLCD: