author    Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 14:11:23 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 14:11:23 -0400
commit    928a726b0e12184729900c076e13dbf1c511c96c (patch)
tree      f31a7f23c1b511ebb486598cc746786e1821d48c /drivers
parent    8ff64b539bfd998792614481ccb67139b97075ef (diff)
parent    eaeed5d31d8ded02fa0a4b608f57418cc0e65b07 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (96 commits)
sh: add support for SMSC Polaris platform
sh: fix the HD64461 level-triggered interrupts handling
sh: sh-rtc wakeup support
sh: sh-rtc invalid time rework
sh: sh-rtc carry interrupt rework
sh: disallow kexec virtual entry
sh: kexec jump: fix for ftrace.
sh: kexec: Drop SR.BL bit toggling.
sh: add kexec jump support
sh: rework kexec segment code
sh: simplify kexec vbr code
sh: Flush only the needed range when unmapping a VMA.
sh: Update debugfs ASID dumping for 16-bit ASID support.
sh: tlb-pteaex: Kill off legacy PTEA updates.
sh: Support for extended ASIDs on PTEAEX-capable SH-X3 cores.
sh: sh7763rdp: Change IRQ number for sh_eth of sh7763rdp
sh: espt-giga board support
sh: dma: Make G2 DMA configurable.
sh: dma: Make PVR2 DMA configurable.
sh: Move IRQ multi definition of DMAC to defconfig
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clocksource/Makefile           |   1
-rw-r--r--  drivers/clocksource/sh_cmt.c           | 615
-rw-r--r--  drivers/input/joystick/maplecontrol.c  |   4
-rw-r--r--  drivers/input/keyboard/maple_keyb.c    |  37
-rw-r--r--  drivers/input/keyboard/sh_keysc.c      |  26
-rw-r--r--  drivers/mtd/maps/Kconfig               |  12
-rw-r--r--  drivers/mtd/maps/Makefile              |   1
-rw-r--r--  drivers/mtd/maps/vmu-flash.c           | 832
-rw-r--r--  drivers/rtc/rtc-sh.c                   | 246
-rw-r--r--  drivers/serial/sh-sci.c                |  12
-rw-r--r--  drivers/serial/sh-sci.h                |  15
-rw-r--r--  drivers/sh/intc.c                      |  47
-rw-r--r--  drivers/sh/maple/maple.c               | 472
-rw-r--r--  drivers/sh/superhyway/superhyway.c     |   4
-rw-r--r--  drivers/usb/Kconfig                    |   1
-rw-r--r--  drivers/usb/host/ohci-hcd.c            |   3
-rw-r--r--  drivers/video/pvr2fb.c                 |  16
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c       |  66
18 files changed, 2043 insertions(+), 367 deletions(-)
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1525882190fd..1efb2879a94f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
| 2 | obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o | 2 | obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o |
| 3 | obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o | 3 | obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o |
| 4 | obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o | 4 | obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o |
| 5 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o | ||
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
new file mode 100644
index 000000000000..7783b42f6914
--- /dev/null
+++ b/drivers/clocksource/sh_cmt.c
@@ -0,0 +1,615 @@
| 1 | /* | ||
| 2 | * SuperH Timer Support - CMT | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Magnus Damm | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/init.h> | ||
| 21 | #include <linux/bootmem.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/spinlock.h> | ||
| 24 | #include <linux/interrupt.h> | ||
| 25 | #include <linux/ioport.h> | ||
| 26 | #include <linux/io.h> | ||
| 27 | #include <linux/clk.h> | ||
| 28 | #include <linux/irq.h> | ||
| 29 | #include <linux/err.h> | ||
| 30 | #include <linux/clocksource.h> | ||
| 31 | #include <linux/clockchips.h> | ||
| 32 | #include <linux/sh_cmt.h> | ||
| 33 | |||
| 34 | struct sh_cmt_priv { | ||
| 35 | void __iomem *mapbase; | ||
| 36 | struct clk *clk; | ||
| 37 | unsigned long width; /* 16 or 32 bit version of hardware block */ | ||
| 38 | unsigned long overflow_bit; | ||
| 39 | unsigned long clear_bits; | ||
| 40 | struct irqaction irqaction; | ||
| 41 | struct platform_device *pdev; | ||
| 42 | |||
| 43 | unsigned long flags; | ||
| 44 | unsigned long match_value; | ||
| 45 | unsigned long next_match_value; | ||
| 46 | unsigned long max_match_value; | ||
| 47 | unsigned long rate; | ||
| 48 | spinlock_t lock; | ||
| 49 | struct clock_event_device ced; | ||
| 50 | unsigned long total_cycles; | ||
| 51 | }; | ||
| 52 | |||
| 53 | static DEFINE_SPINLOCK(sh_cmt_lock); | ||
| 54 | |||
| 55 | #define CMSTR -1 /* shared register */ | ||
| 56 | #define CMCSR 0 /* channel register */ | ||
| 57 | #define CMCNT 1 /* channel register */ | ||
| 58 | #define CMCOR 2 /* channel register */ | ||
| 59 | |||
| 60 | static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr) | ||
| 61 | { | ||
| 62 | struct sh_cmt_config *cfg = p->pdev->dev.platform_data; | ||
| 63 | void __iomem *base = p->mapbase; | ||
| 64 | unsigned long offs; | ||
| 65 | |||
| 66 | if (reg_nr == CMSTR) { | ||
| 67 | offs = 0; | ||
| 68 | base -= cfg->channel_offset; | ||
| 69 | } else | ||
| 70 | offs = reg_nr; | ||
| 71 | |||
| 72 | if (p->width == 16) | ||
| 73 | offs <<= 1; | ||
| 74 | else { | ||
| 75 | offs <<= 2; | ||
| 76 | if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) | ||
| 77 | return ioread32(base + offs); | ||
| 78 | } | ||
| 79 | |||
| 80 | return ioread16(base + offs); | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr, | ||
| 84 | unsigned long value) | ||
| 85 | { | ||
| 86 | struct sh_cmt_config *cfg = p->pdev->dev.platform_data; | ||
| 87 | void __iomem *base = p->mapbase; | ||
| 88 | unsigned long offs; | ||
| 89 | |||
| 90 | if (reg_nr == CMSTR) { | ||
| 91 | offs = 0; | ||
| 92 | base -= cfg->channel_offset; | ||
| 93 | } else | ||
| 94 | offs = reg_nr; | ||
| 95 | |||
| 96 | if (p->width == 16) | ||
| 97 | offs <<= 1; | ||
| 98 | else { | ||
| 99 | offs <<= 2; | ||
| 100 | if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) { | ||
| 101 | iowrite32(value, base + offs); | ||
| 102 | return; | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | iowrite16(value, base + offs); | ||
| 107 | } | ||
| 108 | |||
| 109 | static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, | ||
| 110 | int *has_wrapped) | ||
| 111 | { | ||
| 112 | unsigned long v1, v2, v3; | ||
| 113 | |||
| 114 | /* Make sure the timer value is stable. Stolen from acpi_pm.c */ | ||
| 115 | do { | ||
| 116 | v1 = sh_cmt_read(p, CMCNT); | ||
| 117 | v2 = sh_cmt_read(p, CMCNT); | ||
| 118 | v3 = sh_cmt_read(p, CMCNT); | ||
| 119 | } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) | ||
| 120 | || (v3 > v1 && v3 < v2))); | ||
| 121 | |||
| 122 | *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit; | ||
| 123 | return v2; | ||
| 124 | } | ||
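The triple-read loop in sh_cmt_get_counter() is a lock-free idiom (borrowed, as its comment says, from acpi_pm.c) for sampling a counter that can tick between reads: v2 is only trusted once it orders consistently with its neighbours. A minimal self-contained sketch of the same idiom, where read_counter() is a hypothetical stand-in for sh_cmt_read(p, CMCNT):

```c
/* Hypothetical stand-in for the hardware counter read. */
extern unsigned long read_counter(void);

/* Re-read until the middle sample lies between its neighbours
 * (allowing for at most one wrap across the three reads); only
 * then is v2 a trustworthy snapshot of a moving counter. */
static unsigned long read_stable(void)
{
	unsigned long v1, v2, v3;

	do {
		v1 = read_counter();
		v2 = read_counter();
		v3 = read_counter();
	} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
		 || (v3 > v1 && v3 < v2));

	return v2;
}
```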
| 125 | |||
| 126 | |||
| 127 | static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | ||
| 128 | { | ||
| 129 | struct sh_cmt_config *cfg = p->pdev->dev.platform_data; | ||
| 130 | unsigned long flags, value; | ||
| 131 | |||
| 132 | /* start stop register shared by multiple timer channels */ | ||
| 133 | spin_lock_irqsave(&sh_cmt_lock, flags); | ||
| 134 | value = sh_cmt_read(p, CMSTR); | ||
| 135 | |||
| 136 | if (start) | ||
| 137 | value |= 1 << cfg->timer_bit; | ||
| 138 | else | ||
| 139 | value &= ~(1 << cfg->timer_bit); | ||
| 140 | |||
| 141 | sh_cmt_write(p, CMSTR, value); | ||
| 142 | spin_unlock_irqrestore(&sh_cmt_lock, flags); | ||
| 143 | } | ||
| 144 | |||
| 145 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) | ||
| 146 | { | ||
| 147 | struct sh_cmt_config *cfg = p->pdev->dev.platform_data; | ||
| 148 | int ret; | ||
| 149 | |||
| 150 | /* enable clock */ | ||
| 151 | ret = clk_enable(p->clk); | ||
| 152 | if (ret) { | ||
| 153 | pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk); | ||
| 154 | return ret; | ||
| 155 | } | ||
| 156 | *rate = clk_get_rate(p->clk) / 8; | ||
| 157 | |||
| 158 | /* make sure channel is disabled */ | ||
| 159 | sh_cmt_start_stop_ch(p, 0); | ||
| 160 | |||
| 161 | /* configure channel, periodic mode and maximum timeout */ | ||
| 162 | if (p->width == 16) | ||
| 163 | sh_cmt_write(p, CMCSR, 0); | ||
| 164 | else | ||
| 165 | sh_cmt_write(p, CMCSR, 0x01a4); | ||
| 166 | |||
| 167 | sh_cmt_write(p, CMCOR, 0xffffffff); | ||
| 168 | sh_cmt_write(p, CMCNT, 0); | ||
| 169 | |||
| 170 | /* enable channel */ | ||
| 171 | sh_cmt_start_stop_ch(p, 1); | ||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
| 175 | static void sh_cmt_disable(struct sh_cmt_priv *p) | ||
| 176 | { | ||
| 177 | /* disable channel */ | ||
| 178 | sh_cmt_start_stop_ch(p, 0); | ||
| 179 | |||
| 180 | /* stop clock */ | ||
| 181 | clk_disable(p->clk); | ||
| 182 | } | ||
| 183 | |||
| 184 | /* private flags */ | ||
| 185 | #define FLAG_CLOCKEVENT (1 << 0) | ||
| 186 | #define FLAG_CLOCKSOURCE (1 << 1) | ||
| 187 | #define FLAG_REPROGRAM (1 << 2) | ||
| 188 | #define FLAG_SKIPEVENT (1 << 3) | ||
| 189 | #define FLAG_IRQCONTEXT (1 << 4) | ||
| 190 | |||
| 191 | static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, | ||
| 192 | int absolute) | ||
| 193 | { | ||
| 194 | unsigned long new_match; | ||
| 195 | unsigned long value = p->next_match_value; | ||
| 196 | unsigned long delay = 0; | ||
| 197 | unsigned long now = 0; | ||
| 198 | int has_wrapped; | ||
| 199 | |||
| 200 | now = sh_cmt_get_counter(p, &has_wrapped); | ||
| 201 | p->flags |= FLAG_REPROGRAM; /* force reprogram */ | ||
| 202 | |||
| 203 | if (has_wrapped) { | ||
| 204 | /* we're competing with the interrupt handler. | ||
| 205 | * -> let the interrupt handler reprogram the timer. | ||
| 206 | * -> interrupt number two handles the event. | ||
| 207 | */ | ||
| 208 | p->flags |= FLAG_SKIPEVENT; | ||
| 209 | return; | ||
| 210 | } | ||
| 211 | |||
| 212 | if (absolute) | ||
| 213 | now = 0; | ||
| 214 | |||
| 215 | do { | ||
| 216 | /* reprogram the timer hardware, | ||
| 217 | * but don't save the new match value yet. | ||
| 218 | */ | ||
| 219 | new_match = now + value + delay; | ||
| 220 | if (new_match > p->max_match_value) | ||
| 221 | new_match = p->max_match_value; | ||
| 222 | |||
| 223 | sh_cmt_write(p, CMCOR, new_match); | ||
| 224 | |||
| 225 | now = sh_cmt_get_counter(p, &has_wrapped); | ||
| 226 | if (has_wrapped && (new_match > p->match_value)) { | ||
| 227 | /* we are changing to a greater match value, | ||
| 228 | * so this wrap must be caused by the counter | ||
| 229 | * matching the old value. | ||
| 230 | * -> first interrupt reprograms the timer. | ||
| 231 | * -> interrupt number two handles the event. | ||
| 232 | */ | ||
| 233 | p->flags |= FLAG_SKIPEVENT; | ||
| 234 | break; | ||
| 235 | } | ||
| 236 | |||
| 237 | if (has_wrapped) { | ||
| 238 | /* we are changing to a smaller match value, | ||
| 239 | * so the wrap must be caused by the counter | ||
| 240 | * matching the new value. | ||
| 241 | * -> save programmed match value. | ||
| 242 | * -> let isr handle the event. | ||
| 243 | */ | ||
| 244 | p->match_value = new_match; | ||
| 245 | break; | ||
| 246 | } | ||
| 247 | |||
| 248 | /* be safe: verify hardware settings */ | ||
| 249 | if (now < new_match) { | ||
| 250 | /* timer value is below match value, all good. | ||
| 251 | * this makes sure we won't miss any match events. | ||
| 252 | * -> save programmed match value. | ||
| 253 | * -> let isr handle the event. | ||
| 254 | */ | ||
| 255 | p->match_value = new_match; | ||
| 256 | break; | ||
| 257 | } | ||
| 258 | |||
| 259 | /* the counter has reached a value greater | ||
| 260 | * than our new match value. and since the | ||
| 261 | * has_wrapped flag isn't set we must have | ||
| 262 | * programmed a too close event. | ||
| 263 | * -> increase delay and retry. | ||
| 264 | */ | ||
| 265 | if (delay) | ||
| 266 | delay <<= 1; | ||
| 267 | else | ||
| 268 | delay = 1; | ||
| 269 | |||
| 270 | if (!delay) | ||
| 271 | pr_warning("sh_cmt: too long delay\n"); | ||
| 272 | |||
| 273 | } while (delay); | ||
| 274 | } | ||
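Stripped of the CMT-specific wrap handling, the function above is a program-and-verify pattern with exponential backoff: write the compare value, re-read the counter, and if the counter has already passed the value, retry further into the future. A hedged sketch of just that skeleton (write_match() and read_now() are stand-ins, not driver functions):

```c
/* Illustrative skeleton only: write_match() and read_now() are
 * hypothetical stand-ins for sh_cmt_write(p, CMCOR, ...) and
 * sh_cmt_get_counter(). */
extern void write_match(unsigned long match);
extern unsigned long read_now(void);

static void program_and_verify(unsigned long now, unsigned long value,
			       unsigned long max_match)
{
	unsigned long delay = 0, new_match;

	do {
		new_match = now + value + delay;
		if (new_match > max_match)
			new_match = max_match;
		write_match(new_match);

		now = read_now();
		if (now < new_match)
			break;			/* match still ahead: done */

		delay = delay ? delay << 1 : 1;	/* programmed too close: back off */
	} while (delay);			/* delay wrapping to 0 means give up */
}
```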
| 275 | |||
| 276 | static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) | ||
| 277 | { | ||
| 278 | unsigned long flags; | ||
| 279 | |||
| 280 | if (delta > p->max_match_value) | ||
| 281 | pr_warning("sh_cmt: delta out of range\n"); | ||
| 282 | |||
| 283 | spin_lock_irqsave(&p->lock, flags); | ||
| 284 | p->next_match_value = delta; | ||
| 285 | sh_cmt_clock_event_program_verify(p, 0); | ||
| 286 | spin_unlock_irqrestore(&p->lock, flags); | ||
| 287 | } | ||
| 288 | |||
| 289 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) | ||
| 290 | { | ||
| 291 | struct sh_cmt_priv *p = dev_id; | ||
| 292 | |||
| 293 | /* clear flags */ | ||
| 294 | sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits); | ||
| 295 | |||
| 296 | /* update clock source counter to begin with, if enabled; | ||
| 297 | * the wrap flag should be cleared by the timer specific | ||
| 298 | * isr before we end up here. | ||
| 299 | */ | ||
| 300 | if (p->flags & FLAG_CLOCKSOURCE) | ||
| 301 | p->total_cycles += p->match_value; | ||
| 302 | |||
| 303 | if (!(p->flags & FLAG_REPROGRAM)) | ||
| 304 | p->next_match_value = p->max_match_value; | ||
| 305 | |||
| 306 | p->flags |= FLAG_IRQCONTEXT; | ||
| 307 | |||
| 308 | if (p->flags & FLAG_CLOCKEVENT) { | ||
| 309 | if (!(p->flags & FLAG_SKIPEVENT)) { | ||
| 310 | if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { | ||
| 311 | p->next_match_value = p->max_match_value; | ||
| 312 | p->flags |= FLAG_REPROGRAM; | ||
| 313 | } | ||
| 314 | |||
| 315 | p->ced.event_handler(&p->ced); | ||
| 316 | } | ||
| 317 | } | ||
| 318 | |||
| 319 | p->flags &= ~FLAG_SKIPEVENT; | ||
| 320 | |||
| 321 | if (p->flags & FLAG_REPROGRAM) { | ||
| 322 | p->flags &= ~FLAG_REPROGRAM; | ||
| 323 | sh_cmt_clock_event_program_verify(p, 1); | ||
| 324 | |||
| 325 | if (p->flags & FLAG_CLOCKEVENT) | ||
| 326 | if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) | ||
| 327 | || (p->match_value == p->next_match_value)) | ||
| 328 | p->flags &= ~FLAG_REPROGRAM; | ||
| 329 | } | ||
| 330 | |||
| 331 | p->flags &= ~FLAG_IRQCONTEXT; | ||
| 332 | |||
| 333 | return IRQ_HANDLED; | ||
| 334 | } | ||
| 335 | |||
| 336 | static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | ||
| 337 | { | ||
| 338 | int ret = 0; | ||
| 339 | unsigned long flags; | ||
| 340 | |||
| 341 | spin_lock_irqsave(&p->lock, flags); | ||
| 342 | |||
| 343 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) | ||
| 344 | ret = sh_cmt_enable(p, &p->rate); | ||
| 345 | |||
| 346 | if (ret) | ||
| 347 | goto out; | ||
| 348 | p->flags |= flag; | ||
| 349 | |||
| 350 | /* setup timeout if no clockevent */ | ||
| 351 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) | ||
| 352 | sh_cmt_set_next(p, p->max_match_value); | ||
| 353 | out: | ||
| 354 | spin_unlock_irqrestore(&p->lock, flags); | ||
| 355 | |||
| 356 | return ret; | ||
| 357 | } | ||
| 358 | |||
| 359 | static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | ||
| 360 | { | ||
| 361 | unsigned long flags; | ||
| 362 | unsigned long f; | ||
| 363 | |||
| 364 | spin_lock_irqsave(&p->lock, flags); | ||
| 365 | |||
| 366 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); | ||
| 367 | p->flags &= ~flag; | ||
| 368 | |||
| 369 | if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) | ||
| 370 | sh_cmt_disable(p); | ||
| 371 | |||
| 372 | /* adjust the timeout to maximum if only clocksource left */ | ||
| 373 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) | ||
| 374 | sh_cmt_set_next(p, p->max_match_value); | ||
| 375 | |||
| 376 | spin_unlock_irqrestore(&p->lock, flags); | ||
| 377 | } | ||
| 378 | |||
| 379 | static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) | ||
| 380 | { | ||
| 381 | return container_of(ced, struct sh_cmt_priv, ced); | ||
| 382 | } | ||
| 383 | |||
| 384 | static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) | ||
| 385 | { | ||
| 386 | struct clock_event_device *ced = &p->ced; | ||
| 387 | |||
| 388 | sh_cmt_start(p, FLAG_CLOCKEVENT); | ||
| 389 | |||
| 390 | /* TODO: calculate good shift from rate and counter bit width */ | ||
| 391 | |||
| 392 | ced->shift = 32; | ||
| 393 | ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); | ||
| 394 | ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); | ||
| 395 | ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); | ||
| 396 | |||
| 397 | if (periodic) | ||
| 398 | sh_cmt_set_next(p, (p->rate + HZ/2) / HZ); | ||
| 399 | else | ||
| 400 | sh_cmt_set_next(p, p->max_match_value); | ||
| 401 | } | ||
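As a worked example with assumed numbers (illustrative, not taken from the patch): a 33 MHz module clock gives rate = 33000000 / 8 = 4125000 Hz after the divide-by-8 in sh_cmt_enable(), so with shift = 32 the div_sc() call yields mult = 4125000 * 2^32 / 10^9 ≈ 17716740, and with HZ = 100 the periodic case programs (4125000 + 50) / 100 = 41250 counts per tick.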
| 402 | |||
| 403 | static void sh_cmt_clock_event_mode(enum clock_event_mode mode, | ||
| 404 | struct clock_event_device *ced) | ||
| 405 | { | ||
| 406 | struct sh_cmt_priv *p = ced_to_sh_cmt(ced); | ||
| 407 | |||
| 408 | /* deal with old setting first */ | ||
| 409 | switch (ced->mode) { | ||
| 410 | case CLOCK_EVT_MODE_PERIODIC: | ||
| 411 | case CLOCK_EVT_MODE_ONESHOT: | ||
| 412 | sh_cmt_stop(p, FLAG_CLOCKEVENT); | ||
| 413 | break; | ||
| 414 | default: | ||
| 415 | break; | ||
| 416 | } | ||
| 417 | |||
| 418 | switch (mode) { | ||
| 419 | case CLOCK_EVT_MODE_PERIODIC: | ||
| 420 | pr_info("sh_cmt: %s used for periodic clock events\n", | ||
| 421 | ced->name); | ||
| 422 | sh_cmt_clock_event_start(p, 1); | ||
| 423 | break; | ||
| 424 | case CLOCK_EVT_MODE_ONESHOT: | ||
| 425 | pr_info("sh_cmt: %s used for oneshot clock events\n", | ||
| 426 | ced->name); | ||
| 427 | sh_cmt_clock_event_start(p, 0); | ||
| 428 | break; | ||
| 429 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
| 430 | case CLOCK_EVT_MODE_UNUSED: | ||
| 431 | sh_cmt_stop(p, FLAG_CLOCKEVENT); | ||
| 432 | break; | ||
| 433 | default: | ||
| 434 | break; | ||
| 435 | } | ||
| 436 | } | ||
| 437 | |||
| 438 | static int sh_cmt_clock_event_next(unsigned long delta, | ||
| 439 | struct clock_event_device *ced) | ||
| 440 | { | ||
| 441 | struct sh_cmt_priv *p = ced_to_sh_cmt(ced); | ||
| 442 | |||
| 443 | BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); | ||
| 444 | if (likely(p->flags & FLAG_IRQCONTEXT)) | ||
| 445 | p->next_match_value = delta; | ||
| 446 | else | ||
| 447 | sh_cmt_set_next(p, delta); | ||
| 448 | |||
| 449 | return 0; | ||
| 450 | } | ||
| 451 | |||
| 452 | static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, | ||
| 453 | char *name, unsigned long rating) | ||
| 454 | { | ||
| 455 | struct clock_event_device *ced = &p->ced; | ||
| 456 | |||
| 457 | memset(ced, 0, sizeof(*ced)); | ||
| 458 | |||
| 459 | ced->name = name; | ||
| 460 | ced->features = CLOCK_EVT_FEAT_PERIODIC; | ||
| 461 | ced->features |= CLOCK_EVT_FEAT_ONESHOT; | ||
| 462 | ced->rating = rating; | ||
| 463 | ced->cpumask = cpumask_of(0); | ||
| 464 | ced->set_next_event = sh_cmt_clock_event_next; | ||
| 465 | ced->set_mode = sh_cmt_clock_event_mode; | ||
| 466 | |||
| 467 | pr_info("sh_cmt: %s used for clock events\n", ced->name); | ||
| 468 | ced->mult = 1; /* work around misplaced WARN_ON() in clockevents.c */ | ||
| 469 | clockevents_register_device(ced); | ||
| 470 | } | ||
| 471 | |||
| 472 | int sh_cmt_register(struct sh_cmt_priv *p, char *name, | ||
| 473 | unsigned long clockevent_rating, | ||
| 474 | unsigned long clocksource_rating) | ||
| 475 | { | ||
| 476 | if (p->width == (sizeof(p->max_match_value) * 8)) | ||
| 477 | p->max_match_value = ~0; | ||
| 478 | else | ||
| 479 | p->max_match_value = (1 << p->width) - 1; | ||
| 480 | |||
| 481 | p->match_value = p->max_match_value; | ||
| 482 | spin_lock_init(&p->lock); | ||
| 483 | |||
| 484 | if (clockevent_rating) | ||
| 485 | sh_cmt_register_clockevent(p, name, clockevent_rating); | ||
| 486 | |||
| 487 | return 0; | ||
| 488 | } | ||
| 489 | |||
| 490 | static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | ||
| 491 | { | ||
| 492 | struct sh_cmt_config *cfg = pdev->dev.platform_data; | ||
| 493 | struct resource *res; | ||
| 494 | int irq, ret; | ||
| 495 | ret = -ENXIO; | ||
| 496 | |||
| 497 | memset(p, 0, sizeof(*p)); | ||
| 498 | p->pdev = pdev; | ||
| 499 | |||
| 500 | if (!cfg) { | ||
| 501 | dev_err(&p->pdev->dev, "missing platform data\n"); | ||
| 502 | goto err0; | ||
| 503 | } | ||
| 504 | |||
| 505 | platform_set_drvdata(pdev, p); | ||
| 506 | |||
| 507 | res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); | ||
| 508 | if (!res) { | ||
| 509 | dev_err(&p->pdev->dev, "failed to get I/O memory\n"); | ||
| 510 | goto err0; | ||
| 511 | } | ||
| 512 | |||
| 513 | irq = platform_get_irq(p->pdev, 0); | ||
| 514 | if (irq < 0) { | ||
| 515 | dev_err(&p->pdev->dev, "failed to get irq\n"); | ||
| 516 | goto err0; | ||
| 517 | } | ||
| 518 | |||
| 519 | /* map memory, let mapbase point to our channel */ | ||
| 520 | p->mapbase = ioremap_nocache(res->start, resource_size(res)); | ||
| 521 | if (p->mapbase == NULL) { | ||
| 522 | pr_err("sh_cmt: failed to remap I/O memory\n"); | ||
| 523 | goto err0; | ||
| 524 | } | ||
| 525 | |||
| 526 | /* request irq using setup_irq() (too early for request_irq()) */ | ||
| 527 | p->irqaction.name = cfg->name; | ||
| 528 | p->irqaction.handler = sh_cmt_interrupt; | ||
| 529 | p->irqaction.dev_id = p; | ||
| 530 | p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; | ||
| 531 | p->irqaction.mask = CPU_MASK_NONE; | ||
| 532 | ret = setup_irq(irq, &p->irqaction); | ||
| 533 | if (ret) { | ||
| 534 | pr_err("sh_cmt: failed to request irq %d\n", irq); | ||
| 535 | goto err1; | ||
| 536 | } | ||
| 537 | |||
| 538 | /* get hold of clock */ | ||
| 539 | p->clk = clk_get(&p->pdev->dev, cfg->clk); | ||
| 540 | if (IS_ERR(p->clk)) { | ||
| 541 | pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk); | ||
| 542 | ret = PTR_ERR(p->clk); | ||
| 543 | goto err2; | ||
| 544 | } | ||
| 545 | |||
| 546 | if (resource_size(res) == 6) { | ||
| 547 | p->width = 16; | ||
| 548 | p->overflow_bit = 0x80; | ||
| 549 | p->clear_bits = ~0xc0; | ||
| 550 | } else { | ||
| 551 | p->width = 32; | ||
| 552 | p->overflow_bit = 0x8000; | ||
| 553 | p->clear_bits = ~0xc000; | ||
| 554 | } | ||
| 555 | |||
| 556 | return sh_cmt_register(p, cfg->name, | ||
| 557 | cfg->clockevent_rating, | ||
| 558 | cfg->clocksource_rating); | ||
| 559 | err2: | ||
| 560 | free_irq(irq, p); | ||
| 561 | err1: | ||
| 562 | iounmap(p->mapbase); | ||
| 563 | err0: | ||
| 564 | return ret; | ||
| 565 | } | ||
| 566 | |||
| 567 | static int __devinit sh_cmt_probe(struct platform_device *pdev) | ||
| 568 | { | ||
| 569 | struct sh_cmt_priv *p = platform_get_drvdata(pdev); | ||
| 570 | int ret; | ||
| 571 | |||
| 572 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
| 573 | if (p == NULL) { | ||
| 574 | dev_err(&pdev->dev, "failed to allocate driver data\n"); | ||
| 575 | return -ENOMEM; | ||
| 576 | } | ||
| 577 | |||
| 578 | ret = sh_cmt_setup(p, pdev); | ||
| 579 | if (ret) { | ||
| 580 | kfree(p); | ||
| 581 | |||
| 582 | platform_set_drvdata(pdev, NULL); | ||
| 583 | } | ||
| 584 | return ret; | ||
| 585 | } | ||
| 586 | |||
| 587 | static int __devexit sh_cmt_remove(struct platform_device *pdev) | ||
| 588 | { | ||
| 589 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | ||
| 590 | } | ||
| 591 | |||
| 592 | static struct platform_driver sh_cmt_device_driver = { | ||
| 593 | .probe = sh_cmt_probe, | ||
| 594 | .remove = __devexit_p(sh_cmt_remove), | ||
| 595 | .driver = { | ||
| 596 | .name = "sh_cmt", | ||
| 597 | } | ||
| 598 | }; | ||
| 599 | |||
| 600 | static int __init sh_cmt_init(void) | ||
| 601 | { | ||
| 602 | return platform_driver_register(&sh_cmt_device_driver); | ||
| 603 | } | ||
| 604 | |||
| 605 | static void __exit sh_cmt_exit(void) | ||
| 606 | { | ||
| 607 | platform_driver_unregister(&sh_cmt_device_driver); | ||
| 608 | } | ||
| 609 | |||
| 610 | module_init(sh_cmt_init); | ||
| 611 | module_exit(sh_cmt_exit); | ||
| 612 | |||
| 613 | MODULE_AUTHOR("Magnus Damm"); | ||
| 614 | MODULE_DESCRIPTION("SuperH CMT Timer Driver"); | ||
| 615 | MODULE_LICENSE("GPL v2"); | ||
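The driver above binds to platform devices named "sh_cmt" and takes everything else from resources and platform data. A hypothetical board-file sketch of how such a channel might be registered — the struct sh_cmt_config fields are inferred from their uses in the code above, and every address, IRQ number and clock name here is a made-up example, not taken from any real board:

```c
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/sh_cmt.h>

/* All values below are illustrative assumptions. */
static struct sh_cmt_config cmt0_platform_data = {
	.name               = "CMT0",
	.channel_offset     = 0x20,	/* distance back from the channel base
					 * to the shared CMSTR register */
	.timer_bit          = 0,	/* this channel's bit in CMSTR */
	.clk                = "module_clk",
	.clockevent_rating  = 125,
	.clocksource_rating = 0,	/* no clocksource role for now */
};

static struct resource cmt0_resources[] = {
	[0] = {
		.name  = "CMT0",
		.start = 0xffd80060,		/* example channel base */
		.end   = 0xffd80060 + 0xc - 1,	/* size != 6 => 32-bit channel */
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 86,			/* example IRQ number */
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device cmt0_device = {
	.name          = "sh_cmt",
	.id            = 0,
	.dev = {
		.platform_data = &cmt0_platform_data,
	},
	.resource      = cmt0_resources,
	.num_resources = ARRAY_SIZE(cmt0_resources),
};
```

Registration would then go through platform_device_register(&cmt0_device) in the board setup code.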
diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c
index e50047bfe938..77cfde571bd9 100644
--- a/drivers/input/joystick/maplecontrol.c
+++ b/drivers/input/joystick/maplecontrol.c
@@ -3,7 +3,7 @@
| 3 | * Based on drivers/usb/iforce.c | 3 | * Based on drivers/usb/iforce.c |
| 4 | * | 4 | * |
| 5 | * Copyright Yaegashi Takeshi, 2001 | 5 | * Copyright Yaegashi Takeshi, 2001 |
| 6 | * Adrian McMenamin, 2008 | 6 | * Adrian McMenamin, 2008 - 2009 |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
@@ -29,7 +29,7 @@ static void dc_pad_callback(struct mapleq *mq)
| 29 | struct maple_device *mapledev = mq->dev; | 29 | struct maple_device *mapledev = mq->dev; |
| 30 | struct dc_pad *pad = maple_get_drvdata(mapledev); | 30 | struct dc_pad *pad = maple_get_drvdata(mapledev); |
| 31 | struct input_dev *dev = pad->dev; | 31 | struct input_dev *dev = pad->dev; |
| 32 | unsigned char *res = mq->recvbuf; | 32 | unsigned char *res = mq->recvbuf->buf; |
| 33 | 33 | ||
| 34 | buttons = ~le16_to_cpup((__le16 *)(res + 8)); | 34 | buttons = ~le16_to_cpup((__le16 *)(res + 8)); |
| 35 | 35 | ||
diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c
index 22f17a593be7..5aa2361aef95 100644
--- a/drivers/input/keyboard/maple_keyb.c
+++ b/drivers/input/keyboard/maple_keyb.c
@@ -1,8 +1,8 @@
| 1 | /* | 1 | /* |
| 2 | * SEGA Dreamcast keyboard driver | 2 | * SEGA Dreamcast keyboard driver |
| 3 | * Based on drivers/usb/usbkbd.c | 3 | * Based on drivers/usb/usbkbd.c |
| 4 | * Copyright YAEGASHI Takeshi, 2001 | 4 | * Copyright (c) YAEGASHI Takeshi, 2001 |
| 5 | * Porting to 2.6 Copyright Adrian McMenamin, 2007, 2008 | 5 | * Porting to 2.6 Copyright (c) Adrian McMenamin, 2007 - 2009 |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -33,7 +33,7 @@ static DEFINE_MUTEX(maple_keyb_mutex);
| 33 | 33 | ||
| 34 | #define NR_SCANCODES 256 | 34 | #define NR_SCANCODES 256 |
| 35 | 35 | ||
| 36 | MODULE_AUTHOR("YAEGASHI Takeshi, Adrian McMenamin"); | 36 | MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); |
| 37 | MODULE_DESCRIPTION("SEGA Dreamcast keyboard driver"); | 37 | MODULE_DESCRIPTION("SEGA Dreamcast keyboard driver"); |
| 38 | MODULE_LICENSE("GPL"); | 38 | MODULE_LICENSE("GPL"); |
| 39 | 39 | ||
@@ -115,7 +115,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
| 115 | input_event(dev, EV_MSC, MSC_SCAN, code); | 115 | input_event(dev, EV_MSC, MSC_SCAN, code); |
| 116 | input_report_key(dev, keycode, 0); | 116 | input_report_key(dev, keycode, 0); |
| 117 | } else | 117 | } else |
| 118 | printk(KERN_DEBUG "maple_keyb: " | 118 | dev_dbg(&dev->dev, |
| 119 | "Unknown key (scancode %#x) released.", | 119 | "Unknown key (scancode %#x) released.", |
| 120 | code); | 120 | code); |
| 121 | } | 121 | } |
@@ -127,7 +127,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
| 127 | input_event(dev, EV_MSC, MSC_SCAN, code); | 127 | input_event(dev, EV_MSC, MSC_SCAN, code); |
| 128 | input_report_key(dev, keycode, 1); | 128 | input_report_key(dev, keycode, 1); |
| 129 | } else | 129 | } else |
| 130 | printk(KERN_DEBUG "maple_keyb: " | 130 | dev_dbg(&dev->dev, |
| 131 | "Unknown key (scancode %#x) pressed.", | 131 | "Unknown key (scancode %#x) pressed.", |
| 132 | code); | 132 | code); |
| 133 | } | 133 | } |
@@ -140,7 +140,7 @@ static void dc_kbd_callback(struct mapleq *mq)
| 140 | { | 140 | { |
| 141 | struct maple_device *mapledev = mq->dev; | 141 | struct maple_device *mapledev = mq->dev; |
| 142 | struct dc_kbd *kbd = maple_get_drvdata(mapledev); | 142 | struct dc_kbd *kbd = maple_get_drvdata(mapledev); |
| 143 | unsigned long *buf = mq->recvbuf; | 143 | unsigned long *buf = (unsigned long *)(mq->recvbuf->buf); |
| 144 | 144 | ||
| 145 | /* | 145 | /* |
| 146 | * We should always get the lock because the only | 146 | * We should always get the lock because the only |
@@ -159,22 +159,27 @@ static void dc_kbd_callback(struct mapleq *mq)
| 159 | 159 | ||
| 160 | static int probe_maple_kbd(struct device *dev) | 160 | static int probe_maple_kbd(struct device *dev) |
| 161 | { | 161 | { |
| 162 | struct maple_device *mdev = to_maple_dev(dev); | 162 | struct maple_device *mdev; |
| 163 | struct maple_driver *mdrv = to_maple_driver(dev->driver); | 163 | struct maple_driver *mdrv; |
| 164 | int i, error; | 164 | int i, error; |
| 165 | struct dc_kbd *kbd; | 165 | struct dc_kbd *kbd; |
| 166 | struct input_dev *idev; | 166 | struct input_dev *idev; |
| 167 | 167 | ||
| 168 | if (!(mdev->function & MAPLE_FUNC_KEYBOARD)) | 168 | mdev = to_maple_dev(dev); |
| 169 | return -EINVAL; | 169 | mdrv = to_maple_driver(dev->driver); |
| 170 | 170 | ||
| 171 | kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL); | 171 | kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL); |
| 172 | idev = input_allocate_device(); | 172 | if (!kbd) { |
| 173 | if (!kbd || !idev) { | ||
| 174 | error = -ENOMEM; | 173 | error = -ENOMEM; |
| 175 | goto fail; | 174 | goto fail; |
| 176 | } | 175 | } |
| 177 | 176 | ||
| 177 | idev = input_allocate_device(); | ||
| 178 | if (!idev) { | ||
| 179 | error = -ENOMEM; | ||
| 180 | goto fail_idev_alloc; | ||
| 181 | } | ||
| 182 | |||
| 178 | kbd->dev = idev; | 183 | kbd->dev = idev; |
| 179 | memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode)); | 184 | memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode)); |
| 180 | 185 | ||
@@ -195,7 +200,7 @@ static int probe_maple_kbd(struct device *dev)
| 195 | 200 | ||
| 196 | error = input_register_device(idev); | 201 | error = input_register_device(idev); |
| 197 | if (error) | 202 | if (error) |
| 198 | goto fail; | 203 | goto fail_register; |
| 199 | 204 | ||
| 200 | /* Maple polling is locked to VBLANK - which may be just 50/s */ | 205 | /* Maple polling is locked to VBLANK - which may be just 50/s */ |
| 201 | maple_getcond_callback(mdev, dc_kbd_callback, HZ/50, | 206 | maple_getcond_callback(mdev, dc_kbd_callback, HZ/50, |
@@ -207,10 +212,12 @@ static int probe_maple_kbd(struct device *dev)
| 207 | 212 | ||
| 208 | return error; | 213 | return error; |
| 209 | 214 | ||
| 210 | fail: | 215 | fail_register: |
| 216 | maple_set_drvdata(mdev, NULL); | ||
| 211 | input_free_device(idev); | 217 | input_free_device(idev); |
| 218 | fail_idev_alloc: | ||
| 212 | kfree(kbd); | 219 | kfree(kbd); |
| 213 | maple_set_drvdata(mdev, NULL); | 220 | fail: |
| 214 | return error; | 221 | return error; |
| 215 | } | 222 | } |
| 216 | 223 | ||
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 5c8a1bcf7ca7..e1480fb11de3 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -219,6 +219,8 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
| 219 | pdata->scan_timing, priv->iomem_base + KYCR1_OFFS); | 219 | pdata->scan_timing, priv->iomem_base + KYCR1_OFFS); |
| 220 | iowrite16(0, priv->iomem_base + KYOUTDR_OFFS); | 220 | iowrite16(0, priv->iomem_base + KYOUTDR_OFFS); |
| 221 | iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS); | 221 | iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS); |
| 222 | |||
| 223 | device_init_wakeup(&pdev->dev, 1); | ||
| 222 | return 0; | 224 | return 0; |
| 223 | err5: | 225 | err5: |
| 224 | free_irq(irq, pdev); | 226 | free_irq(irq, pdev); |
@@ -253,17 +255,33 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
| 253 | return 0; | 255 | return 0; |
| 254 | } | 256 | } |
| 255 | 257 | ||
| 258 | static int sh_keysc_suspend(struct device *dev) | ||
| 259 | { | ||
| 260 | struct platform_device *pdev = to_platform_device(dev); | ||
| 261 | struct sh_keysc_priv *priv = platform_get_drvdata(pdev); | ||
| 262 | unsigned short value; | ||
| 263 | |||
| 264 | value = ioread16(priv->iomem_base + KYCR1_OFFS); | ||
| 265 | |||
| 266 | if (device_may_wakeup(dev)) | ||
| 267 | value |= 0x80; | ||
| 268 | else | ||
| 269 | value &= ~0x80; | ||
| 256 | 270 | ||
| 257 | #define sh_keysc_suspend NULL | 271 | iowrite16(value, priv->iomem_base + KYCR1_OFFS); |
| 258 | #define sh_keysc_resume NULL | 272 | return 0; |
| 273 | } | ||
| 274 | |||
| 275 | static struct dev_pm_ops sh_keysc_dev_pm_ops = { | ||
| 276 | .suspend = sh_keysc_suspend, | ||
| 277 | }; | ||
| 259 | 278 | ||
| 260 | struct platform_driver sh_keysc_device_driver = { | 279 | struct platform_driver sh_keysc_device_driver = { |
| 261 | .probe = sh_keysc_probe, | 280 | .probe = sh_keysc_probe, |
| 262 | .remove = __devexit_p(sh_keysc_remove), | 281 | .remove = __devexit_p(sh_keysc_remove), |
| 263 | .suspend = sh_keysc_suspend, | ||
| 264 | .resume = sh_keysc_resume, | ||
| 265 | .driver = { | 282 | .driver = { |
| 266 | .name = "sh_keysc", | 283 | .name = "sh_keysc", |
| 284 | .pm = &sh_keysc_dev_pm_ops, | ||
| 267 | } | 285 | } |
| 268 | }; | 286 | }; |
| 269 | 287 | ||
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 043d50fb6ef6..729f899a5cd5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -551,5 +551,15 @@ config MTD_PLATRAM
| 551 | 551 | ||
| 552 | This selection automatically selects the map_ram driver. | 552 | This selection automatically selects the map_ram driver. |
| 553 | 553 | ||
| 554 | endmenu | 554 | config MTD_VMU |
| 555 | tristate "Map driver for Dreamcast VMU" | ||
| 556 | depends on MAPLE | ||
| 557 | help | ||
| 558 | This driver enables access to the Dreamcast Visual Memory Unit (VMU). | ||
| 559 | |||
| 560 | Most Dreamcast users will want to say Y here. | ||
| 555 | 561 | ||
| 562 | To build this as a module select M here, the module will be called | ||
| 563 | vmu-flash. | ||
| 564 | |||
| 565 | endmenu | ||
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 6d9ba35caf11..26b28a7a90b5 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -61,3 +61,4 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
| 61 | obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o | 61 | obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o |
| 62 | obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o | 62 | obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o |
| 63 | obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o | 63 | obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o |
| 64 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o | ||
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
new file mode 100644
index 000000000000..1f73297e7776
--- /dev/null
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -0,0 +1,832 @@
| 1 | /* vmu-flash.c | ||
| 2 | * Driver for SEGA Dreamcast Visual Memory Unit | ||
| 3 | * | ||
| 4 | * Copyright (c) Adrian McMenamin 2002 - 2009 | ||
| 5 | * Copyright (c) Paul Mundt 2001 | ||
| 6 | * | ||
| 7 | * Licensed under version 2 of the | ||
| 8 | * GNU General Public Licence | ||
| 9 | */ | ||
| 10 | #include <linux/init.h> | ||
| 11 | #include <linux/sched.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/maple.h> | ||
| 14 | #include <linux/mtd/mtd.h> | ||
| 15 | #include <linux/mtd/map.h> | ||
| 16 | |||
| 17 | struct vmu_cache { | ||
| 18 | unsigned char *buffer; /* Cache */ | ||
| 19 | unsigned int block; /* Which block was cached */ | ||
| 20 | unsigned long jiffies_atc; /* When was it cached? */ | ||
| 21 | int valid; | ||
| 22 | }; | ||
| 23 | |||
| 24 | struct mdev_part { | ||
| 25 | struct maple_device *mdev; | ||
| 26 | int partition; | ||
| 27 | }; | ||
| 28 | |||
| 29 | struct vmupart { | ||
| 30 | u16 user_blocks; | ||
| 31 | u16 root_block; | ||
| 32 | u16 numblocks; | ||
| 33 | char *name; | ||
| 34 | struct vmu_cache *pcache; | ||
| 35 | }; | ||
| 36 | |||
| 37 | struct memcard { | ||
| 38 | u16 tempA; | ||
| 39 | u16 tempB; | ||
| 40 | u32 partitions; | ||
| 41 | u32 blocklen; | ||
| 42 | u32 writecnt; | ||
| 43 | u32 readcnt; | ||
| 44 | u32 removeable; | ||
| 45 | int partition; | ||
| 46 | int read; | ||
| 47 | unsigned char *blockread; | ||
| 48 | struct vmupart *parts; | ||
| 49 | struct mtd_info *mtd; | ||
| 50 | }; | ||
| 51 | |||
| 52 | struct vmu_block { | ||
| 53 | unsigned int num; /* block number */ | ||
| 54 | unsigned int ofs; /* block offset */ | ||
| 55 | }; | ||
| 56 | |||
| 57 | static struct vmu_block *ofs_to_block(unsigned long src_ofs, | ||
| 58 | struct mtd_info *mtd, int partition) | ||
| 59 | { | ||
| 60 | struct vmu_block *vblock; | ||
| 61 | struct maple_device *mdev; | ||
| 62 | struct memcard *card; | ||
| 63 | struct mdev_part *mpart; | ||
| 64 | int num; | ||
| 65 | |||
| 66 | mpart = mtd->priv; | ||
| 67 | mdev = mpart->mdev; | ||
| 68 | card = maple_get_drvdata(mdev); | ||
| 69 | |||
| 70 | if (src_ofs >= card->parts[partition].numblocks * card->blocklen) | ||
| 71 | goto failed; | ||
| 72 | |||
| 73 | num = src_ofs / card->blocklen; | ||
| 74 | if (num > card->parts[partition].numblocks) | ||
| 75 | goto failed; | ||
| 76 | |||
| 77 | vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL); | ||
| 78 | if (!vblock) | ||
| 79 | goto failed; | ||
| 80 | |||
| 81 | vblock->num = num; | ||
| 82 | vblock->ofs = src_ofs % card->blocklen; | ||
| 83 | return vblock; | ||
| 84 | |||
| 85 | failed: | ||
| 86 | return NULL; | ||
| 87 | } | ||
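The helper above is plain quotient/remainder arithmetic on the device-reported block length. For instance, assuming the 512-byte blocks a standard VMU reports (the real value comes from the device at connect time): src_ofs = 1300 maps to num = 1300 / 512 = 2 and ofs = 1300 % 512 = 276, i.e. byte 276 of block 2; any offset at or beyond numblocks * 512 fails the range check and returns NULL.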
| 88 | |||
| 89 | /* Maple bus callback function for reads */ | ||
| 90 | static void vmu_blockread(struct mapleq *mq) | ||
| 91 | { | ||
| 92 | struct maple_device *mdev; | ||
| 93 | struct memcard *card; | ||
| 94 | |||
| 95 | mdev = mq->dev; | ||
| 96 | card = maple_get_drvdata(mdev); | ||
| 97 | /* copy the read in data */ | ||
| 98 | |||
| 99 | if (unlikely(!card->blockread)) | ||
| 100 | return; | ||
| 101 | |||
| 102 | memcpy(card->blockread, mq->recvbuf->buf + 12, | ||
| 103 | card->blocklen/card->readcnt); | ||
| 104 | |||
| 105 | } | ||
| 106 | |||
| 107 | /* Interface with maple bus to read blocks | ||
| 108 | * caching the results so that other parts | ||
| 109 | * of the driver can access block reads */ | ||
| 110 | static int maple_vmu_read_block(unsigned int num, unsigned char *buf, | ||
| 111 | struct mtd_info *mtd) | ||
| 112 | { | ||
| 113 | struct memcard *card; | ||
| 114 | struct mdev_part *mpart; | ||
| 115 | struct maple_device *mdev; | ||
| 116 | int partition, error = 0, x, wait; | ||
| 117 | unsigned char *blockread = NULL; | ||
| 118 | struct vmu_cache *pcache; | ||
| 119 | __be32 sendbuf; | ||
| 120 | |||
| 121 | mpart = mtd->priv; | ||
| 122 | mdev = mpart->mdev; | ||
| 123 | partition = mpart->partition; | ||
| 124 | card = maple_get_drvdata(mdev); | ||
| 125 | pcache = card->parts[partition].pcache; | ||
| 126 | pcache->valid = 0; | ||
| 127 | |||
| 128 | /* prepare the cache for this block */ | ||
| 129 | if (!pcache->buffer) { | ||
| 130 | pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL); | ||
| 131 | if (!pcache->buffer) { | ||
| 132 | dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due" | ||
| 133 | " to lack of memory\n", mdev->port, | ||
| 134 | mdev->unit); | ||
| 135 | error = -ENOMEM; | ||
| 136 | goto outB; | ||
| 137 | } | ||
| 138 | } | ||
| 139 | |||
| 140 | /* | ||
| 141 | * Reads may be phased - again the hardware spec | ||
| 142 | * supports this - though may not be any devices in | ||
| 143 | * the wild that implement it, but we will here | ||
| 144 | */ | ||
| 145 | for (x = 0; x < card->readcnt; x++) { | ||
| 146 | sendbuf = cpu_to_be32(partition << 24 | x << 16 | num); | ||
| 147 | |||
| 148 | if (atomic_read(&mdev->busy) == 1) { | ||
| 149 | wait_event_interruptible_timeout(mdev->maple_wait, | ||
| 150 | atomic_read(&mdev->busy) == 0, HZ); | ||
| 151 | if (atomic_read(&mdev->busy) == 1) { | ||
| 152 | dev_notice(&mdev->dev, "VMU at (%d, %d)" | ||
| 153 | " is busy\n", mdev->port, mdev->unit); | ||
| 154 | error = -EAGAIN; | ||
| 155 | goto outB; | ||
| 156 | } | ||
| 157 | } | ||
| 158 | |||
| 159 | atomic_set(&mdev->busy, 1); | ||
| 160 | blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL); | ||
| 161 | if (!blockread) { | ||
| 162 | error = -ENOMEM; | ||
| 163 | atomic_set(&mdev->busy, 0); | ||
| 164 | goto outB; | ||
| 165 | } | ||
| 166 | card->blockread = blockread; | ||
| 167 | |||
| 168 | maple_getcond_callback(mdev, vmu_blockread, 0, | ||
| 169 | MAPLE_FUNC_MEMCARD); | ||
| 170 | error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, | ||
| 171 | MAPLE_COMMAND_BREAD, 2, &sendbuf); | ||
| 172 | /* Very long timeouts seem to be needed when box is stressed */ | ||
| 173 | wait = wait_event_interruptible_timeout(mdev->maple_wait, | ||
| 174 | (atomic_read(&mdev->busy) == 0 || | ||
| 175 | atomic_read(&mdev->busy) == 2), HZ * 3); | ||
| 176 | /* | ||
| 177 | * MTD layer does not handle hotplugging well | ||
| 178 | * so have to return errors when VMU is unplugged | ||
| 179 | * in the middle of a read (busy == 2) | ||
| 180 | */ | ||
| 181 | if (error || atomic_read(&mdev->busy) == 2) { | ||
| 182 | if (atomic_read(&mdev->busy) == 2) | ||
| 183 | error = -ENXIO; | ||
| 184 | atomic_set(&mdev->busy, 0); | ||
| 185 | card->blockread = NULL; | ||
| 186 | goto outA; | ||
| 187 | } | ||
| 188 | if (wait == 0 || wait == -ERESTARTSYS) { | ||
| 189 | card->blockread = NULL; | ||
| 190 | atomic_set(&mdev->busy, 0); | ||
| 191 | error = -EIO; | ||
| 192 | list_del_init(&(mdev->mq->list)); | ||
| 193 | kfree(mdev->mq->sendbuf); | ||
| 194 | mdev->mq->sendbuf = NULL; | ||
| 195 | if (wait == -ERESTARTSYS) { | ||
| 196 | dev_warn(&mdev->dev, "VMU read on (%d, %d)" | ||
| 197 | " interrupted on block 0x%X\n", | ||
| 198 | mdev->port, mdev->unit, num); | ||
| 199 | } else | ||
| 200 | dev_notice(&mdev->dev, "VMU read on (%d, %d)" | ||
| 201 | " timed out on block 0x%X\n", | ||
| 202 | mdev->port, mdev->unit, num); | ||
| 203 | goto outA; | ||
| 204 | } | ||
| 205 | |||
| 206 | memcpy(buf + (card->blocklen/card->readcnt) * x, blockread, | ||
| 207 | card->blocklen/card->readcnt); | ||
| 208 | |||
| 209 | memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x, | ||
| 210 | card->blockread, card->blocklen/card->readcnt); | ||
| 211 | card->blockread = NULL; | ||
| 212 | pcache->block = num; | ||
| 213 | pcache->jiffies_atc = jiffies; | ||
| 214 | pcache->valid = 1; | ||
| 215 | kfree(blockread); | ||
| 216 | } | ||
| 217 | |||
| 218 | return error; | ||
| 219 | |||
| 220 | outA: | ||
| 221 | kfree(blockread); | ||
| 222 | outB: | ||
| 223 | return error; | ||
| 224 | } | ||
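To make the phased-read bookkeeping concrete with assumed numbers: if a device reported readcnt = 4 against 512-byte blocks, each iteration would fetch 512 / 4 = 128 bytes, the x << 16 field of sendbuf would select phases 0 through 3, and the partial buffers would be stitched into buf (and the cache) at offsets 0, 128, 256 and 384. A standard VMU is commonly described as reading a whole block in one phase, in which case readcnt is 1 and the loop body runs once per block.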
| 225 | |||
| 226 | /* communicate with maple bus for phased writing */ | ||
| 227 | static int maple_vmu_write_block(unsigned int num, const unsigned char *buf, | ||
| 228 | struct mtd_info *mtd) | ||
| 229 | { | ||
| 230 | struct memcard *card; | ||
| 231 | struct mdev_part *mpart; | ||
| 232 | struct maple_device *mdev; | ||
| 233 | int partition, error, locking, x, phaselen, wait; | ||
| 234 | __be32 *sendbuf; | ||
| 235 | |||
| 236 | mpart = mtd->priv; | ||
| 237 | mdev = mpart->mdev; | ||
| 238 | partition = mpart->partition; | ||
| 239 | card = maple_get_drvdata(mdev); | ||
| 240 | |||
| 241 | phaselen = card->blocklen/card->writecnt; | ||
| 242 | |||
| 243 | sendbuf = kmalloc(phaselen + 4, GFP_KERNEL); | ||
| 244 | if (!sendbuf) { | ||
| 245 | error = -ENOMEM; | ||
| 246 | goto fail_nosendbuf; | ||
| 247 | } | ||
| 248 | for (x = 0; x < card->writecnt; x++) { | ||
| 249 | sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num); | ||
| 250 | memcpy(&sendbuf[1], buf + phaselen * x, phaselen); | ||
| 251 | /* wait until the device is not busy doing something else | ||
| 252 | * or 1 second - whichever is longer */ | ||
| 253 | if (atomic_read(&mdev->busy) == 1) { | ||
| 254 | wait_event_interruptible_timeout(mdev->maple_wait, | ||
| 255 | atomic_read(&mdev->busy) == 0, HZ); | ||
| 256 | if (atomic_read(&mdev->busy) == 1) { | ||
| 257 | error = -EBUSY; | ||
| 258 | dev_notice(&mdev->dev, "VMU write at (%d, %d) " | ||
| 259 | "failed - device is busy\n", | ||
| 260 | mdev->port, mdev->unit); | ||
| 261 | goto fail_nolock; | ||
| 262 | } | ||
| 263 | } | ||
| 264 | atomic_set(&mdev->busy, 1); | ||
| 265 | |||
| 266 | locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, | ||
| 267 | MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf); | ||
| 268 | wait = wait_event_interruptible_timeout(mdev->maple_wait, | ||
| 269 | atomic_read(&mdev->busy) == 0, HZ/10); | ||
| 270 | if (locking) { | ||
| 271 | error = -EIO; | ||
| 272 | atomic_set(&mdev->busy, 0); | ||
| 273 | goto fail_nolock; | ||
| 274 | } | ||
| 275 | if (atomic_read(&mdev->busy) == 2) { | ||
| 276 | atomic_set(&mdev->busy, 0); | ||
| 277 | } else if (wait == 0 || wait == -ERESTARTSYS) { | ||
| 278 | error = -EIO; | ||
| 279 | dev_warn(&mdev->dev, "Write at (%d, %d) of block" | ||
| 280 | " 0x%X at phase %d failed: could not" | ||
| 281 | " communicate with VMU", mdev->port, | ||
| 282 | mdev->unit, num, x); | ||
| 283 | atomic_set(&mdev->busy, 0); | ||
| 284 | kfree(mdev->mq->sendbuf); | ||
| 285 | mdev->mq->sendbuf = NULL; | ||
| 286 | list_del_init(&(mdev->mq->list)); | ||
| 287 | goto fail_nolock; | ||
| 288 | } | ||
| 289 | } | ||
| 290 | kfree(sendbuf); | ||
| 291 | |||
| 292 | return card->blocklen; | ||
| 293 | |||
| 294 | fail_nolock: | ||
| 295 | kfree(sendbuf); | ||
| 296 | fail_nosendbuf: | ||
| 297 | dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port, | ||
| 298 | mdev->unit); | ||
| 299 | return error; | ||
| 300 | } | ||
| 301 | |||
| 302 | /* mtd function to simulate reading byte by byte */ | ||
| 303 | static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval, | ||
| 304 | struct mtd_info *mtd) | ||
| 305 | { | ||
| 306 | struct vmu_block *vblock; | ||
| 307 | struct memcard *card; | ||
| 308 | struct mdev_part *mpart; | ||
| 309 | struct maple_device *mdev; | ||
| 310 | unsigned char *buf, ret; | ||
| 311 | int partition, error; | ||
| 312 | |||
| 313 | mpart = mtd->priv; | ||
| 314 | mdev = mpart->mdev; | ||
| 315 | partition = mpart->partition; | ||
| 316 | card = maple_get_drvdata(mdev); | ||
| 317 | *retval = 0; | ||
| 318 | |||
| 319 | buf = kmalloc(card->blocklen, GFP_KERNEL); | ||
| 320 | if (!buf) { | ||
| 321 | *retval = 1; | ||
| 322 | ret = -ENOMEM; | ||
| 323 | goto finish; | ||
| 324 | } | ||
| 325 | |||
| 326 | vblock = ofs_to_block(ofs, mtd, partition); | ||
| 327 | if (!vblock) { | ||
| 328 | *retval = 3; | ||
| 329 | ret = -ENOMEM; | ||
| 330 | goto out_buf; | ||
| 331 | } | ||
| 332 | |||
| 333 | error = maple_vmu_read_block(vblock->num, buf, mtd); | ||
| 334 | if (error) { | ||
| 335 | ret = error; | ||
| 336 | *retval = 2; | ||
| 337 | goto out_vblock; | ||
| 338 | } | ||
| 339 | |||
| 340 | ret = buf[vblock->ofs]; | ||
| 341 | |||
| 342 | out_vblock: | ||
| 343 | kfree(vblock); | ||
| 344 | out_buf: | ||
| 345 | kfree(buf); | ||
| 346 | finish: | ||
| 347 | return ret; | ||
| 348 | } | ||
| 349 | |||
| 350 | /* mtd higher order function to read flash */ | ||
| 351 | static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
| 352 | size_t *retlen, u_char *buf) | ||
| 353 | { | ||
| 354 | struct maple_device *mdev; | ||
| 355 | struct memcard *card; | ||
| 356 | struct mdev_part *mpart; | ||
| 357 | struct vmu_cache *pcache; | ||
| 358 | struct vmu_block *vblock; | ||
| 359 | int index = 0, retval, partition, leftover, numblocks; | ||
| 360 | unsigned char cx; | ||
| 361 | |||
| 362 | if (len < 1) | ||
| 363 | return -EIO; | ||
| 364 | |||
| 365 | mpart = mtd->priv; | ||
| 366 | mdev = mpart->mdev; | ||
| 367 | partition = mpart->partition; | ||
| 368 | card = maple_get_drvdata(mdev); | ||
| 369 | |||
| 370 | numblocks = card->parts[partition].numblocks; | ||
| 371 | if (from + len > numblocks * card->blocklen) | ||
| 372 | len = numblocks * card->blocklen - from; | ||
| 373 | if (len == 0) | ||
| 374 | return -EIO; | ||
| 375 | /* Have we cached this bit already? */ | ||
| 376 | pcache = card->parts[partition].pcache; | ||
| 377 | do { | ||
| 378 | vblock = ofs_to_block(from + index, mtd, partition); | ||
| 379 | if (!vblock) | ||
| 380 | return -ENOMEM; | ||
| 381 | /* Have we cached this and is the cache valid and timely? */ | ||
| 382 | if (pcache->valid && | ||
| 383 | time_before(jiffies, pcache->jiffies_atc + HZ) && | ||
| 384 | (pcache->block == vblock->num)) { | ||
| 385 | /* we have cached it, so do necessary copying */ | ||
| 386 | leftover = card->blocklen - vblock->ofs; | ||
| 387 | if (vblock->ofs + len - index < card->blocklen) { | ||
| 388 | /* only a bit of this block to copy */ | ||
| 389 | memcpy(buf + index, | ||
| 390 | pcache->buffer + vblock->ofs, | ||
| 391 | len - index); | ||
| 392 | index = len; | ||
| 393 | } else { | ||
| 394 | /* otherwise copy remainder of whole block */ | ||
| 395 | memcpy(buf + index, pcache->buffer + | ||
| 396 | vblock->ofs, leftover); | ||
| 397 | index += leftover; | ||
| 398 | } | ||
| 399 | } else { | ||
| 400 | /* | ||
| 401 | * Not cached so read one byte - | ||
| 402 | * but cache the rest of the block | ||
| 403 | */ | ||
| 404 | cx = vmu_flash_read_char(from + index, &retval, mtd); | ||
| 405 | if (retval) { | ||
| 406 | *retlen = index; | ||
| 407 | kfree(vblock); | ||
| 408 | return cx; | ||
| 409 | } | ||
| 410 | memset(buf + index, cx, 1); | ||
| 411 | index++; | ||
| 412 | } | ||
| 413 | kfree(vblock); | ||
| 414 | } while (len > index); | ||
| 415 | *retlen = index; | ||
| 416 | |||
| 417 | return 0; | ||
| 418 | } | ||
| 419 | |||
| 420 | static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
| 421 | size_t *retlen, const u_char *buf) | ||
| 422 | { | ||
| 423 | struct maple_device *mdev; | ||
| 424 | struct memcard *card; | ||
| 425 | struct mdev_part *mpart; | ||
| 426 | int index = 0, partition, error = 0, numblocks; | ||
| 427 | struct vmu_cache *pcache; | ||
| 428 | struct vmu_block *vblock; | ||
| 429 | unsigned char *buffer; | ||
| 430 | |||
| 431 | mpart = mtd->priv; | ||
| 432 | mdev = mpart->mdev; | ||
| 433 | partition = mpart->partition; | ||
| 434 | card = maple_get_drvdata(mdev); | ||
| 435 | |||
| 436 | /* simple sanity checks */ | ||
| 437 | if (len < 1) { | ||
| 438 | error = -EIO; | ||
| 439 | goto failed; | ||
| 440 | } | ||
| 441 | numblocks = card->parts[partition].numblocks; | ||
| 442 | if (to + len > numblocks * card->blocklen) | ||
| 443 | len = numblocks * card->blocklen - to; | ||
| 444 | if (len == 0) { | ||
| 445 | error = -EIO; | ||
| 446 | goto failed; | ||
| 447 | } | ||
| 448 | |||
| 449 | vblock = ofs_to_block(to, mtd, partition); | ||
| 450 | if (!vblock) { | ||
| 451 | error = -ENOMEM; | ||
| 452 | goto failed; | ||
| 453 | } | ||
| 454 | |||
| 455 | buffer = kmalloc(card->blocklen, GFP_KERNEL); | ||
| 456 | if (!buffer) { | ||
| 457 | error = -ENOMEM; | ||
| 458 | goto fail_buffer; | ||
| 459 | } | ||
| 460 | |||
| 461 | do { | ||
| 462 | /* Read in the block we are to write to */ | ||
| 463 | error = maple_vmu_read_block(vblock->num, buffer, mtd); | ||
| 464 | if (error) | ||
| 465 | goto fail_io; | ||
| 466 | |||
| 467 | do { | ||
| 468 | buffer[vblock->ofs] = buf[index]; | ||
| 469 | vblock->ofs++; | ||
| 470 | index++; | ||
| 471 | if (index >= len) | ||
| 472 | break; | ||
| 473 | } while (vblock->ofs < card->blocklen); | ||
| 474 | |||
| 475 | /* write out new buffer */ | ||
| 476 | error = maple_vmu_write_block(vblock->num, buffer, mtd); | ||
| 477 | /* invalidate the cache */ | ||
| 478 | pcache = card->parts[partition].pcache; | ||
| 479 | pcache->valid = 0; | ||
| 480 | |||
| 481 | if (error != card->blocklen) | ||
| 482 | goto fail_io; | ||
| 483 | |||
| 484 | vblock->num++; | ||
| 485 | vblock->ofs = 0; | ||
| 486 | } while (len > index); | ||
| 487 | |||
| 488 | kfree(buffer); | ||
| 489 | *retlen = index; | ||
| 490 | kfree(vblock); | ||
| 491 | return 0; | ||
| 492 | |||
| 493 | fail_io: | ||
| 494 | kfree(buffer); | ||
| 495 | fail_buffer: | ||
| 496 | kfree(vblock); | ||
| 497 | failed: | ||
| 498 | dev_err(&mdev->dev, "VMU write failing with error %d\n", error); | ||
| 499 | return error; | ||
| 500 | } | ||
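Because the card is only block-addressable, the write path above is read-modify-write at block granularity. With the assumed 512-byte blocks, a 10-byte write at to = 508 reads block 0, patches bytes 508-511, writes the whole block back, then reads block 1, patches bytes 0-5, and writes that back - two full-block writes (each invalidating the partition cache) to land 10 bytes.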
| 501 | |||
| 502 | static void vmu_flash_sync(struct mtd_info *mtd) | ||
| 503 | { | ||
| 504 | /* Do nothing here */ | ||
| 505 | } | ||
| 506 | |||
| 507 | /* Maple bus callback function to recursively query hardware details */ | ||
| 508 | static void vmu_queryblocks(struct mapleq *mq) | ||
| 509 | { | ||
| 510 | struct maple_device *mdev; | ||
| 511 | unsigned short *res; | ||
| 512 | struct memcard *card; | ||
| 513 | __be32 partnum; | ||
| 514 | struct vmu_cache *pcache; | ||
| 515 | struct mdev_part *mpart; | ||
| 516 | struct mtd_info *mtd_cur; | ||
| 517 | struct vmupart *part_cur; | ||
| 518 | int error; | ||
| 519 | |||
| 520 | mdev = mq->dev; | ||
| 521 | card = maple_get_drvdata(mdev); | ||
| 522 | res = (unsigned short *) (mq->recvbuf->buf); | ||
| 523 | card->tempA = res[12]; | ||
| 524 | card->tempB = res[6]; | ||
| 525 | |||
| 526 | dev_info(&mdev->dev, "VMU device at partition %d has %d user " | ||
| 527 | "blocks with a root block at %d\n", card->partition, | ||
| 528 | card->tempA, card->tempB); | ||
| 529 | |||
| 530 | part_cur = &card->parts[card->partition]; | ||
| 531 | part_cur->user_blocks = card->tempA; | ||
| 532 | part_cur->root_block = card->tempB; | ||
| 533 | part_cur->numblocks = card->tempB + 1; | ||
| 534 | part_cur->name = kmalloc(12, GFP_KERNEL); | ||
| 535 | if (!part_cur->name) | ||
| 536 | goto fail_name; | ||
| 537 | |||
| 538 | sprintf(part_cur->name, "vmu%d.%d.%d", | ||
| 539 | mdev->port, mdev->unit, card->partition); | ||
| 540 | mtd_cur = &card->mtd[card->partition]; | ||
| 541 | mtd_cur->name = part_cur->name; | ||
| 542 | mtd_cur->type = 8; | ||
| 543 | mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; | ||
| 544 | mtd_cur->size = part_cur->numblocks * card->blocklen; | ||
| 545 | mtd_cur->erasesize = card->blocklen; | ||
| 546 | mtd_cur->write = vmu_flash_write; | ||
| 547 | mtd_cur->read = vmu_flash_read; | ||
| 548 | mtd_cur->sync = vmu_flash_sync; | ||
| 549 | mtd_cur->writesize = card->blocklen; | ||
| 550 | |||
| 551 | mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); | ||
| 552 | if (!mpart) | ||
| 553 | goto fail_mpart; | ||
| 554 | |||
| 555 | mpart->mdev = mdev; | ||
| 556 | mpart->partition = card->partition; | ||
| 557 | mtd_cur->priv = mpart; | ||
| 558 | mtd_cur->owner = THIS_MODULE; | ||
| 559 | |||
| 560 | pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL); | ||
| 561 | if (!pcache) | ||
| 562 | goto fail_cache_create; | ||
| 563 | part_cur->pcache = pcache; | ||
| 564 | |||
| 565 | error = add_mtd_device(mtd_cur); | ||
| 566 | if (error) | ||
| 567 | goto fail_mtd_register; | ||
| 568 | |||
| 569 | maple_getcond_callback(mdev, NULL, 0, | ||
| 570 | MAPLE_FUNC_MEMCARD); | ||
| 571 | |||
| 572 | /* | ||
| 573 | * Set up a recursive call to the (probably theoretical) | ||
| 574 | * second or more partition | ||
| 575 | */ | ||
| 576 | if (++card->partition < card->partitions) { | ||
| 577 | partnum = cpu_to_be32(card->partition << 24); | ||
| 578 | maple_getcond_callback(mdev, vmu_queryblocks, 0, | ||
| 579 | MAPLE_FUNC_MEMCARD); | ||
| 580 | maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, | ||
| 581 | MAPLE_COMMAND_GETMINFO, 2, &partnum); | ||
| 582 | } | ||
| 583 | return; | ||
| 584 | |||
| 585 | fail_mtd_register: | ||
| 586 | dev_err(&mdev->dev, "Could not register maple device at (%d, %d), " | ||
| 587 | "error is 0x%X\n", mdev->port, mdev->unit, error); | ||
| 588 | for (error = 0; error <= card->partition; error++) { | ||
| 589 | kfree(((card->parts)[error]).pcache); | ||
| 590 | ((card->parts)[error]).pcache = NULL; | ||
| 591 | } | ||
| 592 | fail_cache_create: | ||
| 593 | fail_mpart: | ||
| 594 | for (error = 0; error <= card->partition; error++) { | ||
| 595 | kfree(((card->mtd)[error]).priv); | ||
| 596 | ((card->mtd)[error]).priv = NULL; | ||
| 597 | } | ||
| 598 | maple_getcond_callback(mdev, NULL, 0, | ||
| 599 | MAPLE_FUNC_MEMCARD); | ||
| 600 | kfree(part_cur->name); | ||
| 601 | fail_name: | ||
| 602 | return; | ||
| 603 | } | ||
| 604 | |||
| 605 | /* Handles very basic info about the flash, queries for details */ | ||
| 606 | static int __devinit vmu_connect(struct maple_device *mdev) | ||
| 607 | { | ||
| 608 | unsigned long test_flash_data, basic_flash_data; | ||
| 609 | int c, error; | ||
| 610 | struct memcard *card; | ||
| 611 | u32 partnum = 0; | ||
| 612 | |||
| 613 | test_flash_data = be32_to_cpu(mdev->devinfo.function); | ||
| 614 | /* Need to count how many bits are set - to find out which | ||
| 615 | * function_data element has details of the memory card: | ||
| 616 | * using Brian Kernighan's/Peter Wegner's method */ | ||
| 617 | for (c = 0; test_flash_data; c++) | ||
| 618 | test_flash_data &= test_flash_data - 1; | ||
| 619 | |||
| 620 | basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]); | ||
| 621 | |||
| 622 | card = kmalloc(sizeof(struct memcard), GFP_KERNEL); | ||
| 623 | if (!card) { | ||
| 624 | error = -ENOMEM; | ||
| 625 | goto fail_nomem; | ||
| 626 | } | ||
| 627 | |||
| 628 | card->partitions = (basic_flash_data >> 24 & 0xFF) + 1; | ||
| 629 | card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5; | ||
| 630 | card->writecnt = basic_flash_data >> 12 & 0xF; | ||
| 631 | card->readcnt = basic_flash_data >> 8 & 0xF; | ||
| 632 | card->removeable = basic_flash_data >> 7 & 1; | ||
| 633 | |||
| 634 | card->partition = 0; | ||
| 635 | |||
| 636 | /* | ||
| 637 | * Not sure there are actually any multi-partition devices in the | ||
| 638 | * real world, but the hardware supports them, so we will too | ||
| 639 | */ | ||
| 640 | card->parts = kmalloc(sizeof(struct vmupart) * card->partitions, | ||
| 641 | GFP_KERNEL); | ||
| 642 | if (!card->parts) { | ||
| 643 | error = -ENOMEM; | ||
| 644 | goto fail_partitions; | ||
| 645 | } | ||
| 646 | |||
| 647 | card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions, | ||
| 648 | GFP_KERNEL); | ||
| 649 | if (!card->mtd) { | ||
| 650 | error = -ENOMEM; | ||
| 651 | goto fail_mtd_info; | ||
| 652 | } | ||
| 653 | |||
| 654 | maple_set_drvdata(mdev, card); | ||
| 655 | |||
| 656 | /* | ||
| 657 | * We want to trap the meminfo reply rather than getcond, | ||
| 658 | * so set the interval to zero, but rely on the maple bus | ||
| 659 | * driver to pass back the results of the meminfo call | ||
| 660 | */ | ||
| 661 | maple_getcond_callback(mdev, vmu_queryblocks, 0, | ||
| 662 | MAPLE_FUNC_MEMCARD); | ||
| 663 | |||
| 664 | /* Make sure we are clear to go */ | ||
| 665 | if (atomic_read(&mdev->busy) == 1) { | ||
| 666 | wait_event_interruptible_timeout(mdev->maple_wait, | ||
| 667 | atomic_read(&mdev->busy) == 0, HZ); | ||
| 668 | if (atomic_read(&mdev->busy) == 1) { | ||
| 669 | dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n", | ||
| 670 | mdev->port, mdev->unit); | ||
| 671 | error = -EAGAIN; | ||
| 672 | goto fail_device_busy; | ||
| 673 | } | ||
| 674 | } | ||
| 675 | |||
| 676 | atomic_set(&mdev->busy, 1); | ||
| 677 | |||
| 678 | /* | ||
| 679 | * Set up the minfo call: vmu_queryblocks will handle | ||
| 680 | * the information passed back | ||
| 681 | */ | ||
| 682 | error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD, | ||
| 683 | MAPLE_COMMAND_GETMINFO, 2, &partnum); | ||
| 684 | if (error) { | ||
| 685 | dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)" | ||
| 686 | " error is 0x%X\n", mdev->port, mdev->unit, error); | ||
| 687 | goto fail_mtd_info; | ||
| 688 | } | ||
| 689 | return 0; | ||
| 690 | |||
| 691 | fail_device_busy: | ||
| 692 | kfree(card->mtd); | ||
| 693 | fail_mtd_info: | ||
| 694 | kfree(card->parts); | ||
| 695 | fail_partitions: | ||
| 696 | kfree(card); | ||
| 697 | fail_nomem: | ||
| 698 | return error; | ||
| 699 | } | ||
| 700 | |||
| 701 | static void __devexit vmu_disconnect(struct maple_device *mdev) | ||
| 702 | { | ||
| 703 | struct memcard *card; | ||
| 704 | struct mdev_part *mpart; | ||
| 705 | int x; | ||
| 706 | |||
| 707 | mdev->callback = NULL; | ||
| 708 | card = maple_get_drvdata(mdev); | ||
| 709 | for (x = 0; x < card->partitions; x++) { | ||
| 710 | mpart = ((card->mtd)[x]).priv; | ||
| 711 | mpart->mdev = NULL; | ||
| 712 | del_mtd_device(&((card->mtd)[x])); | ||
| 713 | kfree(((card->parts)[x]).name); | ||
| 714 | } | ||
| 715 | kfree(card->parts); | ||
| 716 | kfree(card->mtd); | ||
| 717 | kfree(card); | ||
| 718 | } | ||
| 719 | |||
| 720 | /* Callback to handle eccentricities of both the mtd subsystem | ||
| 721 | * and the general flakiness of Dreamcast VMUs | ||
| 722 | */ | ||
| 723 | static int vmu_can_unload(struct maple_device *mdev) | ||
| 724 | { | ||
| 725 | struct memcard *card; | ||
| 726 | int x; | ||
| 727 | struct mtd_info *mtd; | ||
| 728 | |||
| 729 | card = maple_get_drvdata(mdev); | ||
| 730 | for (x = 0; x < card->partitions; x++) { | ||
| 731 | mtd = &((card->mtd)[x]); | ||
| 732 | if (mtd->usecount > 0) | ||
| 733 | return 0; | ||
| 734 | } | ||
| 735 | return 1; | ||
| 736 | } | ||
| 737 | |||
| 738 | #define ERRSTR "VMU at (%d, %d) file error -" | ||
| 739 | |||
| 740 | static void vmu_file_error(struct maple_device *mdev, void *recvbuf) | ||
| 741 | { | ||
| 742 | enum maple_file_errors error = ((int *)recvbuf)[1]; | ||
| 743 | |||
| 744 | switch (error) { | ||
| 745 | |||
| 746 | case MAPLE_FILEERR_INVALID_PARTITION: | ||
| 747 | dev_notice(&mdev->dev, ERRSTR " invalid partition number\n", | ||
| 748 | mdev->port, mdev->unit); | ||
| 749 | break; | ||
| 750 | |||
| 751 | case MAPLE_FILEERR_PHASE_ERROR: | ||
| 752 | dev_notice(&mdev->dev, ERRSTR " phase error\n", | ||
| 753 | mdev->port, mdev->unit); | ||
| 754 | break; | ||
| 755 | |||
| 756 | case MAPLE_FILEERR_INVALID_BLOCK: | ||
| 757 | dev_notice(&mdev->dev, ERRSTR " invalid block number\n", | ||
| 758 | mdev->port, mdev->unit); | ||
| 759 | break; | ||
| 760 | |||
| 761 | case MAPLE_FILEERR_WRITE_ERROR: | ||
| 762 | dev_notice(&mdev->dev, ERRSTR " write error\n", | ||
| 763 | mdev->port, mdev->unit); | ||
| 764 | break; | ||
| 765 | |||
| 766 | case MAPLE_FILEERR_INVALID_WRITE_LENGTH: | ||
| 767 | dev_notice(&mdev->dev, ERRSTR " invalid write length\n", | ||
| 768 | mdev->port, mdev->unit); | ||
| 769 | break; | ||
| 770 | |||
| 771 | case MAPLE_FILEERR_BAD_CRC: | ||
| 772 | dev_notice(&mdev->dev, ERRSTR " bad CRC\n", | ||
| 773 | mdev->port, mdev->unit); | ||
| 774 | break; | ||
| 775 | |||
| 776 | default: | ||
| 777 | dev_notice(&mdev->dev, ERRSTR " 0x%X\n", | ||
| 778 | mdev->port, mdev->unit, error); | ||
| 779 | } | ||
| 780 | } | ||
| 781 | |||
| 782 | |||
| 783 | static int __devinit probe_maple_vmu(struct device *dev) | ||
| 784 | { | ||
| 785 | int error; | ||
| 786 | struct maple_device *mdev = to_maple_dev(dev); | ||
| 787 | struct maple_driver *mdrv = to_maple_driver(dev->driver); | ||
| 788 | |||
| 789 | mdev->can_unload = vmu_can_unload; | ||
| 790 | mdev->fileerr_handler = vmu_file_error; | ||
| 791 | mdev->driver = mdrv; | ||
| 792 | |||
| 793 | error = vmu_connect(mdev); | ||
| 794 | if (error) | ||
| 795 | return error; | ||
| 796 | |||
| 797 | return 0; | ||
| 798 | } | ||
| 799 | |||
| 800 | static int __devexit remove_maple_vmu(struct device *dev) | ||
| 801 | { | ||
| 802 | struct maple_device *mdev = to_maple_dev(dev); | ||
| 803 | |||
| 804 | vmu_disconnect(mdev); | ||
| 805 | return 0; | ||
| 806 | } | ||
| 807 | |||
| 808 | static struct maple_driver vmu_flash_driver = { | ||
| 809 | .function = MAPLE_FUNC_MEMCARD, | ||
| 810 | .drv = { | ||
| 811 | .name = "Dreamcast_visual_memory", | ||
| 812 | .probe = probe_maple_vmu, | ||
| 813 | .remove = __devexit_p(remove_maple_vmu), | ||
| 814 | }, | ||
| 815 | }; | ||
| 816 | |||
| 817 | static int __init vmu_flash_map_init(void) | ||
| 818 | { | ||
| 819 | return maple_driver_register(&vmu_flash_driver); | ||
| 820 | } | ||
| 821 | |||
| 822 | static void __exit vmu_flash_map_exit(void) | ||
| 823 | { | ||
| 824 | maple_driver_unregister(&vmu_flash_driver); | ||
| 825 | } | ||
| 826 | |||
| 827 | module_init(vmu_flash_map_init); | ||
| 828 | module_exit(vmu_flash_map_exit); | ||
| 829 | |||
| 830 | MODULE_LICENSE("GPL"); | ||
| 831 | MODULE_AUTHOR("Adrian McMenamin"); | ||
| 832 | MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory"); | ||
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 1c3fc6b428e9..4898f7fe8518 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | #include <asm/rtc.h> | 28 | #include <asm/rtc.h> |
| 29 | 29 | ||
| 30 | #define DRV_NAME "sh-rtc" | 30 | #define DRV_NAME "sh-rtc" |
| 31 | #define DRV_VERSION "0.2.0" | 31 | #define DRV_VERSION "0.2.1" |
| 32 | 32 | ||
| 33 | #define RTC_REG(r) ((r) * rtc_reg_size) | 33 | #define RTC_REG(r) ((r) * rtc_reg_size) |
| 34 | 34 | ||
| @@ -99,56 +99,51 @@ struct sh_rtc { | |||
| 99 | unsigned short periodic_freq; | 99 | unsigned short periodic_freq; |
| 100 | }; | 100 | }; |
| 101 | 101 | ||
| 102 | static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) | 102 | static int __sh_rtc_interrupt(struct sh_rtc *rtc) |
| 103 | { | 103 | { |
| 104 | struct sh_rtc *rtc = dev_id; | 104 | unsigned int tmp, pending; |
| 105 | unsigned int tmp; | ||
| 106 | |||
| 107 | spin_lock(&rtc->lock); | ||
| 108 | 105 | ||
| 109 | tmp = readb(rtc->regbase + RCR1); | 106 | tmp = readb(rtc->regbase + RCR1); |
| 107 | pending = tmp & RCR1_CF; | ||
| 110 | tmp &= ~RCR1_CF; | 108 | tmp &= ~RCR1_CF; |
| 111 | writeb(tmp, rtc->regbase + RCR1); | 109 | writeb(tmp, rtc->regbase + RCR1); |
| 112 | 110 | ||
| 113 | /* Users have requested One x Second IRQ */ | 111 | /* Users have requested One x Second IRQ */ |
| 114 | if (rtc->periodic_freq & PF_OXS) | 112 | if (pending && rtc->periodic_freq & PF_OXS) |
| 115 | rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); | 113 | rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); |
| 116 | 114 | ||
| 117 | spin_unlock(&rtc->lock); | 115 | return pending; |
| 118 | |||
| 119 | return IRQ_HANDLED; | ||
| 120 | } | 116 | } |
| 121 | 117 | ||
| 122 | static irqreturn_t sh_rtc_alarm(int irq, void *dev_id) | 118 | static int __sh_rtc_alarm(struct sh_rtc *rtc) |
| 123 | { | 119 | { |
| 124 | struct sh_rtc *rtc = dev_id; | 120 | unsigned int tmp, pending; |
| 125 | unsigned int tmp; | ||
| 126 | |||
| 127 | spin_lock(&rtc->lock); | ||
| 128 | 121 | ||
| 129 | tmp = readb(rtc->regbase + RCR1); | 122 | tmp = readb(rtc->regbase + RCR1); |
| 123 | pending = tmp & RCR1_AF; | ||
| 130 | tmp &= ~(RCR1_AF | RCR1_AIE); | 124 | tmp &= ~(RCR1_AF | RCR1_AIE); |
| 131 | writeb(tmp, rtc->regbase + RCR1); | 125 | writeb(tmp, rtc->regbase + RCR1); |
| 132 | |||
| 133 | rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); | ||
| 134 | 126 | ||
| 135 | spin_unlock(&rtc->lock); | 127 | if (pending) |
| 128 | rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); | ||
| 136 | 129 | ||
| 137 | return IRQ_HANDLED; | 130 | return pending; |
| 138 | } | 131 | } |
| 139 | 132 | ||
| 140 | static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) | 133 | static int __sh_rtc_periodic(struct sh_rtc *rtc) |
| 141 | { | 134 | { |
| 142 | struct sh_rtc *rtc = dev_id; | ||
| 143 | struct rtc_device *rtc_dev = rtc->rtc_dev; | 135 | struct rtc_device *rtc_dev = rtc->rtc_dev; |
| 144 | unsigned int tmp; | 136 | struct rtc_task *irq_task; |
| 145 | 137 | unsigned int tmp, pending; | |
| 146 | spin_lock(&rtc->lock); | ||
| 147 | 138 | ||
| 148 | tmp = readb(rtc->regbase + RCR2); | 139 | tmp = readb(rtc->regbase + RCR2); |
| 140 | pending = tmp & RCR2_PEF; | ||
| 149 | tmp &= ~RCR2_PEF; | 141 | tmp &= ~RCR2_PEF; |
| 150 | writeb(tmp, rtc->regbase + RCR2); | 142 | writeb(tmp, rtc->regbase + RCR2); |
| 151 | 143 | ||
| 144 | if (!pending) | ||
| 145 | return 0; | ||
| 146 | |||
| 152 | /* With half-period enabled, one tick is skipped and the next is notified */ | 147 | /* With half-period enabled, one tick is skipped and the next is notified */ |
| 153 | if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT)) | 148 | if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT)) |
| 154 | rtc->periodic_freq &= ~PF_COUNT; | 149 | rtc->periodic_freq &= ~PF_COUNT; |
| @@ -157,16 +152,65 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) | |||
| 157 | rtc->periodic_freq |= PF_COUNT; | 152 | rtc->periodic_freq |= PF_COUNT; |
| 158 | if (rtc->periodic_freq & PF_KOU) { | 153 | if (rtc->periodic_freq & PF_KOU) { |
| 159 | spin_lock(&rtc_dev->irq_task_lock); | 154 | spin_lock(&rtc_dev->irq_task_lock); |
| 160 | if (rtc_dev->irq_task) | 155 | irq_task = rtc_dev->irq_task; |
| 161 | rtc_dev->irq_task->func(rtc_dev->irq_task->private_data); | 156 | if (irq_task) |
| 157 | irq_task->func(irq_task->private_data); | ||
| 162 | spin_unlock(&rtc_dev->irq_task_lock); | 158 | spin_unlock(&rtc_dev->irq_task_lock); |
| 163 | } else | 159 | } else |
| 164 | rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); | 160 | rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); |
| 165 | } | 161 | } |
| 166 | 162 | ||
| 163 | return pending; | ||
| 164 | } | ||
| 165 | |||
| 166 | static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) | ||
| 167 | { | ||
| 168 | struct sh_rtc *rtc = dev_id; | ||
| 169 | int ret; | ||
| 170 | |||
| 171 | spin_lock(&rtc->lock); | ||
| 172 | ret = __sh_rtc_interrupt(rtc); | ||
| 173 | spin_unlock(&rtc->lock); | ||
| 174 | |||
| 175 | return IRQ_RETVAL(ret); | ||
| 176 | } | ||
| 177 | |||
| 178 | static irqreturn_t sh_rtc_alarm(int irq, void *dev_id) | ||
| 179 | { | ||
| 180 | struct sh_rtc *rtc = dev_id; | ||
| 181 | int ret; | ||
| 182 | |||
| 183 | spin_lock(&rtc->lock); | ||
| 184 | ret = __sh_rtc_alarm(rtc); | ||
| 185 | spin_unlock(&rtc->lock); | ||
| 186 | |||
| 187 | return IRQ_RETVAL(ret); | ||
| 188 | } | ||
| 189 | |||
| 190 | static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) | ||
| 191 | { | ||
| 192 | struct sh_rtc *rtc = dev_id; | ||
| 193 | int ret; | ||
| 194 | |||
| 195 | spin_lock(&rtc->lock); | ||
| 196 | ret = __sh_rtc_periodic(rtc); | ||
| 167 | spin_unlock(&rtc->lock); | 197 | spin_unlock(&rtc->lock); |
| 168 | 198 | ||
| 169 | return IRQ_HANDLED; | 199 | return IRQ_RETVAL(ret); |
| 200 | } | ||
| 201 | |||
| 202 | static irqreturn_t sh_rtc_shared(int irq, void *dev_id) | ||
| 203 | { | ||
| 204 | struct sh_rtc *rtc = dev_id; | ||
| 205 | int ret; | ||
| 206 | |||
| 207 | spin_lock(&rtc->lock); | ||
| 208 | ret = __sh_rtc_interrupt(rtc); | ||
| 209 | ret |= __sh_rtc_alarm(rtc); | ||
| 210 | ret |= __sh_rtc_periodic(rtc); | ||
| 211 | spin_unlock(&rtc->lock); | ||
| 212 | |||
| 213 | return IRQ_RETVAL(ret); | ||
| 170 | } | 214 | } |
| 171 | 215 | ||
| 172 | static inline void sh_rtc_setpie(struct device *dev, unsigned int enable) | 216 | static inline void sh_rtc_setpie(struct device *dev, unsigned int enable) |
| @@ -275,6 +319,25 @@ static int sh_rtc_proc(struct device *dev, struct seq_file *seq) | |||
| 275 | return 0; | 319 | return 0; |
| 276 | } | 320 | } |
| 277 | 321 | ||
| 322 | static inline void sh_rtc_setcie(struct device *dev, unsigned int enable) | ||
| 323 | { | ||
| 324 | struct sh_rtc *rtc = dev_get_drvdata(dev); | ||
| 325 | unsigned int tmp; | ||
| 326 | |||
| 327 | spin_lock_irq(&rtc->lock); | ||
| 328 | |||
| 329 | tmp = readb(rtc->regbase + RCR1); | ||
| 330 | |||
| 331 | if (!enable) | ||
| 332 | tmp &= ~RCR1_CIE; | ||
| 333 | else | ||
| 334 | tmp |= RCR1_CIE; | ||
| 335 | |||
| 336 | writeb(tmp, rtc->regbase + RCR1); | ||
| 337 | |||
| 338 | spin_unlock_irq(&rtc->lock); | ||
| 339 | } | ||
| 340 | |||
| 278 | static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 341 | static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) |
| 279 | { | 342 | { |
| 280 | struct sh_rtc *rtc = dev_get_drvdata(dev); | 343 | struct sh_rtc *rtc = dev_get_drvdata(dev); |
| @@ -291,9 +354,11 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
| 291 | break; | 354 | break; |
| 292 | case RTC_UIE_OFF: | 355 | case RTC_UIE_OFF: |
| 293 | rtc->periodic_freq &= ~PF_OXS; | 356 | rtc->periodic_freq &= ~PF_OXS; |
| 357 | sh_rtc_setcie(dev, 0); | ||
| 294 | break; | 358 | break; |
| 295 | case RTC_UIE_ON: | 359 | case RTC_UIE_ON: |
| 296 | rtc->periodic_freq |= PF_OXS; | 360 | rtc->periodic_freq |= PF_OXS; |
| 361 | sh_rtc_setcie(dev, 1); | ||
| 297 | break; | 362 | break; |
| 298 | case RTC_IRQP_READ: | 363 | case RTC_IRQP_READ: |
| 299 | ret = put_user(rtc->rtc_dev->irq_freq, | 364 | ret = put_user(rtc->rtc_dev->irq_freq, |
| @@ -356,18 +421,17 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
| 356 | tm->tm_sec--; | 421 | tm->tm_sec--; |
| 357 | #endif | 422 | #endif |
| 358 | 423 | ||
| 424 | /* only keep the carry interrupt enabled if UIE is on */ | ||
| 425 | if (!(rtc->periodic_freq & PF_OXS)) | ||
| 426 | sh_rtc_setcie(dev, 0); | ||
| 427 | |||
| 359 | dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, " | 428 | dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, " |
| 360 | "mday=%d, mon=%d, year=%d, wday=%d\n", | 429 | "mday=%d, mon=%d, year=%d, wday=%d\n", |
| 361 | __func__, | 430 | __func__, |
| 362 | tm->tm_sec, tm->tm_min, tm->tm_hour, | 431 | tm->tm_sec, tm->tm_min, tm->tm_hour, |
| 363 | tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday); | 432 | tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday); |
| 364 | 433 | ||
| 365 | if (rtc_valid_tm(tm) < 0) { | 434 | return rtc_valid_tm(tm); |
| 366 | dev_err(dev, "invalid date\n"); | ||
| 367 | rtc_time_to_tm(0, tm); | ||
| 368 | } | ||
| 369 | |||
| 370 | return 0; | ||
| 371 | } | 435 | } |
| 372 | 436 | ||
| 373 | static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) | 437 | static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) |
| @@ -572,7 +636,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) | |||
| 572 | { | 636 | { |
| 573 | struct sh_rtc *rtc; | 637 | struct sh_rtc *rtc; |
| 574 | struct resource *res; | 638 | struct resource *res; |
| 575 | unsigned int tmp; | 639 | struct rtc_time r; |
| 576 | int ret; | 640 | int ret; |
| 577 | 641 | ||
| 578 | rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL); | 642 | rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL); |
| @@ -585,26 +649,12 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) | |||
| 585 | ret = platform_get_irq(pdev, 0); | 649 | ret = platform_get_irq(pdev, 0); |
| 586 | if (unlikely(ret <= 0)) { | 650 | if (unlikely(ret <= 0)) { |
| 587 | ret = -ENOENT; | 651 | ret = -ENOENT; |
| 588 | dev_err(&pdev->dev, "No IRQ for period\n"); | 652 | dev_err(&pdev->dev, "No IRQ resource\n"); |
| 589 | goto err_badres; | 653 | goto err_badres; |
| 590 | } | 654 | } |
| 591 | rtc->periodic_irq = ret; | 655 | rtc->periodic_irq = ret; |
| 592 | 656 | rtc->carry_irq = platform_get_irq(pdev, 1); | |
| 593 | ret = platform_get_irq(pdev, 1); | 657 | rtc->alarm_irq = platform_get_irq(pdev, 2); |
| 594 | if (unlikely(ret <= 0)) { | ||
| 595 | ret = -ENOENT; | ||
| 596 | dev_err(&pdev->dev, "No IRQ for carry\n"); | ||
| 597 | goto err_badres; | ||
| 598 | } | ||
| 599 | rtc->carry_irq = ret; | ||
| 600 | |||
| 601 | ret = platform_get_irq(pdev, 2); | ||
| 602 | if (unlikely(ret <= 0)) { | ||
| 603 | ret = -ENOENT; | ||
| 604 | dev_err(&pdev->dev, "No IRQ for alarm\n"); | ||
| 605 | goto err_badres; | ||
| 606 | } | ||
| 607 | rtc->alarm_irq = ret; | ||
| 608 | 658 | ||
| 609 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | 659 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
| 610 | if (unlikely(res == NULL)) { | 660 | if (unlikely(res == NULL)) { |
| @@ -646,47 +696,66 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) | |||
| 646 | } | 696 | } |
| 647 | 697 | ||
| 648 | rtc->rtc_dev->max_user_freq = 256; | 698 | rtc->rtc_dev->max_user_freq = 256; |
| 649 | rtc->rtc_dev->irq_freq = 1; | ||
| 650 | rtc->periodic_freq = 0x60; | ||
| 651 | 699 | ||
| 652 | platform_set_drvdata(pdev, rtc); | 700 | platform_set_drvdata(pdev, rtc); |
| 653 | 701 | ||
| 654 | /* register periodic/carry/alarm irqs */ | 702 | if (rtc->carry_irq <= 0) { |
| 655 | ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, IRQF_DISABLED, | 703 | /* register shared periodic/carry/alarm irq */ |
| 656 | "sh-rtc period", rtc); | 704 | ret = request_irq(rtc->periodic_irq, sh_rtc_shared, |
| 657 | if (unlikely(ret)) { | 705 | IRQF_DISABLED, "sh-rtc", rtc); |
| 658 | dev_err(&pdev->dev, | 706 | if (unlikely(ret)) { |
| 659 | "request period IRQ failed with %d, IRQ %d\n", ret, | 707 | dev_err(&pdev->dev, |
| 660 | rtc->periodic_irq); | 708 | "request IRQ failed with %d, IRQ %d\n", ret, |
| 661 | goto err_unmap; | 709 | rtc->periodic_irq); |
| 662 | } | 710 | goto err_unmap; |
| 711 | } | ||
| 712 | } else { | ||
| 713 | /* register periodic/carry/alarm irqs */ | ||
| 714 | ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, | ||
| 715 | IRQF_DISABLED, "sh-rtc period", rtc); | ||
| 716 | if (unlikely(ret)) { | ||
| 717 | dev_err(&pdev->dev, | ||
| 718 | "request period IRQ failed with %d, IRQ %d\n", | ||
| 719 | ret, rtc->periodic_irq); | ||
| 720 | goto err_unmap; | ||
| 721 | } | ||
| 663 | 722 | ||
| 664 | ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, IRQF_DISABLED, | 723 | ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, |
| 665 | "sh-rtc carry", rtc); | 724 | IRQF_DISABLED, "sh-rtc carry", rtc); |
| 666 | if (unlikely(ret)) { | 725 | if (unlikely(ret)) { |
| 667 | dev_err(&pdev->dev, | 726 | dev_err(&pdev->dev, |
| 668 | "request carry IRQ failed with %d, IRQ %d\n", ret, | 727 | "request carry IRQ failed with %d, IRQ %d\n", |
| 669 | rtc->carry_irq); | 728 | ret, rtc->carry_irq); |
| 670 | free_irq(rtc->periodic_irq, rtc); | 729 | free_irq(rtc->periodic_irq, rtc); |
| 671 | goto err_unmap; | 730 | goto err_unmap; |
| 672 | } | 731 | } |
| 673 | 732 | ||
| 674 | ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, IRQF_DISABLED, | 733 | ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, |
| 675 | "sh-rtc alarm", rtc); | 734 | IRQF_DISABLED, "sh-rtc alarm", rtc); |
| 676 | if (unlikely(ret)) { | 735 | if (unlikely(ret)) { |
| 677 | dev_err(&pdev->dev, | 736 | dev_err(&pdev->dev, |
| 678 | "request alarm IRQ failed with %d, IRQ %d\n", ret, | 737 | "request alarm IRQ failed with %d, IRQ %d\n", |
| 679 | rtc->alarm_irq); | 738 | ret, rtc->alarm_irq); |
| 680 | free_irq(rtc->carry_irq, rtc); | 739 | free_irq(rtc->carry_irq, rtc); |
| 681 | free_irq(rtc->periodic_irq, rtc); | 740 | free_irq(rtc->periodic_irq, rtc); |
| 682 | goto err_unmap; | 741 | goto err_unmap; |
| 742 | } | ||
| 683 | } | 743 | } |
| 684 | 744 | ||
| 685 | tmp = readb(rtc->regbase + RCR1); | 745 | /* everything disabled by default */ |
| 686 | tmp &= ~RCR1_CF; | 746 | rtc->periodic_freq = 0; |
| 687 | tmp |= RCR1_CIE; | 747 | rtc->rtc_dev->irq_freq = 0; |
| 688 | writeb(tmp, rtc->regbase + RCR1); | 748 | sh_rtc_setpie(&pdev->dev, 0); |
| 749 | sh_rtc_setaie(&pdev->dev, 0); | ||
| 750 | sh_rtc_setcie(&pdev->dev, 0); | ||
| 751 | |||
| 752 | /* reset rtc to epoch 0 if time is invalid */ | ||
| 753 | if (rtc_read_time(rtc->rtc_dev, &r) < 0) { | ||
| 754 | rtc_time_to_tm(0, &r); | ||
| 755 | rtc_set_time(rtc->rtc_dev, &r); | ||
| 756 | } | ||
| 689 | 757 | ||
| 758 | device_init_wakeup(&pdev->dev, 1); | ||
| 690 | return 0; | 759 | return 0; |
| 691 | 760 | ||
| 692 | err_unmap: | 761 | err_unmap: |
| @@ -708,10 +777,13 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev) | |||
| 708 | 777 | ||
| 709 | sh_rtc_setpie(&pdev->dev, 0); | 778 | sh_rtc_setpie(&pdev->dev, 0); |
| 710 | sh_rtc_setaie(&pdev->dev, 0); | 779 | sh_rtc_setaie(&pdev->dev, 0); |
| 780 | sh_rtc_setcie(&pdev->dev, 0); | ||
| 711 | 781 | ||
| 712 | free_irq(rtc->carry_irq, rtc); | ||
| 713 | free_irq(rtc->periodic_irq, rtc); | 782 | free_irq(rtc->periodic_irq, rtc); |
| 714 | free_irq(rtc->alarm_irq, rtc); | 783 | if (rtc->carry_irq > 0) { |
| 784 | free_irq(rtc->carry_irq, rtc); | ||
| 785 | free_irq(rtc->alarm_irq, rtc); | ||
| 786 | } | ||
| 715 | 787 | ||
| 716 | release_resource(rtc->res); | 788 | release_resource(rtc->res); |
| 717 | 789 | ||
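The rtc-sh rework above splits each handler into an unlocked __sh_rtc_* helper, which acks its status bit and reports whether it was actually pending, and a thin wrapper that takes the spinlock and converts the result with IRQ_RETVAL(); sh_rtc_shared() then simply ORs the three helpers for parts that route everything through one interrupt line. A compilable toy sketch of that pattern, with stand-in types and flag values and the locking reduced to comments:

```c
#include <stdio.h>

#define FLAG_CARRY 0x01	/* stand-ins for the RCR1/RCR2 status bits */
#define FLAG_ALARM 0x02

struct fake_rtc {
	unsigned int status;	/* pretend hardware status register */
};

/* Each __helper runs under the caller's lock: it samples its
 * status bit, acks (clears) it, and reports whether it fired. */
static int __handle_carry(struct fake_rtc *rtc)
{
	int pending = rtc->status & FLAG_CARRY;

	rtc->status &= ~FLAG_CARRY;
	return pending;
}

static int __handle_alarm(struct fake_rtc *rtc)
{
	int pending = rtc->status & FLAG_ALARM;

	rtc->status &= ~FLAG_ALARM;
	return pending;
}

/* Shared-line handler: OR the pending results so the line is only
 * reported as handled when at least one source actually fired -
 * this is what IRQ_RETVAL(ret) expresses in the driver. */
static int shared_handler(struct fake_rtc *rtc)
{
	int ret;

	/* spin_lock(&rtc->lock) would go here in the real driver */
	ret = __handle_carry(rtc);
	ret |= __handle_alarm(rtc);
	/* spin_unlock(&rtc->lock) */

	return ret;
}

int main(void)
{
	struct fake_rtc rtc = { .status = FLAG_ALARM };

	printf("handled: %d\n", shared_handler(&rtc) ? 1 : 0);
	return 0;
}
```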
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 557b54ab2f25..dbf5357a77b3 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
| @@ -139,7 +139,7 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) | |||
| 139 | } while (!(status & SCxSR_TDxE(port))); | 139 | } while (!(status & SCxSR_TDxE(port))); |
| 140 | 140 | ||
| 141 | sci_in(port, SCxSR); /* Dummy read */ | 141 | sci_in(port, SCxSR); /* Dummy read */ |
| 142 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); | 142 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); |
| 143 | sci_out(port, SCxTDR, c); | 143 | sci_out(port, SCxTDR, c); |
| 144 | } | 144 | } |
| 145 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ | 145 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ |
| @@ -263,6 +263,7 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | |||
| 263 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | 263 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ |
| 264 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 264 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 265 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 265 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 266 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ | ||
| 266 | defined(CONFIG_CPU_SUBTYPE_SHX3) | 267 | defined(CONFIG_CPU_SUBTYPE_SHX3) |
| 267 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 268 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
| 268 | { | 269 | { |
| @@ -284,7 +285,8 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | |||
| 284 | 285 | ||
| 285 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ | 286 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ |
| 286 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 287 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 287 | defined(CONFIG_CPU_SUBTYPE_SH7785) | 288 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 289 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
| 288 | static inline int scif_txroom(struct uart_port *port) | 290 | static inline int scif_txroom(struct uart_port *port) |
| 289 | { | 291 | { |
| 290 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); | 292 | return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); |
| @@ -1095,6 +1097,7 @@ static void serial_console_write(struct console *co, const char *s, | |||
| 1095 | unsigned count) | 1097 | unsigned count) |
| 1096 | { | 1098 | { |
| 1097 | struct uart_port *port = &serial_console_port->port; | 1099 | struct uart_port *port = &serial_console_port->port; |
| 1100 | unsigned short bits; | ||
| 1098 | int i; | 1101 | int i; |
| 1099 | 1102 | ||
| 1100 | for (i = 0; i < count; i++) { | 1103 | for (i = 0; i < count; i++) { |
| @@ -1103,6 +1106,11 @@ static void serial_console_write(struct console *co, const char *s, | |||
| 1103 | 1106 | ||
| 1104 | sci_poll_put_char(port, *s++); | 1107 | sci_poll_put_char(port, *s++); |
| 1105 | } | 1108 | } |
| 1109 | |||
| 1110 | /* wait until fifo is empty and last bit has been transmitted */ | ||
| 1111 | bits = SCxSR_TDxE(port) | SCxSR_TEND(port); | ||
| 1112 | while ((sci_in(port, SCxSR) & bits) != bits) | ||
| 1113 | cpu_relax(); | ||
| 1106 | } | 1114 | } |
| 1107 | 1115 | ||
| 1108 | static int __init serial_console_setup(struct console *co, char *options) | 1116 | static int __init serial_console_setup(struct console *co, char *options) |
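The serial_console_write() hunk above adds a drain loop: after the last character is queued it polls the status register until both the transmit-data-register-empty and transmit-end bits are set, so the final bit has left the shifter before the console write returns. An illustrative standalone version of that loop, where the register and the bit values are invented stand-ins for SCxSR:

```c
#include <stdint.h>
#include <stdio.h>

#define TDXE 0x0080	/* invented stand-ins for SCxSR_TDxE/SCxSR_TEND */
#define TEND 0x0040

/* Pretend status register; the real driver reads the hardware
 * status with sci_in(port, SCxSR) instead. */
static volatile uint16_t fake_scxsr = TDXE | TEND;

/* Same shape as the drain loop added to serial_console_write():
 * spin until both "data register empty" and "transmit end" are
 * set, i.e. the last character has fully left the shifter. */
static void wait_tx_drained(void)
{
	const uint16_t bits = TDXE | TEND;

	while ((fake_scxsr & bits) != bits)
		;	/* the kernel calls cpu_relax() in this spot */
}

int main(void)
{
	wait_tx_drained();
	puts("fifo drained");
	return 0;
}
```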
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 022e89ffec1d..d0aa82d7fce0 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | #include <linux/serial_core.h> | 1 | #include <linux/serial_core.h> |
| 2 | #include <asm/io.h> | 2 | #include <asm/io.h> |
| 3 | #include <asm/gpio.h> | 3 | #include <linux/gpio.h> |
| 4 | 4 | ||
| 5 | #if defined(CONFIG_H83007) || defined(CONFIG_H83068) | 5 | #if defined(CONFIG_H83007) || defined(CONFIG_H83068) |
| 6 | #include <asm/regs306x.h> | 6 | #include <asm/regs306x.h> |
| @@ -126,7 +126,8 @@ | |||
| 126 | # define SCSPTR1 0xffe10024 /* 16 bit SCIF */ | 126 | # define SCSPTR1 0xffe10024 /* 16 bit SCIF */ |
| 127 | # define SCIF_ORER 0x0001 /* Overrun error bit */ | 127 | # define SCIF_ORER 0x0001 /* Overrun error bit */ |
| 128 | # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | 128 | # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ |
| 129 | #elif defined(CONFIG_CPU_SUBTYPE_SH7785) | 129 | #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 130 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
| 130 | # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ | 131 | # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ |
| 131 | # define SCSPTR1 0xffeb0024 /* 16 bit SCIF */ | 132 | # define SCSPTR1 0xffeb0024 /* 16 bit SCIF */ |
| 132 | # define SCSPTR2 0xffec0024 /* 16 bit SCIF */ | 133 | # define SCSPTR2 0xffec0024 /* 16 bit SCIF */ |
| @@ -182,6 +183,7 @@ | |||
| 182 | defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | 183 | defined(CONFIG_CPU_SUBTYPE_SH7763) || \ |
| 183 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 184 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 184 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 185 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 186 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ | ||
| 185 | defined(CONFIG_CPU_SUBTYPE_SHX3) | 187 | defined(CONFIG_CPU_SUBTYPE_SHX3) |
| 186 | #define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ | 188 | #define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */ |
| 187 | #else | 189 | #else |
| @@ -413,7 +415,8 @@ SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) | |||
| 413 | SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) | 415 | SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) |
| 414 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ | 416 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ |
| 415 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 417 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 416 | defined(CONFIG_CPU_SUBTYPE_SH7785) | 418 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 419 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
| 417 | SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) | 420 | SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) |
| 418 | SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) | 421 | SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) |
| 419 | SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) | 422 | SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) |
| @@ -644,7 +647,8 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
| 644 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | 647 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ |
| 645 | return 1; | 648 | return 1; |
| 646 | } | 649 | } |
| 647 | #elif defined(CONFIG_CPU_SUBTYPE_SH7785) | 650 | #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 651 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
| 648 | static inline int sci_rxd_in(struct uart_port *port) | 652 | static inline int sci_rxd_in(struct uart_port *port) |
| 649 | { | 653 | { |
| 650 | if (port->mapbase == 0xffea0000) | 654 | if (port->mapbase == 0xffea0000) |
| @@ -746,7 +750,8 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
| 746 | */ | 750 | */ |
| 747 | 751 | ||
| 748 | #if defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 752 | #if defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
| 749 | defined(CONFIG_CPU_SUBTYPE_SH7785) | 753 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
| 754 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
| 750 | #define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1) | 755 | #define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1) |
| 751 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 756 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
| 752 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 757 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
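For SH7780/SH7785/SH7786 the header above selects SCBRR_VALUE(bps, clk) = ((clk+16*bps)/(16*bps)-1), deriving the bit-rate register from the peripheral clock and 16x the target baud rate, less the register's bias of one. A small standalone check of the arithmetic; the 50 MHz clock is a hypothetical figure, not a documented board value:

```c
#include <stdio.h>

/* Same arithmetic as the SH7780/85/86 SCBRR_VALUE macro above:
 * the register holds a divisor derived from the peripheral clock
 * and 16x the target bit rate, minus the register's bias of one. */
static unsigned int scbrr_value(unsigned int bps, unsigned int clk)
{
	return (clk + 16 * bps) / (16 * bps) - 1;
}

int main(void)
{
	/* hypothetical 50 MHz peripheral clock at 115200 baud */
	printf("SCBRR = %u\n", scbrr_value(115200, 50000000));
	return 0;
}
```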
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 58d24c5a76ce..2269fbcaa182 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c | |||
| @@ -568,6 +568,10 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
| 568 | if (!data[0] && data[1]) | 568 | if (!data[0] && data[1]) |
| 569 | primary = 1; | 569 | primary = 1; |
| 570 | 570 | ||
| 571 | if (!data[0] && !data[1]) | ||
| 572 | pr_warning("intc: missing unique irq mask for " | ||
| 573 | "irq %d (vect 0x%04x)\n", irq, irq2evt(irq)); | ||
| 574 | |||
| 571 | data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1); | 575 | data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1); |
| 572 | data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1); | 576 | data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1); |
| 573 | 577 | ||
| @@ -641,6 +645,17 @@ static unsigned int __init save_reg(struct intc_desc_int *d, | |||
| 641 | return 0; | 645 | return 0; |
| 642 | } | 646 | } |
| 643 | 647 | ||
| 648 | static unsigned char *intc_evt2irq_table; | ||
| 649 | |||
| 650 | unsigned int intc_evt2irq(unsigned int vector) | ||
| 651 | { | ||
| 652 | unsigned int irq = evt2irq(vector); | ||
| 653 | |||
| 654 | if (intc_evt2irq_table && intc_evt2irq_table[irq]) | ||
| 655 | irq = intc_evt2irq_table[irq]; | ||
| 656 | |||
| 657 | return irq; | ||
| 658 | } | ||
| 644 | 659 | ||
| 645 | void __init register_intc_controller(struct intc_desc *desc) | 660 | void __init register_intc_controller(struct intc_desc *desc) |
| 646 | { | 661 | { |
| @@ -705,9 +720,41 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
| 705 | 720 | ||
| 706 | BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ | 721 | BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ |
| 707 | 722 | ||
| 723 | /* keep only the first vector if the same enum is used multiple times */ | ||
| 724 | for (i = 0; i < desc->nr_vectors; i++) { | ||
| 725 | struct intc_vect *vect = desc->vectors + i; | ||
| 726 | int first_irq = evt2irq(vect->vect); | ||
| 727 | |||
| 728 | if (!vect->enum_id) | ||
| 729 | continue; | ||
| 730 | |||
| 731 | for (k = i + 1; k < desc->nr_vectors; k++) { | ||
| 732 | struct intc_vect *vect2 = desc->vectors + k; | ||
| 733 | |||
| 734 | if (vect->enum_id != vect2->enum_id) | ||
| 735 | continue; | ||
| 736 | |||
| 737 | vect2->enum_id = 0; | ||
| 738 | |||
| 739 | if (!intc_evt2irq_table) | ||
| 740 | intc_evt2irq_table = alloc_bootmem(NR_IRQS); | ||
| 741 | |||
| 742 | if (!intc_evt2irq_table) { | ||
| 743 | pr_warning("intc: cannot allocate evt2irq!\n"); | ||
| 744 | continue; | ||
| 745 | } | ||
| 746 | |||
| 747 | intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq; | ||
| 748 | } | ||
| 749 | } | ||
| 750 | |||
| 751 | /* register the vectors one by one */ | ||
| 708 | for (i = 0; i < desc->nr_vectors; i++) { | 752 | for (i = 0; i < desc->nr_vectors; i++) { |
| 709 | struct intc_vect *vect = desc->vectors + i; | 753 | struct intc_vect *vect = desc->vectors + i; |
| 710 | 754 | ||
| 755 | if (!vect->enum_id) | ||
| 756 | continue; | ||
| 757 | |||
| 711 | intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); | 758 | intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); |
| 712 | } | 759 | } |
| 713 | } | 760 | } |
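The intc change above collapses vectors that share an enum_id: only the first is registered, and the extra event codes are recorded in a bootmem byte table so intc_evt2irq() can redirect them to the first IRQ. A self-contained sketch of that translation-table idea; the evt2irq() shift and the sample vector numbers are placeholders rather than the real SH encoding:

```c
#include <stdio.h>
#include <string.h>

#define NR_IRQS 256

/* Placeholder mapping from event code to irq number; the real
 * evt2irq() encoding on sh is different. */
static unsigned int evt2irq(unsigned int vector)
{
	return vector >> 5;
}

/* Sparse remap table as in intc_evt2irq() above: zero means "no
 * remap", non-zero redirects an alias to the first irq that was
 * registered for the shared enum_id. */
static unsigned char evt2irq_table[NR_IRQS];

static unsigned int intc_evt2irq(unsigned int vector)
{
	unsigned int irq = evt2irq(vector);

	if (evt2irq_table[irq])
		irq = evt2irq_table[irq];
	return irq;
}

int main(void)
{
	memset(evt2irq_table, 0, sizeof(evt2irq_table));

	/* pretend vector 0x4a0 shares an enum_id with vector 0x480 */
	evt2irq_table[evt2irq(0x4a0)] = evt2irq(0x480);

	printf("vect 0x4a0 -> irq %u\n", intc_evt2irq(0x4a0));
	return 0;
}
```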
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 63f0de29aa14..c71bb4b4ce84 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
| @@ -1,16 +1,10 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Core maple bus functionality | 2 | * Core maple bus functionality |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2007, 2008 Adrian McMenamin | 4 | * Copyright (C) 2007 - 2009 Adrian McMenamin |
| 5 | * Copyright (C) 2001 - 2008 Paul Mundt | 5 | * Copyright (C) 2001 - 2008 Paul Mundt |
| 6 | * | 6 | * Copyright (C) 2000 - 2001 YAEGASHI Takeshi |
| 7 | * Based on 2.4 code by: | ||
| 8 | * | ||
| 9 | * Copyright (C) 2000-2001 YAEGASHI Takeshi | ||
| 10 | * Copyright (C) 2001 M. R. Brown | 7 | * Copyright (C) 2001 M. R. Brown |
| 11 | * Copyright (C) 2001 Paul Mundt | ||
| 12 | * | ||
| 13 | * and others. | ||
| 14 | * | 8 | * |
| 15 | * This file is subject to the terms and conditions of the GNU General Public | 9 | * This file is subject to the terms and conditions of the GNU General Public |
| 16 | * License. See the file "COPYING" in the main directory of this archive | 10 | * License. See the file "COPYING" in the main directory of this archive |
| @@ -32,7 +26,7 @@ | |||
| 32 | #include <mach/dma.h> | 26 | #include <mach/dma.h> |
| 33 | #include <mach/sysasic.h> | 27 | #include <mach/sysasic.h> |
| 34 | 28 | ||
| 35 | MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin"); | 29 | MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); |
| 36 | MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); | 30 | MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); |
| 37 | MODULE_LICENSE("GPL v2"); | 31 | MODULE_LICENSE("GPL v2"); |
| 38 | MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); | 32 | MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); |
| @@ -49,7 +43,7 @@ static LIST_HEAD(maple_sentq); | |||
| 49 | /* mutex to protect queue of waiting packets */ | 43 | /* mutex to protect queue of waiting packets */ |
| 50 | static DEFINE_MUTEX(maple_wlist_lock); | 44 | static DEFINE_MUTEX(maple_wlist_lock); |
| 51 | 45 | ||
| 52 | static struct maple_driver maple_dummy_driver; | 46 | static struct maple_driver maple_unsupported_device; |
| 53 | static struct device maple_bus; | 47 | static struct device maple_bus; |
| 54 | static int subdevice_map[MAPLE_PORTS]; | 48 | static int subdevice_map[MAPLE_PORTS]; |
| 55 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; | 49 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; |
| @@ -62,8 +56,9 @@ struct maple_device_specify { | |||
| 62 | int unit; | 56 | int unit; |
| 63 | }; | 57 | }; |
| 64 | 58 | ||
| 65 | static bool checked[4]; | 59 | static bool checked[MAPLE_PORTS]; |
| 66 | static struct maple_device *baseunits[4]; | 60 | static bool empty[MAPLE_PORTS]; |
| 61 | static struct maple_device *baseunits[MAPLE_PORTS]; | ||
| 67 | 62 | ||
| 68 | /** | 63 | /** |
| 69 | * maple_driver_register - register a maple driver | 64 | * maple_driver_register - register a maple driver |
| @@ -97,12 +92,20 @@ void maple_driver_unregister(struct maple_driver *drv) | |||
| 97 | EXPORT_SYMBOL_GPL(maple_driver_unregister); | 92 | EXPORT_SYMBOL_GPL(maple_driver_unregister); |
| 98 | 93 | ||
| 99 | /* set hardware registers to enable next round of dma */ | 94 | /* set hardware registers to enable next round of dma */ |
| 100 | static void maplebus_dma_reset(void) | 95 | static void maple_dma_reset(void) |
| 101 | { | 96 | { |
| 102 | ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); | 97 | ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); |
| 103 | /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ | 98 | /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ |
| 104 | ctrl_outl(1, MAPLE_TRIGTYPE); | 99 | ctrl_outl(1, MAPLE_TRIGTYPE); |
| 105 | ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED); | 100 | /* |
| 101 | * Maple system register | ||
| 102 | * bits 31 - 16 timeout in units of 20nsec | ||
| 103 | * bit 12 hard trigger - set 0 to keep responding to VBLANK | ||
| 104 | * bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps | ||
| 105 | * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA | ||
| 106 | * max delay is 11 | ||
| 107 | */ | ||
| 108 | ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); | ||
| 106 | ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); | 109 | ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); |
| 107 | ctrl_outl(1, MAPLE_ENABLE); | 110 | ctrl_outl(1, MAPLE_ENABLE); |
| 108 | } | 111 | } |
| @@ -134,21 +137,16 @@ static void maple_release_device(struct device *dev) | |||
| 134 | { | 137 | { |
| 135 | struct maple_device *mdev; | 138 | struct maple_device *mdev; |
| 136 | struct mapleq *mq; | 139 | struct mapleq *mq; |
| 137 | if (!dev) | 140 | |
| 138 | return; | ||
| 139 | mdev = to_maple_dev(dev); | 141 | mdev = to_maple_dev(dev); |
| 140 | mq = mdev->mq; | 142 | mq = mdev->mq; |
| 141 | if (mq) { | 143 | kmem_cache_free(maple_queue_cache, mq->recvbuf); |
| 142 | if (mq->recvbufdcsp) | 144 | kfree(mq); |
| 143 | kmem_cache_free(maple_queue_cache, mq->recvbufdcsp); | ||
| 144 | kfree(mq); | ||
| 145 | mq = NULL; | ||
| 146 | } | ||
| 147 | kfree(mdev); | 145 | kfree(mdev); |
| 148 | } | 146 | } |
| 149 | 147 | ||
| 150 | /** | 148 | /** |
| 151 | * maple_add_packet - add a single instruction to the queue | 149 | * maple_add_packet - add a single instruction to the maple bus queue |
| 152 | * @mdev: maple device | 150 | * @mdev: maple device |
| 153 | * @function: function on device being queried | 151 | * @function: function on device being queried |
| 154 | * @command: maple command to add | 152 | * @command: maple command to add |
| @@ -158,68 +156,12 @@ static void maple_release_device(struct device *dev) | |||
| 158 | int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, | 156 | int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, |
| 159 | size_t length, void *data) | 157 | size_t length, void *data) |
| 160 | { | 158 | { |
| 161 | int locking, ret = 0; | 159 | int ret = 0; |
| 162 | void *sendbuf = NULL; | ||
| 163 | |||
| 164 | mutex_lock(&maple_wlist_lock); | ||
| 165 | /* bounce if device already locked */ | ||
| 166 | locking = mutex_is_locked(&mdev->mq->mutex); | ||
| 167 | if (locking) { | ||
| 168 | ret = -EBUSY; | ||
| 169 | goto out; | ||
| 170 | } | ||
| 171 | |||
| 172 | mutex_lock(&mdev->mq->mutex); | ||
| 173 | |||
| 174 | if (length) { | ||
| 175 | sendbuf = kmalloc(length * 4, GFP_KERNEL); | ||
| 176 | if (!sendbuf) { | ||
| 177 | mutex_unlock(&mdev->mq->mutex); | ||
| 178 | ret = -ENOMEM; | ||
| 179 | goto out; | ||
| 180 | } | ||
| 181 | ((__be32 *)sendbuf)[0] = cpu_to_be32(function); | ||
| 182 | } | ||
| 183 | |||
| 184 | mdev->mq->command = command; | ||
| 185 | mdev->mq->length = length; | ||
| 186 | if (length > 1) | ||
| 187 | memcpy(sendbuf + 4, data, (length - 1) * 4); | ||
| 188 | mdev->mq->sendbuf = sendbuf; | ||
| 189 | |||
| 190 | list_add(&mdev->mq->list, &maple_waitq); | ||
| 191 | out: | ||
| 192 | mutex_unlock(&maple_wlist_lock); | ||
| 193 | return ret; | ||
| 194 | } | ||
| 195 | EXPORT_SYMBOL_GPL(maple_add_packet); | ||
| 196 | |||
| 197 | /** | ||
| 198 | * maple_add_packet_sleeps - add a single instruction to the queue | ||
| 199 | * @mdev: maple device | ||
| 200 | * @function: function on device being queried | ||
| 201 | * @command: maple command to add | ||
| 202 | * @length: length of command string (in 32 bit words) | ||
| 203 | * @data: remainder of command string | ||
| 204 | * | ||
| 205 | * Same as maple_add_packet(), but waits for the lock to become free. | ||
| 206 | */ | ||
| 207 | int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, | ||
| 208 | u32 command, size_t length, void *data) | ||
| 209 | { | ||
| 210 | int locking, ret = 0; | ||
| 211 | void *sendbuf = NULL; | 160 | void *sendbuf = NULL; |
| 212 | 161 | ||
| 213 | locking = mutex_lock_interruptible(&mdev->mq->mutex); | ||
| 214 | if (locking) { | ||
| 215 | ret = -EIO; | ||
| 216 | goto out; | ||
| 217 | } | ||
| 218 | |||
| 219 | if (length) { | 162 | if (length) { |
| 220 | sendbuf = kmalloc(length * 4, GFP_KERNEL); | 163 | sendbuf = kzalloc(length * 4, GFP_KERNEL); |
| 221 | if (!sendbuf) { | 164 | if (!sendbuf) { |
| 222 | mutex_unlock(&mdev->mq->mutex); | ||
| 223 | ret = -ENOMEM; | 165 | ret = -ENOMEM; |
| 224 | goto out; | 166 | goto out; |
| 225 | } | 167 | } |
| @@ -233,38 +175,35 @@ int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, | |||
| 233 | mdev->mq->sendbuf = sendbuf; | 175 | mdev->mq->sendbuf = sendbuf; |
| 234 | 176 | ||
| 235 | mutex_lock(&maple_wlist_lock); | 177 | mutex_lock(&maple_wlist_lock); |
| 236 | list_add(&mdev->mq->list, &maple_waitq); | 178 | list_add_tail(&mdev->mq->list, &maple_waitq); |
| 237 | mutex_unlock(&maple_wlist_lock); | 179 | mutex_unlock(&maple_wlist_lock); |
| 238 | out: | 180 | out: |
| 239 | return ret; | 181 | return ret; |
| 240 | } | 182 | } |
| 241 | EXPORT_SYMBOL_GPL(maple_add_packet_sleeps); | 183 | EXPORT_SYMBOL_GPL(maple_add_packet); |
| 242 | 184 | ||
| 243 | static struct mapleq *maple_allocq(struct maple_device *mdev) | 185 | static struct mapleq *maple_allocq(struct maple_device *mdev) |
| 244 | { | 186 | { |
| 245 | struct mapleq *mq; | 187 | struct mapleq *mq; |
| 246 | 188 | ||
| 247 | mq = kmalloc(sizeof(*mq), GFP_KERNEL); | 189 | mq = kzalloc(sizeof(*mq), GFP_KERNEL); |
| 248 | if (!mq) | 190 | if (!mq) |
| 249 | goto failed_nomem; | 191 | goto failed_nomem; |
| 250 | 192 | ||
| 193 | INIT_LIST_HEAD(&mq->list); | ||
| 251 | mq->dev = mdev; | 194 | mq->dev = mdev; |
| 252 | mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); | 195 | mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); |
| 253 | mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); | ||
| 254 | if (!mq->recvbuf) | 196 | if (!mq->recvbuf) |
| 255 | goto failed_p2; | 197 | goto failed_p2; |
| 256 | /* | 198 | mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]); |
| 257 | * most devices do not need the mutex - but | ||
| 258 | * anything that injects block reads or writes | ||
| 259 | * will rely on it | ||
| 260 | */ | ||
| 261 | mutex_init(&mq->mutex); | ||
| 262 | 199 | ||
| 263 | return mq; | 200 | return mq; |
| 264 | 201 | ||
| 265 | failed_p2: | 202 | failed_p2: |
| 266 | kfree(mq); | 203 | kfree(mq); |
| 267 | failed_nomem: | 204 | failed_nomem: |
| 205 | dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n", | ||
| 206 | mdev->port, mdev->unit); | ||
| 268 | return NULL; | 207 | return NULL; |
| 269 | } | 208 | } |
| 270 | 209 | ||
| @@ -272,12 +211,16 @@ static struct maple_device *maple_alloc_dev(int port, int unit) | |||
| 272 | { | 211 | { |
| 273 | struct maple_device *mdev; | 212 | struct maple_device *mdev; |
| 274 | 213 | ||
| 214 | /* zero this out to avoid the kobj subsystem | ||
| 215 | * thinking it has already been registered */ | ||
| 216 | |||
| 275 | mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); | 217 | mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); |
| 276 | if (!mdev) | 218 | if (!mdev) |
| 277 | return NULL; | 219 | return NULL; |
| 278 | 220 | ||
| 279 | mdev->port = port; | 221 | mdev->port = port; |
| 280 | mdev->unit = unit; | 222 | mdev->unit = unit; |
| 223 | |||
| 281 | mdev->mq = maple_allocq(mdev); | 224 | mdev->mq = maple_allocq(mdev); |
| 282 | 225 | ||
| 283 | if (!mdev->mq) { | 226 | if (!mdev->mq) { |
| @@ -286,19 +229,14 @@ static struct maple_device *maple_alloc_dev(int port, int unit) | |||
| 286 | } | 229 | } |
| 287 | mdev->dev.bus = &maple_bus_type; | 230 | mdev->dev.bus = &maple_bus_type; |
| 288 | mdev->dev.parent = &maple_bus; | 231 | mdev->dev.parent = &maple_bus; |
| 232 | init_waitqueue_head(&mdev->maple_wait); | ||
| 289 | return mdev; | 233 | return mdev; |
| 290 | } | 234 | } |
| 291 | 235 | ||
| 292 | static void maple_free_dev(struct maple_device *mdev) | 236 | static void maple_free_dev(struct maple_device *mdev) |
| 293 | { | 237 | { |
| 294 | if (!mdev) | 238 | kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf); |
| 295 | return; | 239 | kfree(mdev->mq); |
| 296 | if (mdev->mq) { | ||
| 297 | if (mdev->mq->recvbufdcsp) | ||
| 298 | kmem_cache_free(maple_queue_cache, | ||
| 299 | mdev->mq->recvbufdcsp); | ||
| 300 | kfree(mdev->mq); | ||
| 301 | } | ||
| 302 | kfree(mdev); | 240 | kfree(mdev); |
| 303 | } | 241 | } |
| 304 | 242 | ||
| @@ -320,7 +258,7 @@ static void maple_build_block(struct mapleq *mq) | |||
| 320 | maple_lastptr = maple_sendptr; | 258 | maple_lastptr = maple_sendptr; |
| 321 | 259 | ||
| 322 | *maple_sendptr++ = (port << 16) | len | 0x80000000; | 260 | *maple_sendptr++ = (port << 16) | len | 0x80000000; |
| 323 | *maple_sendptr++ = PHYSADDR(mq->recvbuf); | 261 | *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf); |
| 324 | *maple_sendptr++ = | 262 | *maple_sendptr++ = |
| 325 | mq->command | (to << 8) | (from << 16) | (len << 24); | 263 | mq->command | (to << 8) | (from << 16) | (len << 24); |
| 326 | while (len-- > 0) | 264 | while (len-- > 0) |
| @@ -333,20 +271,28 @@ static void maple_send(void) | |||
| 333 | int i, maple_packets = 0; | 271 | int i, maple_packets = 0; |
| 334 | struct mapleq *mq, *nmq; | 272 | struct mapleq *mq, *nmq; |
| 335 | 273 | ||
| 336 | if (!list_empty(&maple_sentq)) | 274 | if (!maple_dma_done()) |
| 337 | return; | 275 | return; |
| 276 | |||
| 277 | /* disable DMA */ | ||
| 278 | ctrl_outl(0, MAPLE_ENABLE); | ||
| 279 | |||
| 280 | if (!list_empty(&maple_sentq)) | ||
| 281 | goto finish; | ||
| 282 | |||
| 338 | mutex_lock(&maple_wlist_lock); | 283 | mutex_lock(&maple_wlist_lock); |
| 339 | if (list_empty(&maple_waitq) || !maple_dma_done()) { | 284 | if (list_empty(&maple_waitq)) { |
| 340 | mutex_unlock(&maple_wlist_lock); | 285 | mutex_unlock(&maple_wlist_lock); |
| 341 | return; | 286 | goto finish; |
| 342 | } | 287 | } |
| 343 | mutex_unlock(&maple_wlist_lock); | 288 | |
| 344 | maple_lastptr = maple_sendbuf; | 289 | maple_lastptr = maple_sendbuf; |
| 345 | maple_sendptr = maple_sendbuf; | 290 | maple_sendptr = maple_sendbuf; |
| 346 | mutex_lock(&maple_wlist_lock); | 291 | |
| 347 | list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { | 292 | list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { |
| 348 | maple_build_block(mq); | 293 | maple_build_block(mq); |
| 349 | list_move(&mq->list, &maple_sentq); | 294 | list_del_init(&mq->list); |
| 295 | list_add_tail(&mq->list, &maple_sentq); | ||
| 350 | if (maple_packets++ > MAPLE_MAXPACKETS) | 296 | if (maple_packets++ > MAPLE_MAXPACKETS) |
| 351 | break; | 297 | break; |
| 352 | } | 298 | } |
| @@ -356,10 +302,13 @@ static void maple_send(void) | |||
| 356 | dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, | 302 | dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, |
| 357 | PAGE_SIZE, DMA_BIDIRECTIONAL); | 303 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
| 358 | } | 304 | } |
| 305 | |||
| 306 | finish: | ||
| 307 | maple_dma_reset(); | ||
| 359 | } | 308 | } |
| 360 | 309 | ||
| 361 | /* check if there is a driver registered likely to match this device */ | 310 | /* check if there is a driver registered likely to match this device */ |
| 362 | static int check_matching_maple_driver(struct device_driver *driver, | 311 | static int maple_check_matching_driver(struct device_driver *driver, |
| 363 | void *devptr) | 312 | void *devptr) |
| 364 | { | 313 | { |
| 365 | struct maple_driver *maple_drv; | 314 | struct maple_driver *maple_drv; |
| @@ -374,10 +323,7 @@ static int check_matching_maple_driver(struct device_driver *driver, | |||
| 374 | 323 | ||
| 375 | static void maple_detach_driver(struct maple_device *mdev) | 324 | static void maple_detach_driver(struct maple_device *mdev) |
| 376 | { | 325 | { |
| 377 | if (!mdev) | ||
| 378 | return; | ||
| 379 | device_unregister(&mdev->dev); | 326 | device_unregister(&mdev->dev); |
| 380 | mdev = NULL; | ||
| 381 | } | 327 | } |
| 382 | 328 | ||
| 383 | /* process initial MAPLE_COMMAND_DEVINFO for each device or port */ | 329 | /* process initial MAPLE_COMMAND_DEVINFO for each device or port */ |
| @@ -385,9 +331,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
| 385 | { | 331 | { |
| 386 | char *p, *recvbuf; | 332 | char *p, *recvbuf; |
| 387 | unsigned long function; | 333 | unsigned long function; |
| 388 | int matched, retval; | 334 | int matched, error; |
| 389 | 335 | ||
| 390 | recvbuf = mdev->mq->recvbuf; | 336 | recvbuf = mdev->mq->recvbuf->buf; |
| 391 | /* copy the data as individual elements in | 337 | /* copy the data as individual elements in |
| 392 | * case of memory optimisation */ | 338 | * case of memory optimisation */ |
| 393 | memcpy(&mdev->devinfo.function, recvbuf + 4, 4); | 339 | memcpy(&mdev->devinfo.function, recvbuf + 4, 4); |
| @@ -395,7 +341,6 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
| 395 | memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); | 341 | memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); |
| 396 | memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); | 342 | memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); |
| 397 | memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); | 343 | memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); |
| 398 | memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60); | ||
| 399 | memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); | 344 | memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); |
| 400 | memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); | 345 | memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); |
| 401 | memcpy(mdev->product_name, mdev->devinfo.product_name, 30); | 346 | memcpy(mdev->product_name, mdev->devinfo.product_name, 30); |
| @@ -414,43 +359,40 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
| 414 | else | 359 | else |
| 415 | break; | 360 | break; |
| 416 | 361 | ||
| 417 | printk(KERN_INFO "Maple device detected: %s\n", | ||
| 418 | mdev->product_name); | ||
| 419 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); | ||
| 420 | |||
| 421 | function = be32_to_cpu(mdev->devinfo.function); | 362 | function = be32_to_cpu(mdev->devinfo.function); |
| 422 | 363 | ||
| 364 | dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n", | ||
| 365 | mdev->product_name, function, mdev->port, mdev->unit); | ||
| 366 | |||
| 423 | if (function > 0x200) { | 367 | if (function > 0x200) { |
| 424 | /* Do this silently - as not a real device */ | 368 | /* Do this silently - as not a real device */ |
| 425 | function = 0; | 369 | function = 0; |
| 426 | mdev->driver = &maple_dummy_driver; | 370 | mdev->driver = &maple_unsupported_device; |
| 427 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); | 371 | dev_set_name(&mdev->dev, "%d:0.port", mdev->port); |
| 428 | } else { | 372 | } else { |
| 429 | printk(KERN_INFO | ||
| 430 | "Maple bus at (%d, %d): Function 0x%lX\n", | ||
| 431 | mdev->port, mdev->unit, function); | ||
| 432 | |||
| 433 | matched = | 373 | matched = |
| 434 | bus_for_each_drv(&maple_bus_type, NULL, mdev, | 374 | bus_for_each_drv(&maple_bus_type, NULL, mdev, |
| 435 | check_matching_maple_driver); | 375 | maple_check_matching_driver); |
| 436 | 376 | ||
| 437 | if (matched == 0) { | 377 | if (matched == 0) { |
| 438 | /* Driver does not exist yet */ | 378 | /* Driver does not exist yet */ |
| 439 | printk(KERN_INFO | 379 | dev_info(&mdev->dev, "no driver found\n"); |
| 440 | "No maple driver found.\n"); | 380 | mdev->driver = &maple_unsupported_device; |
| 441 | mdev->driver = &maple_dummy_driver; | ||
| 442 | } | 381 | } |
| 443 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, | 382 | |
| 444 | mdev->unit, function); | 383 | dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port, |
| 384 | mdev->unit, function); | ||
| 445 | } | 385 | } |
| 386 | |||
| 446 | mdev->function = function; | 387 | mdev->function = function; |
| 447 | mdev->dev.release = &maple_release_device; | 388 | mdev->dev.release = &maple_release_device; |
| 448 | retval = device_register(&mdev->dev); | 389 | |
| 449 | if (retval) { | 390 | atomic_set(&mdev->busy, 0); |
| 450 | printk(KERN_INFO | 391 | error = device_register(&mdev->dev); |
| 451 | "Maple bus: Attempt to register device" | 392 | if (error) { |
| 452 | " (%x, %x) failed.\n", | 393 | dev_warn(&mdev->dev, "could not register device at" |
| 453 | mdev->port, mdev->unit); | 394 | " (%d, %d), with error 0x%X\n", mdev->unit, |
| 395 | mdev->port, error); | ||
| 454 | maple_free_dev(mdev); | 396 | maple_free_dev(mdev); |
| 455 | mdev = NULL; | 397 | mdev = NULL; |
| 456 | return; | 398 | return; |
| @@ -462,7 +404,7 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
| 462 | * port and unit then return 1 - allows identification | 404 | * port and unit then return 1 - allows identification |
| 463 | * of which devices need to be attached or detached | 405 | * of which devices need to be attached or detached |
| 464 | */ | 406 | */ |
| 465 | static int detach_maple_device(struct device *device, void *portptr) | 407 | static int check_maple_device(struct device *device, void *portptr) |
| 466 | { | 408 | { |
| 467 | struct maple_device_specify *ds; | 409 | struct maple_device_specify *ds; |
| 468 | struct maple_device *mdev; | 410 | struct maple_device *mdev; |
| @@ -477,21 +419,25 @@ static int detach_maple_device(struct device *device, void *portptr) | |||
| 477 | static int setup_maple_commands(struct device *device, void *ignored) | 419 | static int setup_maple_commands(struct device *device, void *ignored) |
| 478 | { | 420 | { |
| 479 | int add; | 421 | int add; |
| 480 | struct maple_device *maple_dev = to_maple_dev(device); | 422 | struct maple_device *mdev = to_maple_dev(device); |
| 481 | 423 | if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 && | |
| 482 | if ((maple_dev->interval > 0) | 424 | time_after(jiffies, mdev->when)) { |
| 483 | && time_after(jiffies, maple_dev->when)) { | 425 | /* bounce if we cannot add */ |
| 484 | /* bounce if we cannot lock */ | 426 | add = maple_add_packet(mdev, |
| 485 | add = maple_add_packet(maple_dev, | 427 | be32_to_cpu(mdev->devinfo.function), |
| 486 | be32_to_cpu(maple_dev->devinfo.function), | ||
| 487 | MAPLE_COMMAND_GETCOND, 1, NULL); | 428 | MAPLE_COMMAND_GETCOND, 1, NULL); |
| 488 | if (!add) | 429 | if (!add) |
| 489 | maple_dev->when = jiffies + maple_dev->interval; | 430 | mdev->when = jiffies + mdev->interval; |
| 490 | } else { | 431 | } else { |
| 491 | if (time_after(jiffies, maple_pnp_time)) | 432 | if (time_after(jiffies, maple_pnp_time)) |
| 492 | /* This will also bounce */ | 433 | /* Ensure we don't have block reads and devinfo |
| 493 | maple_add_packet(maple_dev, 0, | 434 | * calls interfering with one another - so flag the |
| 494 | MAPLE_COMMAND_DEVINFO, 0, NULL); | 435 | * device as busy */ |
| 436 | if (atomic_read(&mdev->busy) == 0) { | ||
| 437 | atomic_set(&mdev->busy, 1); | ||
| 438 | maple_add_packet(mdev, 0, | ||
| 439 | MAPLE_COMMAND_DEVINFO, 0, NULL); | ||
| 440 | } | ||
| 495 | } | 441 | } |
| 496 | return 0; | 442 | return 0; |
| 497 | } | 443 | } |
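The rewritten setup_maple_commands() gates both the periodic GETCOND poll and the DEVINFO rescan on the new atomic busy flag, so block reads and device probing cannot interleave on one device. The read-then-set pair is only safe if callers cannot race; a stricter claim would use atomic_cmpxchg(), as in this sketch (the helper name is invented):

```c
#include <asm/atomic.h>	/* <linux/atomic.h> in later trees */

/* Hypothetical stricter variant: claim the device atomically before
 * queueing a DEVINFO request.  Returns true if we won the claim. */
static bool maple_try_claim(struct maple_device *mdev)
{
	/* swap 0 -> 1; any other old value means the device is owned */
	return atomic_cmpxchg(&mdev->busy, 0, 1) == 0;
}
```

Whether the simpler read/set pair suffices here depends on which contexts can reach this path concurrently; the bus also serialises traffic through maple_sentq, which narrows the window.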
| @@ -499,29 +445,50 @@ static int setup_maple_commands(struct device *device, void *ignored) | |||
| 499 | /* VBLANK bottom half - implemented via workqueue */ | 445 | /* VBLANK bottom half - implemented via workqueue */ |
| 500 | static void maple_vblank_handler(struct work_struct *work) | 446 | static void maple_vblank_handler(struct work_struct *work) |
| 501 | { | 447 | { |
| 502 | if (!list_empty(&maple_sentq) || !maple_dma_done()) | 448 | int x, locking; |
| 449 | struct maple_device *mdev; | ||
| 450 | |||
| 451 | if (!maple_dma_done()) | ||
| 503 | return; | 452 | return; |
| 504 | 453 | ||
| 505 | ctrl_outl(0, MAPLE_ENABLE); | 454 | ctrl_outl(0, MAPLE_ENABLE); |
| 506 | 455 | ||
| 456 | if (!list_empty(&maple_sentq)) | ||
| 457 | goto finish; | ||
| 458 | |||
| 459 | /* | ||
| 460 | * Set up essential commands - to fetch data and | ||
| 461 | * check devices are still present | ||
| 462 | */ | ||
| 507 | bus_for_each_dev(&maple_bus_type, NULL, NULL, | 463 | bus_for_each_dev(&maple_bus_type, NULL, NULL, |
| 508 | setup_maple_commands); | 464 | setup_maple_commands); |
| 465 | |||
| 466 | if (time_after(jiffies, maple_pnp_time)) { | ||
| 467 | /* | ||
| 468 | * Scan the empty ports - bus is flakey and may have | ||
| 469 | * mis-reported emptyness | ||
| 470 | */ | ||
| 471 | for (x = 0; x < MAPLE_PORTS; x++) { | ||
| 472 | if (checked[x] && empty[x]) { | ||
| 473 | mdev = baseunits[x]; | ||
| 474 | if (!mdev) | ||
| 475 | break; | ||
| 476 | atomic_set(&mdev->busy, 1); | ||
| 477 | locking = maple_add_packet(mdev, 0, | ||
| 478 | MAPLE_COMMAND_DEVINFO, 0, NULL); | ||
| 479 | if (!locking) | ||
| 480 | break; | ||
| 481 | } | ||
| 482 | } | ||
| 509 | 483 | ||
| 510 | if (time_after(jiffies, maple_pnp_time)) | ||
| 511 | maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; | 484 | maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; |
| 512 | |||
| 513 | mutex_lock(&maple_wlist_lock); | ||
| 514 | if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) { | ||
| 515 | mutex_unlock(&maple_wlist_lock); | ||
| 516 | maple_send(); | ||
| 517 | } else { | ||
| 518 | mutex_unlock(&maple_wlist_lock); | ||
| 519 | } | 485 | } |
| 520 | 486 | ||
| 521 | maplebus_dma_reset(); | 487 | finish: |
| 488 | maple_send(); | ||
| 522 | } | 489 | } |
| 523 | 490 | ||
| 524 | /* handle devices added via hotplugs - placing them on queue for DEVINFO*/ | 491 | /* handle devices added via hotplugs - placing them on queue for DEVINFO */ |
| 525 | static void maple_map_subunits(struct maple_device *mdev, int submask) | 492 | static void maple_map_subunits(struct maple_device *mdev, int submask) |
| 526 | { | 493 | { |
| 527 | int retval, k, devcheck; | 494 | int retval, k, devcheck; |
| @@ -533,7 +500,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask) | |||
| 533 | ds.unit = k + 1; | 500 | ds.unit = k + 1; |
| 534 | retval = | 501 | retval = |
| 535 | bus_for_each_dev(&maple_bus_type, NULL, &ds, | 502 | bus_for_each_dev(&maple_bus_type, NULL, &ds, |
| 536 | detach_maple_device); | 503 | check_maple_device); |
| 537 | if (retval) { | 504 | if (retval) { |
| 538 | submask = submask >> 1; | 505 | submask = submask >> 1; |
| 539 | continue; | 506 | continue; |
| @@ -543,6 +510,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask) | |||
| 543 | mdev_add = maple_alloc_dev(mdev->port, k + 1); | 510 | mdev_add = maple_alloc_dev(mdev->port, k + 1); |
| 544 | if (!mdev_add) | 511 | if (!mdev_add) |
| 545 | return; | 512 | return; |
| 513 | atomic_set(&mdev_add->busy, 1); | ||
| 546 | maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, | 514 | maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, |
| 547 | 0, NULL); | 515 | 0, NULL); |
| 548 | /* mark that we are checking sub devices */ | 516 | /* mark that we are checking sub devices */ |
| @@ -564,27 +532,45 @@ static void maple_clean_submap(struct maple_device *mdev) | |||
| 564 | } | 532 | } |
| 565 | 533 | ||
| 566 | /* handle empty port or hotplug removal */ | 534 | /* handle empty port or hotplug removal */ |
| 567 | static void maple_response_none(struct maple_device *mdev, | 535 | static void maple_response_none(struct maple_device *mdev) |
| 568 | struct mapleq *mq) | 536 | { |
| 569 | { | 537 | maple_clean_submap(mdev); |
| 570 | if (mdev->unit != 0) { | 538 | |
| 571 | list_del(&mq->list); | 539 | if (likely(mdev->unit != 0)) { |
| 572 | maple_clean_submap(mdev); | 540 | /* |
| 573 | printk(KERN_INFO | 541 | * Block devices can give the |
| 574 | "Maple bus device detaching at (%d, %d)\n", | 542 | * impression they have been removed |
| 575 | mdev->port, mdev->unit); | 543 | * even when still in place, or can trip |
| 544 | * the mtd layer when they really have | ||
| 545 | * gone - this code traps that eventuality | ||
| 546 | * and ensures we are not flooded with | ||
| 547 | * useless error messages | ||
| 548 | */ | ||
| 549 | if (mdev->can_unload) { | ||
| 550 | if (!mdev->can_unload(mdev)) { | ||
| 551 | atomic_set(&mdev->busy, 2); | ||
| 552 | wake_up(&mdev->maple_wait); | ||
| 553 | return; | ||
| 554 | } | ||
| 555 | } | ||
| 556 | |||
| 557 | dev_info(&mdev->dev, "detaching device at (%d, %d)\n", | ||
| 558 | mdev->port, mdev->unit); | ||
| 576 | maple_detach_driver(mdev); | 559 | maple_detach_driver(mdev); |
| 577 | return; | 560 | return; |
| 578 | } | 561 | } else { |
| 579 | if (!started || !fullscan) { | 562 | if (!started || !fullscan) { |
| 580 | if (checked[mdev->port] == false) { | 563 | if (checked[mdev->port] == false) { |
| 581 | checked[mdev->port] = true; | 564 | checked[mdev->port] = true; |
| 582 | printk(KERN_INFO "No maple devices attached" | 565 | empty[mdev->port] = true; |
| 583 | " to port %d\n", mdev->port); | 566 | dev_info(&mdev->dev, "no devices" |
| 567 | " to port %d\n", mdev->port); | ||
| 568 | } | ||
| 569 | return; | ||
| 584 | } | 570 | } |
| 585 | return; | ||
| 586 | } | 571 | } |
| 587 | maple_clean_submap(mdev); | 572 | /* Some hardware devices generate false detach messages on unit 0 */ |
| 573 | atomic_set(&mdev->busy, 0); | ||
| 588 | } | 574 | } |
| 589 | 575 | ||
| 590 | /* preprocess hotplugs or scans */ | 576 | /* preprocess hotplugs or scans */ |
| @@ -599,8 +585,11 @@ static void maple_response_devinfo(struct maple_device *mdev, | |||
| 599 | } else { | 585 | } else { |
| 600 | if (mdev->unit != 0) | 586 | if (mdev->unit != 0) |
| 601 | maple_attach_driver(mdev); | 587 | maple_attach_driver(mdev); |
| 588 | if (mdev->unit == 0) { | ||
| 589 | empty[mdev->port] = false; | ||
| 590 | maple_attach_driver(mdev); | ||
| 591 | } | ||
| 602 | } | 592 | } |
| 603 | return; | ||
| 604 | } | 593 | } |
| 605 | if (mdev->unit == 0) { | 594 | if (mdev->unit == 0) { |
| 606 | submask = recvbuf[2] & 0x1F; | 595 | submask = recvbuf[2] & 0x1F; |
| @@ -611,6 +600,17 @@ static void maple_response_devinfo(struct maple_device *mdev, | |||
| 611 | } | 600 | } |
| 612 | } | 601 | } |
| 613 | 602 | ||
| 603 | static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf) | ||
| 604 | { | ||
| 605 | if (mdev->fileerr_handler) { | ||
| 606 | mdev->fileerr_handler(mdev, recvbuf); | ||
| 607 | return; | ||
| 608 | } else | ||
| 609 | dev_warn(&mdev->dev, "device at (%d, %d) reports" | ||
| 610 | "file error 0x%X\n", mdev->port, mdev->unit, | ||
| 611 | ((int *)recvbuf)[1]); | ||
| 612 | } | ||
| 613 | |||
| 614 | static void maple_port_rescan(void) | 614 | static void maple_port_rescan(void) |
| 615 | { | 615 | { |
| 616 | int i; | 616 | int i; |
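maple_response_fileerr() dispatches to a per-device handler when one is registered and falls back to a generic warning otherwise. A function driver would claim the hook at probe time; the names below (vmu_file_error, vmu_probe) are invented for illustration:

```c
/* Hypothetical consumer: decode the FILEERR status word privately
 * instead of taking the generic dev_warn() fallback above. */
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	unsigned int status = ((unsigned int *)recvbuf)[1];

	dev_dbg(&mdev->dev, "file error 0x%X, will retry\n", status);
}

static int vmu_probe(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);

	mdev->fileerr_handler = vmu_file_error;	/* claim the hook */
	return 0;
}
```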
| @@ -621,12 +621,6 @@ static void maple_port_rescan(void) | |||
| 621 | if (checked[i] == false) { | 621 | if (checked[i] == false) { |
| 622 | fullscan = 0; | 622 | fullscan = 0; |
| 623 | mdev = baseunits[i]; | 623 | mdev = baseunits[i]; |
| 624 | /* | ||
| 625 | * test lock in case scan has failed | ||
| 626 | * but device is still locked | ||
| 627 | */ | ||
| 628 | if (mutex_is_locked(&mdev->mq->mutex)) | ||
| 629 | mutex_unlock(&mdev->mq->mutex); | ||
| 630 | maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, | 624 | maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, |
| 631 | 0, NULL); | 625 | 0, NULL); |
| 632 | } | 626 | } |
| @@ -637,7 +631,7 @@ static void maple_port_rescan(void) | |||
| 637 | static void maple_dma_handler(struct work_struct *work) | 631 | static void maple_dma_handler(struct work_struct *work) |
| 638 | { | 632 | { |
| 639 | struct mapleq *mq, *nmq; | 633 | struct mapleq *mq, *nmq; |
| 640 | struct maple_device *dev; | 634 | struct maple_device *mdev; |
| 641 | char *recvbuf; | 635 | char *recvbuf; |
| 642 | enum maple_code code; | 636 | enum maple_code code; |
| 643 | 637 | ||
| @@ -646,43 +640,56 @@ static void maple_dma_handler(struct work_struct *work) | |||
| 646 | ctrl_outl(0, MAPLE_ENABLE); | 640 | ctrl_outl(0, MAPLE_ENABLE); |
| 647 | if (!list_empty(&maple_sentq)) { | 641 | if (!list_empty(&maple_sentq)) { |
| 648 | list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { | 642 | list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { |
| 649 | recvbuf = mq->recvbuf; | 643 | mdev = mq->dev; |
| 644 | recvbuf = mq->recvbuf->buf; | ||
| 645 | dma_cache_sync(&mdev->dev, recvbuf, 0x400, | ||
| 646 | DMA_FROM_DEVICE); | ||
| 650 | code = recvbuf[0]; | 647 | code = recvbuf[0]; |
| 651 | dev = mq->dev; | ||
| 652 | kfree(mq->sendbuf); | 648 | kfree(mq->sendbuf); |
| 653 | mutex_unlock(&mq->mutex); | ||
| 654 | list_del_init(&mq->list); | 649 | list_del_init(&mq->list); |
| 655 | |||
| 656 | switch (code) { | 650 | switch (code) { |
| 657 | case MAPLE_RESPONSE_NONE: | 651 | case MAPLE_RESPONSE_NONE: |
| 658 | maple_response_none(dev, mq); | 652 | maple_response_none(mdev); |
| 659 | break; | 653 | break; |
| 660 | 654 | ||
| 661 | case MAPLE_RESPONSE_DEVINFO: | 655 | case MAPLE_RESPONSE_DEVINFO: |
| 662 | maple_response_devinfo(dev, recvbuf); | 656 | maple_response_devinfo(mdev, recvbuf); |
| 657 | atomic_set(&mdev->busy, 0); | ||
| 663 | break; | 658 | break; |
| 664 | 659 | ||
| 665 | case MAPLE_RESPONSE_DATATRF: | 660 | case MAPLE_RESPONSE_DATATRF: |
| 666 | if (dev->callback) | 661 | if (mdev->callback) |
| 667 | dev->callback(mq); | 662 | mdev->callback(mq); |
| 663 | atomic_set(&mdev->busy, 0); | ||
| 664 | wake_up(&mdev->maple_wait); | ||
| 668 | break; | 665 | break; |
| 669 | 666 | ||
| 670 | case MAPLE_RESPONSE_FILEERR: | 667 | case MAPLE_RESPONSE_FILEERR: |
| 668 | maple_response_fileerr(mdev, recvbuf); | ||
| 669 | atomic_set(&mdev->busy, 0); | ||
| 670 | wake_up(&mdev->maple_wait); | ||
| 671 | break; | ||
| 672 | |||
| 671 | case MAPLE_RESPONSE_AGAIN: | 673 | case MAPLE_RESPONSE_AGAIN: |
| 672 | case MAPLE_RESPONSE_BADCMD: | 674 | case MAPLE_RESPONSE_BADCMD: |
| 673 | case MAPLE_RESPONSE_BADFUNC: | 675 | case MAPLE_RESPONSE_BADFUNC: |
| 674 | printk(KERN_DEBUG | 676 | dev_warn(&mdev->dev, "non-fatal error" |
| 675 | "Maple non-fatal error 0x%X\n", | 677 | " 0x%X at (%d, %d)\n", code, |
| 676 | code); | 678 | mdev->port, mdev->unit); |
| 679 | atomic_set(&mdev->busy, 0); | ||
| 677 | break; | 680 | break; |
| 678 | 681 | ||
| 679 | case MAPLE_RESPONSE_ALLINFO: | 682 | case MAPLE_RESPONSE_ALLINFO: |
| 680 | printk(KERN_DEBUG | 683 | dev_notice(&mdev->dev, "extended" |
| 681 | "Maple - extended device information" | 684 | " device information request for (%d, %d)" |
| 682 | " not supported\n"); | 685 | " but call is not supported\n", mdev->port, |
| 686 | mdev->unit); | ||
| 687 | atomic_set(&mdev->busy, 0); | ||
| 683 | break; | 688 | break; |
| 684 | 689 | ||
| 685 | case MAPLE_RESPONSE_OK: | 690 | case MAPLE_RESPONSE_OK: |
| 691 | atomic_set(&mdev->busy, 0); | ||
| 692 | wake_up(&mdev->maple_wait); | ||
| 686 | break; | 693 | break; |
| 687 | 694 | ||
| 688 | default: | 695 | default: |
| @@ -699,20 +706,19 @@ static void maple_dma_handler(struct work_struct *work) | |||
| 699 | if (!fullscan) | 706 | if (!fullscan) |
| 700 | maple_port_rescan(); | 707 | maple_port_rescan(); |
| 701 | /* mark that we have been through the first scan */ | 708 | /* mark that we have been through the first scan */ |
| 702 | if (started == 0) | 709 | started = 1; |
| 703 | started = 1; | ||
| 704 | } | 710 | } |
| 705 | maplebus_dma_reset(); | 711 | maple_send(); |
| 706 | } | 712 | } |
| 707 | 713 | ||
| 708 | static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id) | 714 | static irqreturn_t maple_dma_interrupt(int irq, void *dev_id) |
| 709 | { | 715 | { |
| 710 | /* Load everything into the bottom half */ | 716 | /* Load everything into the bottom half */ |
| 711 | schedule_work(&maple_dma_process); | 717 | schedule_work(&maple_dma_process); |
| 712 | return IRQ_HANDLED; | 718 | return IRQ_HANDLED; |
| 713 | } | 719 | } |
| 714 | 720 | ||
| 715 | static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) | 721 | static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id) |
| 716 | { | 722 | { |
| 717 | schedule_work(&maple_vblank_process); | 723 | schedule_work(&maple_vblank_process); |
| 718 | return IRQ_HANDLED; | 724 | return IRQ_HANDLED; |
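Both hard-IRQ handlers do nothing except queue a work item: all bus traffic is handled in process context, where the bottom halves may take mutexes and sleep. The skeleton of that split (the work/handler names match this file; the declarations themselves are a sketch):

```c
#include <linux/interrupt.h>
#include <linux/workqueue.h>

static void maple_dma_handler(struct work_struct *work);

/* bind the bottom half to its handler at compile time */
static DECLARE_WORK(maple_dma_process, maple_dma_handler);

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
	/* hard-IRQ context: just defer; schedule_work() is IRQ-safe */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}
```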
| @@ -720,14 +726,14 @@ static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) | |||
| 720 | 726 | ||
| 721 | static int maple_set_dma_interrupt_handler(void) | 727 | static int maple_set_dma_interrupt_handler(void) |
| 722 | { | 728 | { |
| 723 | return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt, | 729 | return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt, |
| 724 | IRQF_SHARED, "maple bus DMA", &maple_dummy_driver); | 730 | IRQF_SHARED, "maple bus DMA", &maple_unsupported_device); |
| 725 | } | 731 | } |
| 726 | 732 | ||
| 727 | static int maple_set_vblank_interrupt_handler(void) | 733 | static int maple_set_vblank_interrupt_handler(void) |
| 728 | { | 734 | { |
| 729 | return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt, | 735 | return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt, |
| 730 | IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver); | 736 | IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device); |
| 731 | } | 737 | } |
| 732 | 738 | ||
| 733 | static int maple_get_dma_buffer(void) | 739 | static int maple_get_dma_buffer(void) |
| @@ -740,7 +746,7 @@ static int maple_get_dma_buffer(void) | |||
| 740 | return 0; | 746 | return 0; |
| 741 | } | 747 | } |
| 742 | 748 | ||
| 743 | static int match_maple_bus_driver(struct device *devptr, | 749 | static int maple_match_bus_driver(struct device *devptr, |
| 744 | struct device_driver *drvptr) | 750 | struct device_driver *drvptr) |
| 745 | { | 751 | { |
| 746 | struct maple_driver *maple_drv = to_maple_driver(drvptr); | 752 | struct maple_driver *maple_drv = to_maple_driver(drvptr); |
| @@ -765,22 +771,24 @@ static void maple_bus_release(struct device *dev) | |||
| 765 | { | 771 | { |
| 766 | } | 772 | } |
| 767 | 773 | ||
| 768 | static struct maple_driver maple_dummy_driver = { | 774 | static struct maple_driver maple_unsupported_device = { |
| 769 | .drv = { | 775 | .drv = { |
| 770 | .name = "maple_dummy_driver", | 776 | .name = "maple_unsupported_device", |
| 771 | .bus = &maple_bus_type, | 777 | .bus = &maple_bus_type, |
| 772 | }, | 778 | }, |
| 773 | }; | 779 | }; |
| 774 | 780 | /** | |
| 781 | * maple_bus_type - core maple bus structure | ||
| 782 | */ | ||
| 775 | struct bus_type maple_bus_type = { | 783 | struct bus_type maple_bus_type = { |
| 776 | .name = "maple", | 784 | .name = "maple", |
| 777 | .match = match_maple_bus_driver, | 785 | .match = maple_match_bus_driver, |
| 778 | .uevent = maple_bus_uevent, | 786 | .uevent = maple_bus_uevent, |
| 779 | }; | 787 | }; |
| 780 | EXPORT_SYMBOL_GPL(maple_bus_type); | 788 | EXPORT_SYMBOL_GPL(maple_bus_type); |
| 781 | 789 | ||
| 782 | static struct device maple_bus = { | 790 | static struct device maple_bus = { |
| 783 | .bus_id = "maple", | 791 | .init_name = "maple", |
| 784 | .release = maple_bus_release, | 792 | .release = maple_bus_release, |
| 785 | }; | 793 | }; |
| 786 | 794 | ||
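maple_bus_type.match decides, for each (device, driver) pair, whether the driver core should bind them. The body of maple_match_bus_driver() falls outside these hunks; the reconstruction below is hedged and may differ from the tree in detail:

```c
/* Hedged reconstruction: match a driver's advertised function mask
 * against the device's devinfo function word. */
static int maple_match_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *mdev = to_maple_dev(devptr);

	/* an all-ones function word marks an empty port: match nothing */
	if (mdev->devinfo.function == cpu_to_be32(0xFFFFFFFF))
		return 0;
	return (mdev->devinfo.function &
		cpu_to_be32(maple_drv->function)) ? 1 : 0;
}
```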
| @@ -788,7 +796,8 @@ static int __init maple_bus_init(void) | |||
| 788 | { | 796 | { |
| 789 | int retval, i; | 797 | int retval, i; |
| 790 | struct maple_device *mdev[MAPLE_PORTS]; | 798 | struct maple_device *mdev[MAPLE_PORTS]; |
| 791 | ctrl_outl(0, MAPLE_STATE); | 799 | |
| 800 | ctrl_outl(0, MAPLE_ENABLE); | ||
| 792 | 801 | ||
| 793 | retval = device_register(&maple_bus); | 802 | retval = device_register(&maple_bus); |
| 794 | if (retval) | 803 | if (retval) |
| @@ -798,36 +807,33 @@ static int __init maple_bus_init(void) | |||
| 798 | if (retval) | 807 | if (retval) |
| 799 | goto cleanup_device; | 808 | goto cleanup_device; |
| 800 | 809 | ||
| 801 | retval = driver_register(&maple_dummy_driver.drv); | 810 | retval = driver_register(&maple_unsupported_device.drv); |
| 802 | if (retval) | 811 | if (retval) |
| 803 | goto cleanup_bus; | 812 | goto cleanup_bus; |
| 804 | 813 | ||
| 805 | /* allocate memory for maple bus dma */ | 814 | /* allocate memory for maple bus dma */ |
| 806 | retval = maple_get_dma_buffer(); | 815 | retval = maple_get_dma_buffer(); |
| 807 | if (retval) { | 816 | if (retval) { |
| 808 | printk(KERN_INFO | 817 | dev_err(&maple_bus, "failed to allocate DMA buffers\n"); |
| 809 | "Maple bus: Failed to allocate Maple DMA buffers\n"); | ||
| 810 | goto cleanup_basic; | 818 | goto cleanup_basic; |
| 811 | } | 819 | } |
| 812 | 820 | ||
| 813 | /* set up DMA interrupt handler */ | 821 | /* set up DMA interrupt handler */ |
| 814 | retval = maple_set_dma_interrupt_handler(); | 822 | retval = maple_set_dma_interrupt_handler(); |
| 815 | if (retval) { | 823 | if (retval) { |
| 816 | printk(KERN_INFO | 824 | dev_err(&maple_bus, "bus failed to grab maple " |
| 817 | "Maple bus: Failed to grab maple DMA IRQ\n"); | 825 | "DMA IRQ\n"); |
| 818 | goto cleanup_dma; | 826 | goto cleanup_dma; |
| 819 | } | 827 | } |
| 820 | 828 | ||
| 821 | /* set up VBLANK interrupt handler */ | 829 | /* set up VBLANK interrupt handler */ |
| 822 | retval = maple_set_vblank_interrupt_handler(); | 830 | retval = maple_set_vblank_interrupt_handler(); |
| 823 | if (retval) { | 831 | if (retval) { |
| 824 | printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n"); | 832 | dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n"); |
| 825 | goto cleanup_irq; | 833 | goto cleanup_irq; |
| 826 | } | 834 | } |
| 827 | 835 | ||
| 828 | maple_queue_cache = | 836 | maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); |
| 829 | kmem_cache_create("maple_queue_cache", 0x400, 0, | ||
| 830 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 831 | 837 | ||
| 832 | if (!maple_queue_cache) | 838 | if (!maple_queue_cache) |
| 833 | goto cleanup_bothirqs; | 839 | goto cleanup_bothirqs; |
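KMEM_CACHE() derives the cache name, object size and alignment from the type itself, which is why the open-coded 0x400 length could go. Per include/linux/slab.h, the call above expands, in essence, to:

```c
/* Equivalent long-hand form of KMEM_CACHE(maple_buffer,
 * SLAB_HWCACHE_ALIGN), modulo macro stringification: */
maple_queue_cache = kmem_cache_create("maple_buffer",
				      sizeof(struct maple_buffer),
				      __alignof__(struct maple_buffer),
				      SLAB_HWCACHE_ALIGN, NULL);
```

Tying the cache to struct maple_buffer (introduced elsewhere in this series) means the cache can no longer fall out of sync with the receive-buffer size.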
| @@ -838,23 +844,23 @@ static int __init maple_bus_init(void) | |||
| 838 | /* setup maple ports */ | 844 | /* setup maple ports */ |
| 839 | for (i = 0; i < MAPLE_PORTS; i++) { | 845 | for (i = 0; i < MAPLE_PORTS; i++) { |
| 840 | checked[i] = false; | 846 | checked[i] = false; |
| 847 | empty[i] = false; | ||
| 841 | mdev[i] = maple_alloc_dev(i, 0); | 848 | mdev[i] = maple_alloc_dev(i, 0); |
| 842 | baseunits[i] = mdev[i]; | ||
| 843 | if (!mdev[i]) { | 849 | if (!mdev[i]) { |
| 844 | while (i-- > 0) | 850 | while (i-- > 0) |
| 845 | maple_free_dev(mdev[i]); | 851 | maple_free_dev(mdev[i]); |
| 846 | goto cleanup_cache; | 852 | goto cleanup_cache; |
| 847 | } | 853 | } |
| 854 | baseunits[i] = mdev[i]; | ||
| 855 | atomic_set(&mdev[i]->busy, 1); | ||
| 848 | maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); | 856 | maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); |
| 849 | subdevice_map[i] = 0; | 857 | subdevice_map[i] = 0; |
| 850 | } | 858 | } |
| 851 | 859 | ||
| 852 | /* setup maplebus hardware */ | 860 | maple_pnp_time = jiffies + HZ; |
| 853 | maplebus_dma_reset(); | 861 | /* prepare initial queue */ |
| 854 | /* initial detection */ | ||
| 855 | maple_send(); | 862 | maple_send(); |
| 856 | maple_pnp_time = jiffies; | 863 | dev_info(&maple_bus, "bus core now registered\n"); |
| 857 | printk(KERN_INFO "Maple bus core now registered.\n"); | ||
| 858 | 864 | ||
| 859 | return 0; | 865 | return 0; |
| 860 | 866 | ||
| @@ -871,7 +877,7 @@ cleanup_dma: | |||
| 871 | free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); | 877 | free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); |
| 872 | 878 | ||
| 873 | cleanup_basic: | 879 | cleanup_basic: |
| 874 | driver_unregister(&maple_dummy_driver.drv); | 880 | driver_unregister(&maple_unsupported_device.drv); |
| 875 | 881 | ||
| 876 | cleanup_bus: | 882 | cleanup_bus: |
| 877 | bus_unregister(&maple_bus_type); | 883 | bus_unregister(&maple_bus_type); |
| @@ -880,7 +886,7 @@ cleanup_device: | |||
| 880 | device_unregister(&maple_bus); | 886 | device_unregister(&maple_bus); |
| 881 | 887 | ||
| 882 | cleanup: | 888 | cleanup: |
| 883 | printk(KERN_INFO "Maple bus registration failed\n"); | 889 | printk(KERN_ERR "Maple bus registration failed\n"); |
| 884 | return retval; | 890 | return retval; |
| 885 | } | 891 | } |
| 886 | /* Push init to later to ensure hardware gets detected */ | 892 | /* Push init to later to ensure hardware gets detected */ |
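The trailing comment refers to the initcall level at which maple_bus_init() is registered; the registration line itself is outside the hunk. Deferring to a later level would look like the line below, where the specific level is an assumption, not taken from this diff:

```c
/* Assumption: a later-than-subsys initcall level, so the Dreamcast
 * hardware is up before the bus first scans it. */
fs_initcall(maple_bus_init);
```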
diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c index 4d0282b821b5..2d9e7f3d5611 100644 --- a/drivers/sh/superhyway/superhyway.c +++ b/drivers/sh/superhyway/superhyway.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | static int superhyway_devices; | 22 | static int superhyway_devices; |
| 23 | 23 | ||
| 24 | static struct device superhyway_bus_device = { | 24 | static struct device superhyway_bus_device = { |
| 25 | .bus_id = "superhyway", | 25 | .init_name = "superhyway", |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | static void superhyway_device_release(struct device *dev) | 28 | static void superhyway_device_release(struct device *dev) |
| @@ -83,7 +83,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev, | |||
| 83 | dev->id.id = dev->vcr.mod_id; | 83 | dev->id.id = dev->vcr.mod_id; |
| 84 | 84 | ||
| 85 | sprintf(dev->name, "SuperHyway device %04x", dev->id.id); | 85 | sprintf(dev->name, "SuperHyway device %04x", dev->id.id); |
| 86 | sprintf(dev->dev.bus_id, "%02x", superhyway_devices); | 86 | dev_set_name(&dev->dev, "%02x", superhyway_devices); |
| 87 | 87 | ||
| 88 | superhyway_devices++; | 88 | superhyway_devices++; |
| 89 | 89 | ||
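Both superhyway hunks are the same driver-core migration seen in maple.c: the fixed-size bus_id array was removed from struct device, so static devices name themselves with .init_name and dynamically created ones call dev_set_name(). In sketch form:

```c
/* Static device: name it at definition time. */
static struct device superhyway_bus_device = {
	.init_name = "superhyway",
};

/* Dynamic device: dev_set_name() takes printf-style arguments and
 * allocates the string; new code should check its return value. */
static int superhyway_name_device(struct device *dev, int index)
{
	return dev_set_name(dev, "%02x", index);
}
```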
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 83babb0a1df7..c6c816b7ecb5 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
| @@ -47,6 +47,7 @@ config USB_ARCH_HAS_OHCI | |||
| 47 | default y if CPU_SUBTYPE_SH7720 | 47 | default y if CPU_SUBTYPE_SH7720 |
| 48 | default y if CPU_SUBTYPE_SH7721 | 48 | default y if CPU_SUBTYPE_SH7721 |
| 49 | default y if CPU_SUBTYPE_SH7763 | 49 | default y if CPU_SUBTYPE_SH7763 |
| 50 | default y if CPU_SUBTYPE_SH7786 | ||
| 50 | # more: | 51 | # more: |
| 51 | default PCI | 52 | default PCI |
| 52 | 53 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 5cf5f1eca4f4..7658589edb1c 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
| @@ -1049,7 +1049,8 @@ MODULE_LICENSE ("GPL"); | |||
| 1049 | 1049 | ||
| 1050 | #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 1050 | #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
| 1051 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | 1051 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ |
| 1052 | defined(CONFIG_CPU_SUBTYPE_SH7763) | 1052 | defined(CONFIG_CPU_SUBTYPE_SH7763) || \ |
| 1053 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
| 1053 | #include "ohci-sh.c" | 1054 | #include "ohci-sh.c" |
| 1054 | #define PLATFORM_DRIVER ohci_hcd_sh_driver | 1055 | #define PLATFORM_DRIVER ohci_hcd_sh_driver |
| 1055 | #endif | 1056 | #endif |
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index 0a0fd48a8566..53f8f1100e81 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c | |||
| @@ -61,7 +61,7 @@ | |||
| 61 | #include <mach-dreamcast/mach/sysasic.h> | 61 | #include <mach-dreamcast/mach/sysasic.h> |
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | #ifdef CONFIG_SH_DMA | 64 | #ifdef CONFIG_PVR2_DMA |
| 65 | #include <linux/pagemap.h> | 65 | #include <linux/pagemap.h> |
| 66 | #include <mach/dma.h> | 66 | #include <mach/dma.h> |
| 67 | #include <asm/dma.h> | 67 | #include <asm/dma.h> |
| @@ -188,7 +188,7 @@ static unsigned int is_blanked = 0; /* Is the screen blanked? */ | |||
| 188 | static unsigned long pvr2fb_map; | 188 | static unsigned long pvr2fb_map; |
| 189 | #endif | 189 | #endif |
| 190 | 190 | ||
| 191 | #ifdef CONFIG_SH_DMA | 191 | #ifdef CONFIG_PVR2_DMA |
| 192 | static unsigned int shdma = PVR2_CASCADE_CHAN; | 192 | static unsigned int shdma = PVR2_CASCADE_CHAN; |
| 193 | static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; | 193 | static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; |
| 194 | #endif | 194 | #endif |
| @@ -207,7 +207,7 @@ static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id); | |||
| 207 | static int pvr2_init_cable(void); | 207 | static int pvr2_init_cable(void); |
| 208 | static int pvr2_get_param(const struct pvr2_params *p, const char *s, | 208 | static int pvr2_get_param(const struct pvr2_params *p, const char *s, |
| 209 | int val, int size); | 209 | int val, int size); |
| 210 | #ifdef CONFIG_SH_DMA | 210 | #ifdef CONFIG_PVR2_DMA |
| 211 | static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, | 211 | static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, |
| 212 | size_t count, loff_t *ppos); | 212 | size_t count, loff_t *ppos); |
| 213 | #endif | 213 | #endif |
| @@ -218,7 +218,7 @@ static struct fb_ops pvr2fb_ops = { | |||
| 218 | .fb_blank = pvr2fb_blank, | 218 | .fb_blank = pvr2fb_blank, |
| 219 | .fb_check_var = pvr2fb_check_var, | 219 | .fb_check_var = pvr2fb_check_var, |
| 220 | .fb_set_par = pvr2fb_set_par, | 220 | .fb_set_par = pvr2fb_set_par, |
| 221 | #ifdef CONFIG_SH_DMA | 221 | #ifdef CONFIG_PVR2_DMA |
| 222 | .fb_write = pvr2fb_write, | 222 | .fb_write = pvr2fb_write, |
| 223 | #endif | 223 | #endif |
| 224 | .fb_fillrect = cfb_fillrect, | 224 | .fb_fillrect = cfb_fillrect, |
| @@ -671,7 +671,7 @@ static int pvr2_init_cable(void) | |||
| 671 | return cable_type; | 671 | return cable_type; |
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | #ifdef CONFIG_SH_DMA | 674 | #ifdef CONFIG_PVR2_DMA |
| 675 | static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, | 675 | static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, |
| 676 | size_t count, loff_t *ppos) | 676 | size_t count, loff_t *ppos) |
| 677 | { | 677 | { |
| @@ -743,7 +743,7 @@ out_unmap: | |||
| 743 | 743 | ||
| 744 | return ret; | 744 | return ret; |
| 745 | } | 745 | } |
| 746 | #endif /* CONFIG_SH_DMA */ | 746 | #endif /* CONFIG_PVR2_DMA */ |
| 747 | 747 | ||
| 748 | /** | 748 | /** |
| 749 | * pvr2fb_common_init | 749 | * pvr2fb_common_init |
| @@ -893,7 +893,7 @@ static int __init pvr2fb_dc_init(void) | |||
| 893 | return -EBUSY; | 893 | return -EBUSY; |
| 894 | } | 894 | } |
| 895 | 895 | ||
| 896 | #ifdef CONFIG_SH_DMA | 896 | #ifdef CONFIG_PVR2_DMA |
| 897 | if (request_dma(pvr2dma, "pvr2") != 0) { | 897 | if (request_dma(pvr2dma, "pvr2") != 0) { |
| 898 | free_irq(HW_EVENT_VSYNC, 0); | 898 | free_irq(HW_EVENT_VSYNC, 0); |
| 899 | return -EBUSY; | 899 | return -EBUSY; |
| @@ -915,7 +915,7 @@ static void __exit pvr2fb_dc_exit(void) | |||
| 915 | } | 915 | } |
| 916 | 916 | ||
| 917 | free_irq(HW_EVENT_VSYNC, 0); | 917 | free_irq(HW_EVENT_VSYNC, 0); |
| 918 | #ifdef CONFIG_SH_DMA | 918 | #ifdef CONFIG_PVR2_DMA |
| 919 | free_dma(pvr2dma); | 919 | free_dma(pvr2dma); |
| 920 | #endif | 920 | #endif |
| 921 | } | 921 | } |
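The pvr2fb changes are a straight symbol rename: the accelerated write path is now gated on CONFIG_PVR2_DMA, made independently configurable earlier in this series, rather than on the broader CONFIG_SH_DMA. The gating pattern compiles the method out entirely when the option is unset; a trimmed sketch (the full fb_ops carries more members than shown):

```c
#ifdef CONFIG_PVR2_DMA
static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
			    size_t count, loff_t *ppos);
#endif

static struct fb_ops pvr2fb_ops = {
	.owner		= THIS_MODULE,
#ifdef CONFIG_PVR2_DMA
	.fb_write	= pvr2fb_write,	/* DMA-backed fast path */
#endif
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};
```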
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 2c5d069e5f06..b433b8ac76d9 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
| @@ -33,6 +33,8 @@ struct sh_mobile_lcdc_chan { | |||
| 33 | struct fb_info info; | 33 | struct fb_info info; |
| 34 | dma_addr_t dma_handle; | 34 | dma_addr_t dma_handle; |
| 35 | struct fb_deferred_io defio; | 35 | struct fb_deferred_io defio; |
| 36 | unsigned long frame_end; | ||
| 37 | wait_queue_head_t frame_end_wait; | ||
| 36 | }; | 38 | }; |
| 37 | 39 | ||
| 38 | struct sh_mobile_lcdc_priv { | 40 | struct sh_mobile_lcdc_priv { |
| @@ -226,7 +228,10 @@ static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) | |||
| 226 | static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) | 228 | static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) |
| 227 | { | 229 | { |
| 228 | struct sh_mobile_lcdc_priv *priv = data; | 230 | struct sh_mobile_lcdc_priv *priv = data; |
| 231 | struct sh_mobile_lcdc_chan *ch; | ||
| 229 | unsigned long tmp; | 232 | unsigned long tmp; |
| 233 | int is_sub; | ||
| 234 | int k; | ||
| 230 | 235 | ||
| 231 | /* acknowledge interrupt */ | 236 | /* acknowledge interrupt */ |
| 232 | tmp = lcdc_read(priv, _LDINTR); | 237 | tmp = lcdc_read(priv, _LDINTR); |
| @@ -234,8 +239,24 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) | |||
| 234 | tmp |= 0x000000ff ^ LDINTR_FS; /* status in low 8 */ | 239 | tmp |= 0x000000ff ^ LDINTR_FS; /* status in low 8 */ |
| 235 | lcdc_write(priv, _LDINTR, tmp); | 240 | lcdc_write(priv, _LDINTR, tmp); |
| 236 | 241 | ||
| 237 | /* disable clocks */ | 242 | /* figure out if this interrupt is for main or sub lcd */ |
| 238 | sh_mobile_lcdc_clk_off(priv); | 243 | is_sub = (lcdc_read(priv, _LDSR) & (1 << 10)) ? 1 : 0; |
| 244 | |||
| 245 | /* wake up channel and disable clocks */ | ||
| 246 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { | ||
| 247 | ch = &priv->ch[k]; | ||
| 248 | |||
| 249 | if (!ch->enabled) | ||
| 250 | continue; | ||
| 251 | |||
| 252 | if (is_sub == lcdc_chan_is_sublcd(ch)) { | ||
| 253 | ch->frame_end = 1; | ||
| 254 | wake_up(&ch->frame_end_wait); | ||
| 255 | |||
| 256 | sh_mobile_lcdc_clk_off(priv); | ||
| 257 | } | ||
| 258 | } | ||
| 259 | |||
| 239 | return IRQ_HANDLED; | 260 | return IRQ_HANDLED; |
| 240 | } | 261 | } |
| 241 | 262 | ||
| @@ -448,18 +469,27 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) | |||
| 448 | struct sh_mobile_lcdc_board_cfg *board_cfg; | 469 | struct sh_mobile_lcdc_board_cfg *board_cfg; |
| 449 | int k; | 470 | int k; |
| 450 | 471 | ||
| 451 | /* tell the board code to disable the panel */ | 472 | /* clean up deferred io and ask board code to disable panel */ |
| 452 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { | 473 | for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { |
| 453 | ch = &priv->ch[k]; | 474 | ch = &priv->ch[k]; |
| 454 | board_cfg = &ch->cfg.board_cfg; | ||
| 455 | if (board_cfg->display_off) | ||
| 456 | board_cfg->display_off(board_cfg->board_data); | ||
| 457 | 475 | ||
| 458 | /* cleanup deferred io if enabled */ | 476 | /* deferred io mode: |
| 477 | * flush the final frame and wait for the frame end interrupt, | ||
| 478 | * then clean up deferred io and re-enable the clock | ||
| 479 | */ | ||
| 459 | if (ch->info.fbdefio) { | 480 | if (ch->info.fbdefio) { |
| 481 | ch->frame_end = 0; | ||
| 482 | schedule_delayed_work(&ch->info.deferred_work, 0); | ||
| 483 | wait_event(ch->frame_end_wait, ch->frame_end); | ||
| 460 | fb_deferred_io_cleanup(&ch->info); | 484 | fb_deferred_io_cleanup(&ch->info); |
| 461 | ch->info.fbdefio = NULL; | 485 | ch->info.fbdefio = NULL; |
| 486 | sh_mobile_lcdc_clk_on(priv); | ||
| 462 | } | 487 | } |
| 488 | |||
| 489 | board_cfg = &ch->cfg.board_cfg; | ||
| 490 | if (board_cfg->display_off) | ||
| 491 | board_cfg->display_off(board_cfg->board_data); | ||
| 492 | |||
| 463 | } | 493 | } |
| 464 | 494 | ||
| 465 | /* stop the lcdc */ | 495 | /* stop the lcdc */ |
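The stop path now flushes the last deferred-io frame and sleeps until the interrupt handler shown earlier sets ch->frame_end and wakes ch->frame_end_wait. The generic shape of that handshake, as a self-contained sketch (a struct completion would express the same thing):

```c
#include <linux/interrupt.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(frame_end_wait);
static unsigned long frame_end;

static irqreturn_t frame_end_irq(int irq, void *data)
{
	frame_end = 1;			/* publish the event ... */
	wake_up(&frame_end_wait);	/* ... and wake the sleeper */
	return IRQ_HANDLED;
}

static void wait_for_frame_end(void)
{
	frame_end = 0;
	/* kick off the final transfer here, then block until the
	 * interrupt handler reports the controller idle */
	wait_event(frame_end_wait, frame_end);
}
```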
| @@ -652,6 +682,26 @@ static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp) | |||
| 652 | return 0; | 682 | return 0; |
| 653 | } | 683 | } |
| 654 | 684 | ||
| 685 | static int sh_mobile_lcdc_suspend(struct device *dev) | ||
| 686 | { | ||
| 687 | struct platform_device *pdev = to_platform_device(dev); | ||
| 688 | |||
| 689 | sh_mobile_lcdc_stop(platform_get_drvdata(pdev)); | ||
| 690 | return 0; | ||
| 691 | } | ||
| 692 | |||
| 693 | static int sh_mobile_lcdc_resume(struct device *dev) | ||
| 694 | { | ||
| 695 | struct platform_device *pdev = to_platform_device(dev); | ||
| 696 | |||
| 697 | return sh_mobile_lcdc_start(platform_get_drvdata(pdev)); | ||
| 698 | } | ||
| 699 | |||
| 700 | static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { | ||
| 701 | .suspend = sh_mobile_lcdc_suspend, | ||
| 702 | .resume = sh_mobile_lcdc_resume, | ||
| 703 | }; | ||
| 704 | |||
| 655 | static int sh_mobile_lcdc_remove(struct platform_device *pdev); | 705 | static int sh_mobile_lcdc_remove(struct platform_device *pdev); |
| 656 | 706 | ||
| 657 | static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | 707 | static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) |
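The new dev_pm_ops reuses the driver's own start/stop paths for system suspend and resume, and the final hunk wires it into the platform driver through .driver.pm. One editorial note: the ops table could be const, since the PM core only reads it; a sketch of the complete wiring:

```c
/* Same substance as the hunks in this diff, with the ops table
 * made const. */
static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
	.suspend = sh_mobile_lcdc_suspend,
	.resume  = sh_mobile_lcdc_resume,
};

static struct platform_driver sh_mobile_lcdc_driver = {
	.driver = {
		.name	= "sh_mobile_lcdc_fb",
		.owner	= THIS_MODULE,
		.pm	= &sh_mobile_lcdc_dev_pm_ops,
	},
	/* .probe/.remove as in the existing driver */
};
```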
| @@ -707,6 +757,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev) | |||
| 707 | dev_err(&pdev->dev, "unsupported interface type\n"); | 757 | dev_err(&pdev->dev, "unsupported interface type\n"); |
| 708 | goto err1; | 758 | goto err1; |
| 709 | } | 759 | } |
| 760 | init_waitqueue_head(&priv->ch[i].frame_end_wait); | ||
| 710 | 761 | ||
| 711 | switch (pdata->ch[i].chan) { | 762 | switch (pdata->ch[i].chan) { |
| 712 | case LCDC_CHAN_MAINLCD: | 763 | case LCDC_CHAN_MAINLCD: |
| @@ -860,6 +911,7 @@ static struct platform_driver sh_mobile_lcdc_driver = { | |||
| 860 | .driver = { | 911 | .driver = { |
| 861 | .name = "sh_mobile_lcdc_fb", | 912 | .name = "sh_mobile_lcdc_fb", |
| 862 | .owner = THIS_MODULE, | 913 | .owner = THIS_MODULE, |
| 914 | .pm = &sh_mobile_lcdc_dev_pm_ops, | ||
| 863 | }, | 915 | }, |
| 864 | .probe = sh_mobile_lcdc_probe, | 916 | .probe = sh_mobile_lcdc_probe, |
| 865 | .remove = sh_mobile_lcdc_remove, | 917 | .remove = sh_mobile_lcdc_remove, |
