58 files changed, 4269 insertions, 787 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index a703b9e9aeb9..d868a11c94a5 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -62,6 +62,21 @@ Description: CPU topology files that describe kernel limits related to | |||
62 | See Documentation/cputopology.txt for more information. | 62 | See Documentation/cputopology.txt for more information. |
63 | 63 | ||
64 | 64 | ||
65 | What: /sys/devices/system/cpu/probe | ||
66 | /sys/devices/system/cpu/release | ||
67 | Date: November 2009 | ||
68 | Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> | ||
69 | Description: Dynamic addition and removal of CPUs. This is not hotplug | ||
70 | removal; it is meant for complete removal/addition of the CPU | ||
71 | from the system. | ||
72 | |||
73 | probe: writes to this file will dynamically add a CPU to the | ||
74 | system. Information written to the file to add CPUs is | ||
75 | architecture specific. | ||
76 | |||
77 | release: writes to this file will dynamically remove a CPU from | ||
78 | the system. Information written to the file to remove CPUs | ||
79 | is architecture specific. | ||
65 | 80 | ||
66 | What: /sys/devices/system/cpu/cpu#/node | 81 | What: /sys/devices/system/cpu/cpu#/node |
67 | Date: October 2009 | 82 | Date: October 2009 |
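For illustration, a minimal userspace sketch of driving the new probe interface; the argument string below is a placeholder, since the format the kernel accepts is architecture specific, and the release file follows the same pattern:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write an architecture-specific argument to the cpu probe file. */
static int cpu_probe(const char *arg)
{
	int fd = open("/sys/devices/system/cpu/probe", O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return -1;
	}
	n = write(fd, arg, strlen(arg));
	if (n < 0)
		perror("write");
	close(fd);
	return n < 0 ? -1 : 0;
}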
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
index 8447fd7090d0..ddd5ee32ea63 100644
--- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
@@ -103,7 +103,22 @@ fsl,mpc5200-gpt nodes | |||
103 | --------------------- | 103 | --------------------- |
104 | On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board | 104 | On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board |
105 | design supports the internal wdt, then the device node for GPT0 should | 105 | design supports the internal wdt, then the device node for GPT0 should |
106 | include the empty property 'fsl,has-wdt'. | 106 | include the empty property 'fsl,has-wdt'. Note that this does not activate |
107 | the watchdog. The timer will function as a GPT if the timer api is used, and | ||
108 | it will function as watchdog if the watchdog device is used. The watchdog | ||
109 | mode has priority over the gpt mode, i.e. if the watchdog is activated, any | ||
110 | gpt api call to this timer will fail with -EBUSY. | ||
111 | |||
112 | If you add the property | ||
113 | fsl,wdt-on-boot = <n>; | ||
114 | GPT0 will be marked as in-use watchdog, i.e. blocking every gpt access to it. | ||
115 | If n>0, the watchdog is started with a timeout of n seconds. If n=0, the | ||
116 | configuration of the watchdog is not touched. This is useful in two cases: | ||
117 | - just mark GPT0 as watchdog, blocking gpt accesses, and configure it later; | ||
118 | - do not touch a configuration assigned by the boot loader which supervises | ||
119 | the boot process itself. | ||
120 | |||
121 | The watchdog will respect the CONFIG_WATCHDOG_NOWAYOUT option. | ||
107 | 122 | ||
108 | An mpc5200-gpt can be used as a single line GPIO controller. To do so, | 123 | An mpc5200-gpt can be used as a single line GPIO controller. To do so, |
109 | add the following properties to the gpt node: | 124 | add the following properties to the gpt node: |
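As a rough sketch of the precedence rule described above: a kernel client asking GPT0 for a timer sees -EBUSY while the watchdog owns it. The helpers used here (mpc52xx_gpt_from_irq, mpc52xx_gpt_start_timer) are the ones added to asm/mpc52xx.h later in this series; the 10 ms period is arbitrary.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/mpc52xx.h>

/* Sketch: request a 10 ms continuous timer on the GPT behind 'virq'. */
static int demo_start_periodic(int virq)
{
	struct mpc52xx_gpt_priv *gpt = mpc52xx_gpt_from_irq(virq);
	int ret;

	if (!gpt)
		return -ENODEV;

	ret = mpc52xx_gpt_start_timer(gpt, 10000000ULL, 1);
	if (ret == -EBUSY)
		pr_info("GPT0 is locked in watchdog mode\n");
	return ret;
}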
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5dbd375a3f2a..0df57466e783 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -320,6 +320,10 @@ config HOTPLUG_CPU | |||
320 | 320 | ||
321 | Say N if you are unsure. | 321 | Say N if you are unsure. |
322 | 322 | ||
323 | config ARCH_CPU_PROBE_RELEASE | ||
324 | def_bool y | ||
325 | depends on HOTPLUG_CPU | ||
326 | |||
323 | config ARCH_ENABLE_MEMORY_HOTPLUG | 327 | config ARCH_ENABLE_MEMORY_HOTPLUG |
324 | def_bool y | 328 | def_bool y |
325 | 329 | ||
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index c9ca97f43bc1..81f3b0b5601e 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -47,7 +47,23 @@ struct kvm_regs { | |||
47 | 47 | ||
48 | struct kvm_sregs { | 48 | struct kvm_sregs { |
49 | __u32 pvr; | 49 | __u32 pvr; |
50 | char pad[1020]; | 50 | union { |
51 | struct { | ||
52 | __u64 sdr1; | ||
53 | struct { | ||
54 | struct { | ||
55 | __u64 slbe; | ||
56 | __u64 slbv; | ||
57 | } slb[64]; | ||
58 | } ppc64; | ||
59 | struct { | ||
60 | __u32 sr[16]; | ||
61 | __u64 ibat[8]; | ||
62 | __u64 dbat[8]; | ||
63 | } ppc32; | ||
64 | } s; | ||
65 | __u8 pad[1020]; | ||
66 | } u; | ||
51 | }; | 67 | }; |
52 | 68 | ||
53 | struct kvm_fpu { | 69 | struct kvm_fpu { |
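A sketch of how a userspace caller might read the reworked layout after KVM_GET_SREGS; choosing between the ppc64 and ppc32 views based on the PVR is an assumption of this example, not something the header itself mandates.

#include <linux/kvm.h>
#include <stdio.h>

/* Dump segment state returned by KVM_GET_SREGS (illustrative only). */
static void dump_sregs(const struct kvm_sregs *s, int has_slb)
{
	int i;

	printf("pvr %08x sdr1 %016llx\n", s->pvr,
	       (unsigned long long)s->u.s.sdr1);
	if (has_slb) {
		for (i = 0; i < 64; i++)
			printf("slb[%2d] e=%016llx v=%016llx\n", i,
			       (unsigned long long)s->u.s.ppc64.slb[i].slbe,
			       (unsigned long long)s->u.s.ppc64.slb[i].slbv);
	} else {
		for (i = 0; i < 16; i++)
			printf("sr[%2d] = %08x\n", i, s->u.s.ppc32.sr[i]);
	}
}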
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 19ddb352fd0f..af2abe74f544 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -87,6 +87,7 @@ | |||
87 | #define BOOK3S_IRQPRIO_MAX 16 | 87 | #define BOOK3S_IRQPRIO_MAX 16 |
88 | 88 | ||
89 | #define BOOK3S_HFLAG_DCBZ32 0x1 | 89 | #define BOOK3S_HFLAG_DCBZ32 0x1 |
90 | #define BOOK3S_HFLAG_SLB 0x2 | ||
90 | 91 | ||
91 | #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ | 92 | #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ |
92 | #define RESUME_FLAG_HOST (1<<1) /* Resume host? */ | 93 | #define RESUME_FLAG_HOST (1<<1) /* Resume host? */ |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index c6011336371e..74b7369770d0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -46,6 +46,7 @@ struct kvmppc_sr { | |||
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct kvmppc_bat { | 48 | struct kvmppc_bat { |
49 | u64 raw; | ||
49 | u32 bepi; | 50 | u32 bepi; |
50 | u32 bepi_mask; | 51 | u32 bepi_mask; |
51 | bool vs; | 52 | bool vs; |
@@ -113,6 +114,8 @@ extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, boo | |||
113 | extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); | 114 | extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); |
114 | extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); | 115 | extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); |
115 | extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); | 116 | extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); |
117 | extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, | ||
118 | bool upper, u32 val); | ||
116 | 119 | ||
117 | extern u32 kvmppc_trampoline_lowmem; | 120 | extern u32 kvmppc_trampoline_lowmem; |
118 | extern u32 kvmppc_trampoline_enter; | 121 | extern u32 kvmppc_trampoline_enter; |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 9efa2be78331..9f0fc9e6ce0d 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -266,6 +266,11 @@ struct machdep_calls { | |||
266 | void (*suspend_disable_irqs)(void); | 266 | void (*suspend_disable_irqs)(void); |
267 | void (*suspend_enable_irqs)(void); | 267 | void (*suspend_enable_irqs)(void); |
268 | #endif | 268 | #endif |
269 | |||
270 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
271 | ssize_t (*cpu_probe)(const char *, size_t); | ||
272 | ssize_t (*cpu_release)(const char *, size_t); | ||
273 | #endif | ||
269 | }; | 274 | }; |
270 | 275 | ||
271 | extern void e500_idle(void); | 276 | extern void e500_idle(void); |
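A sketch of how a platform could wire the two new hooks; every name prefixed example_ is hypothetical and not part of this patch.

#include <linux/kernel.h>
#include <asm/machdep.h>

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/* Parse the architecture-specific argument and bring a CPU into the system. */
static ssize_t example_cpu_probe(const char *buf, size_t count)
{
	/* ... platform-specific addition ... */
	return count;
}

/* Remove a CPU from the system completely. */
static ssize_t example_cpu_release(const char *buf, size_t count)
{
	/* ... platform-specific removal ... */
	return count;
}
#endif

define_machine(example) {
	.name		= "Example Platform",
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	.cpu_probe	= example_cpu_probe,
	.cpu_release	= example_cpu_release,
#endif
};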
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 079c06eae446..a062c57696d0 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -39,6 +39,7 @@ struct macio_dev | |||
39 | struct macio_bus *bus; /* macio bus this device is on */ | 39 | struct macio_bus *bus; /* macio bus this device is on */ |
40 | struct macio_dev *media_bay; /* Device is part of a media bay */ | 40 | struct macio_dev *media_bay; /* Device is part of a media bay */ |
41 | struct of_device ofdev; | 41 | struct of_device ofdev; |
42 | struct device_dma_parameters dma_parms; /* ide needs that */ | ||
42 | int n_resources; | 43 | int n_resources; |
43 | struct resource resource[MACIO_DEV_COUNT_RESOURCES]; | 44 | struct resource resource[MACIO_DEV_COUNT_RESOURCES]; |
44 | int n_interrupts; | 45 | int n_interrupts; |
@@ -78,6 +79,8 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour | |||
78 | return res->end - res->start + 1; | 79 | return res->end - res->start + 1; |
79 | } | 80 | } |
80 | 81 | ||
82 | extern int macio_enable_devres(struct macio_dev *dev); | ||
83 | |||
81 | extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name); | 84 | extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name); |
82 | extern void macio_release_resource(struct macio_dev *dev, int resource_no); | 85 | extern void macio_release_resource(struct macio_dev *dev, int resource_no); |
83 | extern int macio_request_resources(struct macio_dev *dev, const char *name); | 86 | extern int macio_request_resources(struct macio_dev *dev, const char *name); |
@@ -131,6 +134,9 @@ struct macio_driver | |||
131 | int (*resume)(struct macio_dev* dev); | 134 | int (*resume)(struct macio_dev* dev); |
132 | int (*shutdown)(struct macio_dev* dev); | 135 | int (*shutdown)(struct macio_dev* dev); |
133 | 136 | ||
137 | #ifdef CONFIG_PMAC_MEDIABAY | ||
138 | void (*mediabay_event)(struct macio_dev* dev, int mb_state); | ||
139 | #endif | ||
134 | struct device_driver driver; | 140 | struct device_driver driver; |
135 | }; | 141 | }; |
136 | #define to_macio_driver(drv) container_of(drv,struct macio_driver, driver) | 142 | #define to_macio_driver(drv) container_of(drv,struct macio_driver, driver) |
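For context, a sketch of a macio client driver hooking the new mediabay_event callback; the example_ names are placeholders, and only the fields relevant to the hook are filled in.

#include <linux/device.h>
#include <asm/macio.h>
#include <asm/mediabay.h>

/* Called by the media-bay core when the bay content changes. */
static void example_mediabay_event(struct macio_dev *mdev, int mb_state)
{
	if (mb_state == MB_CD)
		dev_info(&mdev->ofdev.dev, "ATAPI device inserted\n");
}

static struct macio_driver example_driver = {
	.name	= "example-mb-client",
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= example_mediabay_event,
#endif
};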
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
index b2efb3325808..11037a4133ee 100644
--- a/arch/powerpc/include/asm/mediabay.h
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -17,26 +17,31 @@ | |||
17 | #define MB_POWER 6 /* media bay contains a Power device (???) */ | 17 | #define MB_POWER 6 /* media bay contains a Power device (???) */ |
18 | #define MB_NO 7 /* media bay contains nothing */ | 18 | #define MB_NO 7 /* media bay contains nothing */ |
19 | 19 | ||
20 | /* Number of bays in the machine or 0 */ | 20 | struct macio_dev; |
21 | extern int media_bay_count; | ||
22 | 21 | ||
23 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | 22 | #ifdef CONFIG_PMAC_MEDIABAY |
24 | #include <linux/ide.h> | ||
25 | 23 | ||
26 | int check_media_bay_by_base(unsigned long base, int what); | 24 | /* Check the content type of the bay, returns MB_NO if the bay is still |
27 | /* called by IDE PMAC host driver to register IDE controller for media bay */ | 25 | * transitioning |
28 | int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base, | 26 | */ |
29 | int irq, ide_hwif_t *hwif); | 27 | extern int check_media_bay(struct macio_dev *bay); |
30 | 28 | ||
31 | int check_media_bay(struct device_node *which_bay, int what); | 29 | /* The ATA driver uses the calls below to temporarily hold on the |
30 | * media bay callbacks while initializing the interface | ||
31 | */ | ||
32 | extern void lock_media_bay(struct macio_dev *bay); | ||
33 | extern void unlock_media_bay(struct macio_dev *bay); | ||
32 | 34 | ||
33 | #else | 35 | #else |
34 | 36 | ||
35 | static inline int check_media_bay(struct device_node *which_bay, int what) | 37 | static inline int check_media_bay(struct macio_dev *bay) |
36 | { | 38 | { |
37 | return -ENODEV; | 39 | return MB_NO; |
38 | } | 40 | } |
39 | 41 | ||
42 | static inline void lock_media_bay(struct macio_dev *bay) { } | ||
43 | static inline void unlock_media_bay(struct macio_dev *bay) { } | ||
44 | |||
40 | #endif | 45 | #endif |
41 | 46 | ||
42 | #endif /* __KERNEL__ */ | 47 | #endif /* __KERNEL__ */ |
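The intended calling pattern for the ATA host side looks roughly like this (a sketch, not taken from the patch); mdev->media_bay is the field of struct macio_dev seen above.

#include <linux/errno.h>
#include <asm/macio.h>
#include <asm/mediabay.h>

/* Hold off media-bay events while probing an interface inside a bay. */
static int example_init_in_bay(struct macio_dev *mdev)
{
	struct macio_dev *bay = mdev->media_bay;
	int content;

	if (!bay)
		return 0;	/* not inside a media bay */

	lock_media_bay(bay);
	content = check_media_bay(bay);
	if (content == MB_NO) {
		/* bay is empty or still transitioning */
		unlock_media_bay(bay);
		return -ENODEV;
	}
	/* ... bring up the interface while callbacks are held off ... */
	unlock_media_bay(bay);
	return 0;
}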
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 7514ec2f8540..2102b214a87c 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -373,6 +373,38 @@ extern void slb_set_size(u16 size); | |||
373 | 373 | ||
374 | #ifndef __ASSEMBLY__ | 374 | #ifndef __ASSEMBLY__ |
375 | 375 | ||
376 | #ifdef CONFIG_PPC_SUBPAGE_PROT | ||
377 | /* | ||
378 | * For the sub-page protection option, we extend the PGD with one of | ||
379 | * these. Basically we have a 3-level tree, with the top level being | ||
380 | * the protptrs array. To optimize speed and memory consumption when | ||
381 | * only addresses < 4GB are being protected, pointers to the first | ||
382 | * four pages of sub-page protection words are stored in the low_prot | ||
383 | * array. | ||
384 | * Each page of sub-page protection words protects 1GB (4 bytes | ||
385 | * protects 64k). For the 3-level tree, each page of pointers then | ||
386 | * protects 8TB. | ||
387 | */ | ||
388 | struct subpage_prot_table { | ||
389 | unsigned long maxaddr; /* only addresses < this are protected */ | ||
390 | unsigned int **protptrs[2]; | ||
391 | unsigned int *low_prot[4]; | ||
392 | }; | ||
393 | |||
394 | #define SBP_L1_BITS (PAGE_SHIFT - 2) | ||
395 | #define SBP_L2_BITS (PAGE_SHIFT - 3) | ||
396 | #define SBP_L1_COUNT (1 << SBP_L1_BITS) | ||
397 | #define SBP_L2_COUNT (1 << SBP_L2_BITS) | ||
398 | #define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS) | ||
399 | #define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) | ||
400 | |||
401 | extern void subpage_prot_free(struct mm_struct *mm); | ||
402 | extern void subpage_prot_init_new_context(struct mm_struct *mm); | ||
403 | #else | ||
404 | static inline void subpage_prot_free(struct mm_struct *mm) {} | ||
405 | static inline void subpage_prot_init_new_context(struct mm_struct *mm) { } | ||
406 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | ||
407 | |||
376 | typedef unsigned long mm_context_id_t; | 408 | typedef unsigned long mm_context_id_t; |
377 | 409 | ||
378 | typedef struct { | 410 | typedef struct { |
@@ -386,6 +418,9 @@ typedef struct { | |||
386 | u16 sllp; /* SLB page size encoding */ | 418 | u16 sllp; /* SLB page size encoding */ |
387 | #endif | 419 | #endif |
388 | unsigned long vdso_base; | 420 | unsigned long vdso_base; |
421 | #ifdef CONFIG_PPC_SUBPAGE_PROT | ||
422 | struct subpage_prot_table spt; | ||
423 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | ||
389 | } mm_context_t; | 424 | } mm_context_t; |
390 | 425 | ||
391 | 426 | ||
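A sketch of the lookup implied by the comment above; the in-tree walker is subpage_protection() in arch/powerpc/mm/hash_utils_64.c, and the bounds and NULL handling here are simplified.

/* Walk the 3-level subpage-protection tree for an effective address
 * (kernel context assumed; returns the 32-bit protection word that
 * covers the 64k page containing 'ea', or 0 for no restriction). */
static u32 example_spp_word(struct subpage_prot_table *spt, unsigned long ea)
{
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL)
		sbpm = spt->low_prot;		/* fast path below 4GB */
	else
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
	if (!sbpm)
		return 0;
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}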
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1b4f697abbdd..b664ce79a172 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -276,6 +276,53 @@ extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv); | |||
276 | extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node); | 276 | extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node); |
277 | extern void mpc52xx_restart(char *cmd); | 277 | extern void mpc52xx_restart(char *cmd); |
278 | 278 | ||
279 | /* mpc52xx_gpt.c */ | ||
280 | struct mpc52xx_gpt_priv; | ||
281 | extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq); | ||
282 | extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period, | ||
283 | int continuous); | ||
284 | extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt); | ||
285 | extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt); | ||
286 | |||
287 | /* mpc52xx_lpbfifo.c */ | ||
288 | #define MPC52XX_LPBFIFO_FLAG_READ (0) | ||
289 | #define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0) | ||
290 | #define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1) | ||
291 | #define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2) | ||
292 | #define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3) | ||
293 | |||
294 | struct mpc52xx_lpbfifo_request { | ||
295 | struct list_head list; | ||
296 | |||
297 | /* localplus bus address */ | ||
298 | unsigned int cs; | ||
299 | size_t offset; | ||
300 | |||
301 | /* Memory address */ | ||
302 | void *data; | ||
303 | phys_addr_t data_phys; | ||
304 | |||
305 | /* Details of transfer */ | ||
306 | size_t size; | ||
307 | size_t pos; /* current position of transfer */ | ||
308 | int flags; | ||
309 | |||
310 | /* What to do when finished */ | ||
311 | void (*callback)(struct mpc52xx_lpbfifo_request *); | ||
312 | |||
313 | void *priv; /* Driver private data */ | ||
314 | |||
315 | /* statistics */ | ||
316 | int irq_count; | ||
317 | int irq_ticks; | ||
318 | u8 last_byte; | ||
319 | int buffer_not_done_cnt; | ||
320 | }; | ||
321 | |||
322 | extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req); | ||
323 | extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req); | ||
324 | extern void mpc52xx_lpbfifo_poll(void); | ||
325 | |||
279 | /* mpc52xx_pic.c */ | 326 | /* mpc52xx_pic.c */ |
280 | extern void mpc52xx_init_irq(void); | 327 | extern void mpc52xx_init_irq(void); |
281 | extern unsigned int mpc52xx_get_irq(void); | 328 | extern unsigned int mpc52xx_get_irq(void); |
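A sketch of how a client might queue a transfer with the new LPB FIFO API; the chip select, offset, and the choice of a non-DMA (PIO) transfer are arbitrary for the example, and whether data_phys must be filled in for DMA transfers is a driver detail not shown here.

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/mpc52xx.h>

static struct mpc52xx_lpbfifo_request example_req;

/* Completion callback: runs when the FIFO transfer finishes. */
static void example_lpb_done(struct mpc52xx_lpbfifo_request *req)
{
	pr_info("lpbfifo: %zu of %zu bytes done\n", req->pos, req->size);
}

/* Queue a PIO write of 'len' bytes to chip select 1, offset 0. */
static int example_lpb_write(void *buf, size_t len)
{
	memset(&example_req, 0, sizeof(example_req));
	example_req.cs = 1;
	example_req.offset = 0;
	example_req.data = buf;
	example_req.size = len;
	example_req.flags = MPC52XX_LPBFIFO_FLAG_WRITE |
			    MPC52XX_LPBFIFO_FLAG_NO_DMA;
	example_req.callback = example_lpb_done;

	return mpc52xx_lpbfifo_submit(&example_req);
}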
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index e482e5352e69..d4b4bfa26fb3 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,6 +17,7 @@ | |||
17 | #ifdef CONFIG_PPC_PSERIES | 17 | #ifdef CONFIG_PPC_PSERIES |
18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); | 18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); |
19 | extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); | 19 | extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); |
20 | extern struct blocking_notifier_head pSeries_reconfig_chain; | ||
20 | #else /* !CONFIG_PPC_PSERIES */ | 21 | #else /* !CONFIG_PPC_PSERIES */ |
21 | static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) | 22 | static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) |
22 | { | 23 | { |
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 5c1cd73dafa8..605f5c5398d1 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -28,10 +28,6 @@ | |||
28 | */ | 28 | */ |
29 | #define MAX_PGTABLE_INDEX_SIZE 0xf | 29 | #define MAX_PGTABLE_INDEX_SIZE 0xf |
30 | 30 | ||
31 | #ifndef CONFIG_PPC_SUBPAGE_PROT | ||
32 | static inline void subpage_prot_free(pgd_t *pgd) {} | ||
33 | #endif | ||
34 | |||
35 | extern struct kmem_cache *pgtable_cache[]; | 31 | extern struct kmem_cache *pgtable_cache[]; |
36 | #define PGT_CACHE(shift) (pgtable_cache[(shift)-1]) | 32 | #define PGT_CACHE(shift) (pgtable_cache[(shift)-1]) |
37 | 33 | ||
@@ -42,7 +38,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
42 | 38 | ||
43 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 39 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
44 | { | 40 | { |
45 | subpage_prot_free(pgd); | ||
46 | kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); | 41 | kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); |
47 | } | 42 | } |
48 | 43 | ||
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 82b72207c51c..c4490f9c67c4 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -76,41 +76,4 @@ | |||
76 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ | 76 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ |
77 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) | 77 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) |
78 | 78 | ||
79 | |||
80 | #ifdef CONFIG_PPC_SUBPAGE_PROT | ||
81 | /* | ||
82 | * For the sub-page protection option, we extend the PGD with one of | ||
83 | * these. Basically we have a 3-level tree, with the top level being | ||
84 | * the protptrs array. To optimize speed and memory consumption when | ||
85 | * only addresses < 4GB are being protected, pointers to the first | ||
86 | * four pages of sub-page protection words are stored in the low_prot | ||
87 | * array. | ||
88 | * Each page of sub-page protection words protects 1GB (4 bytes | ||
89 | * protects 64k). For the 3-level tree, each page of pointers then | ||
90 | * protects 8TB. | ||
91 | */ | ||
92 | struct subpage_prot_table { | ||
93 | unsigned long maxaddr; /* only addresses < this are protected */ | ||
94 | unsigned int **protptrs[2]; | ||
95 | unsigned int *low_prot[4]; | ||
96 | }; | ||
97 | |||
98 | #undef PGD_TABLE_SIZE | ||
99 | #define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \ | ||
100 | sizeof(struct subpage_prot_table)) | ||
101 | |||
102 | #define SBP_L1_BITS (PAGE_SHIFT - 2) | ||
103 | #define SBP_L2_BITS (PAGE_SHIFT - 3) | ||
104 | #define SBP_L1_COUNT (1 << SBP_L1_BITS) | ||
105 | #define SBP_L2_COUNT (1 << SBP_L2_BITS) | ||
106 | #define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS) | ||
107 | #define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) | ||
108 | |||
109 | extern void subpage_prot_free(pgd_t *pgd); | ||
110 | |||
111 | static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | ||
112 | { | ||
113 | return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD); | ||
114 | } | ||
115 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | ||
116 | #endif /* __ASSEMBLY__ */ | 79 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 1882bf419fa6..8dc7547c2377 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src, | |||
161 | dest++; | 161 | dest++; |
162 | n--; | 162 | n--; |
163 | } | 163 | } |
164 | while(n > 4) { | 164 | while(n >= 4) { |
165 | *((u32 *)dest) = *((volatile u32 *)vsrc); | 165 | *((u32 *)dest) = *((volatile u32 *)vsrc); |
166 | eieio(); | 166 | eieio(); |
167 | vsrc += 4; | 167 | vsrc += 4; |
@@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) | |||
190 | vdest++; | 190 | vdest++; |
191 | n--; | 191 | n--; |
192 | } | 192 | } |
193 | while(n > 4) { | 193 | while(n >= 4) { |
194 | *((volatile u32 *)vdest) = *((volatile u32 *)src); | 194 | *((volatile u32 *)vdest) = *((volatile u32 *)src); |
195 | src += 4; | 195 | src += 4; |
196 | vdest += 4; | 196 | vdest += 4; |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9b86a74d2815..97196eefef3e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -218,6 +218,9 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) | |||
218 | 218 | ||
219 | static void stop_this_cpu(void *dummy) | 219 | static void stop_this_cpu(void *dummy) |
220 | { | 220 | { |
221 | /* Remove this CPU */ | ||
222 | set_cpu_online(smp_processor_id(), false); | ||
223 | |||
221 | local_irq_disable(); | 224 | local_irq_disable(); |
222 | while (1) | 225 | while (1) |
223 | ; | 226 | ; |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 956ab33fd73f..e235e52dc4fe 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu) | |||
461 | 461 | ||
462 | cacheinfo_cpu_offline(cpu); | 462 | cacheinfo_cpu_offline(cpu); |
463 | } | 463 | } |
464 | |||
465 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
466 | ssize_t arch_cpu_probe(const char *buf, size_t count) | ||
467 | { | ||
468 | if (ppc_md.cpu_probe) | ||
469 | return ppc_md.cpu_probe(buf, count); | ||
470 | |||
471 | return -EINVAL; | ||
472 | } | ||
473 | |||
474 | ssize_t arch_cpu_release(const char *buf, size_t count) | ||
475 | { | ||
476 | if (ppc_md.cpu_release) | ||
477 | return ppc_md.cpu_release(buf, count); | ||
478 | |||
479 | return -EINVAL; | ||
480 | } | ||
481 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
482 | |||
464 | #endif /* CONFIG_HOTPLUG_CPU */ | 483 | #endif /* CONFIG_HOTPLUG_CPU */ |
465 | 484 | ||
466 | static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, | 485 | static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, |
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 42037d46a416..3e294bd9b8c6 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -281,6 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | |||
281 | 281 | ||
282 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | 282 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) |
283 | { | 283 | { |
284 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; | ||
284 | vcpu->arch.pvr = pvr; | 285 | vcpu->arch.pvr = pvr; |
285 | if ((pvr >= 0x330000) && (pvr < 0x70330000)) { | 286 | if ((pvr >= 0x330000) && (pvr < 0x70330000)) { |
286 | kvmppc_mmu_book3s_64_init(vcpu); | 287 | kvmppc_mmu_book3s_64_init(vcpu); |
@@ -762,14 +763,62 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
762 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 763 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
763 | struct kvm_sregs *sregs) | 764 | struct kvm_sregs *sregs) |
764 | { | 765 | { |
766 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | ||
767 | int i; | ||
768 | |||
765 | sregs->pvr = vcpu->arch.pvr; | 769 | sregs->pvr = vcpu->arch.pvr; |
770 | |||
771 | sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; | ||
772 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | ||
773 | for (i = 0; i < 64; i++) { | ||
774 | sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i; | ||
775 | sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; | ||
776 | } | ||
777 | } else { | ||
778 | for (i = 0; i < 16; i++) { | ||
779 | sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; | ||
780 | sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; | ||
781 | } | ||
782 | for (i = 0; i < 8; i++) { | ||
783 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; | ||
784 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; | ||
785 | } | ||
786 | } | ||
766 | return 0; | 787 | return 0; |
767 | } | 788 | } |
768 | 789 | ||
769 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 790 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
770 | struct kvm_sregs *sregs) | 791 | struct kvm_sregs *sregs) |
771 | { | 792 | { |
793 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | ||
794 | int i; | ||
795 | |||
772 | kvmppc_set_pvr(vcpu, sregs->pvr); | 796 | kvmppc_set_pvr(vcpu, sregs->pvr); |
797 | |||
798 | vcpu3s->sdr1 = sregs->u.s.sdr1; | ||
799 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | ||
800 | for (i = 0; i < 64; i++) { | ||
801 | vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, | ||
802 | sregs->u.s.ppc64.slb[i].slbe); | ||
803 | } | ||
804 | } else { | ||
805 | for (i = 0; i < 16; i++) { | ||
806 | vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); | ||
807 | } | ||
808 | for (i = 0; i < 8; i++) { | ||
809 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, | ||
810 | (u32)sregs->u.s.ppc32.ibat[i]); | ||
811 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, | ||
812 | (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); | ||
813 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, | ||
814 | (u32)sregs->u.s.ppc32.dbat[i]); | ||
815 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, | ||
816 | (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); | ||
817 | } | ||
818 | } | ||
819 | |||
820 | /* Flush the MMU after messing with the segments */ | ||
821 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | ||
773 | return 0; | 822 | return 0; |
774 | } | 823 | } |
775 | 824 | ||
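From userspace, the new state is reached through the existing sregs ioctls; a sketch, where sys_fd is the /dev/kvm handle and vcpu_fd a vcpu created from it.

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Round-trip the segment state through KVM_GET_SREGS/KVM_SET_SREGS. */
static int sync_segments(int sys_fd, int vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SEGSTATE) <= 0) {
		fprintf(stderr, "segment state not supported\n");
		return -1;
	}
	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;
	/* ... adjust sregs.u.s.ppc32 or sregs.u.s.ppc64 as needed ... */
	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}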
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index c343e67306e0..1027eac6d474 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -185,7 +185,27 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
185 | return emulated; | 185 | return emulated; |
186 | } | 186 | } |
187 | 187 | ||
188 | static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) | 188 | void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, |
189 | u32 val) | ||
190 | { | ||
191 | if (upper) { | ||
192 | /* Upper BAT */ | ||
193 | u32 bl = (val >> 2) & 0x7ff; | ||
194 | bat->bepi_mask = (~bl << 17); | ||
195 | bat->bepi = val & 0xfffe0000; | ||
196 | bat->vs = (val & 2) ? 1 : 0; | ||
197 | bat->vp = (val & 1) ? 1 : 0; | ||
198 | bat->raw = (bat->raw & 0xffffffff00000000ULL) | val; | ||
199 | } else { | ||
200 | /* Lower BAT */ | ||
201 | bat->brpn = val & 0xfffe0000; | ||
202 | bat->wimg = (val >> 3) & 0xf; | ||
203 | bat->pp = val & 3; | ||
204 | bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) | ||
189 | { | 209 | { |
190 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 210 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
191 | struct kvmppc_bat *bat; | 211 | struct kvmppc_bat *bat; |
@@ -207,19 +227,7 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) | |||
207 | BUG(); | 227 | BUG(); |
208 | } | 228 | } |
209 | 229 | ||
210 | if (!(sprn % 2)) { | 230 | kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); |
211 | /* Upper BAT */ | ||
212 | u32 bl = (val >> 2) & 0x7ff; | ||
213 | bat->bepi_mask = (~bl << 17); | ||
214 | bat->bepi = val & 0xfffe0000; | ||
215 | bat->vs = (val & 2) ? 1 : 0; | ||
216 | bat->vp = (val & 1) ? 1 : 0; | ||
217 | } else { | ||
218 | /* Lower BAT */ | ||
219 | bat->brpn = val & 0xfffe0000; | ||
220 | bat->wimg = (val >> 3) & 0xf; | ||
221 | bat->pp = val & 3; | ||
222 | } | ||
223 | } | 231 | } |
224 | 232 | ||
225 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 233 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
@@ -243,7 +251,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
243 | case SPRN_IBAT4U ... SPRN_IBAT7L: | 251 | case SPRN_IBAT4U ... SPRN_IBAT7L: |
244 | case SPRN_DBAT0U ... SPRN_DBAT3L: | 252 | case SPRN_DBAT0U ... SPRN_DBAT3L: |
245 | case SPRN_DBAT4U ... SPRN_DBAT7L: | 253 | case SPRN_DBAT4U ... SPRN_DBAT7L: |
246 | kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]); | 254 | kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]); |
247 | /* BAT writes happen so rarely that we're ok to flush | 255 | /* BAT writes happen so rarely that we're ok to flush |
248 | * everything here */ | 256 | * everything here */ |
249 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 257 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
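For reference, a worked decoding under kvmppc_set_bat() with an illustrative upper-BAT value for a 256MB block mapped at effective address 0xf0000000 (Vs = Vp = 1):

/* upper word = BEPI | (BL << 2) | Vs | Vp
 *            = 0xf0000000 | (0x7ff << 2) | 0x2 | 0x1 = 0xf0001fff
 *
 * With the code above:
 *   bl        = (0xf0001fff >> 2) & 0x7ff  = 0x7ff   (256MB block)
 *   bepi_mask = (~0x7ff) << 17             = 0xf0000000
 *   bepi      = 0xf0001fff & 0xfffe0000    = 0xf0000000
 *   vs = vp   = 1
 *
 * so an effective address hits this BAT when (ea & bepi_mask) == bepi. */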
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a31f9c677d23..5598f88f142e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -473,4 +473,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) | |||
473 | mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; | 473 | mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; |
474 | mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; | 474 | mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; |
475 | mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; | 475 | mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; |
476 | |||
477 | vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; | ||
476 | } | 478 | } |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 692c3709011e..d82551efbfbf 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -144,6 +144,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
144 | int r; | 144 | int r; |
145 | 145 | ||
146 | switch (ext) { | 146 | switch (ext) { |
147 | case KVM_CAP_PPC_SEGSTATE: | ||
148 | r = 1; | ||
149 | break; | ||
147 | case KVM_CAP_COALESCED_MMIO: | 150 | case KVM_CAP_COALESCED_MMIO: |
148 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 151 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
149 | break; | 152 | break; |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6810128aba30..50f867d657df 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -835,9 +835,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr) | |||
835 | * Result is 0: full permissions, _PAGE_RW: read-only, | 835 | * Result is 0: full permissions, _PAGE_RW: read-only, |
836 | * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access. | 836 | * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access. |
837 | */ | 837 | */ |
838 | static int subpage_protection(pgd_t *pgdir, unsigned long ea) | 838 | static int subpage_protection(struct mm_struct *mm, unsigned long ea) |
839 | { | 839 | { |
840 | struct subpage_prot_table *spt = pgd_subpage_prot(pgdir); | 840 | struct subpage_prot_table *spt = &mm->context.spt; |
841 | u32 spp = 0; | 841 | u32 spp = 0; |
842 | u32 **sbpm, *sbpp; | 842 | u32 **sbpm, *sbpp; |
843 | 843 | ||
@@ -865,7 +865,7 @@ static int subpage_protection(pgd_t *pgdir, unsigned long ea) | |||
865 | } | 865 | } |
866 | 866 | ||
867 | #else /* CONFIG_PPC_SUBPAGE_PROT */ | 867 | #else /* CONFIG_PPC_SUBPAGE_PROT */ |
868 | static inline int subpage_protection(pgd_t *pgdir, unsigned long ea) | 868 | static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) |
869 | { | 869 | { |
870 | return 0; | 870 | return 0; |
871 | } | 871 | } |
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b9e4cc2c2057..b910d37aea1a 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -76,6 +76,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
76 | */ | 76 | */ |
77 | if (slice_mm_new_context(mm)) | 77 | if (slice_mm_new_context(mm)) |
78 | slice_set_user_psize(mm, mmu_virtual_psize); | 78 | slice_set_user_psize(mm, mmu_virtual_psize); |
79 | subpage_prot_init_new_context(mm); | ||
79 | mm->context.id = index; | 80 | mm->context.id = index; |
80 | 81 | ||
81 | return 0; | 82 | return 0; |
@@ -92,5 +93,6 @@ EXPORT_SYMBOL_GPL(__destroy_context); | |||
92 | void destroy_context(struct mm_struct *mm) | 93 | void destroy_context(struct mm_struct *mm) |
93 | { | 94 | { |
94 | __destroy_context(mm->context.id); | 95 | __destroy_context(mm->context.id); |
96 | subpage_prot_free(mm); | ||
95 | mm->context.id = NO_CONTEXT; | 97 | mm->context.id = NO_CONTEXT; |
96 | } | 98 | } |
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 4cafc0c33d0a..a040b81e93bd 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -24,9 +24,9 @@ | |||
24 | * Also makes sure that the subpage_prot_table structure is | 24 | * Also makes sure that the subpage_prot_table structure is |
25 | * reinitialized for the next user. | 25 | * reinitialized for the next user. |
26 | */ | 26 | */ |
27 | void subpage_prot_free(pgd_t *pgd) | 27 | void subpage_prot_free(struct mm_struct *mm) |
28 | { | 28 | { |
29 | struct subpage_prot_table *spt = pgd_subpage_prot(pgd); | 29 | struct subpage_prot_table *spt = &mm->context.spt; |
30 | unsigned long i, j, addr; | 30 | unsigned long i, j, addr; |
31 | u32 **p; | 31 | u32 **p; |
32 | 32 | ||
@@ -51,6 +51,13 @@ void subpage_prot_free(pgd_t *pgd) | |||
51 | spt->maxaddr = 0; | 51 | spt->maxaddr = 0; |
52 | } | 52 | } |
53 | 53 | ||
54 | void subpage_prot_init_new_context(struct mm_struct *mm) | ||
55 | { | ||
56 | struct subpage_prot_table *spt = &mm->context.spt; | ||
57 | |||
58 | memset(spt, 0, sizeof(*spt)); | ||
59 | } | ||
60 | |||
54 | static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, | 61 | static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, |
55 | int npages) | 62 | int npages) |
56 | { | 63 | { |
@@ -87,7 +94,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, | |||
87 | static void subpage_prot_clear(unsigned long addr, unsigned long len) | 94 | static void subpage_prot_clear(unsigned long addr, unsigned long len) |
88 | { | 95 | { |
89 | struct mm_struct *mm = current->mm; | 96 | struct mm_struct *mm = current->mm; |
90 | struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd); | 97 | struct subpage_prot_table *spt = &mm->context.spt; |
91 | u32 **spm, *spp; | 98 | u32 **spm, *spp; |
92 | int i, nw; | 99 | int i, nw; |
93 | unsigned long next, limit; | 100 | unsigned long next, limit; |
@@ -136,7 +143,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) | |||
136 | long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) | 143 | long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) |
137 | { | 144 | { |
138 | struct mm_struct *mm = current->mm; | 145 | struct mm_struct *mm = current->mm; |
139 | struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd); | 146 | struct subpage_prot_table *spt = &mm->context.spt; |
140 | u32 **spm, *spp; | 147 | u32 **spm, *spp; |
141 | int i, nw; | 148 | int i, nw; |
142 | unsigned long next, limit; | 149 | unsigned long next, limit; |
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 8b8e9560a315..47ea1be1481b 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -62,3 +62,8 @@ config PPC_MPC5200_GPIO | |||
62 | select GENERIC_GPIO | 62 | select GENERIC_GPIO |
63 | help | 63 | help |
64 | Enable gpiolib support for mpc5200 based boards | 64 | Enable gpiolib support for mpc5200 based boards |
65 | |||
66 | config PPC_MPC5200_LPBFIFO | ||
67 | tristate "MPC5200 LocalPlus bus FIFO driver" | ||
68 | depends on PPC_MPC52xx | ||
69 | select PPC_BESTCOMM_GEN_BD | ||
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index bfd4f52cf3dd..2bc8cd0c5cfc 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -15,3 +15,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y) | |||
15 | endif | 15 | endif |
16 | 16 | ||
17 | obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o | 17 | obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o |
18 | obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o | ||
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 4d76b7f2336c..17ecdf4c87ae 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -16,8 +16,14 @@ | |||
16 | * output signals or measure input signals. | 16 | * output signals or measure input signals. |
17 | * | 17 | * |
18 | * This driver supports the GPIO and IRQ controller functions of the GPT | 18 | * This driver supports the GPIO and IRQ controller functions of the GPT |
19 | * device. Timer functions are not yet supported, nor is the watchdog | 19 | * device. Timer functions are not yet supported. |
20 | * timer. | 20 | * |
21 | * The timer gpt0 can be used as watchdog (wdt). If the wdt mode is used, | ||
22 | * this prevents the use of any gpt0 gpt function (i.e. they will fail with | ||
23 | * -EBUSY). Thus, the safety wdt function always has precedence over the gpt | ||
24 | * function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT, | ||
25 | * this means that gpt0 is locked in wdt mode until the next reboot - this | ||
26 | * may be a requirement in safety applications. | ||
21 | * | 27 | * |
22 | * To use the GPIO function, the following two properties must be added | 28 | * To use the GPIO function, the following two properties must be added |
23 | * to the device tree node for the gpt device (typically in the .dts file | 29 | * to the device tree node for the gpt device (typically in the .dts file |
@@ -46,17 +52,24 @@ | |||
46 | * the output mode. This driver does not change the output mode setting. | 52 | * the output mode. This driver does not change the output mode setting. |
47 | */ | 53 | */ |
48 | 54 | ||
55 | #include <linux/device.h> | ||
49 | #include <linux/irq.h> | 56 | #include <linux/irq.h> |
50 | #include <linux/interrupt.h> | 57 | #include <linux/interrupt.h> |
51 | #include <linux/io.h> | 58 | #include <linux/io.h> |
59 | #include <linux/list.h> | ||
60 | #include <linux/mutex.h> | ||
52 | #include <linux/of.h> | 61 | #include <linux/of.h> |
53 | #include <linux/of_platform.h> | 62 | #include <linux/of_platform.h> |
54 | #include <linux/of_gpio.h> | 63 | #include <linux/of_gpio.h> |
55 | #include <linux/kernel.h> | 64 | #include <linux/kernel.h> |
65 | #include <linux/watchdog.h> | ||
66 | #include <linux/miscdevice.h> | ||
67 | #include <linux/uaccess.h> | ||
68 | #include <asm/div64.h> | ||
56 | #include <asm/mpc52xx.h> | 69 | #include <asm/mpc52xx.h> |
57 | 70 | ||
58 | MODULE_DESCRIPTION("Freescale MPC52xx gpt driver"); | 71 | MODULE_DESCRIPTION("Freescale MPC52xx gpt driver"); |
59 | MODULE_AUTHOR("Sascha Hauer, Grant Likely"); | 72 | MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht Dreß"); |
60 | MODULE_LICENSE("GPL"); | 73 | MODULE_LICENSE("GPL"); |
61 | 74 | ||
62 | /** | 75 | /** |
@@ -66,18 +79,27 @@ MODULE_LICENSE("GPL"); | |||
66 | * @lock: spinlock to coordinate between different functions. | 79 | * @lock: spinlock to coordinate between different functions. |
67 | * @of_gc: of_gpio_chip instance structure; used when GPIO is enabled | 80 | * @of_gc: of_gpio_chip instance structure; used when GPIO is enabled |
68 | * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported | 81 | * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported |
82 | * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates | ||
83 | * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates | ||
84 | * if the timer is actively used as wdt which blocks gpt functions | ||
69 | */ | 85 | */ |
70 | struct mpc52xx_gpt_priv { | 86 | struct mpc52xx_gpt_priv { |
87 | struct list_head list; /* List of all GPT devices */ | ||
71 | struct device *dev; | 88 | struct device *dev; |
72 | struct mpc52xx_gpt __iomem *regs; | 89 | struct mpc52xx_gpt __iomem *regs; |
73 | spinlock_t lock; | 90 | spinlock_t lock; |
74 | struct irq_host *irqhost; | 91 | struct irq_host *irqhost; |
92 | u32 ipb_freq; | ||
93 | u8 wdt_mode; | ||
75 | 94 | ||
76 | #if defined(CONFIG_GPIOLIB) | 95 | #if defined(CONFIG_GPIOLIB) |
77 | struct of_gpio_chip of_gc; | 96 | struct of_gpio_chip of_gc; |
78 | #endif | 97 | #endif |
79 | }; | 98 | }; |
80 | 99 | ||
100 | LIST_HEAD(mpc52xx_gpt_list); | ||
101 | DEFINE_MUTEX(mpc52xx_gpt_list_mutex); | ||
102 | |||
81 | #define MPC52xx_GPT_MODE_MS_MASK (0x07) | 103 | #define MPC52xx_GPT_MODE_MS_MASK (0x07) |
82 | #define MPC52xx_GPT_MODE_MS_IC (0x01) | 104 | #define MPC52xx_GPT_MODE_MS_IC (0x01) |
83 | #define MPC52xx_GPT_MODE_MS_OC (0x02) | 105 | #define MPC52xx_GPT_MODE_MS_OC (0x02) |
@@ -88,15 +110,25 @@ struct mpc52xx_gpt_priv { | |||
88 | #define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20) | 110 | #define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20) |
89 | #define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30) | 111 | #define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30) |
90 | 112 | ||
113 | #define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000) | ||
114 | #define MPC52xx_GPT_MODE_CONTINUOUS (0x0400) | ||
115 | #define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200) | ||
91 | #define MPC52xx_GPT_MODE_IRQ_EN (0x0100) | 116 | #define MPC52xx_GPT_MODE_IRQ_EN (0x0100) |
117 | #define MPC52xx_GPT_MODE_WDT_EN (0x8000) | ||
92 | 118 | ||
93 | #define MPC52xx_GPT_MODE_ICT_MASK (0x030000) | 119 | #define MPC52xx_GPT_MODE_ICT_MASK (0x030000) |
94 | #define MPC52xx_GPT_MODE_ICT_RISING (0x010000) | 120 | #define MPC52xx_GPT_MODE_ICT_RISING (0x010000) |
95 | #define MPC52xx_GPT_MODE_ICT_FALLING (0x020000) | 121 | #define MPC52xx_GPT_MODE_ICT_FALLING (0x020000) |
96 | #define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000) | 122 | #define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000) |
97 | 123 | ||
124 | #define MPC52xx_GPT_MODE_WDT_PING (0xa5) | ||
125 | |||
98 | #define MPC52xx_GPT_STATUS_IRQMASK (0x000f) | 126 | #define MPC52xx_GPT_STATUS_IRQMASK (0x000f) |
99 | 127 | ||
128 | #define MPC52xx_GPT_CAN_WDT (1 << 0) | ||
129 | #define MPC52xx_GPT_IS_WDT (1 << 1) | ||
130 | |||
131 | |||
100 | /* --------------------------------------------------------------------- | 132 | /* --------------------------------------------------------------------- |
101 | * Cascaded interrupt controller hooks | 133 | * Cascaded interrupt controller hooks |
102 | */ | 134 | */ |
@@ -190,7 +222,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, | |||
190 | 222 | ||
191 | dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]); | 223 | dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]); |
192 | 224 | ||
193 | if ((intsize < 1) || (intspec[0] < 1) || (intspec[0] > 3)) { | 225 | if ((intsize < 1) || (intspec[0] > 3)) { |
194 | dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name); | 226 | dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name); |
195 | return -EINVAL; | 227 | return -EINVAL; |
196 | } | 228 | } |
@@ -211,13 +243,11 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) | |||
211 | { | 243 | { |
212 | int cascade_virq; | 244 | int cascade_virq; |
213 | unsigned long flags; | 245 | unsigned long flags; |
214 | 246 | u32 mode; | |
215 | /* Only setup cascaded IRQ if device tree claims the GPT is | ||
216 | * an interrupt controller */ | ||
217 | if (!of_find_property(node, "interrupt-controller", NULL)) | ||
218 | return; | ||
219 | 247 | ||
220 | cascade_virq = irq_of_parse_and_map(node, 0); | 248 | cascade_virq = irq_of_parse_and_map(node, 0); |
249 | if (!cascade_virq) | ||
250 | return; | ||
221 | 251 | ||
222 | gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, | 252 | gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, |
223 | &mpc52xx_gpt_irq_ops, -1); | 253 | &mpc52xx_gpt_irq_ops, -1); |
@@ -227,14 +257,16 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) | |||
227 | } | 257 | } |
228 | 258 | ||
229 | gpt->irqhost->host_data = gpt; | 259 | gpt->irqhost->host_data = gpt; |
230 | |||
231 | set_irq_data(cascade_virq, gpt); | 260 | set_irq_data(cascade_virq, gpt); |
232 | set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); | 261 | set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); |
233 | 262 | ||
234 | /* Set to Input Capture mode */ | 263 | /* If the GPT is currently disabled, then change it to be in Input |
264 | * Capture mode. If the mode is non-zero, then the pin could be | ||
265 | * already in use for something. */ | ||
235 | spin_lock_irqsave(&gpt->lock, flags); | 266 | spin_lock_irqsave(&gpt->lock, flags); |
236 | clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK, | 267 | mode = in_be32(&gpt->regs->mode); |
237 | MPC52xx_GPT_MODE_MS_IC); | 268 | if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0) |
269 | out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC); | ||
238 | spin_unlock_irqrestore(&gpt->lock, flags); | 270 | spin_unlock_irqrestore(&gpt->lock, flags); |
239 | 271 | ||
240 | dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq); | 272 | dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq); |
@@ -335,6 +367,354 @@ static void | |||
335 | mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { } | 367 | mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { } |
336 | #endif /* defined(CONFIG_GPIOLIB) */ | 368 | #endif /* defined(CONFIG_GPIOLIB) */ |
337 | 369 | ||
370 | /*********************************************************************** | ||
371 | * Timer API | ||
372 | */ | ||
373 | |||
374 | /** | ||
375 | * mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number | ||
376 | * @irq: irq of timer. | ||
377 | */ | ||
378 | struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq) | ||
379 | { | ||
380 | struct mpc52xx_gpt_priv *gpt; | ||
381 | struct list_head *pos; | ||
382 | |||
383 | /* Iterate over the list of timers looking for a matching device */ | ||
384 | mutex_lock(&mpc52xx_gpt_list_mutex); | ||
385 | list_for_each(pos, &mpc52xx_gpt_list) { | ||
386 | gpt = container_of(pos, struct mpc52xx_gpt_priv, list); | ||
387 | if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) { | ||
388 | mutex_unlock(&mpc52xx_gpt_list_mutex); | ||
389 | return gpt; | ||
390 | } | ||
391 | } | ||
392 | mutex_unlock(&mpc52xx_gpt_list_mutex); | ||
393 | |||
394 | return NULL; | ||
395 | } | ||
396 | EXPORT_SYMBOL(mpc52xx_gpt_from_irq); | ||
397 | |||
398 | static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period, | ||
399 | int continuous, int as_wdt) | ||
400 | { | ||
401 | u32 clear, set; | ||
402 | u64 clocks; | ||
403 | u32 prescale; | ||
404 | unsigned long flags; | ||
405 | |||
406 | clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS; | ||
407 | set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE; | ||
408 | if (as_wdt) { | ||
409 | clear |= MPC52xx_GPT_MODE_IRQ_EN; | ||
410 | set |= MPC52xx_GPT_MODE_WDT_EN; | ||
411 | } else if (continuous) | ||
412 | set |= MPC52xx_GPT_MODE_CONTINUOUS; | ||
413 | |||
414 | /* Determine the number of clocks in the requested period. 64 bit | ||
415 | * arithmetic is done here to preserve the precision until the value | ||
416 | * is scaled back down into the u32 range. Period is in 'ns', bus | ||
417 | * frequency is in Hz. */ | ||
418 | clocks = period * (u64)gpt->ipb_freq; | ||
419 | do_div(clocks, 1000000000); /* Scale it down to ns range */ | ||
420 | |||
421 | /* This device cannot handle a clock count greater than 32 bits */ | ||
422 | if (clocks > 0xffffffff) | ||
423 | return -EINVAL; | ||
424 | |||
425 | /* Calculate the prescaler and count values from the clocks value. | ||
426 | * 'clocks' is the number of clock ticks in the period. The timer | ||
427 | * has 16 bit precision and a 16 bit prescaler. Prescaler is | ||
428 | * calculated by integer dividing the clocks by 0x10000 (shifting | ||
429 | * down 16 bits) to obtain the smallest possible divisor for clocks | ||
430 | * to get a 16 bit count value. | ||
431 | * | ||
432 | * Note: the prescale register is '1' based, not '0' based. ie. a | ||
433 | * value of '1' means divide the clock by one. 0xffff divides the | ||
434 | * clock by 0xffff. '0x0000' does not divide by zero, but wraps | ||
435 | * around and divides by 0x10000. That is why prescale must be | ||
436 | * a u32 variable, not a u16, for this calculation. */ | ||
437 | prescale = (clocks >> 16) + 1; | ||
438 | do_div(clocks, prescale); | ||
439 | if (clocks > 0xffff) { | ||
440 | pr_err("calculation error; prescale:%x clocks:%llx\n", | ||
441 | prescale, clocks); | ||
442 | return -EINVAL; | ||
443 | } | ||
444 | |||
445 | /* Set and enable the timer, reject an attempt to use a wdt as gpt */ | ||
446 | spin_lock_irqsave(&gpt->lock, flags); | ||
447 | if (as_wdt) | ||
448 | gpt->wdt_mode |= MPC52xx_GPT_IS_WDT; | ||
449 | else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) { | ||
450 | spin_unlock_irqrestore(&gpt->lock, flags); | ||
451 | return -EBUSY; | ||
452 | } | ||
453 | out_be32(&gpt->regs->count, prescale << 16 | clocks); | ||
454 | clrsetbits_be32(&gpt->regs->mode, clear, set); | ||
455 | spin_unlock_irqrestore(&gpt->lock, flags); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | /** | ||
461 | * mpc52xx_gpt_start_timer - Set and enable the GPT timer | ||
462 | * @gpt: Pointer to gpt private data structure | ||
463 | * @period: period of timer in ns; max. ~130s @ 33MHz IPB clock | ||
464 | * @continuous: set to 1 to make timer continuous free running | ||
465 | * | ||
466 | * An interrupt will be generated every time the timer fires | ||
467 | */ | ||
468 | int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period, | ||
469 | int continuous) | ||
470 | { | ||
471 | return mpc52xx_gpt_do_start(gpt, period, continuous, 0); | ||
472 | } | ||
473 | EXPORT_SYMBOL(mpc52xx_gpt_start_timer); | ||
474 | |||
475 | /** | ||
476 | * mpc52xx_gpt_stop_timer - Stop a gpt | ||
477 | * @gpt: Pointer to gpt private data structure | ||
478 | * | ||
479 | * Returns an error if attempting to stop a wdt | ||
480 | */ | ||
481 | int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt) | ||
482 | { | ||
483 | unsigned long flags; | ||
484 | |||
485 | /* reject the operation if the timer is used as watchdog (gpt 0 only) */ | ||
486 | spin_lock_irqsave(&gpt->lock, flags); | ||
487 | if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) { | ||
488 | spin_unlock_irqrestore(&gpt->lock, flags); | ||
489 | return -EBUSY; | ||
490 | } | ||
491 | |||
492 | clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE); | ||
493 | spin_unlock_irqrestore(&gpt->lock, flags); | ||
494 | return 0; | ||
495 | } | ||
496 | EXPORT_SYMBOL(mpc52xx_gpt_stop_timer); | ||
497 | |||
498 | /** | ||
499 | * mpc52xx_gpt_timer_period - Read the timer period | ||
500 | * @gpt: Pointer to gpt private data structure | ||
501 | * | ||
502 | * Returns the timer period in ns | ||
503 | */ | ||
504 | u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt) | ||
505 | { | ||
506 | u64 period; | ||
507 | u64 prescale; | ||
508 | unsigned long flags; | ||
509 | |||
510 | spin_lock_irqsave(&gpt->lock, flags); | ||
511 | period = in_be32(&gpt->regs->count); | ||
512 | spin_unlock_irqrestore(&gpt->lock, flags); | ||
513 | |||
514 | prescale = period >> 16; | ||
515 | period &= 0xffff; | ||
516 | if (prescale == 0) | ||
517 | prescale = 0x10000; | ||
518 | period = period * prescale * 1000000000ULL; | ||
519 | do_div(period, (u64)gpt->ipb_freq); | ||
520 | return period; | ||
521 | } | ||
522 | EXPORT_SYMBOL(mpc52xx_gpt_timer_period); | ||
523 | |||
524 | #if defined(CONFIG_MPC5200_WDT) | ||
525 | /*********************************************************************** | ||
526 | * Watchdog API for gpt0 | ||
527 | */ | ||
528 | |||
529 | #define WDT_IDENTITY "mpc52xx watchdog on GPT0" | ||
530 | |||
531 | /* wdt_is_active stores whether or not the /dev/watchdog device is open */ | ||
532 | static unsigned long wdt_is_active; | ||
533 | |||
534 | /* wdt-capable gpt */ | ||
535 | static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt; | ||
536 | |||
537 | /* low-level wdt functions */ | ||
538 | static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt) | ||
539 | { | ||
540 | unsigned long flags; | ||
541 | |||
542 | spin_lock_irqsave(&gpt_wdt->lock, flags); | ||
543 | out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING); | ||
544 | spin_unlock_irqrestore(&gpt_wdt->lock, flags); | ||
545 | } | ||
546 | |||
547 | /* wdt misc device api */ | ||
548 | static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data, | ||
549 | size_t len, loff_t *ppos) | ||
550 | { | ||
551 | struct mpc52xx_gpt_priv *gpt_wdt = file->private_data; | ||
552 | mpc52xx_gpt_wdt_ping(gpt_wdt); | ||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | static struct watchdog_info mpc5200_wdt_info = { | ||
557 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | ||
558 | .identity = WDT_IDENTITY, | ||
559 | }; | ||
560 | |||
561 | static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd, | ||
562 | unsigned long arg) | ||
563 | { | ||
564 | struct mpc52xx_gpt_priv *gpt_wdt = file->private_data; | ||
565 | int __user *data = (int __user *)arg; | ||
566 | int timeout; | ||
567 | u64 real_timeout; | ||
568 | int ret = 0; | ||
569 | |||
570 | switch (cmd) { | ||
571 | case WDIOC_GETSUPPORT: | ||
572 | ret = copy_to_user(data, &mpc5200_wdt_info, | ||
573 | sizeof(mpc5200_wdt_info)); | ||
574 | if (ret) | ||
575 | ret = -EFAULT; | ||
576 | break; | ||
577 | |||
578 | case WDIOC_GETSTATUS: | ||
579 | case WDIOC_GETBOOTSTATUS: | ||
580 | ret = put_user(0, data); | ||
581 | break; | ||
582 | |||
583 | case WDIOC_KEEPALIVE: | ||
584 | mpc52xx_gpt_wdt_ping(gpt_wdt); | ||
585 | break; | ||
586 | |||
587 | case WDIOC_SETTIMEOUT: | ||
588 | ret = get_user(timeout, data); | ||
589 | if (ret) | ||
590 | break; | ||
591 | real_timeout = (u64) timeout * 1000000000ULL; | ||
592 | ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1); | ||
593 | if (ret) | ||
594 | break; | ||
595 | /* fall through and return the timeout */ | ||
596 | |||
597 | case WDIOC_GETTIMEOUT: | ||
598 | /* we need to round here so as to avoid e.g. the following | ||
599 | * situation (see the worked example after this function): | ||
600 | * - timeout requested is 1 second; | ||
601 | * - real timeout @33MHz is 999997090ns; | ||
602 | * - a plain integer divide by 10^9 would return 0. | ||
603 | */ | ||
604 | real_timeout = | ||
605 | mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL; | ||
606 | do_div(real_timeout, 1000000000ULL); | ||
607 | timeout = (int) real_timeout; | ||
608 | ret = put_user(timeout, data); | ||
609 | break; | ||
610 | |||
611 | default: | ||
612 | ret = -ENOTTY; | ||
613 | } | ||
614 | return ret; | ||
615 | } | ||
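The rounding in the WDIOC_GETTIMEOUT branch can be checked with the numbers from the comment above; this throwaway snippet is not driver code, it only shows why the half-second bias is needed:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t real_timeout = 999997090ULL;   /* ~1 s as measured at 33 MHz */

            /* plain truncation loses the second entirely */
            printf("truncated: %llu s\n",
                   (unsigned long long)(real_timeout / 1000000000ULL));
            /* adding 0.5 s before dividing rounds to the nearest second */
            printf("rounded:   %llu s\n",
                   (unsigned long long)((real_timeout + 500000000ULL) / 1000000000ULL));
            return 0;                               /* prints 0, then 1 */
    }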
616 | |||
617 | static int mpc52xx_wdt_open(struct inode *inode, struct file *file) | ||
618 | { | ||
619 | int ret; | ||
620 | |||
621 | /* sanity check */ | ||
622 | if (!mpc52xx_gpt_wdt) | ||
623 | return -ENODEV; | ||
624 | |||
625 | /* /dev/watchdog can only be opened once */ | ||
626 | if (test_and_set_bit(0, &wdt_is_active)) | ||
627 | return -EBUSY; | ||
628 | |||
629 | /* Set and activate the watchdog with 30 seconds timeout */ | ||
630 | ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL, | ||
631 | 0, 1); | ||
632 | if (ret) { | ||
633 | clear_bit(0, &wdt_is_active); | ||
634 | return ret; | ||
635 | } | ||
636 | |||
637 | file->private_data = mpc52xx_gpt_wdt; | ||
638 | return nonseekable_open(inode, file); | ||
639 | } | ||
640 | |||
641 | static int mpc52xx_wdt_release(struct inode *inode, struct file *file) | ||
642 | { | ||
643 | /* note: releasing the wdt in NOWAYOUT-mode does not stop it */ | ||
644 | #if !defined(CONFIG_WATCHDOG_NOWAYOUT) | ||
645 | struct mpc52xx_gpt_priv *gpt_wdt = file->private_data; | ||
646 | unsigned long flags; | ||
647 | |||
648 | spin_lock_irqsave(&gpt_wdt->lock, flags); | ||
649 | clrbits32(&gpt_wdt->regs->mode, | ||
650 | MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN); | ||
651 | gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT; | ||
652 | spin_unlock_irqrestore(&gpt_wdt->lock, flags); | ||
653 | #endif | ||
654 | clear_bit(0, &wdt_is_active); | ||
655 | return 0; | ||
656 | } | ||
657 | |||
658 | |||
659 | static const struct file_operations mpc52xx_wdt_fops = { | ||
660 | .owner = THIS_MODULE, | ||
661 | .llseek = no_llseek, | ||
662 | .write = mpc52xx_wdt_write, | ||
663 | .unlocked_ioctl = mpc52xx_wdt_ioctl, | ||
664 | .open = mpc52xx_wdt_open, | ||
665 | .release = mpc52xx_wdt_release, | ||
666 | }; | ||
667 | |||
668 | static struct miscdevice mpc52xx_wdt_miscdev = { | ||
669 | .minor = WATCHDOG_MINOR, | ||
670 | .name = "watchdog", | ||
671 | .fops = &mpc52xx_wdt_fops, | ||
672 | }; | ||
673 | |||
674 | static int __devinit mpc52xx_gpt_wdt_init(void) | ||
675 | { | ||
676 | int err; | ||
677 | |||
678 | /* try to register the watchdog misc device */ | ||
679 | err = misc_register(&mpc52xx_wdt_miscdev); | ||
680 | if (err) | ||
681 | pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY); | ||
682 | else | ||
683 | pr_info("%s: watchdog device registered\n", WDT_IDENTITY); | ||
684 | return err; | ||
685 | } | ||
686 | |||
687 | static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, | ||
688 | const u32 *period) | ||
689 | { | ||
690 | u64 real_timeout; | ||
691 | |||
692 | /* remember the gpt for the wdt operation */ | ||
693 | mpc52xx_gpt_wdt = gpt; | ||
694 | |||
695 | /* configure the wdt if the device tree contained a timeout */ | ||
696 | if (!period || *period == 0) | ||
697 | return 0; | ||
698 | |||
699 | real_timeout = (u64) *period * 1000000000ULL; | ||
700 | if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1)) | ||
701 | dev_warn(gpt->dev, "starting as wdt failed\n"); | ||
702 | else | ||
703 | dev_info(gpt->dev, "watchdog set to %us timeout\n", *period); | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | #else | ||
708 | |||
709 | static int __devinit mpc52xx_gpt_wdt_init(void) | ||
710 | { | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | #define mpc52xx_gpt_wdt_setup(x, y) (0) | ||
715 | |||
716 | #endif /* CONFIG_MPC5200_WDT */ | ||
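Since the block above only exposes the standard Linux watchdog character device, a user-space client would drive it roughly as follows. This is a minimal sketch against the generic /dev/watchdog API (the 10 second timeout is an example value), not code shipped with the driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int timeout = 10;                       /* seconds, example value */
            int fd = open("/dev/watchdog", O_WRONLY);

            if (fd < 0) {
                    perror("open /dev/watchdog");
                    return 1;
            }
            ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* restart GPT0 with the new timeout */
            for (;;) {
                    ioctl(fd, WDIOC_KEEPALIVE, 0);  /* ping; a plain write() works too */
                    sleep(timeout / 2);
            }
            /* not reached; closing the fd stops the watchdog unless NOWAYOUT is set */
    }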
717 | |||
338 | /* --------------------------------------------------------------------- | 718 | /* --------------------------------------------------------------------- |
339 | * of_platform bus binding code | 719 | * of_platform bus binding code |
340 | */ | 720 | */ |
@@ -349,6 +729,7 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev, | |||
349 | 729 | ||
350 | spin_lock_init(&gpt->lock); | 730 | spin_lock_init(&gpt->lock); |
351 | gpt->dev = &ofdev->dev; | 731 | gpt->dev = &ofdev->dev; |
732 | gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node); | ||
352 | gpt->regs = of_iomap(ofdev->node, 0); | 733 | gpt->regs = of_iomap(ofdev->node, 0); |
353 | if (!gpt->regs) { | 734 | if (!gpt->regs) { |
354 | kfree(gpt); | 735 | kfree(gpt); |
@@ -360,6 +741,26 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev, | |||
360 | mpc52xx_gpt_gpio_setup(gpt, ofdev->node); | 741 | mpc52xx_gpt_gpio_setup(gpt, ofdev->node); |
361 | mpc52xx_gpt_irq_setup(gpt, ofdev->node); | 742 | mpc52xx_gpt_irq_setup(gpt, ofdev->node); |
362 | 743 | ||
744 | mutex_lock(&mpc52xx_gpt_list_mutex); | ||
745 | list_add(&gpt->list, &mpc52xx_gpt_list); | ||
746 | mutex_unlock(&mpc52xx_gpt_list_mutex); | ||
747 | |||
748 | /* check if this device could be a watchdog */ | ||
749 | if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) || | ||
750 | of_get_property(ofdev->node, "has-wdt", NULL)) { | ||
751 | const u32 *on_boot_wdt; | ||
752 | |||
753 | gpt->wdt_mode = MPC52xx_GPT_CAN_WDT; | ||
754 | on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot", | ||
755 | NULL); | ||
756 | if (on_boot_wdt) { | ||
757 | dev_info(gpt->dev, "used as watchdog\n"); | ||
758 | gpt->wdt_mode |= MPC52xx_GPT_IS_WDT; | ||
759 | } else | ||
760 | dev_info(gpt->dev, "can function as watchdog\n"); | ||
761 | mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt); | ||
762 | } | ||
763 | |||
363 | return 0; | 764 | return 0; |
364 | } | 765 | } |
365 | 766 | ||
@@ -394,3 +795,4 @@ static int __init mpc52xx_gpt_init(void) | |||
394 | 795 | ||
395 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ | 796 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ |
396 | subsys_initcall(mpc52xx_gpt_init); | 797 | subsys_initcall(mpc52xx_gpt_init); |
798 | device_initcall(mpc52xx_gpt_wdt_init); | ||
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c new file mode 100644 index 000000000000..929d017535a3 --- /dev/null +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | |||
@@ -0,0 +1,560 @@ | |||
1 | /* | ||
2 | * LocalPlus Bus FIFO driver for the Freescale MPC52xx. | ||
3 | * | ||
4 | * Copyright (C) 2009 Secret Lab Technologies Ltd. | ||
5 | * | ||
6 | * This file is released under the GPLv2 | ||
7 | * | ||
8 | * Todo: | ||
9 | * - Add support for multiple requests to be queued. | ||
10 | */ | ||
11 | |||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <asm/io.h> | ||
18 | #include <asm/prom.h> | ||
19 | #include <asm/mpc52xx.h> | ||
20 | #include <asm/time.h> | ||
21 | |||
22 | #include <sysdev/bestcomm/bestcomm.h> | ||
23 | #include <sysdev/bestcomm/bestcomm_priv.h> | ||
24 | #include <sysdev/bestcomm/gen_bd.h> | ||
25 | |||
26 | MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); | ||
27 | MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver"); | ||
28 | MODULE_LICENSE("GPL"); | ||
29 | |||
30 | #define LPBFIFO_REG_PACKET_SIZE (0x00) | ||
31 | #define LPBFIFO_REG_START_ADDRESS (0x04) | ||
32 | #define LPBFIFO_REG_CONTROL (0x08) | ||
33 | #define LPBFIFO_REG_ENABLE (0x0C) | ||
34 | #define LPBFIFO_REG_BYTES_DONE_STATUS (0x14) | ||
35 | #define LPBFIFO_REG_FIFO_DATA (0x40) | ||
36 | #define LPBFIFO_REG_FIFO_STATUS (0x44) | ||
37 | #define LPBFIFO_REG_FIFO_CONTROL (0x48) | ||
38 | #define LPBFIFO_REG_FIFO_ALARM (0x4C) | ||
39 | |||
40 | struct mpc52xx_lpbfifo { | ||
41 | struct device *dev; | ||
42 | phys_addr_t regs_phys; | ||
43 | void __iomem *regs; | ||
44 | int irq; | ||
45 | spinlock_t lock; | ||
46 | |||
47 | struct bcom_task *bcom_tx_task; | ||
48 | struct bcom_task *bcom_rx_task; | ||
49 | struct bcom_task *bcom_cur_task; | ||
50 | |||
51 | /* Current state data */ | ||
52 | struct mpc52xx_lpbfifo_request *req; | ||
53 | int dma_irqs_enabled; | ||
54 | }; | ||
55 | |||
56 | /* The MPC5200 has only one FIFO, so only one instance structure is needed */ | ||
57 | static struct mpc52xx_lpbfifo lpbfifo; | ||
58 | |||
59 | /** | ||
60 | * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred | ||
61 | */ | ||
62 | static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | ||
63 | { | ||
64 | size_t transfer_size = req->size - req->pos; | ||
65 | struct bcom_bd *bd; | ||
66 | void __iomem *reg; | ||
67 | u32 *data; | ||
68 | int i; | ||
69 | int bit_fields; | ||
70 | int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); | ||
71 | int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; | ||
72 | int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; | ||
73 | |||
74 | /* Set and clear the reset bits; the User Manual says this is good practice */ | ||
75 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); | ||
76 | |||
77 | /* set master enable bit */ | ||
78 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001); | ||
79 | if (!dma) { | ||
80 | /* While the FIFO can be set up for transfer sizes as large as | ||
81 | * 16M-1, the FIFO itself is only 512 bytes deep and it does | ||
82 | * not generate interrupts for FIFO full events (only transfer | ||
83 | * complete will raise an IRQ). Therefore when not using | ||
84 | * Bestcomm to drive the FIFO it needs to either be polled, or | ||
85 | * transfers need to be constrained to the size of the fifo. | ||
86 | * | ||
87 | * This driver restricts the size of the transfer. | ||
88 | */ | ||
89 | if (transfer_size > 512) | ||
90 | transfer_size = 512; | ||
91 | |||
92 | /* Load the FIFO with data */ | ||
93 | if (write) { | ||
94 | reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; | ||
95 | data = req->data + req->pos; | ||
96 | for (i = 0; i < transfer_size; i += 4) | ||
97 | out_be32(reg, *data++); | ||
98 | } | ||
99 | |||
100 | /* Unmask both error and completion irqs */ | ||
101 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301); | ||
102 | } else { | ||
103 | /* Choose the correct direction | ||
104 | * | ||
105 | * Configure the watermarks so DMA will always complete correctly. | ||
106 | * It may be worth experimenting with the ALARM value to see if | ||
107 | * there is a performance impact. However, if it is wrong there | ||
108 | * is a risk of DMA not transferring the last chunk of data | ||
109 | */ | ||
110 | if (write) { | ||
111 | out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4); | ||
112 | out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7); | ||
113 | lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task; | ||
114 | } else { | ||
115 | out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff); | ||
116 | out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0); | ||
117 | lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task; | ||
118 | |||
119 | if (poll_dma) { | ||
120 | if (lpbfifo.dma_irqs_enabled) { | ||
121 | disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); | ||
122 | lpbfifo.dma_irqs_enabled = 0; | ||
123 | } | ||
124 | } else { | ||
125 | if (!lpbfifo.dma_irqs_enabled) { | ||
126 | enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); | ||
127 | lpbfifo.dma_irqs_enabled = 1; | ||
128 | } | ||
129 | } | ||
130 | } | ||
131 | |||
132 | bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task); | ||
133 | bd->status = transfer_size; | ||
134 | if (!write) { | ||
135 | /* | ||
136 | * In the DMA read case, the DMA doesn't complete, | ||
137 | * possibly due to incorrect watermarks in the ALARM | ||
138 | * and CONTROL regs. For now instead of trying to | ||
139 | * determine the right watermarks that will make this | ||
140 | * work, just increase the number of bytes the FIFO is | ||
141 | * expecting. | ||
142 | * | ||
143 | * When submitting another operation, the FIFO will get | ||
144 | * reset, so the condition of the FIFO waiting for a | ||
145 | * non-existent 4 bytes will get cleared. | ||
146 | */ | ||
147 | transfer_size += 4; /* BLECH! */ | ||
148 | } | ||
149 | bd->data[0] = req->data_phys + req->pos; | ||
150 | bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL); | ||
151 | |||
152 | /* error irq & master enabled bit */ | ||
153 | bit_fields = 0x00000201; | ||
154 | |||
155 | /* Unmask irqs */ | ||
156 | if (write && (!poll_dma)) | ||
157 | bit_fields |= 0x00000100; /* completion irq too */ | ||
158 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields); | ||
159 | } | ||
160 | |||
161 | /* Set transfer size, width, chip select and READ mode */ | ||
162 | out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS, | ||
163 | req->offset + req->pos); | ||
164 | out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size); | ||
165 | |||
166 | bit_fields = req->cs << 24 | 0x000008; | ||
167 | if (!write) | ||
168 | bit_fields |= 0x010000; /* read mode */ | ||
169 | out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields); | ||
170 | |||
171 | /* Kick it off */ | ||
172 | out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); | ||
173 | if (dma) | ||
174 | bcom_enable(lpbfifo.bcom_cur_task); | ||
175 | } | ||
176 | |||
177 | /** | ||
178 | * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO | ||
179 | * | ||
180 | * On transmit, the dma completion irq triggers before the fifo completion | ||
181 | * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm | ||
182 | * task completion irq because everything is not really done until the LPB FIFO | ||
183 | * completion irq triggers. | ||
184 | * | ||
185 | * In other words: | ||
186 | * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On | ||
187 | * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this | ||
188 | * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings. | ||
189 | * | ||
190 | * Reasons for entering this routine: | ||
191 | * 1) PIO mode rx and tx completion irq | ||
192 | * 2) DMA interrupt mode tx completion irq | ||
193 | * 3) DMA polled mode tx | ||
194 | * | ||
195 | * Exit conditions: | ||
196 | * 1) Transfer aborted | ||
197 | * 2) FIFO complete without DMA; more data to do | ||
198 | * 3) FIFO complete without DMA; all data transferred | ||
199 | * 4) FIFO complete using DMA | ||
200 | * | ||
201 | * Condition 1 can occur regardless of whether or not DMA is used. | ||
202 | * It requires executing the callback to report the error and exiting | ||
203 | * immediately. | ||
204 | * | ||
205 | * Condition 2 requires programming the FIFO with the next block of data | ||
206 | * | ||
207 | * Condition 3 requires executing the callback to report completion | ||
208 | * | ||
209 | * Condition 4 means the same as 3, except that we also retrieve the bcom | ||
210 | * buffer so DMA doesn't get clogged up. | ||
211 | * | ||
212 | * To make things trickier, the spinlock must be dropped before | ||
213 | * executing the callback, otherwise we could end up with a deadlock | ||
214 | * or nested spinlock condition. The out path is non-trivial, so | ||
215 | * extra fiddling is done to make sure all paths lead to the same | ||
216 | * outbound code. | ||
217 | */ | ||
218 | static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) | ||
219 | { | ||
220 | struct mpc52xx_lpbfifo_request *req; | ||
221 | u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); | ||
222 | void __iomem *reg; | ||
223 | u32 *data; | ||
224 | int count, i; | ||
225 | int do_callback = 0; | ||
226 | u32 ts; | ||
227 | unsigned long flags; | ||
228 | int dma, write, poll_dma; | ||
229 | |||
230 | spin_lock_irqsave(&lpbfifo.lock, flags); | ||
231 | ts = get_tbl(); | ||
232 | |||
233 | req = lpbfifo.req; | ||
234 | if (!req) { | ||
235 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
236 | pr_err("bogus LPBFIFO IRQ\n"); | ||
237 | return IRQ_HANDLED; | ||
238 | } | ||
239 | |||
240 | dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); | ||
241 | write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; | ||
242 | poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; | ||
243 | |||
244 | if (dma && !write) { | ||
245 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
246 | pr_err("bogus LPBFIFO IRQ (dma and not writing)\n"); | ||
247 | return IRQ_HANDLED; | ||
248 | } | ||
249 | |||
250 | if ((status & 0x01) == 0) { | ||
251 | goto out; | ||
252 | } | ||
253 | |||
254 | /* check abort bit */ | ||
255 | if (status & 0x10) { | ||
256 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); | ||
257 | do_callback = 1; | ||
258 | goto out; | ||
259 | } | ||
260 | |||
261 | /* Read result from hardware */ | ||
262 | count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); | ||
263 | count &= 0x00ffffff; | ||
264 | |||
265 | if (!dma && !write) { | ||
266 | /* copy the data out of the FIFO */ | ||
267 | reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; | ||
268 | data = req->data + req->pos; | ||
269 | for (i = 0; i < count; i += 4) | ||
270 | *data++ = in_be32(reg); | ||
271 | } | ||
272 | |||
273 | /* Update transfer position and count */ | ||
274 | req->pos += count; | ||
275 | |||
276 | /* Decide what to do next */ | ||
277 | if (req->size - req->pos) | ||
278 | mpc52xx_lpbfifo_kick(req); /* more work to do */ | ||
279 | else | ||
280 | do_callback = 1; | ||
281 | |||
282 | out: | ||
283 | /* Clear the IRQ */ | ||
284 | out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01); | ||
285 | |||
286 | if (dma && (status & 0x11)) { | ||
287 | /* | ||
288 | * Count the DMA as complete only when the FIFO completion | ||
289 | * status or abort bits are set. | ||
290 | * | ||
291 | * (status & 0x01) should always be the case except sometimes | ||
292 | * when using polled DMA. | ||
293 | * | ||
294 | * (status & 0x10) {transfer aborted}: This case needs more | ||
295 | * testing. | ||
296 | */ | ||
297 | bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); | ||
298 | } | ||
299 | req->last_byte = ((u8 *)req->data)[req->size - 1]; | ||
300 | |||
301 | /* When the do_callback flag is set, it means the transfer is finished | ||
302 | * so set the FIFO as idle */ | ||
303 | if (do_callback) | ||
304 | lpbfifo.req = NULL; | ||
305 | |||
306 | if (irq != 0) /* don't increment on polled case */ | ||
307 | req->irq_count++; | ||
308 | |||
309 | req->irq_ticks += get_tbl() - ts; | ||
310 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
311 | |||
312 | /* Spinlock is released; it is now safe to call the callback */ | ||
313 | if (do_callback && req->callback) | ||
314 | req->callback(req); | ||
315 | |||
316 | return IRQ_HANDLED; | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task | ||
321 | * | ||
322 | * Only used when receiving data. | ||
323 | */ | ||
324 | static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) | ||
325 | { | ||
326 | struct mpc52xx_lpbfifo_request *req; | ||
327 | unsigned long flags; | ||
328 | u32 status; | ||
329 | u32 ts; | ||
330 | |||
331 | spin_lock_irqsave(&lpbfifo.lock, flags); | ||
332 | ts = get_tbl(); | ||
333 | |||
334 | req = lpbfifo.req; | ||
335 | if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) { | ||
336 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
337 | return IRQ_HANDLED; | ||
338 | } | ||
339 | |||
340 | if (irq != 0) /* don't increment on polled case */ | ||
341 | req->irq_count++; | ||
342 | |||
343 | if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) { | ||
344 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
345 | |||
346 | req->buffer_not_done_cnt++; | ||
347 | if ((req->buffer_not_done_cnt % 1000) == 0) | ||
348 | pr_err("transfer stalled\n"); | ||
349 | |||
350 | return IRQ_HANDLED; | ||
351 | } | ||
352 | |||
353 | bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); | ||
354 | |||
355 | req->last_byte = ((u8 *)req->data)[req->size - 1]; | ||
356 | |||
357 | req->pos = status & 0x00ffffff; | ||
358 | |||
359 | /* Mark the FIFO as idle */ | ||
360 | lpbfifo.req = NULL; | ||
361 | |||
362 | /* Release the lock before calling out to the callback. */ | ||
363 | req->irq_ticks += get_tbl() - ts; | ||
364 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
365 | |||
366 | if (req->callback) | ||
367 | req->callback(req); | ||
368 | |||
369 | return IRQ_HANDLED; | ||
370 | } | ||
371 | |||
372 | /** | ||
373 | * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion | ||
374 | */ | ||
375 | void mpc52xx_lpbfifo_poll(void) | ||
376 | { | ||
377 | struct mpc52xx_lpbfifo_request *req = lpbfifo.req; | ||
378 | int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); | ||
379 | int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; | ||
380 | |||
381 | /* | ||
382 | * For more information, see comments on the "Fat Lady" | ||
383 | */ | ||
384 | if (dma && write) | ||
385 | mpc52xx_lpbfifo_irq(0, NULL); | ||
386 | else | ||
387 | mpc52xx_lpbfifo_bcom_irq(0, NULL); | ||
388 | } | ||
389 | EXPORT_SYMBOL(mpc52xx_lpbfifo_poll); | ||
390 | |||
391 | /** | ||
392 | * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request. | ||
393 | * @req: Pointer to request structure | ||
394 | */ | ||
395 | int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req) | ||
396 | { | ||
397 | unsigned long flags; | ||
398 | |||
399 | if (!lpbfifo.regs) | ||
400 | return -ENODEV; | ||
401 | |||
402 | spin_lock_irqsave(&lpbfifo.lock, flags); | ||
403 | |||
404 | /* If the req pointer is already set, then a transfer is in progress */ | ||
405 | if (lpbfifo.req) { | ||
406 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
407 | return -EBUSY; | ||
408 | } | ||
409 | |||
410 | /* Setup the transfer */ | ||
411 | lpbfifo.req = req; | ||
412 | req->irq_count = 0; | ||
413 | req->irq_ticks = 0; | ||
414 | req->buffer_not_done_cnt = 0; | ||
415 | req->pos = 0; | ||
416 | |||
417 | mpc52xx_lpbfifo_kick(req); | ||
418 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
419 | return 0; | ||
420 | } | ||
421 | EXPORT_SYMBOL(mpc52xx_lpbfifo_submit); | ||
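The request structure filled in by callers is declared in the mpc52xx headers; based only on the fields this file touches, a client (for example a LocalPlus peripheral driver) would submit a transfer roughly as sketched below. The chip select number, offset and buffer handling are hypothetical:

    /* Hypothetical caller: PIO write of one 512-byte block on chip select 3.
     * Assumes the usual mpc52xx headers are already included. */
    static void my_lpb_done(struct mpc52xx_lpbfifo_request *req)
    {
            pr_info("LPB transfer complete, last byte 0x%02x\n", req->last_byte);
    }

    static int my_lpb_write(void *buf)
    {
            static struct mpc52xx_lpbfifo_request req;

            req.cs = 3;                     /* LocalPlus chip select (example) */
            req.offset = 0;                 /* offset within that chip select */
            req.data = buf;                 /* kernel virtual address of the data */
            req.size = 512;
            req.flags = MPC52XX_LPBFIFO_FLAG_WRITE | MPC52XX_LPBFIFO_FLAG_NO_DMA;
            req.callback = my_lpb_done;     /* called once the FIFO irq fires */

            return mpc52xx_lpbfifo_submit(&req);    /* -EBUSY if a request is pending */
    }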
422 | |||
423 | void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) | ||
424 | { | ||
425 | unsigned long flags; | ||
426 | |||
427 | spin_lock_irqsave(&lpbfifo.lock, flags); | ||
428 | if (lpbfifo.req == req) { | ||
429 | /* Put it into reset and clear the state */ | ||
430 | bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task); | ||
431 | bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task); | ||
432 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); | ||
433 | lpbfifo.req = NULL; | ||
434 | } | ||
435 | spin_unlock_irqrestore(&lpbfifo.lock, flags); | ||
436 | } | ||
437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); | ||
438 | |||
439 | static int __devinit | ||
440 | mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match) | ||
441 | { | ||
442 | struct resource res; | ||
443 | int rc = -ENOMEM; | ||
444 | |||
445 | if (lpbfifo.dev != NULL) | ||
446 | return -ENOSPC; | ||
447 | |||
448 | lpbfifo.irq = irq_of_parse_and_map(op->node, 0); | ||
449 | if (!lpbfifo.irq) | ||
450 | return -ENODEV; | ||
451 | |||
452 | if (of_address_to_resource(op->node, 0, &res)) | ||
453 | return -ENODEV; | ||
454 | lpbfifo.regs_phys = res.start; | ||
455 | lpbfifo.regs = of_iomap(op->node, 0); | ||
456 | if (!lpbfifo.regs) | ||
457 | return -ENOMEM; | ||
458 | |||
459 | spin_lock_init(&lpbfifo.lock); | ||
460 | |||
461 | /* Put FIFO into reset */ | ||
462 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); | ||
463 | |||
464 | /* Register the interrupt handler */ | ||
465 | rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0, | ||
466 | "mpc52xx-lpbfifo", &lpbfifo); | ||
467 | if (rc) | ||
468 | goto err_irq; | ||
469 | |||
470 | /* Request the Bestcomm receive (fifo --> memory) task and IRQ */ | ||
471 | lpbfifo.bcom_rx_task = | ||
472 | bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, | ||
473 | BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC, | ||
474 | 16*1024*1024); | ||
475 | if (!lpbfifo.bcom_rx_task) | ||
476 | goto err_bcom_rx; | ||
477 | |||
478 | rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), | ||
479 | mpc52xx_lpbfifo_bcom_irq, 0, | ||
480 | "mpc52xx-lpbfifo-rx", &lpbfifo); | ||
481 | if (rc) | ||
482 | goto err_bcom_rx_irq; | ||
483 | |||
484 | /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */ | ||
485 | lpbfifo.bcom_tx_task = | ||
486 | bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, | ||
487 | BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC); | ||
488 | if (!lpbfifo.bcom_tx_task) | ||
489 | goto err_bcom_tx; | ||
490 | |||
491 | lpbfifo.dev = &op->dev; | ||
492 | return 0; | ||
493 | |||
494 | err_bcom_tx: | ||
495 | free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); | ||
496 | err_bcom_rx_irq: | ||
497 | bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); | ||
498 | err_bcom_rx: | ||
499 | err_irq: | ||
500 | iounmap(lpbfifo.regs); | ||
501 | lpbfifo.regs = NULL; | ||
502 | |||
503 | dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n"); | ||
504 | return -ENODEV; | ||
505 | } | ||
506 | |||
507 | |||
508 | static int __devexit mpc52xx_lpbfifo_remove(struct of_device *op) | ||
509 | { | ||
510 | if (lpbfifo.dev != &op->dev) | ||
511 | return 0; | ||
512 | |||
513 | /* Put FIFO in reset */ | ||
514 | out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); | ||
515 | |||
516 | /* Release the bestcomm transmit task */ | ||
517 | free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo); | ||
518 | bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task); | ||
519 | |||
520 | /* Release the bestcomm receive task */ | ||
521 | free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); | ||
522 | bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); | ||
523 | |||
524 | free_irq(lpbfifo.irq, &lpbfifo); | ||
525 | iounmap(lpbfifo.regs); | ||
526 | lpbfifo.regs = NULL; | ||
527 | lpbfifo.dev = NULL; | ||
528 | |||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { | ||
533 | { .compatible = "fsl,mpc5200-lpbfifo", }, | ||
534 | {}, | ||
535 | }; | ||
536 | |||
537 | static struct of_platform_driver mpc52xx_lpbfifo_driver = { | ||
538 | .owner = THIS_MODULE, | ||
539 | .name = "mpc52xx-lpbfifo", | ||
540 | .match_table = mpc52xx_lpbfifo_match, | ||
541 | .probe = mpc52xx_lpbfifo_probe, | ||
542 | .remove = __devexit_p(mpc52xx_lpbfifo_remove), | ||
543 | }; | ||
544 | |||
545 | /*********************************************************************** | ||
546 | * Module init/exit | ||
547 | */ | ||
548 | static int __init mpc52xx_lpbfifo_init(void) | ||
549 | { | ||
550 | pr_debug("Registering LocalPlus bus FIFO driver\n"); | ||
551 | return of_register_platform_driver(&mpc52xx_lpbfifo_driver); | ||
552 | } | ||
553 | module_init(mpc52xx_lpbfifo_init); | ||
554 | |||
555 | static void __exit mpc52xx_lpbfifo_exit(void) | ||
556 | { | ||
557 | pr_debug("Unregistering LocalPlus bus FIFO driver\n"); | ||
558 | of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); | ||
559 | } | ||
560 | module_exit(mpc52xx_lpbfifo_exit); | ||
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 4b1c422b8145..0ff5174ae4f5 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile | |||
@@ -8,7 +8,7 @@ endif | |||
8 | 8 | ||
9 | obj-y := lpar.o hvCall.o nvram.o reconfig.o \ | 9 | obj-y := lpar.o hvCall.o nvram.o reconfig.o \ |
10 | setup.o iommu.o ras.o \ | 10 | setup.o iommu.o ras.o \ |
11 | firmware.o power.o | 11 | firmware.o power.o dlpar.o |
12 | obj-$(CONFIG_SMP) += smp.o | 12 | obj-$(CONFIG_SMP) += smp.o |
13 | obj-$(CONFIG_XICS) += xics.o | 13 | obj-$(CONFIG_XICS) += xics.o |
14 | obj-$(CONFIG_SCANLOG) += scanlog.o | 14 | obj-$(CONFIG_SCANLOG) += scanlog.o |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c new file mode 100644 index 000000000000..fd2f0afeb4de --- /dev/null +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -0,0 +1,558 @@ | |||
1 | /* | ||
2 | * Support for dynamic reconfiguration for PCI, Memory, and CPU | ||
3 | * Hotplug and Dynamic Logical Partitioning on RPA platforms. | ||
4 | * | ||
5 | * Copyright (C) 2009 Nathan Fontenot | ||
6 | * Copyright (C) 2009 IBM Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/kref.h> | ||
15 | #include <linux/notifier.h> | ||
16 | #include <linux/proc_fs.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/cpu.h> | ||
19 | #include "offline_states.h" | ||
20 | |||
21 | #include <asm/prom.h> | ||
22 | #include <asm/machdep.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/rtas.h> | ||
25 | #include <asm/pSeries_reconfig.h> | ||
26 | |||
27 | struct cc_workarea { | ||
28 | u32 drc_index; | ||
29 | u32 zero; | ||
30 | u32 name_offset; | ||
31 | u32 prop_length; | ||
32 | u32 prop_offset; | ||
33 | }; | ||
34 | |||
35 | static void dlpar_free_cc_property(struct property *prop) | ||
36 | { | ||
37 | kfree(prop->name); | ||
38 | kfree(prop->value); | ||
39 | kfree(prop); | ||
40 | } | ||
41 | |||
42 | static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) | ||
43 | { | ||
44 | struct property *prop; | ||
45 | char *name; | ||
46 | char *value; | ||
47 | |||
48 | prop = kzalloc(sizeof(*prop), GFP_KERNEL); | ||
49 | if (!prop) | ||
50 | return NULL; | ||
51 | |||
52 | name = (char *)ccwa + ccwa->name_offset; | ||
53 | prop->name = kstrdup(name, GFP_KERNEL); | ||
54 | |||
55 | prop->length = ccwa->prop_length; | ||
56 | value = (char *)ccwa + ccwa->prop_offset; | ||
57 | prop->value = kzalloc(prop->length, GFP_KERNEL); | ||
58 | if (!prop->value) { | ||
59 | dlpar_free_cc_property(prop); | ||
60 | return NULL; | ||
61 | } | ||
62 | |||
63 | memcpy(prop->value, value, prop->length); | ||
64 | return prop; | ||
65 | } | ||
66 | |||
67 | static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) | ||
68 | { | ||
69 | struct device_node *dn; | ||
70 | char *name; | ||
71 | |||
72 | dn = kzalloc(sizeof(*dn), GFP_KERNEL); | ||
73 | if (!dn) | ||
74 | return NULL; | ||
75 | |||
76 | /* The configure connector reported name does not contain a | ||
77 | * preceding '/', so we allocate a buffer large enough to | ||
78 | * prepend this to the full_name. | ||
79 | */ | ||
80 | name = (char *)ccwa + ccwa->name_offset; | ||
81 | dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL); | ||
82 | if (!dn->full_name) { | ||
83 | kfree(dn); | ||
84 | return NULL; | ||
85 | } | ||
86 | |||
87 | sprintf(dn->full_name, "/%s", name); | ||
88 | return dn; | ||
89 | } | ||
90 | |||
91 | static void dlpar_free_one_cc_node(struct device_node *dn) | ||
92 | { | ||
93 | struct property *prop; | ||
94 | |||
95 | while (dn->properties) { | ||
96 | prop = dn->properties; | ||
97 | dn->properties = prop->next; | ||
98 | dlpar_free_cc_property(prop); | ||
99 | } | ||
100 | |||
101 | kfree(dn->full_name); | ||
102 | kfree(dn); | ||
103 | } | ||
104 | |||
105 | static void dlpar_free_cc_nodes(struct device_node *dn) | ||
106 | { | ||
107 | if (dn->child) | ||
108 | dlpar_free_cc_nodes(dn->child); | ||
109 | |||
110 | if (dn->sibling) | ||
111 | dlpar_free_cc_nodes(dn->sibling); | ||
112 | |||
113 | dlpar_free_one_cc_node(dn); | ||
114 | } | ||
115 | |||
116 | #define NEXT_SIBLING 1 | ||
117 | #define NEXT_CHILD 2 | ||
118 | #define NEXT_PROPERTY 3 | ||
119 | #define PREV_PARENT 4 | ||
120 | #define MORE_MEMORY 5 | ||
121 | #define CALL_AGAIN -2 | ||
122 | #define ERR_CFG_USE -9003 | ||
123 | |||
124 | struct device_node *dlpar_configure_connector(u32 drc_index) | ||
125 | { | ||
126 | struct device_node *dn; | ||
127 | struct device_node *first_dn = NULL; | ||
128 | struct device_node *last_dn = NULL; | ||
129 | struct property *property; | ||
130 | struct property *last_property = NULL; | ||
131 | struct cc_workarea *ccwa; | ||
132 | int cc_token; | ||
133 | int rc; | ||
134 | |||
135 | cc_token = rtas_token("ibm,configure-connector"); | ||
136 | if (cc_token == RTAS_UNKNOWN_SERVICE) | ||
137 | return NULL; | ||
138 | |||
139 | spin_lock(&rtas_data_buf_lock); | ||
140 | ccwa = (struct cc_workarea *)&rtas_data_buf[0]; | ||
141 | ccwa->drc_index = drc_index; | ||
142 | ccwa->zero = 0; | ||
143 | |||
144 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | ||
145 | while (rc) { | ||
146 | switch (rc) { | ||
147 | case NEXT_SIBLING: | ||
148 | dn = dlpar_parse_cc_node(ccwa); | ||
149 | if (!dn) | ||
150 | goto cc_error; | ||
151 | |||
152 | dn->parent = last_dn->parent; | ||
153 | last_dn->sibling = dn; | ||
154 | last_dn = dn; | ||
155 | break; | ||
156 | |||
157 | case NEXT_CHILD: | ||
158 | dn = dlpar_parse_cc_node(ccwa); | ||
159 | if (!dn) | ||
160 | goto cc_error; | ||
161 | |||
162 | if (!first_dn) | ||
163 | first_dn = dn; | ||
164 | else { | ||
165 | dn->parent = last_dn; | ||
166 | if (last_dn) | ||
167 | last_dn->child = dn; | ||
168 | } | ||
169 | |||
170 | last_dn = dn; | ||
171 | break; | ||
172 | |||
173 | case NEXT_PROPERTY: | ||
174 | property = dlpar_parse_cc_property(ccwa); | ||
175 | if (!property) | ||
176 | goto cc_error; | ||
177 | |||
178 | if (!last_dn->properties) | ||
179 | last_dn->properties = property; | ||
180 | else | ||
181 | last_property->next = property; | ||
182 | |||
183 | last_property = property; | ||
184 | break; | ||
185 | |||
186 | case PREV_PARENT: | ||
187 | last_dn = last_dn->parent; | ||
188 | break; | ||
189 | |||
190 | case CALL_AGAIN: | ||
191 | break; | ||
192 | |||
193 | case MORE_MEMORY: | ||
194 | case ERR_CFG_USE: | ||
195 | default: | ||
196 | printk(KERN_ERR "Unexpected Error (%d) " | ||
197 | "returned from configure-connector\n", rc); | ||
198 | goto cc_error; | ||
199 | } | ||
200 | |||
201 | rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); | ||
202 | } | ||
203 | |||
204 | spin_unlock(&rtas_data_buf_lock); | ||
205 | return first_dn; | ||
206 | |||
207 | cc_error: | ||
208 | if (first_dn) | ||
209 | dlpar_free_cc_nodes(first_dn); | ||
210 | spin_unlock(&rtas_data_buf_lock); | ||
211 | return NULL; | ||
212 | } | ||
213 | |||
214 | static struct device_node *derive_parent(const char *path) | ||
215 | { | ||
216 | struct device_node *parent; | ||
217 | char *last_slash; | ||
218 | |||
219 | last_slash = strrchr(path, '/'); | ||
220 | if (last_slash == path) { | ||
221 | parent = of_find_node_by_path("/"); | ||
222 | } else { | ||
223 | char *parent_path; | ||
224 | int parent_path_len = last_slash - path + 1; | ||
225 | parent_path = kmalloc(parent_path_len, GFP_KERNEL); | ||
226 | if (!parent_path) | ||
227 | return NULL; | ||
228 | |||
229 | strlcpy(parent_path, path, parent_path_len); | ||
230 | parent = of_find_node_by_path(parent_path); | ||
231 | kfree(parent_path); | ||
232 | } | ||
233 | |||
234 | return parent; | ||
235 | } | ||
236 | |||
237 | int dlpar_attach_node(struct device_node *dn) | ||
238 | { | ||
239 | struct proc_dir_entry *ent; | ||
240 | int rc; | ||
241 | |||
242 | of_node_set_flag(dn, OF_DYNAMIC); | ||
243 | kref_init(&dn->kref); | ||
244 | dn->parent = derive_parent(dn->full_name); | ||
245 | if (!dn->parent) | ||
246 | return -ENOMEM; | ||
247 | |||
248 | rc = blocking_notifier_call_chain(&pSeries_reconfig_chain, | ||
249 | PSERIES_RECONFIG_ADD, dn); | ||
250 | if (rc == NOTIFY_BAD) { | ||
251 | printk(KERN_ERR "Failed to add device node %s\n", | ||
252 | dn->full_name); | ||
253 | return -ENOMEM; /* For now, safe to assume kmalloc failure */ | ||
254 | } | ||
255 | |||
256 | of_attach_node(dn); | ||
257 | |||
258 | #ifdef CONFIG_PROC_DEVICETREE | ||
259 | ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde); | ||
260 | if (ent) | ||
261 | proc_device_tree_add_node(dn, ent); | ||
262 | #endif | ||
263 | |||
264 | of_node_put(dn->parent); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | int dlpar_detach_node(struct device_node *dn) | ||
269 | { | ||
270 | struct device_node *parent = dn->parent; | ||
271 | struct property *prop = dn->properties; | ||
272 | |||
273 | #ifdef CONFIG_PROC_DEVICETREE | ||
274 | while (prop) { | ||
275 | remove_proc_entry(prop->name, dn->pde); | ||
276 | prop = prop->next; | ||
277 | } | ||
278 | |||
279 | if (dn->pde) | ||
280 | remove_proc_entry(dn->pde->name, parent->pde); | ||
281 | #endif | ||
282 | |||
283 | blocking_notifier_call_chain(&pSeries_reconfig_chain, | ||
284 | PSERIES_RECONFIG_REMOVE, dn); | ||
285 | of_detach_node(dn); | ||
286 | of_node_put(dn); /* Must decrement the refcount */ | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | int online_node_cpus(struct device_node *dn) | ||
292 | { | ||
293 | int rc = 0; | ||
294 | unsigned int cpu; | ||
295 | int len, nthreads, i; | ||
296 | const u32 *intserv; | ||
297 | |||
298 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | ||
299 | if (!intserv) | ||
300 | return -EINVAL; | ||
301 | |||
302 | nthreads = len / sizeof(u32); | ||
303 | |||
304 | cpu_maps_update_begin(); | ||
305 | for (i = 0; i < nthreads; i++) { | ||
306 | for_each_present_cpu(cpu) { | ||
307 | if (get_hard_smp_processor_id(cpu) != intserv[i]) | ||
308 | continue; | ||
309 | BUG_ON(get_cpu_current_state(cpu) | ||
310 | != CPU_STATE_OFFLINE); | ||
311 | cpu_maps_update_done(); | ||
312 | rc = cpu_up(cpu); | ||
313 | if (rc) | ||
314 | goto out; | ||
315 | cpu_maps_update_begin(); | ||
316 | |||
317 | break; | ||
318 | } | ||
319 | if (cpu == num_possible_cpus()) | ||
320 | printk(KERN_WARNING "Could not find cpu to online " | ||
321 | "with physical id 0x%x\n", intserv[i]); | ||
322 | } | ||
323 | cpu_maps_update_done(); | ||
324 | |||
325 | out: | ||
326 | return rc; | ||
327 | |||
328 | } | ||
329 | |||
330 | int offline_node_cpus(struct device_node *dn) | ||
331 | { | ||
332 | int rc = 0; | ||
333 | unsigned int cpu; | ||
334 | int len, nthreads, i; | ||
335 | const u32 *intserv; | ||
336 | |||
337 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | ||
338 | if (!intserv) | ||
339 | return -EINVAL; | ||
340 | |||
341 | nthreads = len / sizeof(u32); | ||
342 | |||
343 | cpu_maps_update_begin(); | ||
344 | for (i = 0; i < nthreads; i++) { | ||
345 | for_each_present_cpu(cpu) { | ||
346 | if (get_hard_smp_processor_id(cpu) != intserv[i]) | ||
347 | continue; | ||
348 | |||
349 | if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) | ||
350 | break; | ||
351 | |||
352 | if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { | ||
353 | cpu_maps_update_done(); | ||
354 | rc = cpu_down(cpu); | ||
355 | if (rc) | ||
356 | goto out; | ||
357 | cpu_maps_update_begin(); | ||
358 | break; | ||
359 | |||
360 | } | ||
361 | |||
362 | /* | ||
363 | * The cpu is in CPU_STATE_INACTIVE. | ||
364 | * Upgrade its state to CPU_STATE_OFFLINE. | ||
365 | */ | ||
366 | set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); | ||
367 | BUG_ON(plpar_hcall_norets(H_PROD, intserv[i]) | ||
368 | != H_SUCCESS); | ||
369 | __cpu_die(cpu); | ||
370 | break; | ||
371 | } | ||
372 | if (cpu == num_possible_cpus()) | ||
373 | printk(KERN_WARNING "Could not find cpu to offline " | ||
374 | "with physical id 0x%x\n", intserv[i]); | ||
375 | } | ||
376 | cpu_maps_update_done(); | ||
377 | |||
378 | out: | ||
379 | return rc; | ||
380 | |||
381 | } | ||
382 | |||
383 | #define DR_ENTITY_SENSE 9003 | ||
384 | #define DR_ENTITY_PRESENT 1 | ||
385 | #define DR_ENTITY_UNUSABLE 2 | ||
386 | #define ALLOCATION_STATE 9003 | ||
387 | #define ALLOC_UNUSABLE 0 | ||
388 | #define ALLOC_USABLE 1 | ||
389 | #define ISOLATION_STATE 9001 | ||
390 | #define ISOLATE 0 | ||
391 | #define UNISOLATE 1 | ||
392 | |||
393 | int dlpar_acquire_drc(u32 drc_index) | ||
394 | { | ||
395 | int dr_status, rc; | ||
396 | |||
397 | rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, | ||
398 | DR_ENTITY_SENSE, drc_index); | ||
399 | if (rc || dr_status != DR_ENTITY_UNUSABLE) | ||
400 | return -1; | ||
401 | |||
402 | rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE); | ||
403 | if (rc) | ||
404 | return rc; | ||
405 | |||
406 | rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); | ||
407 | if (rc) { | ||
408 | rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE); | ||
409 | return rc; | ||
410 | } | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | int dlpar_release_drc(u32 drc_index) | ||
416 | { | ||
417 | int dr_status, rc; | ||
418 | |||
419 | rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, | ||
420 | DR_ENTITY_SENSE, drc_index); | ||
421 | if (rc || dr_status != DR_ENTITY_PRESENT) | ||
422 | return -1; | ||
423 | |||
424 | rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE); | ||
425 | if (rc) | ||
426 | return rc; | ||
427 | |||
428 | rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE); | ||
429 | if (rc) { | ||
430 | rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); | ||
431 | return rc; | ||
432 | } | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
438 | |||
439 | static DEFINE_MUTEX(pseries_cpu_hotplug_mutex); | ||
440 | |||
441 | void cpu_hotplug_driver_lock() | ||
442 | { | ||
443 | mutex_lock(&pseries_cpu_hotplug_mutex); | ||
444 | } | ||
445 | |||
446 | void cpu_hotplug_driver_unlock() | ||
447 | { | ||
448 | mutex_unlock(&pseries_cpu_hotplug_mutex); | ||
449 | } | ||
450 | |||
451 | static ssize_t dlpar_cpu_probe(const char *buf, size_t count) | ||
452 | { | ||
453 | struct device_node *dn; | ||
454 | unsigned long drc_index; | ||
455 | char *cpu_name; | ||
456 | int rc; | ||
457 | |||
458 | cpu_hotplug_driver_lock(); | ||
459 | rc = strict_strtoul(buf, 0, &drc_index); | ||
460 | if (rc) { | ||
461 | rc = -EINVAL; | ||
462 | goto out; | ||
463 | } | ||
464 | |||
465 | dn = dlpar_configure_connector(drc_index); | ||
466 | if (!dn) { | ||
467 | rc = -EINVAL; | ||
468 | goto out; | ||
469 | } | ||
470 | |||
471 | /* configure-connector reports cpus as living in the base | ||
472 | * directory of the device tree. CPUs actually live in the | ||
473 | * cpus directory so we need to fix up the full_name. | ||
474 | */ | ||
475 | cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1, | ||
476 | GFP_KERNEL); | ||
477 | if (!cpu_name) { | ||
478 | dlpar_free_cc_nodes(dn); | ||
479 | rc = -ENOMEM; | ||
480 | goto out; | ||
481 | } | ||
482 | |||
483 | sprintf(cpu_name, "/cpus%s", dn->full_name); | ||
484 | kfree(dn->full_name); | ||
485 | dn->full_name = cpu_name; | ||
486 | |||
487 | rc = dlpar_acquire_drc(drc_index); | ||
488 | if (rc) { | ||
489 | dlpar_free_cc_nodes(dn); | ||
490 | rc = -EINVAL; | ||
491 | goto out; | ||
492 | } | ||
493 | |||
494 | rc = dlpar_attach_node(dn); | ||
495 | if (rc) { | ||
496 | dlpar_release_drc(drc_index); | ||
497 | dlpar_free_cc_nodes(dn); | ||
498 | goto out; | ||
499 | } | ||
500 | rc = online_node_cpus(dn); | ||
501 | out: | ||
502 | cpu_hotplug_driver_unlock(); | ||
503 | |||
504 | return rc ? rc : count; | ||
505 | } | ||
506 | |||
507 | static ssize_t dlpar_cpu_release(const char *buf, size_t count) | ||
508 | { | ||
509 | struct device_node *dn; | ||
510 | const u32 *drc_index; | ||
511 | int rc; | ||
512 | |||
513 | dn = of_find_node_by_path(buf); | ||
514 | if (!dn) | ||
515 | return -EINVAL; | ||
516 | |||
517 | drc_index = of_get_property(dn, "ibm,my-drc-index", NULL); | ||
518 | if (!drc_index) { | ||
519 | of_node_put(dn); | ||
520 | return -EINVAL; | ||
521 | } | ||
522 | |||
523 | cpu_hotplug_driver_lock(); | ||
524 | rc = offline_node_cpus(dn); | ||
525 | if (rc) { | ||
526 | of_node_put(dn); | ||
527 | rc = -EINVAL; | ||
528 | goto out; | ||
529 | } | ||
530 | |||
531 | rc = dlpar_release_drc(*drc_index); | ||
532 | if (rc) { | ||
533 | of_node_put(dn); | ||
534 | goto out; | ||
535 | } | ||
536 | |||
537 | rc = dlpar_detach_node(dn); | ||
538 | if (rc) { | ||
539 | dlpar_acquire_drc(*drc_index); | ||
540 | goto out; | ||
541 | } | ||
542 | |||
543 | of_node_put(dn); | ||
544 | out: | ||
545 | cpu_hotplug_driver_unlock(); | ||
546 | return rc ? rc : count; | ||
547 | } | ||
548 | |||
549 | static int __init pseries_dlpar_init(void) | ||
550 | { | ||
551 | ppc_md.cpu_probe = dlpar_cpu_probe; | ||
552 | ppc_md.cpu_release = dlpar_cpu_release; | ||
553 | |||
554 | return 0; | ||
555 | } | ||
556 | machine_device_initcall(pseries, pseries_dlpar_init); | ||
557 | |||
558 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
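dlpar_cpu_probe() parses the string written to it as a drc-index and dlpar_cpu_release() expects a device-tree path, so exercising these hooks from user space amounts to writing those strings to the probe/release files that the generic ARCH_CPU_PROBE_RELEASE code exposes under /sys/devices/system/cpu. A rough sketch, with made-up index and path values:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/probe", "w");

            if (!f) {
                    perror("probe");
                    return 1;
            }
            fprintf(f, "0x10000001\n");     /* example drc-index of the CPU to add */
            fclose(f);

            f = fopen("/sys/devices/system/cpu/release", "w");
            if (!f) {
                    perror("release");
                    return 1;
            }
            fprintf(f, "/cpus/PowerPC,POWER6@4\n");   /* example device-tree path */
            return fclose(f) ? 1 : 0;
    }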
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 5182d2b992c6..a2305d29bbbd 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c | |||
@@ -96,7 +96,7 @@ static struct device_node *derive_parent(const char *path) | |||
96 | return parent; | 96 | return parent; |
97 | } | 97 | } |
98 | 98 | ||
99 | static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); | 99 | BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); |
100 | 100 | ||
101 | int pSeries_reconfig_notifier_register(struct notifier_block *nb) | 101 | int pSeries_reconfig_notifier_register(struct notifier_block *nb) |
102 | { | 102 | { |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index f2df6e2a224c..51eea3000b55 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -781,5 +781,15 @@ config PATA_BF54X | |||
781 | 781 | ||
782 | If unsure, say N. | 782 | If unsure, say N. |
783 | 783 | ||
784 | config PATA_MACIO | ||
785 | tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" | ||
786 | depends on PPC_PMAC | ||
787 | help | ||
788 | Most IDE-capable PowerMacs have IDE busses driven by a variant | ||
789 | of this controller, which is part of the Apple chipset used on | ||
790 | most PowerMac models. Some models have multiple busses using | ||
791 | different chipsets, though generally MacIO is one of them. | ||
792 | |||
793 | |||
784 | endif # ATA_SFF | 794 | endif # ATA_SFF |
785 | endif # ATA | 795 | endif # ATA |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 01e126f343b3..e439141d423e 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile | |||
@@ -18,6 +18,7 @@ obj-$(CONFIG_SATA_MV) += sata_mv.o | |||
18 | obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o | 18 | obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o |
19 | obj-$(CONFIG_PDC_ADMA) += pdc_adma.o | 19 | obj-$(CONFIG_PDC_ADMA) += pdc_adma.o |
20 | obj-$(CONFIG_SATA_FSL) += sata_fsl.o | 20 | obj-$(CONFIG_SATA_FSL) += sata_fsl.o |
21 | obj-$(CONFIG_PATA_MACIO) += pata_macio.o | ||
21 | 22 | ||
22 | obj-$(CONFIG_PATA_ALI) += pata_ali.o | 23 | obj-$(CONFIG_PATA_ALI) += pata_ali.o |
23 | obj-$(CONFIG_PATA_AMD) += pata_amd.o | 24 | obj-$(CONFIG_PATA_AMD) += pata_amd.o |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index bbbb1fab1755..51eb1e298601 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -2384,7 +2384,7 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) | |||
2384 | ap->hsm_task_state = HSM_ST_IDLE; | 2384 | ap->hsm_task_state = HSM_ST_IDLE; |
2385 | 2385 | ||
2386 | if (ap->ioaddr.bmdma_addr) | 2386 | if (ap->ioaddr.bmdma_addr) |
2387 | ata_bmdma_stop(qc); | 2387 | ap->ops->bmdma_stop(qc); |
2388 | 2388 | ||
2389 | spin_unlock_irqrestore(ap->lock, flags); | 2389 | spin_unlock_irqrestore(ap->lock, flags); |
2390 | } | 2390 | } |
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c new file mode 100644 index 000000000000..4cc7bbd10ec2 --- /dev/null +++ b/drivers/ata/pata_macio.c | |||
@@ -0,0 +1,1427 @@ | |||
1 | /* | ||
2 | * Libata based driver for Apple "macio" family of PATA controllers | ||
3 | * | ||
4 | * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp | ||
5 | * <benh@kernel.crashing.org> | ||
6 | * | ||
7 | * Some bits and pieces from drivers/ide/ppc/pmac.c | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #undef DEBUG | ||
12 | #undef DEBUG_DMA | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/blkdev.h> | ||
18 | #include <linux/ata.h> | ||
19 | #include <linux/libata.h> | ||
20 | #include <linux/adb.h> | ||
21 | #include <linux/pmu.h> | ||
22 | #include <linux/scatterlist.h> | ||
23 | #include <linux/of.h> | ||
24 | |||
25 | #include <scsi/scsi.h> | ||
26 | #include <scsi/scsi_host.h> | ||
27 | #include <scsi/scsi_device.h> | ||
28 | |||
29 | #include <asm/macio.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/dbdma.h> | ||
32 | #include <asm/pci-bridge.h> | ||
33 | #include <asm/machdep.h> | ||
34 | #include <asm/pmac_feature.h> | ||
35 | #include <asm/mediabay.h> | ||
36 | |||
37 | #ifdef DEBUG_DMA | ||
38 | #define dev_dbgdma(dev, format, arg...) \ | ||
39 | dev_printk(KERN_DEBUG , dev , format , ## arg) | ||
40 | #else | ||
41 | #define dev_dbgdma(dev, format, arg...) \ | ||
42 | ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) | ||
43 | #endif | ||
44 | |||
45 | #define DRV_NAME "pata_macio" | ||
46 | #define DRV_VERSION "0.9" | ||
47 | |||
48 | /* Models of macio ATA controller */ | ||
49 | enum { | ||
50 | controller_ohare, /* OHare based */ | ||
51 | controller_heathrow, /* Heathrow/Paddington */ | ||
52 | controller_kl_ata3, /* KeyLargo ATA-3 */ | ||
53 | controller_kl_ata4, /* KeyLargo ATA-4 */ | ||
54 | controller_un_ata6, /* UniNorth2 ATA-6 */ | ||
55 | controller_k2_ata6, /* K2 ATA-6 */ | ||
56 | controller_sh_ata6, /* Shasta ATA-6 */ | ||
57 | }; | ||
58 | |||
59 | static const char* macio_ata_names[] = { | ||
60 | "OHare ATA", /* OHare based */ | ||
61 | "Heathrow ATA", /* Heathrow/Paddington */ | ||
62 | "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */ | ||
63 | "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */ | ||
64 | "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */ | ||
65 | "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */ | ||
66 | "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */ | ||
67 | }; | ||
68 | |||
69 | /* | ||
70 | * Extra registers, both 32-bit little-endian | ||
71 | */ | ||
72 | #define IDE_TIMING_CONFIG 0x200 | ||
73 | #define IDE_INTERRUPT 0x300 | ||
74 | |||
75 | /* Kauai (U2) ATA has different register setup */ | ||
76 | #define IDE_KAUAI_PIO_CONFIG 0x200 | ||
77 | #define IDE_KAUAI_ULTRA_CONFIG 0x210 | ||
78 | #define IDE_KAUAI_POLL_CONFIG 0x220 | ||
79 | |||
80 | /* | ||
81 | * Timing configuration register definitions | ||
82 | */ | ||
83 | |||
84 | /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */ | ||
85 | #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS) | ||
86 | #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS) | ||
87 | #define IDE_SYSCLK_NS 30 /* 33Mhz cell */ | ||
88 | #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */ | ||
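As a worked example of the macros above: a 120 ns requirement on the 30 ns (33 MHz) cell needs SYSCLK_TICKS(120) = (120 + 30 - 1) / 30 = 4 ticks, i.e. the value is rounded up to the next whole clock, while SYSCLK_TICKS_66(120) on the 15 ns cell gives (120 + 14) / 15 = 8 ticks.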
89 | |||
90 | /* 133Mhz cell, found in shasta. | ||
91 | * See comments about 100 Mhz Uninorth 2... | ||
92 | * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just | ||
93 | * weird and I don't now why .. at this stage | ||
94 | */ | ||
95 | #define TR_133_PIOREG_PIO_MASK 0xff000fff | ||
96 | #define TR_133_PIOREG_MDMA_MASK 0x00fff800 | ||
97 | #define TR_133_UDMAREG_UDMA_MASK 0x0003ffff | ||
98 | #define TR_133_UDMAREG_UDMA_EN 0x00000001 | ||
99 | |||
100 | /* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device | ||
101 | * (106b/0033) on the uninorth or K2 internal PCI bus and its clock is | ||
102 | * controlled like gem or fw. It appears to be an evolution of keylargo | ||
103 | * ATA4 with a timing register extended to 2x32bit registers (one | ||
104 | * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel. | ||
105 | * It has its own local feature control register as well. | ||
106 | * | ||
107 | * After scratching my head over the timing values, at least for PIO | ||
108 | * and MDMA, I think I've figured out the format of the timing register, | ||
109 | * though I use pre-calculated tables for UDMA as usual... | ||
110 | */ | ||
111 | #define TR_100_PIO_ADDRSETUP_MASK 0xff000000 /* Size of field unknown */ | ||
112 | #define TR_100_PIO_ADDRSETUP_SHIFT 24 | ||
113 | #define TR_100_MDMA_MASK 0x00fff000 | ||
114 | #define TR_100_MDMA_RECOVERY_MASK 0x00fc0000 | ||
115 | #define TR_100_MDMA_RECOVERY_SHIFT 18 | ||
116 | #define TR_100_MDMA_ACCESS_MASK 0x0003f000 | ||
117 | #define TR_100_MDMA_ACCESS_SHIFT 12 | ||
118 | #define TR_100_PIO_MASK 0xff000fff | ||
119 | #define TR_100_PIO_RECOVERY_MASK 0x00000fc0 | ||
120 | #define TR_100_PIO_RECOVERY_SHIFT 6 | ||
121 | #define TR_100_PIO_ACCESS_MASK 0x0000003f | ||
122 | #define TR_100_PIO_ACCESS_SHIFT 0 | ||
123 | |||
124 | #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff | ||
125 | #define TR_100_UDMAREG_UDMA_EN 0x00000001 | ||
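To make the field layout above concrete, composing a 100 MHz-cell PIO timing word from an access and a recovery clock count would look roughly like the helper below; it is an illustration only, the counts are not taken from real timing tables:

    /* Hypothetical helper: pack PIO access/recovery clock counts into the
     * 100 MHz cell timing word using the masks and shifts defined above. */
    static u32 tr100_pio_word(u32 access_clks, u32 recovery_clks)
    {
            u32 t = 0;

            t |= (access_clks << TR_100_PIO_ACCESS_SHIFT) &
                 TR_100_PIO_ACCESS_MASK;
            t |= (recovery_clks << TR_100_PIO_RECOVERY_SHIFT) &
                 TR_100_PIO_RECOVERY_MASK;
            return t;
    }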
126 | |||
127 | |||
128 | /* 66Mhz cell, found in KeyLargo. Can do ultra modes 0 to 2 on | ||
129 | * a 40-conductor cable and up to mode 4 on an 80-conductor one. | ||
130 | * Clock unit is 15ns (66Mhz) | ||
131 | * | ||
132 | * 3 Values can be programmed: | ||
133 | * - Write data setup, which appears to match the cycle time. They | ||
134 | * also call it DIOW setup. | ||
135 | * - Ready to pause time (from spec) | ||
136 | * - Address setup. That one is weird. I don't see where exactly | ||
137 | * it fits in UDMA cycles; I got its name from an obscure piece | ||
138 | * of commented-out code in Darwin. They leave it at 0, we do as | ||
139 | * well, despite a comment that would lead one to think it has a | ||
140 | * min value of 45ns. | ||
141 | * Apple also adds 60ns to the write data setup (or cycle time?) on | ||
142 | * reads. | ||
143 | */ | ||
144 | #define TR_66_UDMA_MASK 0xfff00000 | ||
145 | #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */ | ||
146 | #define TR_66_PIO_ADDRSETUP_MASK 0xe0000000 /* Address setup */ | ||
147 | #define TR_66_PIO_ADDRSETUP_SHIFT 29 | ||
148 | #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */ | ||
149 | #define TR_66_UDMA_RDY2PAUS_SHIFT 25 | ||
150 | #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */ | ||
151 | #define TR_66_UDMA_WRDATASETUP_SHIFT 21 | ||
152 | #define TR_66_MDMA_MASK 0x000ffc00 | ||
153 | #define TR_66_MDMA_RECOVERY_MASK 0x000f8000 | ||
154 | #define TR_66_MDMA_RECOVERY_SHIFT 15 | ||
155 | #define TR_66_MDMA_ACCESS_MASK 0x00007c00 | ||
156 | #define TR_66_MDMA_ACCESS_SHIFT 10 | ||
157 | #define TR_66_PIO_MASK 0xe00003ff | ||
158 | #define TR_66_PIO_RECOVERY_MASK 0x000003e0 | ||
159 | #define TR_66_PIO_RECOVERY_SHIFT 5 | ||
160 | #define TR_66_PIO_ACCESS_MASK 0x0000001f | ||
161 | #define TR_66_PIO_ACCESS_SHIFT 0 | ||
162 | |||
163 | /* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo | ||
164 | * Can do pio & mdma modes, clock unit is 30ns (33Mhz) | ||
165 | * | ||
166 | * The access time and recovery time can be programmed. Some older | ||
167 | * Darwin code bases limit OHare to a 150ns cycle time. I decided to do | ||
168 | * the same here for safety against broken old hardware ;) | ||
169 | * The HalfTick bit, when set, adds half a clock (15ns) to the access | ||
170 | * time and removes one from recovery. It's not supported on the KeyLargo | ||
171 | * implementation, AFAIK. The E bit appears to be set for PIO mode 0 and | ||
172 | * is used to reach long timings used in this mode. | ||
173 | */ | ||
174 | #define TR_33_MDMA_MASK 0x003ff800 | ||
175 | #define TR_33_MDMA_RECOVERY_MASK 0x001f0000 | ||
176 | #define TR_33_MDMA_RECOVERY_SHIFT 16 | ||
177 | #define TR_33_MDMA_ACCESS_MASK 0x0000f800 | ||
178 | #define TR_33_MDMA_ACCESS_SHIFT 11 | ||
179 | #define TR_33_MDMA_HALFTICK 0x00200000 | ||
180 | #define TR_33_PIO_MASK 0x000007ff | ||
181 | #define TR_33_PIO_E 0x00000400 | ||
182 | #define TR_33_PIO_RECOVERY_MASK 0x000003e0 | ||
183 | #define TR_33_PIO_RECOVERY_SHIFT 5 | ||
184 | #define TR_33_PIO_ACCESS_MASK 0x0000001f | ||
185 | #define TR_33_PIO_ACCESS_SHIFT 0 | ||
186 | |||
187 | /* | ||
188 | * Interrupt register definitions. Only present on newer cells | ||
189 | * (Keylargo and later afaik) so we don't use it. | ||
190 | */ | ||
191 | #define IDE_INTR_DMA 0x80000000 | ||
192 | #define IDE_INTR_DEVICE 0x40000000 | ||
193 | |||
194 | /* | ||
195 | * FCR Register on Kauai. Not sure what bit 0x4 is ... | ||
196 | */ | ||
197 | #define KAUAI_FCR_UATA_MAGIC 0x00000004 | ||
198 | #define KAUAI_FCR_UATA_RESET_N 0x00000002 | ||
199 | #define KAUAI_FCR_UATA_ENABLE 0x00000001 | ||
200 | |||
201 | |||
202 | /* Allow up to 256 DBDMA commands per xfer */ | ||
203 | #define MAX_DCMDS 256 | ||
204 | |||
205 | /* Don't let a DMA segment go all the way to 64K */ | ||
206 | #define MAX_DBDMA_SEG 0xff00 | ||
207 | |||
208 | |||
209 | /* | ||
210 | * Wait 1s for disk to answer on IDE bus after a hard reset | ||
211 | * of the device (via GPIO/FCR). | ||
212 | * | ||
213 | * Some devices seem to "pollute" the bus even after dropping | ||
214 | * the BSY bit (typically some combo drives slave on the UDMA | ||
215 | * bus) after a hard reset. Since we hard reset all drives on | ||
216 | * KeyLargo ATA66, we have to keep that delay around. I may end | ||
217 | * up not hard resetting anymore on these and keep the delay only | ||
218 | * for older interfaces instead (we have to reset when coming | ||
219 | * from MacOS...) --BenH. | ||
220 | */ | ||
221 | #define IDE_WAKEUP_DELAY_MS 1000 | ||
222 | |||
223 | struct pata_macio_timing; | ||
224 | |||
225 | struct pata_macio_priv { | ||
226 | int kind; | ||
227 | int aapl_bus_id; | ||
228 | int mediabay : 1; | ||
229 | struct device_node *node; | ||
230 | struct macio_dev *mdev; | ||
231 | struct pci_dev *pdev; | ||
232 | struct device *dev; | ||
233 | int irq; | ||
234 | u32 treg[2][2]; | ||
235 | void __iomem *tfregs; | ||
236 | void __iomem *kauai_fcr; | ||
237 | struct dbdma_cmd * dma_table_cpu; | ||
238 | dma_addr_t dma_table_dma; | ||
239 | struct ata_host *host; | ||
240 | const struct pata_macio_timing *timings; | ||
241 | }; | ||
242 | |||
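/*
 * A note on treg[2][2] above (descriptive only): the first index is the
 * device (master/slave), the second the timing register. Only the
 * Kauai-class cells have a second (ultra) timing register; on the older
 * cells treg[dev][1] simply stays zero, as can be seen in
 * pata_macio_apply_timings() below.
 */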
243 | /* Previous versions of this driver used to calculate timings | ||
244 | * for some variants of the chip and use tables for others. | ||
245 | * | ||
246 | * Not only was this confusing, but in addition, it isn't clear | ||
247 | * whether our calculation code was correct. It didn't entirely | ||
248 | * match the Darwin code and whatever documentation I could find | ||
249 | * on these cells. | ||
250 | * | ||
251 | * I decided to rely entirely on a table instead for this version | ||
252 | * of the driver. Also, because I don't really care about derated | ||
253 | * modes and really old HW other than making them work, I'm not | ||
254 | * going to calculate / snoop timing values for anything other | ||
255 | * than the standard modes. | ||
256 | */ | ||
257 | struct pata_macio_timing { | ||
258 | int mode; | ||
259 | u32 reg1; /* Bits to set in first timing reg */ | ||
260 | u32 reg2; /* Bits to set in second timing reg */ | ||
261 | }; | ||
262 | |||
263 | static const struct pata_macio_timing pata_macio_ohare_timings[] = { | ||
264 | { XFER_PIO_0, 0x00000526, 0, }, | ||
265 | { XFER_PIO_1, 0x00000085, 0, }, | ||
266 | { XFER_PIO_2, 0x00000025, 0, }, | ||
267 | { XFER_PIO_3, 0x00000025, 0, }, | ||
268 | { XFER_PIO_4, 0x00000025, 0, }, | ||
269 | { XFER_MW_DMA_0, 0x00074000, 0, }, | ||
270 | { XFER_MW_DMA_1, 0x00221000, 0, }, | ||
271 | { XFER_MW_DMA_2, 0x00211000, 0, }, | ||
272 | { -1, 0, 0 } | ||
273 | }; | ||
274 | |||
275 | static const struct pata_macio_timing pata_macio_heathrow_timings[] = { | ||
276 | { XFER_PIO_0, 0x00000526, 0, }, | ||
277 | { XFER_PIO_1, 0x00000085, 0, }, | ||
278 | { XFER_PIO_2, 0x00000025, 0, }, | ||
279 | { XFER_PIO_3, 0x00000025, 0, }, | ||
280 | { XFER_PIO_4, 0x00000025, 0, }, | ||
281 | { XFER_MW_DMA_0, 0x00074000, 0, }, | ||
282 | { XFER_MW_DMA_1, 0x00221000, 0, }, | ||
283 | { XFER_MW_DMA_2, 0x00211000, 0, }, | ||
284 | { -1, 0, 0 } | ||
285 | }; | ||
286 | |||
287 | static const struct pata_macio_timing pata_macio_kl33_timings[] = { | ||
288 | { XFER_PIO_0, 0x00000526, 0, }, | ||
289 | { XFER_PIO_1, 0x00000085, 0, }, | ||
290 | { XFER_PIO_2, 0x00000025, 0, }, | ||
291 | { XFER_PIO_3, 0x00000025, 0, }, | ||
292 | { XFER_PIO_4, 0x00000025, 0, }, | ||
293 | { XFER_MW_DMA_0, 0x00084000, 0, }, | ||
294 | { XFER_MW_DMA_1, 0x00021800, 0, }, | ||
295 | { XFER_MW_DMA_2, 0x00011800, 0, }, | ||
296 | { -1, 0, 0 } | ||
297 | }; | ||
298 | |||
299 | static const struct pata_macio_timing pata_macio_kl66_timings[] = { | ||
300 | { XFER_PIO_0, 0x0000038c, 0, }, | ||
301 | { XFER_PIO_1, 0x0000020a, 0, }, | ||
302 | { XFER_PIO_2, 0x00000127, 0, }, | ||
303 | { XFER_PIO_3, 0x000000c6, 0, }, | ||
304 | { XFER_PIO_4, 0x00000065, 0, }, | ||
305 | { XFER_MW_DMA_0, 0x00084000, 0, }, | ||
306 | { XFER_MW_DMA_1, 0x00029800, 0, }, | ||
307 | { XFER_MW_DMA_2, 0x00019400, 0, }, | ||
308 | { XFER_UDMA_0, 0x19100000, 0, }, | ||
309 | { XFER_UDMA_1, 0x14d00000, 0, }, | ||
310 | { XFER_UDMA_2, 0x10900000, 0, }, | ||
311 | { XFER_UDMA_3, 0x0c700000, 0, }, | ||
312 | { XFER_UDMA_4, 0x0c500000, 0, }, | ||
313 | { -1, 0, 0 } | ||
314 | }; | ||
315 | |||
316 | static const struct pata_macio_timing pata_macio_kauai_timings[] = { | ||
317 | { XFER_PIO_0, 0x08000a92, 0, }, | ||
318 | { XFER_PIO_1, 0x0800060f, 0, }, | ||
319 | { XFER_PIO_2, 0x0800038b, 0, }, | ||
320 | { XFER_PIO_3, 0x05000249, 0, }, | ||
321 | { XFER_PIO_4, 0x04000148, 0, }, | ||
322 | { XFER_MW_DMA_0, 0x00618000, 0, }, | ||
323 | { XFER_MW_DMA_1, 0x00209000, 0, }, | ||
324 | { XFER_MW_DMA_2, 0x00148000, 0, }, | ||
325 | { XFER_UDMA_0, 0, 0x000070c1, }, | ||
326 | { XFER_UDMA_1, 0, 0x00005d81, }, | ||
327 | { XFER_UDMA_2, 0, 0x00004a61, }, | ||
328 | { XFER_UDMA_3, 0, 0x00003a51, }, | ||
329 | { XFER_UDMA_4, 0, 0x00002a31, }, | ||
330 | { XFER_UDMA_5, 0, 0x00002921, }, | ||
331 | { -1, 0, 0 } | ||
332 | }; | ||
333 | |||
334 | static const struct pata_macio_timing pata_macio_shasta_timings[] = { | ||
335 | { XFER_PIO_0, 0x0a000c97, 0, }, | ||
336 | { XFER_PIO_1, 0x07000712, 0, }, | ||
337 | { XFER_PIO_2, 0x040003cd, 0, }, | ||
338 | { XFER_PIO_3, 0x0500028b, 0, }, | ||
339 | { XFER_PIO_4, 0x0400010a, 0, }, | ||
340 | { XFER_MW_DMA_0, 0x00820800, 0, }, | ||
341 | { XFER_MW_DMA_1, 0x0028b000, 0, }, | ||
342 | { XFER_MW_DMA_2, 0x001ca000, 0, }, | ||
343 | { XFER_UDMA_0, 0, 0x00035901, }, | ||
344 | { XFER_UDMA_1, 0, 0x000348b1, }, | ||
345 | { XFER_UDMA_2, 0, 0x00033881, }, | ||
346 | { XFER_UDMA_3, 0, 0x00033861, }, | ||
347 | { XFER_UDMA_4, 0, 0x00033841, }, | ||
348 | { XFER_UDMA_5, 0, 0x00033031, }, | ||
349 | { XFER_UDMA_6, 0, 0x00033021, }, | ||
350 | { -1, 0, 0 } | ||
351 | }; | ||
352 | |||
353 | static const struct pata_macio_timing *pata_macio_find_timing( | ||
354 | struct pata_macio_priv *priv, | ||
355 | int mode) | ||
356 | { | ||
357 | int i; | ||
358 | |||
359 | for (i = 0; priv->timings[i].mode > 0; i++) { | ||
360 | if (priv->timings[i].mode == mode) | ||
361 | return &priv->timings[i]; | ||
362 | } | ||
363 | return NULL; | ||
364 | } | ||
365 | |||
366 | |||
367 | static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device) | ||
368 | { | ||
369 | struct pata_macio_priv *priv = ap->private_data; | ||
370 | void __iomem *rbase = ap->ioaddr.cmd_addr; | ||
371 | |||
372 | if (priv->kind == controller_sh_ata6 || | ||
373 | priv->kind == controller_un_ata6 || | ||
374 | priv->kind == controller_k2_ata6) { | ||
375 | writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG); | ||
376 | writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG); | ||
377 | } else | ||
378 | writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG); | ||
379 | } | ||
380 | |||
381 | static void pata_macio_dev_select(struct ata_port *ap, unsigned int device) | ||
382 | { | ||
383 | ata_sff_dev_select(ap, device); | ||
384 | |||
385 | /* Apply timings */ | ||
386 | pata_macio_apply_timings(ap, device); | ||
387 | } | ||
388 | |||
389 | static void pata_macio_set_timings(struct ata_port *ap, | ||
390 | struct ata_device *adev) | ||
391 | { | ||
392 | struct pata_macio_priv *priv = ap->private_data; | ||
393 | const struct pata_macio_timing *t; | ||
394 | |||
395 | dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n", | ||
396 | adev->devno, | ||
397 | adev->pio_mode, | ||
398 | ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)), | ||
399 | adev->dma_mode, | ||
400 | ata_mode_string(ata_xfer_mode2mask(adev->dma_mode))); | ||
401 | |||
402 | /* First clear timings */ | ||
403 | priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0; | ||
404 | |||
405 | /* Now get the PIO timings */ | ||
406 | t = pata_macio_find_timing(priv, adev->pio_mode); | ||
407 | if (t == NULL) { | ||
408 | dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n", | ||
409 | adev->pio_mode); | ||
410 | t = pata_macio_find_timing(priv, XFER_PIO_0); | ||
411 | } | ||
412 | BUG_ON(t == NULL); | ||
413 | |||
414 | /* PIO timings only ever use the first treg */ | ||
415 | priv->treg[adev->devno][0] |= t->reg1; | ||
416 | |||
417 | /* Now get DMA timings */ | ||
418 | t = pata_macio_find_timing(priv, adev->dma_mode); | ||
419 | if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) { | ||
420 | dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n"); | ||
421 | t = pata_macio_find_timing(priv, XFER_MW_DMA_0); | ||
422 | } | ||
423 | BUG_ON(t == NULL); | ||
424 | |||
425 | /* DMA timings can use both tregs */ | ||
426 | priv->treg[adev->devno][0] |= t->reg1; | ||
427 | priv->treg[adev->devno][1] |= t->reg2; | ||
428 | |||
429 | dev_dbg(priv->dev, " -> %08x %08x\n", | ||
430 | priv->treg[adev->devno][0], | ||
431 | priv->treg[adev->devno][1]); | ||
432 | |||
433 | /* Apply to hardware */ | ||
434 | pata_macio_apply_timings(ap, adev->devno); | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Blast some well-known "safe" values into the timing registers at init | ||
439 | * or wakeup-from-sleep time, before we do the real calculation | ||
440 | */ | ||
441 | static void pata_macio_default_timings(struct pata_macio_priv *priv) | ||
442 | { | ||
443 | unsigned int value, value2 = 0; | ||
444 | |||
445 | switch(priv->kind) { | ||
446 | case controller_sh_ata6: | ||
447 | value = 0x0a820c97; | ||
448 | value2 = 0x00033031; | ||
449 | break; | ||
450 | case controller_un_ata6: | ||
451 | case controller_k2_ata6: | ||
452 | value = 0x08618a92; | ||
453 | value2 = 0x00002921; | ||
454 | break; | ||
455 | case controller_kl_ata4: | ||
456 | value = 0x0008438c; | ||
457 | break; | ||
458 | case controller_kl_ata3: | ||
459 | value = 0x00084526; | ||
460 | break; | ||
461 | case controller_heathrow: | ||
462 | case controller_ohare: | ||
463 | default: | ||
464 | value = 0x00074526; | ||
465 | break; | ||
466 | } | ||
467 | priv->treg[0][0] = priv->treg[1][0] = value; | ||
468 | priv->treg[0][1] = priv->treg[1][1] = value2; | ||
469 | } | ||
470 | |||
471 | static int pata_macio_cable_detect(struct ata_port *ap) | ||
472 | { | ||
473 | struct pata_macio_priv *priv = ap->private_data; | ||
474 | |||
475 | /* Get cable type from device-tree */ | ||
476 | if (priv->kind == controller_kl_ata4 || | ||
477 | priv->kind == controller_un_ata6 || | ||
478 | priv->kind == controller_k2_ata6 || | ||
479 | priv->kind == controller_sh_ata6) { | ||
480 | const char* cable = of_get_property(priv->node, "cable-type", | ||
481 | NULL); | ||
482 | struct device_node *root = of_find_node_by_path("/"); | ||
483 | const char *model = of_get_property(root, "model", NULL); | ||
484 | |||
485 | if (cable && !strncmp(cable, "80-", 3)) { | ||
486 | /* Some drives fail to detect the 80c cable in PowerBooks. | ||
487 | * These machines use a proprietary short IDE cable | ||
488 | * anyway | ||
489 | */ | ||
490 | if (!strncmp(model, "PowerBook", 9)) | ||
491 | return ATA_CBL_PATA40_SHORT; | ||
492 | else | ||
493 | return ATA_CBL_PATA80; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | /* G5s seem to have an incorrect cable type in the device-tree. | ||
498 | * Let's assume they always have an 80 conductor cable; this seems | ||
499 | * to always be the case unless the user mucked around | ||
500 | */ | ||
501 | if (of_device_is_compatible(priv->node, "K2-UATA") || | ||
502 | of_device_is_compatible(priv->node, "shasta-ata")) | ||
503 | return ATA_CBL_PATA80; | ||
504 | |||
505 | /* Anything else is 40 conductors */ | ||
506 | return ATA_CBL_PATA40; | ||
507 | } | ||
508 | |||
509 | static void pata_macio_qc_prep(struct ata_queued_cmd *qc) | ||
510 | { | ||
511 | unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
512 | struct ata_port *ap = qc->ap; | ||
513 | struct pata_macio_priv *priv = ap->private_data; | ||
514 | struct scatterlist *sg; | ||
515 | struct dbdma_cmd *table; | ||
516 | unsigned int si, pi; | ||
517 | |||
518 | dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n", | ||
519 | __func__, qc, qc->flags, write, qc->dev->devno); | ||
520 | |||
521 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
522 | return; | ||
523 | |||
524 | table = (struct dbdma_cmd *) priv->dma_table_cpu; | ||
525 | |||
526 | pi = 0; | ||
527 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
528 | u32 addr, sg_len, len; | ||
529 | |||
530 | /* determine if physical DMA addr spans a 64K boundary. | ||
531 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
532 | * truncate dma_addr_t to u32. | ||
533 | */ | ||
534 | addr = (u32) sg_dma_address(sg); | ||
535 | sg_len = sg_dma_len(sg); | ||
536 | |||
537 | while (sg_len) { | ||
538 | /* table overflow should never happen */ | ||
539 | BUG_ON (pi++ >= MAX_DCMDS); | ||
540 | |||
541 | len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG; | ||
542 | st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE); | ||
543 | st_le16(&table->req_count, len); | ||
544 | st_le32(&table->phy_addr, addr); | ||
545 | table->cmd_dep = 0; | ||
546 | table->xfer_status = 0; | ||
547 | table->res_count = 0; | ||
548 | addr += len; | ||
549 | sg_len -= len; | ||
550 | ++table; | ||
551 | } | ||
552 | } | ||
553 | |||
554 | /* Should never happen according to Tejun */ | ||
555 | BUG_ON(!pi); | ||
556 | |||
557 | /* Convert the last command to an input/output */ | ||
558 | table--; | ||
559 | st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST); | ||
560 | table++; | ||
561 | |||
562 | /* Add the stop command to the end of the list */ | ||
563 | memset(table, 0, sizeof(struct dbdma_cmd)); | ||
564 | st_le16(&table->command, DBDMA_STOP); | ||
565 | |||
566 | dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); | ||
567 | } | ||
568 | |||
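/*
 * Worked example (hypothetical numbers, for illustration only): a write
 * qc with two DMA-mapped segments of 0x10000 and 0x2000 bytes would make
 * the loop above emit the following list, given MAX_DBDMA_SEG of 0xff00:
 *
 *   OUTPUT_MORE  req_count=0xff00  phy_addr=seg0
 *   OUTPUT_MORE  req_count=0x0100  phy_addr=seg0+0xff00
 *   OUTPUT_LAST  req_count=0x2000  phy_addr=seg1  (last data command
 *                                                  rewritten at the end)
 *   DBDMA_STOP
 */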
569 | |||
570 | static void pata_macio_freeze(struct ata_port *ap) | ||
571 | { | ||
572 | struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; | ||
573 | |||
574 | if (dma_regs) { | ||
575 | unsigned int timeout = 1000000; | ||
576 | |||
577 | /* Make sure DMA controller is stopped */ | ||
578 | writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control); | ||
579 | while (--timeout && (readl(&dma_regs->status) & RUN)) | ||
580 | udelay(1); | ||
581 | } | ||
582 | |||
583 | ata_sff_freeze(ap); | ||
584 | } | ||
585 | |||
586 | |||
587 | static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc) | ||
588 | { | ||
589 | struct ata_port *ap = qc->ap; | ||
590 | struct pata_macio_priv *priv = ap->private_data; | ||
591 | struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; | ||
592 | int dev = qc->dev->devno; | ||
593 | |||
594 | dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); | ||
595 | |||
596 | /* Make sure DMA command updates are visible */ | ||
597 | writel(priv->dma_table_dma, &dma_regs->cmdptr); | ||
598 | |||
599 | /* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup on | ||
600 | * UDMA reads | ||
601 | */ | ||
602 | if (priv->kind == controller_kl_ata4 && | ||
603 | (priv->treg[dev][0] & TR_66_UDMA_EN)) { | ||
604 | void __iomem *rbase = ap->ioaddr.cmd_addr; | ||
605 | u32 reg = priv->treg[dev][0]; | ||
606 | |||
607 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | ||
608 | reg += 0x00800000; | ||
609 | writel(reg, rbase + IDE_TIMING_CONFIG); | ||
610 | } | ||
611 | |||
612 | /* issue r/w command */ | ||
613 | ap->ops->sff_exec_command(ap, &qc->tf); | ||
614 | } | ||
615 | |||
616 | static void pata_macio_bmdma_start(struct ata_queued_cmd *qc) | ||
617 | { | ||
618 | struct ata_port *ap = qc->ap; | ||
619 | struct pata_macio_priv *priv = ap->private_data; | ||
620 | struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; | ||
621 | |||
622 | dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); | ||
623 | |||
624 | writel((RUN << 16) | RUN, &dma_regs->control); | ||
625 | /* Make sure it gets to the controller right now */ | ||
626 | (void)readl(&dma_regs->control); | ||
627 | } | ||
628 | |||
629 | static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc) | ||
630 | { | ||
631 | struct ata_port *ap = qc->ap; | ||
632 | struct pata_macio_priv *priv = ap->private_data; | ||
633 | struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; | ||
634 | unsigned int timeout = 1000000; | ||
635 | |||
636 | dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); | ||
637 | |||
638 | /* Stop the DMA engine and wait for it to fully halt */ | ||
639 | writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control); | ||
640 | while (--timeout && (readl(&dma_regs->status) & RUN)) | ||
641 | udelay(1); | ||
642 | } | ||
643 | |||
644 | static u8 pata_macio_bmdma_status(struct ata_port *ap) | ||
645 | { | ||
646 | struct pata_macio_priv *priv = ap->private_data; | ||
647 | struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; | ||
648 | u32 dstat, rstat = ATA_DMA_INTR; | ||
649 | unsigned long timeout = 0; | ||
650 | |||
651 | dstat = readl(&dma_regs->status); | ||
652 | |||
653 | dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat); | ||
654 | |||
655 | /* We have two things to deal with here: | ||
656 | * | ||
657 | * - The dbdma won't stop if the command was started | ||
658 | * but completed with an error without transferring all | ||
659 | * the data. This happens when bad blocks are encountered | ||
660 | * during a multi-block transfer. | ||
661 | * | ||
662 | * - The dbdma fifo hasn't yet finished flushing to | ||
663 | * system memory when the disk interrupt occurs. | ||
664 | * | ||
665 | */ | ||
666 | |||
667 | /* First check for errors */ | ||
668 | if ((dstat & (RUN|DEAD)) != RUN) | ||
669 | rstat |= ATA_DMA_ERR; | ||
670 | |||
671 | /* If ACTIVE is cleared, the STOP command has been hit and | ||
672 | * the transfer is complete. If not, we have to flush the | ||
673 | * channel. | ||
674 | */ | ||
675 | if ((dstat & ACTIVE) == 0) | ||
676 | return rstat; | ||
677 | |||
678 | dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__); | ||
679 | |||
680 | /* If the dbdma hasn't executed the STOP command yet, the | ||
681 | * active bit is still set. We consider that we aren't | ||
682 | * sharing interrupts (which is hopefully the case with | ||
683 | * those controllers) and so we just try to flush the | ||
684 | * channel for pending data in the fifo. | ||
685 | */ | ||
686 | udelay(1); | ||
687 | writel((FLUSH << 16) | FLUSH, &dma_regs->control); | ||
688 | for (;;) { | ||
689 | udelay(1); | ||
690 | dstat = readl(&dma_regs->status); | ||
691 | if ((dstat & FLUSH) == 0) | ||
692 | break; | ||
693 | if (++timeout > 1000) { | ||
694 | dev_warn(priv->dev, "timeout flushing DMA\n"); | ||
695 | rstat |= ATA_DMA_ERR; | ||
696 | break; | ||
697 | } | ||
698 | } | ||
699 | return rstat; | ||
700 | } | ||
701 | |||
702 | /* port_start is where we allocate the DMA command list */ | ||
703 | static int pata_macio_port_start(struct ata_port *ap) | ||
704 | { | ||
705 | struct pata_macio_priv *priv = ap->private_data; | ||
706 | |||
707 | if (ap->ioaddr.bmdma_addr == NULL) | ||
708 | return 0; | ||
709 | |||
710 | /* Allocate space for the DBDMA commands. | ||
711 | * | ||
712 | * The +2 is +1 for the stop command and +1 to allow for | ||
713 | * aligning the start address to a multiple of 16 bytes. | ||
714 | */ | ||
715 | priv->dma_table_cpu = | ||
716 | dmam_alloc_coherent(priv->dev, | ||
717 | (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), | ||
718 | &priv->dma_table_dma, GFP_KERNEL); | ||
719 | if (priv->dma_table_cpu == NULL) { | ||
720 | dev_err(priv->dev, "Unable to allocate DMA command list\n"); | ||
721 | ap->ioaddr.bmdma_addr = NULL; | ||
722 | } | ||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static void pata_macio_irq_clear(struct ata_port *ap) | ||
727 | { | ||
728 | struct pata_macio_priv *priv = ap->private_data; | ||
729 | |||
730 | /* Nothing to do here */ | ||
731 | |||
732 | dev_dbgdma(priv->dev, "%s\n", __func__); | ||
733 | } | ||
734 | |||
735 | static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume) | ||
736 | { | ||
737 | dev_dbg(priv->dev, "Enabling & resetting...\n"); | ||
738 | |||
739 | if (priv->mediabay) | ||
740 | return; | ||
741 | |||
742 | if (priv->kind == controller_ohare && !resume) { | ||
743 | /* The code below has trouble on some OHare machines | ||
744 | * (timing related?). Until I can get my hands on one of these | ||
745 | * units, I'll keep the old way | ||
746 | */ | ||
747 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1); | ||
748 | } else { | ||
749 | int rc; | ||
750 | |||
751 | /* Reset and enable controller */ | ||
752 | rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET, | ||
753 | priv->node, priv->aapl_bus_id, 1); | ||
754 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, | ||
755 | priv->node, priv->aapl_bus_id, 1); | ||
756 | msleep(10); | ||
757 | /* Only bother waiting if there's a reset control */ | ||
758 | if (rc == 0) { | ||
759 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, | ||
760 | priv->node, priv->aapl_bus_id, 0); | ||
761 | msleep(IDE_WAKEUP_DELAY_MS); | ||
762 | } | ||
763 | } | ||
764 | |||
765 | /* If resuming a PCI device, restore the config space here */ | ||
766 | if (priv->pdev && resume) { | ||
767 | int rc; | ||
768 | |||
769 | pci_restore_state(priv->pdev); | ||
770 | rc = pcim_enable_device(priv->pdev); | ||
771 | if (rc) | ||
772 | dev_printk(KERN_ERR, &priv->pdev->dev, | ||
773 | "Failed to enable device after resume (%d)\n", rc); | ||
774 | else | ||
775 | pci_set_master(priv->pdev); | ||
776 | } | ||
777 | |||
778 | /* On Kauai, initialize the FCR. We don't perform a reset; it doesn't | ||
779 | * really seem necessary and skipping it speeds up the boot process | ||
780 | */ | ||
781 | if (priv->kauai_fcr) | ||
782 | writel(KAUAI_FCR_UATA_MAGIC | | ||
783 | KAUAI_FCR_UATA_RESET_N | | ||
784 | KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr); | ||
785 | } | ||
786 | |||
787 | /* Hook the standard slave config to fix up some HW-related alignment | ||
788 | * restrictions | ||
789 | */ | ||
790 | static int pata_macio_slave_config(struct scsi_device *sdev) | ||
791 | { | ||
792 | struct ata_port *ap = ata_shost_to_port(sdev->host); | ||
793 | struct pata_macio_priv *priv = ap->private_data; | ||
794 | struct ata_device *dev; | ||
795 | u16 cmd; | ||
796 | int rc; | ||
797 | |||
798 | /* First call original */ | ||
799 | rc = ata_scsi_slave_config(sdev); | ||
800 | if (rc) | ||
801 | return rc; | ||
802 | |||
803 | /* This is lifted from sata_nv */ | ||
804 | dev = &ap->link.device[sdev->id]; | ||
805 | |||
806 | /* OHare has issues with non-cache-aligned DMA on some chipsets */ | ||
807 | if (priv->kind == controller_ohare) { | ||
808 | blk_queue_update_dma_alignment(sdev->request_queue, 31); | ||
809 | blk_queue_update_dma_pad(sdev->request_queue, 31); | ||
810 | |||
811 | /* Tell the world about it */ | ||
812 | ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n"); | ||
813 | return 0; | ||
814 | } | ||
815 | |||
816 | /* We only have issues with ATAPI */ | ||
817 | if (dev->class != ATA_DEV_ATAPI) | ||
818 | return 0; | ||
819 | |||
820 | /* Shasta and K2 seem to have "issues" with reads ... */ | ||
821 | if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) { | ||
822 | /* All right, these are bad; apply restrictions */ | ||
823 | blk_queue_update_dma_alignment(sdev->request_queue, 15); | ||
824 | blk_queue_update_dma_pad(sdev->request_queue, 15); | ||
825 | |||
826 | /* We enable MWI and hack the cache line size directly here. This | ||
827 | * is specific to this chipset; these are not normal values. We | ||
828 | * happen to somewhat know what we are doing here (which is basically | ||
829 | * to do the same thing Apple does and pray they did not get it wrong :-) | ||
830 | */ | ||
831 | BUG_ON(!priv->pdev); | ||
832 | pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08); | ||
833 | pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd); | ||
834 | pci_write_config_word(priv->pdev, PCI_COMMAND, | ||
835 | cmd | PCI_COMMAND_INVALIDATE); | ||
836 | |||
837 | /* Tell the world about it */ | ||
838 | ata_dev_printk(dev, KERN_INFO, | ||
839 | "K2/Shasta alignment limits applied\n"); | ||
840 | } | ||
841 | |||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | #ifdef CONFIG_PM | ||
846 | |||
847 | static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg) | ||
848 | { | ||
849 | int rc; | ||
850 | |||
851 | /* First, core libata suspend to do most of the work */ | ||
852 | rc = ata_host_suspend(priv->host, mesg); | ||
853 | if (rc) | ||
854 | return rc; | ||
855 | |||
856 | /* Restore to default timings */ | ||
857 | pata_macio_default_timings(priv); | ||
858 | |||
859 | /* Mask the interrupt. Not strictly necessary, but the old driver | ||
860 | * did it and I'd rather not change that here */ | ||
861 | disable_irq(priv->irq); | ||
862 | |||
863 | /* The media bay will handle itself just fine */ | ||
864 | if (priv->mediabay) | ||
865 | return 0; | ||
866 | |||
867 | /* Kauai has bus control FCRs directly here */ | ||
868 | if (priv->kauai_fcr) { | ||
869 | u32 fcr = readl(priv->kauai_fcr); | ||
870 | fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE); | ||
871 | writel(fcr, priv->kauai_fcr); | ||
872 | } | ||
873 | |||
874 | /* For PCI, save state and disable DMA. No need to call | ||
875 | * pci_set_power_state(), the HW doesn't do D states that | ||
876 | * way, the platform code will take care of suspending the | ||
877 | * ASIC properly | ||
878 | */ | ||
879 | if (priv->pdev) { | ||
880 | pci_save_state(priv->pdev); | ||
881 | pci_disable_device(priv->pdev); | ||
882 | } | ||
883 | |||
884 | /* Disable the bus on older machines and the cell on kauai */ | ||
885 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, | ||
886 | priv->aapl_bus_id, 0); | ||
887 | |||
888 | return 0; | ||
889 | } | ||
890 | |||
891 | static int pata_macio_do_resume(struct pata_macio_priv *priv) | ||
892 | { | ||
893 | /* Reset and re-enable the HW */ | ||
894 | pata_macio_reset_hw(priv, 1); | ||
895 | |||
896 | /* Sanitize drive timings */ | ||
897 | pata_macio_apply_timings(priv->host->ports[0], 0); | ||
898 | |||
899 | /* We want our IRQ back! */ | ||
900 | enable_irq(priv->irq); | ||
901 | |||
902 | /* Let the libata core take it from there */ | ||
903 | ata_host_resume(priv->host); | ||
904 | |||
905 | return 0; | ||
906 | } | ||
907 | |||
908 | #endif /* CONFIG_PM */ | ||
909 | |||
910 | static struct scsi_host_template pata_macio_sht = { | ||
911 | ATA_BASE_SHT(DRV_NAME), | ||
912 | .sg_tablesize = MAX_DCMDS, | ||
913 | /* We may not need that strict one */ | ||
914 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
915 | .slave_configure = pata_macio_slave_config, | ||
916 | }; | ||
917 | |||
918 | static struct ata_port_operations pata_macio_ops = { | ||
919 | .inherits = &ata_sff_port_ops, | ||
920 | |||
921 | .freeze = pata_macio_freeze, | ||
922 | .set_piomode = pata_macio_set_timings, | ||
923 | .set_dmamode = pata_macio_set_timings, | ||
924 | .cable_detect = pata_macio_cable_detect, | ||
925 | .sff_dev_select = pata_macio_dev_select, | ||
926 | .qc_prep = pata_macio_qc_prep, | ||
927 | .mode_filter = ata_bmdma_mode_filter, | ||
928 | .bmdma_setup = pata_macio_bmdma_setup, | ||
929 | .bmdma_start = pata_macio_bmdma_start, | ||
930 | .bmdma_stop = pata_macio_bmdma_stop, | ||
931 | .bmdma_status = pata_macio_bmdma_status, | ||
932 | .port_start = pata_macio_port_start, | ||
933 | .sff_irq_clear = pata_macio_irq_clear, | ||
934 | }; | ||
935 | |||
936 | static void __devinit pata_macio_invariants(struct pata_macio_priv *priv) | ||
937 | { | ||
938 | const int *bidp; | ||
939 | |||
940 | /* Identify the type of controller */ | ||
941 | if (of_device_is_compatible(priv->node, "shasta-ata")) { | ||
942 | priv->kind = controller_sh_ata6; | ||
943 | priv->timings = pata_macio_shasta_timings; | ||
944 | } else if (of_device_is_compatible(priv->node, "kauai-ata")) { | ||
945 | priv->kind = controller_un_ata6; | ||
946 | priv->timings = pata_macio_kauai_timings; | ||
947 | } else if (of_device_is_compatible(priv->node, "K2-UATA")) { | ||
948 | priv->kind = controller_k2_ata6; | ||
949 | priv->timings = pata_macio_kauai_timings; | ||
950 | } else if (of_device_is_compatible(priv->node, "keylargo-ata")) { | ||
951 | if (strcmp(priv->node->name, "ata-4") == 0) { | ||
952 | priv->kind = controller_kl_ata4; | ||
953 | priv->timings = pata_macio_kl66_timings; | ||
954 | } else { | ||
955 | priv->kind = controller_kl_ata3; | ||
956 | priv->timings = pata_macio_kl33_timings; | ||
957 | } | ||
958 | } else if (of_device_is_compatible(priv->node, "heathrow-ata")) { | ||
959 | priv->kind = controller_heathrow; | ||
960 | priv->timings = pata_macio_heathrow_timings; | ||
961 | } else { | ||
962 | priv->kind = controller_ohare; | ||
963 | priv->timings = pata_macio_ohare_timings; | ||
964 | } | ||
965 | |||
966 | /* XXX FIXME --- setup priv->mediabay here */ | ||
967 | |||
968 | /* Get Apple bus ID (for clock and ASIC control) */ | ||
969 | bidp = of_get_property(priv->node, "AAPL,bus-id", NULL); | ||
970 | priv->aapl_bus_id = bidp ? *bidp : 0; | ||
971 | |||
972 | /* Fixup missing Apple bus ID in case of media-bay */ | ||
973 | if (priv->mediabay && bidp == 0) | ||
974 | priv->aapl_bus_id = 1; | ||
975 | } | ||
976 | |||
977 | static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr, | ||
978 | void __iomem * base, | ||
979 | void __iomem * dma) | ||
980 | { | ||
981 | /* cmd_addr is the base of regs for that port */ | ||
982 | ioaddr->cmd_addr = base; | ||
983 | |||
984 | /* taskfile registers */ | ||
985 | ioaddr->data_addr = base + (ATA_REG_DATA << 4); | ||
986 | ioaddr->error_addr = base + (ATA_REG_ERR << 4); | ||
987 | ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4); | ||
988 | ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4); | ||
989 | ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4); | ||
990 | ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4); | ||
991 | ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4); | ||
992 | ioaddr->device_addr = base + (ATA_REG_DEVICE << 4); | ||
993 | ioaddr->status_addr = base + (ATA_REG_STATUS << 4); | ||
994 | ioaddr->command_addr = base + (ATA_REG_CMD << 4); | ||
995 | ioaddr->altstatus_addr = base + 0x160; | ||
996 | ioaddr->ctl_addr = base + 0x160; | ||
997 | ioaddr->bmdma_addr = dma; | ||
998 | } | ||
999 | |||
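/*
 * Worked example (assuming the standard libata register numbers,
 * ATA_REG_DATA = 0 ... ATA_REG_STATUS/CMD = 7): the << 4 spacing above
 * puts the taskfile registers 16 bytes apart, e.g. data at base + 0x00,
 * error/feature at base + 0x10 and command/status at base + 0x70, with
 * control/altstatus living at base + 0x160.
 */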
1000 | static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv, | ||
1001 | struct ata_port_info *pinfo) | ||
1002 | { | ||
1003 | int i = 0; | ||
1004 | |||
1005 | pinfo->pio_mask = 0; | ||
1006 | pinfo->mwdma_mask = 0; | ||
1007 | pinfo->udma_mask = 0; | ||
1008 | |||
1009 | while (priv->timings[i].mode > 0) { | ||
1010 | unsigned int mask = 1U << (priv->timings[i].mode & 0x0f); | ||
1011 | switch(priv->timings[i].mode & 0xf0) { | ||
1012 | case 0x00: /* PIO */ | ||
1013 | pinfo->pio_mask |= (mask >> 8); | ||
1014 | break; | ||
1015 | case 0x20: /* MWDMA */ | ||
1016 | pinfo->mwdma_mask |= mask; | ||
1017 | break; | ||
1018 | case 0x40: /* UDMA */ | ||
1019 | pinfo->udma_mask |= mask; | ||
1020 | break; | ||
1021 | } | ||
1022 | i++; | ||
1023 | } | ||
1024 | dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n", | ||
1025 | pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask); | ||
1026 | } | ||
1027 | |||
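/*
 * Worked example (assuming the usual libata XFER_* encoding, i.e.
 * XFER_PIO_0 = 0x08, XFER_MW_DMA_0 = 0x20, XFER_UDMA_0 = 0x40): for the
 * kl66 table above this computes pio_mask = 0x1f (PIO 0-4),
 * mwdma_mask = 0x07 (MWDMA 0-2) and udma_mask = 0x1f (UDMA 0-4).
 */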
1028 | static int __devinit pata_macio_common_init(struct pata_macio_priv *priv, | ||
1029 | resource_size_t tfregs, | ||
1030 | resource_size_t dmaregs, | ||
1031 | resource_size_t fcregs, | ||
1032 | unsigned long irq) | ||
1033 | { | ||
1034 | struct ata_port_info pinfo; | ||
1035 | const struct ata_port_info *ppi[] = { &pinfo, NULL }; | ||
1036 | void __iomem *dma_regs = NULL; | ||
1037 | |||
1038 | /* Fill the private structure with various invariants collected | ||
1039 | * from the device-tree | ||
1040 | */ | ||
1041 | pata_macio_invariants(priv); | ||
1042 | |||
1043 | /* Make sure we have sane initial timings in the cache */ | ||
1044 | pata_macio_default_timings(priv); | ||
1045 | |||
1046 | /* Not sure what the real max is but we know it's less than 64K, let's | ||
1047 | * use 64K minus 256 | ||
1048 | */ | ||
1049 | dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG); | ||
1050 | |||
1051 | /* Allocate libata host for 1 port */ | ||
1052 | memset(&pinfo, 0, sizeof(struct ata_port_info)); | ||
1053 | pmac_macio_calc_timing_masks(priv, &pinfo); | ||
1054 | pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | | ||
1055 | ATA_FLAG_NO_LEGACY; | ||
1056 | pinfo.port_ops = &pata_macio_ops; | ||
1057 | pinfo.private_data = priv; | ||
1058 | |||
1059 | priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1); | ||
1060 | if (priv->host == NULL) { | ||
1061 | dev_err(priv->dev, "Failed to allocate ATA port structure\n"); | ||
1062 | return -ENOMEM; | ||
1063 | } | ||
1064 | |||
1065 | /* Setup the private data in host too */ | ||
1066 | priv->host->private_data = priv; | ||
1067 | |||
1068 | /* Map base registers */ | ||
1069 | priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100); | ||
1070 | if (priv->tfregs == NULL) { | ||
1071 | dev_err(priv->dev, "Failed to map ATA ports\n"); | ||
1072 | return -ENOMEM; | ||
1073 | } | ||
1074 | priv->host->iomap = &priv->tfregs; | ||
1075 | |||
1076 | /* Map DMA regs */ | ||
1077 | if (dmaregs != 0) { | ||
1078 | dma_regs = devm_ioremap(priv->dev, dmaregs, | ||
1079 | sizeof(struct dbdma_regs)); | ||
1080 | if (dma_regs == NULL) | ||
1081 | dev_warn(priv->dev, "Failed to map ATA DMA registers\n"); | ||
1082 | } | ||
1083 | |||
1084 | /* If chip has local feature control, map those regs too */ | ||
1085 | if (fcregs != 0) { | ||
1086 | priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4); | ||
1087 | if (priv->kauai_fcr == NULL) { | ||
1088 | dev_err(priv->dev, "Failed to map ATA FCR register\n"); | ||
1089 | return -ENOMEM; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | /* Setup port data structure */ | ||
1094 | pata_macio_setup_ios(&priv->host->ports[0]->ioaddr, | ||
1095 | priv->tfregs, dma_regs); | ||
1096 | priv->host->ports[0]->private_data = priv; | ||
1097 | |||
1098 | /* hard-reset the controller */ | ||
1099 | pata_macio_reset_hw(priv, 0); | ||
1100 | pata_macio_apply_timings(priv->host->ports[0], 0); | ||
1101 | |||
1102 | /* Enable bus master if necessary */ | ||
1103 | if (priv->pdev && dma_regs) | ||
1104 | pci_set_master(priv->pdev); | ||
1105 | |||
1106 | dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n", | ||
1107 | macio_ata_names[priv->kind], priv->aapl_bus_id); | ||
1108 | |||
1109 | /* Start it up */ | ||
1110 | priv->irq = irq; | ||
1111 | return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, | ||
1112 | &pata_macio_sht); | ||
1113 | } | ||
1114 | |||
1115 | static int __devinit pata_macio_attach(struct macio_dev *mdev, | ||
1116 | const struct of_device_id *match) | ||
1117 | { | ||
1118 | struct pata_macio_priv *priv; | ||
1119 | resource_size_t tfregs, dmaregs = 0; | ||
1120 | unsigned long irq; | ||
1121 | int rc; | ||
1122 | |||
1123 | /* Check for broken device-trees */ | ||
1124 | if (macio_resource_count(mdev) == 0) { | ||
1125 | dev_err(&mdev->ofdev.dev, | ||
1126 | "No addresses for controller\n"); | ||
1127 | return -ENXIO; | ||
1128 | } | ||
1129 | |||
1130 | /* Enable managed resources */ | ||
1131 | macio_enable_devres(mdev); | ||
1132 | |||
1133 | /* Allocate and init private data structure */ | ||
1134 | priv = devm_kzalloc(&mdev->ofdev.dev, | ||
1135 | sizeof(struct pata_macio_priv), GFP_KERNEL); | ||
1136 | if (priv == NULL) { | ||
1137 | dev_err(&mdev->ofdev.dev, | ||
1138 | "Failed to allocate private memory\n"); | ||
1139 | return -ENOMEM; | ||
1140 | } | ||
1141 | priv->node = of_node_get(mdev->ofdev.node); | ||
1142 | priv->mdev = mdev; | ||
1143 | priv->dev = &mdev->ofdev.dev; | ||
1144 | |||
1145 | /* Request memory resource for taskfile registers */ | ||
1146 | if (macio_request_resource(mdev, 0, "pata-macio")) { | ||
1147 | dev_err(&mdev->ofdev.dev, | ||
1148 | "Cannot obtain taskfile resource\n"); | ||
1149 | return -EBUSY; | ||
1150 | } | ||
1151 | tfregs = macio_resource_start(mdev, 0); | ||
1152 | |||
1153 | /* Request resources for DMA registers if any */ | ||
1154 | if (macio_resource_count(mdev) >= 2) { | ||
1155 | if (macio_request_resource(mdev, 1, "pata-macio-dma")) | ||
1156 | dev_err(&mdev->ofdev.dev, | ||
1157 | "Cannot obtain DMA resource\n"); | ||
1158 | else | ||
1159 | dmaregs = macio_resource_start(mdev, 1); | ||
1160 | } | ||
1161 | |||
1162 | /* | ||
1163 | * Fix up the missing IRQ for some old implementations with broken | ||
1164 | * device-trees. | ||
1165 | * | ||
1166 | * This is a bit bogus, it should be fixed in the device-tree itself, | ||
1167 | * via the existing macio fixups, based on the type of interrupt | ||
1168 | * controller in the machine. However, I have no test HW for this case, | ||
1169 | * and this trick works well enough on those old machines... | ||
1170 | */ | ||
1171 | if (macio_irq_count(mdev) == 0) { | ||
1172 | dev_warn(&mdev->ofdev.dev, | ||
1173 | "No interrupts for controller, using 13\n"); | ||
1174 | irq = irq_create_mapping(NULL, 13); | ||
1175 | } else | ||
1176 | irq = macio_irq(mdev, 0); | ||
1177 | |||
1178 | /* Prevent media bay callbacks until fully registered */ | ||
1179 | lock_media_bay(priv->mdev->media_bay); | ||
1180 | |||
1181 | /* Get register addresses and call common initialization */ | ||
1182 | rc = pata_macio_common_init(priv, | ||
1183 | tfregs, /* Taskfile regs */ | ||
1184 | dmaregs, /* DBDMA regs */ | ||
1185 | 0, /* Feature control */ | ||
1186 | irq); | ||
1187 | unlock_media_bay(priv->mdev->media_bay); | ||
1188 | |||
1189 | return rc; | ||
1190 | } | ||
1191 | |||
1192 | static int __devexit pata_macio_detach(struct macio_dev *mdev) | ||
1193 | { | ||
1194 | struct ata_host *host = macio_get_drvdata(mdev); | ||
1195 | struct pata_macio_priv *priv = host->private_data; | ||
1196 | |||
1197 | lock_media_bay(priv->mdev->media_bay); | ||
1198 | |||
1199 | /* Make sure the mediabay callback doesn't try to access | ||
1200 | * dead stuff | ||
1201 | */ | ||
1202 | priv->host->private_data = NULL; | ||
1203 | |||
1204 | ata_host_detach(host); | ||
1205 | |||
1206 | unlock_media_bay(priv->mdev->media_bay); | ||
1207 | |||
1208 | return 0; | ||
1209 | } | ||
1210 | |||
1211 | #ifdef CONFIG_PM | ||
1212 | |||
1213 | static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) | ||
1214 | { | ||
1215 | struct ata_host *host = macio_get_drvdata(mdev); | ||
1216 | |||
1217 | return pata_macio_do_suspend(host->private_data, mesg); | ||
1218 | } | ||
1219 | |||
1220 | static int pata_macio_resume(struct macio_dev *mdev) | ||
1221 | { | ||
1222 | struct ata_host *host = macio_get_drvdata(mdev); | ||
1223 | |||
1224 | return pata_macio_do_resume(host->private_data); | ||
1225 | } | ||
1226 | |||
1227 | #endif /* CONFIG_PM */ | ||
1228 | |||
1229 | #ifdef CONFIG_PMAC_MEDIABAY | ||
1230 | static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state) | ||
1231 | { | ||
1232 | struct ata_host *host = macio_get_drvdata(mdev); | ||
1233 | struct ata_port *ap; | ||
1234 | struct ata_eh_info *ehi; | ||
1235 | struct ata_device *dev; | ||
1236 | unsigned long flags; | ||
1237 | |||
1238 | if (!host || !host->private_data) | ||
1239 | return; | ||
1240 | ap = host->ports[0]; | ||
1241 | spin_lock_irqsave(ap->lock, flags); | ||
1242 | ehi = &ap->link.eh_info; | ||
1243 | if (mb_state == MB_CD) { | ||
1244 | ata_ehi_push_desc(ehi, "mediabay plug"); | ||
1245 | ata_ehi_hotplugged(ehi); | ||
1246 | ata_port_freeze(ap); | ||
1247 | } else { | ||
1248 | ata_ehi_push_desc(ehi, "mediabay unplug"); | ||
1249 | ata_for_each_dev(dev, &ap->link, ALL) | ||
1250 | dev->flags |= ATA_DFLAG_DETACH; | ||
1251 | ata_port_abort(ap); | ||
1252 | } | ||
1253 | spin_unlock_irqrestore(ap->lock, flags); | ||
1254 | |||
1255 | } | ||
1256 | #endif /* CONFIG_PMAC_MEDIABAY */ | ||
1257 | |||
1258 | |||
1259 | static int __devinit pata_macio_pci_attach(struct pci_dev *pdev, | ||
1260 | const struct pci_device_id *id) | ||
1261 | { | ||
1262 | struct pata_macio_priv *priv; | ||
1263 | struct device_node *np; | ||
1264 | resource_size_t rbase; | ||
1265 | |||
1266 | /* We cannot use a MacIO controller without its OF device node */ | ||
1267 | np = pci_device_to_OF_node(pdev); | ||
1268 | if (np == NULL) { | ||
1269 | dev_err(&pdev->dev, | ||
1270 | "Cannot find OF device node for controller\n"); | ||
1271 | return -ENODEV; | ||
1272 | } | ||
1273 | |||
1274 | /* Check that it can be enabled */ | ||
1275 | if (pcim_enable_device(pdev)) { | ||
1276 | dev_err(&pdev->dev, | ||
1277 | "Cannot enable controller PCI device\n"); | ||
1278 | return -ENXIO; | ||
1279 | } | ||
1280 | |||
1281 | /* Allocate and init private data structure */ | ||
1282 | priv = devm_kzalloc(&pdev->dev, | ||
1283 | sizeof(struct pata_macio_priv), GFP_KERNEL); | ||
1284 | if (priv == NULL) { | ||
1285 | dev_err(&pdev->dev, | ||
1286 | "Failed to allocate private memory\n"); | ||
1287 | return -ENOMEM; | ||
1288 | } | ||
1289 | priv->node = of_node_get(np); | ||
1290 | priv->pdev = pdev; | ||
1291 | priv->dev = &pdev->dev; | ||
1292 | |||
1293 | /* Get MMIO regions */ | ||
1294 | if (pci_request_regions(pdev, "pata-macio")) { | ||
1295 | dev_err(&pdev->dev, | ||
1296 | "Cannot obtain PCI resources\n"); | ||
1297 | return -EBUSY; | ||
1298 | } | ||
1299 | |||
1300 | /* Get register addresses and call common initialization */ | ||
1301 | rbase = pci_resource_start(pdev, 0); | ||
1302 | if (pata_macio_common_init(priv, | ||
1303 | rbase + 0x2000, /* Taskfile regs */ | ||
1304 | rbase + 0x1000, /* DBDMA regs */ | ||
1305 | rbase, /* Feature control */ | ||
1306 | pdev->irq)) | ||
1307 | return -ENXIO; | ||
1308 | |||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1312 | static void __devexit pata_macio_pci_detach(struct pci_dev *pdev) | ||
1313 | { | ||
1314 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
1315 | |||
1316 | ata_host_detach(host); | ||
1317 | } | ||
1318 | |||
1319 | #ifdef CONFIG_PM | ||
1320 | |||
1321 | static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) | ||
1322 | { | ||
1323 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
1324 | |||
1325 | return pata_macio_do_suspend(host->private_data, mesg); | ||
1326 | } | ||
1327 | |||
1328 | static int pata_macio_pci_resume(struct pci_dev *pdev) | ||
1329 | { | ||
1330 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | ||
1331 | |||
1332 | return pata_macio_do_resume(host->private_data); | ||
1333 | } | ||
1334 | |||
1335 | #endif /* CONFIG_PM */ | ||
1336 | |||
1337 | static struct of_device_id pata_macio_match[] = | ||
1338 | { | ||
1339 | { | ||
1340 | .name = "IDE", | ||
1341 | }, | ||
1342 | { | ||
1343 | .name = "ATA", | ||
1344 | }, | ||
1345 | { | ||
1346 | .type = "ide", | ||
1347 | }, | ||
1348 | { | ||
1349 | .type = "ata", | ||
1350 | }, | ||
1351 | {}, | ||
1352 | }; | ||
1353 | |||
1354 | static struct macio_driver pata_macio_driver = | ||
1355 | { | ||
1356 | .name = "pata-macio", | ||
1357 | .match_table = pata_macio_match, | ||
1358 | .probe = pata_macio_attach, | ||
1359 | .remove = pata_macio_detach, | ||
1360 | #ifdef CONFIG_PM | ||
1361 | .suspend = pata_macio_suspend, | ||
1362 | .resume = pata_macio_resume, | ||
1363 | #endif | ||
1364 | #ifdef CONFIG_PMAC_MEDIABAY | ||
1365 | .mediabay_event = pata_macio_mb_event, | ||
1366 | #endif | ||
1367 | .driver = { | ||
1368 | .owner = THIS_MODULE, | ||
1369 | }, | ||
1370 | }; | ||
1371 | |||
1372 | static const struct pci_device_id pata_macio_pci_match[] = { | ||
1373 | { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 }, | ||
1374 | { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 }, | ||
1375 | { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 }, | ||
1376 | { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 }, | ||
1377 | { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 }, | ||
1378 | {}, | ||
1379 | }; | ||
1380 | |||
1381 | static struct pci_driver pata_macio_pci_driver = { | ||
1382 | .name = "pata-pci-macio", | ||
1383 | .id_table = pata_macio_pci_match, | ||
1384 | .probe = pata_macio_pci_attach, | ||
1385 | .remove = pata_macio_pci_detach, | ||
1386 | #ifdef CONFIG_PM | ||
1387 | .suspend = pata_macio_pci_suspend, | ||
1388 | .resume = pata_macio_pci_resume, | ||
1389 | #endif | ||
1390 | .driver = { | ||
1391 | .owner = THIS_MODULE, | ||
1392 | }, | ||
1393 | }; | ||
1394 | MODULE_DEVICE_TABLE(pci, pata_macio_pci_match); | ||
1395 | |||
1396 | |||
1397 | static int __init pata_macio_init(void) | ||
1398 | { | ||
1399 | int rc; | ||
1400 | |||
1401 | if (!machine_is(powermac)) | ||
1402 | return -ENODEV; | ||
1403 | |||
1404 | rc = pci_register_driver(&pata_macio_pci_driver); | ||
1405 | if (rc) | ||
1406 | return rc; | ||
1407 | rc = macio_register_driver(&pata_macio_driver); | ||
1408 | if (rc) { | ||
1409 | pci_unregister_driver(&pata_macio_pci_driver); | ||
1410 | return rc; | ||
1411 | } | ||
1412 | return 0; | ||
1413 | } | ||
1414 | |||
1415 | static void __exit pata_macio_exit(void) | ||
1416 | { | ||
1417 | macio_unregister_driver(&pata_macio_driver); | ||
1418 | pci_unregister_driver(&pata_macio_pci_driver); | ||
1419 | } | ||
1420 | |||
1421 | module_init(pata_macio_init); | ||
1422 | module_exit(pata_macio_exit); | ||
1423 | |||
1424 | MODULE_AUTHOR("Benjamin Herrenschmidt"); | ||
1425 | MODULE_DESCRIPTION("Apple MacIO PATA driver"); | ||
1426 | MODULE_LICENSE("GPL"); | ||
1427 | MODULE_VERSION(DRV_VERSION); | ||
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index e62a4ccea54d..27fd775375b0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut | |||
35 | struct cpu *cpu = container_of(dev, struct cpu, sysdev); | 35 | struct cpu *cpu = container_of(dev, struct cpu, sysdev); |
36 | ssize_t ret; | 36 | ssize_t ret; |
37 | 37 | ||
38 | cpu_hotplug_driver_lock(); | ||
38 | switch (buf[0]) { | 39 | switch (buf[0]) { |
39 | case '0': | 40 | case '0': |
40 | ret = cpu_down(cpu->sysdev.id); | 41 | ret = cpu_down(cpu->sysdev.id); |
@@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut | |||
49 | default: | 50 | default: |
50 | ret = -EINVAL; | 51 | ret = -EINVAL; |
51 | } | 52 | } |
53 | cpu_hotplug_driver_unlock(); | ||
52 | 54 | ||
53 | if (ret >= 0) | 55 | if (ret >= 0) |
54 | ret = count; | 56 | ret = count; |
@@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu) | |||
72 | per_cpu(cpu_sys_devices, logical_cpu) = NULL; | 74 | per_cpu(cpu_sys_devices, logical_cpu) = NULL; |
73 | return; | 75 | return; |
74 | } | 76 | } |
77 | |||
78 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
79 | static ssize_t cpu_probe_store(struct class *class, const char *buf, | ||
80 | size_t count) | ||
81 | { | ||
82 | return arch_cpu_probe(buf, count); | ||
83 | } | ||
84 | |||
85 | static ssize_t cpu_release_store(struct class *class, const char *buf, | ||
86 | size_t count) | ||
87 | { | ||
88 | return arch_cpu_release(buf, count); | ||
89 | } | ||
90 | |||
91 | static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); | ||
92 | static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); | ||
93 | |||
94 | int __init cpu_probe_release_init(void) | ||
95 | { | ||
96 | int rc; | ||
97 | |||
98 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
99 | &class_attr_probe.attr); | ||
100 | if (!rc) | ||
101 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
102 | &class_attr_release.attr); | ||
103 | |||
104 | return rc; | ||
105 | } | ||
106 | device_initcall(cpu_probe_release_init); | ||
107 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
108 | |||
75 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 109 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
76 | static inline void register_cpu_control(struct cpu *cpu) | 110 | static inline void register_cpu_control(struct cpu *cpu) |
77 | { | 111 | { |
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 6380ad8d91bd..59ca2b77b574 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -200,7 +200,7 @@ struct floppy_state { | |||
200 | int ejected; | 200 | int ejected; |
201 | wait_queue_head_t wait; | 201 | wait_queue_head_t wait; |
202 | int wanted; | 202 | int wanted; |
203 | struct device_node* media_bay; /* NULL when not in bay */ | 203 | struct macio_dev *mdev; |
204 | char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; | 204 | char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; |
205 | }; | 205 | }; |
206 | 206 | ||
@@ -303,14 +303,13 @@ static int swim3_readbit(struct floppy_state *fs, int bit) | |||
303 | static void do_fd_request(struct request_queue * q) | 303 | static void do_fd_request(struct request_queue * q) |
304 | { | 304 | { |
305 | int i; | 305 | int i; |
306 | for(i=0;i<floppy_count;i++) | 306 | |
307 | { | 307 | for(i=0; i<floppy_count; i++) { |
308 | #ifdef CONFIG_PMAC_MEDIABAY | 308 | struct floppy_state *fs = &floppy_states[i]; |
309 | if (floppy_states[i].media_bay && | 309 | if (fs->mdev->media_bay && |
310 | check_media_bay(floppy_states[i].media_bay, MB_FD)) | 310 | check_media_bay(fs->mdev->media_bay) != MB_FD) |
311 | continue; | 311 | continue; |
312 | #endif /* CONFIG_PMAC_MEDIABAY */ | 312 | start_request(fs); |
313 | start_request(&floppy_states[i]); | ||
314 | } | 313 | } |
315 | } | 314 | } |
316 | 315 | ||
@@ -849,10 +848,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, | |||
849 | if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) | 848 | if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) |
850 | return -EPERM; | 849 | return -EPERM; |
851 | 850 | ||
852 | #ifdef CONFIG_PMAC_MEDIABAY | 851 | if (fs->mdev->media_bay && |
853 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) | 852 | check_media_bay(fs->mdev->media_bay) != MB_FD) |
854 | return -ENXIO; | 853 | return -ENXIO; |
855 | #endif | ||
856 | 854 | ||
857 | switch (cmd) { | 855 | switch (cmd) { |
858 | case FDEJECT: | 856 | case FDEJECT: |
@@ -876,10 +874,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
876 | int n, err = 0; | 874 | int n, err = 0; |
877 | 875 | ||
878 | if (fs->ref_count == 0) { | 876 | if (fs->ref_count == 0) { |
879 | #ifdef CONFIG_PMAC_MEDIABAY | 877 | if (fs->mdev->media_bay && |
880 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) | 878 | check_media_bay(fs->mdev->media_bay) != MB_FD) |
881 | return -ENXIO; | 879 | return -ENXIO; |
882 | #endif | ||
883 | out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); | 880 | out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); |
884 | out_8(&sw->control_bic, 0xff); | 881 | out_8(&sw->control_bic, 0xff); |
885 | out_8(&sw->mode, 0x95); | 882 | out_8(&sw->mode, 0x95); |
@@ -963,10 +960,9 @@ static int floppy_revalidate(struct gendisk *disk) | |||
963 | struct swim3 __iomem *sw; | 960 | struct swim3 __iomem *sw; |
964 | int ret, n; | 961 | int ret, n; |
965 | 962 | ||
966 | #ifdef CONFIG_PMAC_MEDIABAY | 963 | if (fs->mdev->media_bay && |
967 | if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) | 964 | check_media_bay(fs->mdev->media_bay) != MB_FD) |
968 | return -ENXIO; | 965 | return -ENXIO; |
969 | #endif | ||
970 | 966 | ||
971 | sw = fs->swim3; | 967 | sw = fs->swim3; |
972 | grab_drive(fs, revalidating, 0); | 968 | grab_drive(fs, revalidating, 0); |
@@ -1009,7 +1005,6 @@ static const struct block_device_operations floppy_fops = { | |||
1009 | static int swim3_add_device(struct macio_dev *mdev, int index) | 1005 | static int swim3_add_device(struct macio_dev *mdev, int index) |
1010 | { | 1006 | { |
1011 | struct device_node *swim = mdev->ofdev.node; | 1007 | struct device_node *swim = mdev->ofdev.node; |
1012 | struct device_node *mediabay; | ||
1013 | struct floppy_state *fs = &floppy_states[index]; | 1008 | struct floppy_state *fs = &floppy_states[index]; |
1014 | int rc = -EBUSY; | 1009 | int rc = -EBUSY; |
1015 | 1010 | ||
@@ -1036,9 +1031,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) | |||
1036 | } | 1031 | } |
1037 | dev_set_drvdata(&mdev->ofdev.dev, fs); | 1032 | dev_set_drvdata(&mdev->ofdev.dev, fs); |
1038 | 1033 | ||
1039 | mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? | 1034 | if (mdev->media_bay == NULL) |
1040 | swim->parent : NULL; | ||
1041 | if (mediabay == NULL) | ||
1042 | pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); | 1035 | pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); |
1043 | 1036 | ||
1044 | memset(fs, 0, sizeof(*fs)); | 1037 | memset(fs, 0, sizeof(*fs)); |
@@ -1068,7 +1061,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) | |||
1068 | fs->secpercyl = 36; | 1061 | fs->secpercyl = 36; |
1069 | fs->secpertrack = 18; | 1062 | fs->secpertrack = 18; |
1070 | fs->total_secs = 2880; | 1063 | fs->total_secs = 2880; |
1071 | fs->media_bay = mediabay; | 1064 | fs->mdev = mdev; |
1072 | init_waitqueue_head(&fs->wait); | 1065 | init_waitqueue_head(&fs->wait); |
1073 | 1066 | ||
1074 | fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); | 1067 | fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); |
@@ -1093,7 +1086,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) | |||
1093 | init_timer(&fs->timeout); | 1086 | init_timer(&fs->timeout); |
1094 | 1087 | ||
1095 | printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, | 1088 | printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, |
1096 | mediabay ? "in media bay" : ""); | 1089 | mdev->media_bay ? "in media bay" : ""); |
1097 | 1090 | ||
1098 | return 0; | 1091 | return 0; |
1099 | 1092 | ||
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 703959eba45a..d89da4ac061f 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -144,16 +144,13 @@ static int uninorth_configure(void) | |||
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
147 | static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, | 147 | static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type) |
148 | int type) | ||
149 | { | 148 | { |
150 | int i, j, num_entries; | 149 | int i, num_entries; |
151 | void *temp; | 150 | void *temp; |
151 | u32 *gp; | ||
152 | int mask_type; | 152 | int mask_type; |
153 | 153 | ||
154 | temp = agp_bridge->current_size; | ||
155 | num_entries = A_SIZE_32(temp)->num_entries; | ||
156 | |||
157 | if (type != mem->type) | 154 | if (type != mem->type) |
158 | return -EINVAL; | 155 | return -EINVAL; |
159 | 156 | ||
@@ -163,49 +160,12 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, | |||
163 | return -EINVAL; | 160 | return -EINVAL; |
164 | } | 161 | } |
165 | 162 | ||
166 | if ((pg_start + mem->page_count) > num_entries) | 163 | if (mem->page_count == 0) |
167 | return -EINVAL; | 164 | return 0; |
168 | |||
169 | j = pg_start; | ||
170 | |||
171 | while (j < (pg_start + mem->page_count)) { | ||
172 | if (agp_bridge->gatt_table[j]) | ||
173 | return -EBUSY; | ||
174 | j++; | ||
175 | } | ||
176 | |||
177 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
178 | agp_bridge->gatt_table[j] = | ||
179 | cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL); | ||
180 | flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), | ||
181 | (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); | ||
182 | } | ||
183 | (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]); | ||
184 | mb(); | ||
185 | |||
186 | uninorth_tlbflush(mem); | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | ||
191 | { | ||
192 | int i, num_entries; | ||
193 | void *temp; | ||
194 | u32 *gp; | ||
195 | int mask_type; | ||
196 | 165 | ||
197 | temp = agp_bridge->current_size; | 166 | temp = agp_bridge->current_size; |
198 | num_entries = A_SIZE_32(temp)->num_entries; | 167 | num_entries = A_SIZE_32(temp)->num_entries; |
199 | 168 | ||
200 | if (type != mem->type) | ||
201 | return -EINVAL; | ||
202 | |||
203 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
204 | if (mask_type != 0) { | ||
205 | /* We know nothing of memory types */ | ||
206 | return -EINVAL; | ||
207 | } | ||
208 | |||
209 | if ((pg_start + mem->page_count) > num_entries) | 169 | if ((pg_start + mem->page_count) > num_entries) |
210 | return -EINVAL; | 170 | return -EINVAL; |
211 | 171 | ||
@@ -213,14 +173,18 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
213 | for (i = 0; i < mem->page_count; ++i) { | 173 | for (i = 0; i < mem->page_count; ++i) { |
214 | if (gp[i]) { | 174 | if (gp[i]) { |
215 | dev_info(&agp_bridge->dev->dev, | 175 | dev_info(&agp_bridge->dev->dev, |
216 | "u3_insert_memory: entry 0x%x occupied (%x)\n", | 176 | "uninorth_insert_memory: entry 0x%x occupied (%x)\n", |
217 | i, gp[i]); | 177 | i, gp[i]); |
218 | return -EBUSY; | 178 | return -EBUSY; |
219 | } | 179 | } |
220 | } | 180 | } |
221 | 181 | ||
222 | for (i = 0; i < mem->page_count; i++) { | 182 | for (i = 0; i < mem->page_count; i++) { |
223 | gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; | 183 | if (is_u3) |
184 | gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; | ||
185 | else | ||
186 | gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | | ||
187 | 0x1UL); | ||
224 | flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), | 188 | flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), |
225 | (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); | 189 | (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); |
226 | } | 190 | } |
@@ -230,14 +194,23 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
230 | return 0; | 194 | return 0; |
231 | } | 195 | } |
232 | 196 | ||
233 | int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | 197 | int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) |
234 | { | 198 | { |
235 | size_t i; | 199 | size_t i; |
236 | u32 *gp; | 200 | u32 *gp; |
201 | int mask_type; | ||
202 | |||
203 | if (type != mem->type) | ||
204 | return -EINVAL; | ||
237 | 205 | ||
238 | if (type != 0 || mem->type != 0) | 206 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); |
207 | if (mask_type != 0) { | ||
239 | /* We know nothing of memory types */ | 208 | /* We know nothing of memory types */ |
240 | return -EINVAL; | 209 | return -EINVAL; |
210 | } | ||
211 | |||
212 | if (mem->page_count == 0) | ||
213 | return 0; | ||
241 | 214 | ||
242 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; | 215 | gp = (u32 *) &agp_bridge->gatt_table[pg_start]; |
243 | for (i = 0; i < mem->page_count; ++i) | 216 | for (i = 0; i < mem->page_count; ++i) |
@@ -536,7 +509,7 @@ const struct agp_bridge_driver uninorth_agp_driver = { | |||
536 | .create_gatt_table = uninorth_create_gatt_table, | 509 | .create_gatt_table = uninorth_create_gatt_table, |
537 | .free_gatt_table = uninorth_free_gatt_table, | 510 | .free_gatt_table = uninorth_free_gatt_table, |
538 | .insert_memory = uninorth_insert_memory, | 511 | .insert_memory = uninorth_insert_memory, |
539 | .remove_memory = agp_generic_remove_memory, | 512 | .remove_memory = uninorth_remove_memory, |
540 | .alloc_by_type = agp_generic_alloc_by_type, | 513 | .alloc_by_type = agp_generic_alloc_by_type, |
541 | .free_by_type = agp_generic_free_by_type, | 514 | .free_by_type = agp_generic_free_by_type, |
542 | .agp_alloc_page = agp_generic_alloc_page, | 515 | .agp_alloc_page = agp_generic_alloc_page, |
@@ -562,8 +535,8 @@ const struct agp_bridge_driver u3_agp_driver = { | |||
562 | .agp_enable = uninorth_agp_enable, | 535 | .agp_enable = uninorth_agp_enable, |
563 | .create_gatt_table = uninorth_create_gatt_table, | 536 | .create_gatt_table = uninorth_create_gatt_table, |
564 | .free_gatt_table = uninorth_free_gatt_table, | 537 | .free_gatt_table = uninorth_free_gatt_table, |
565 | .insert_memory = u3_insert_memory, | 538 | .insert_memory = uninorth_insert_memory, |
566 | .remove_memory = u3_remove_memory, | 539 | .remove_memory = uninorth_remove_memory, |
567 | .alloc_by_type = agp_generic_alloc_by_type, | 540 | .alloc_by_type = agp_generic_alloc_by_type, |
568 | .free_by_type = agp_generic_free_by_type, | 541 | .free_by_type = agp_generic_free_by_type, |
569 | .agp_alloc_page = agp_generic_alloc_page, | 542 | .agp_alloc_page = agp_generic_alloc_page, |
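The two insert paths are merged into a single uninorth_insert_memory() that picks the GART entry encoding at run time. Below is a minimal sketch of the two encodings it selects between, assuming the driver-global is_u3 flag set at probe time; uninorth_make_entry() is a hypothetical helper used only for illustration, not part of the patch:

static inline u32 uninorth_make_entry(struct page *page, int is_u3)
{
	unsigned long phys = page_to_phys(page);

	if (is_u3)
		/* U3: physical page number, valid bit in bit 31 */
		return (phys >> PAGE_SHIFT) | 0x80000000UL;

	/* Classic UniNorth: little-endian physical address, low bit marks the entry valid */
	return cpu_to_le32((phys & 0xFFFFF000UL) | 0x1UL);
}

Either way, the unified function still flushes the dcache range for each inserted page and reads back the first GATT entry before the TLB flush, as shown above.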
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index a632f25f144a..416d3423150d 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -832,6 +832,7 @@ int hvc_remove(struct hvc_struct *hp) | |||
832 | tty_hangup(tty); | 832 | tty_hangup(tty); |
833 | return 0; | 833 | return 0; |
834 | } | 834 | } |
835 | EXPORT_SYMBOL_GPL(hvc_remove); | ||
835 | 836 | ||
836 | /* Driver initialization: called as soon as someone uses hvc_alloc(). */ | 837 | /* Driver initialization: called as soon as someone uses hvc_alloc(). */ |
837 | static int hvc_init(void) | 838 | static int hvc_init(void) |
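Exporting hvc_remove() lets hotplug-capable console backends built as modules tear a port down when the underlying channel goes away. A hedged sketch of the calling convention; my_port and its hvc field are hypothetical:

static void my_port_destroy(struct my_port *port)
{
	if (port->hvc) {
		/* Unregisters the tty device and, if attached, the console */
		hvc_remove(port->hvc);
		port->hvc = NULL;
	}
}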
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 97642a7a79c4..7a4e788cab2f 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c | |||
@@ -43,10 +43,7 @@ | |||
43 | #include <asm/pmac_feature.h> | 43 | #include <asm/pmac_feature.h> |
44 | #include <asm/sections.h> | 44 | #include <asm/sections.h> |
45 | #include <asm/irq.h> | 45 | #include <asm/irq.h> |
46 | |||
47 | #ifndef CONFIG_PPC64 | ||
48 | #include <asm/mediabay.h> | 46 | #include <asm/mediabay.h> |
49 | #endif | ||
50 | 47 | ||
51 | #define DRV_NAME "ide-pmac" | 48 | #define DRV_NAME "ide-pmac" |
52 | 49 | ||
@@ -59,13 +56,14 @@ typedef struct pmac_ide_hwif { | |||
59 | int irq; | 56 | int irq; |
60 | int kind; | 57 | int kind; |
61 | int aapl_bus_id; | 58 | int aapl_bus_id; |
62 | unsigned mediabay : 1; | ||
63 | unsigned broken_dma : 1; | 59 | unsigned broken_dma : 1; |
64 | unsigned broken_dma_warn : 1; | 60 | unsigned broken_dma_warn : 1; |
65 | struct device_node* node; | 61 | struct device_node* node; |
66 | struct macio_dev *mdev; | 62 | struct macio_dev *mdev; |
67 | u32 timings[4]; | 63 | u32 timings[4]; |
68 | volatile u32 __iomem * *kauai_fcr; | 64 | volatile u32 __iomem * *kauai_fcr; |
65 | ide_hwif_t *hwif; | ||
66 | |||
69 | /* Those fields are duplicating what is in hwif. We currently | 67 | /* Those fields are duplicating what is in hwif. We currently |
70 | * can't use the hwif ones because of some assumptions that are | 68 | * can't use the hwif ones because of some assumptions that are |
71 | * being done by the generic code about the kind of dma controller | 69 |
@@ -854,6 +852,11 @@ sanitize_timings(pmac_ide_hwif_t *pmif) | |||
854 | pmif->timings[2] = pmif->timings[3] = value2; | 852 | pmif->timings[2] = pmif->timings[3] = value2; |
855 | } | 853 | } |
856 | 854 | ||
855 | static int on_media_bay(pmac_ide_hwif_t *pmif) | ||
856 | { | ||
857 | return pmif->mdev && pmif->mdev->media_bay != NULL; | ||
858 | } | ||
859 | |||
857 | /* Suspend call back, should be called after the child devices | 860 | /* Suspend call back, should be called after the child devices |
858 | * have actually been suspended | 861 | * have actually been suspended |
859 | */ | 862 | */ |
@@ -866,7 +869,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) | |||
866 | disable_irq(pmif->irq); | 869 | disable_irq(pmif->irq); |
867 | 870 | ||
868 | /* The media bay will handle itself just fine */ | 871 | /* The media bay will handle itself just fine */ |
869 | if (pmif->mediabay) | 872 | if (on_media_bay(pmif)) |
870 | return 0; | 873 | return 0; |
871 | 874 | ||
872 | /* Kauai has bus control FCRs directly here */ | 875 | /* Kauai has bus control FCRs directly here */ |
@@ -889,7 +892,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) | |||
889 | static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) | 892 | static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) |
890 | { | 893 | { |
891 | /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ | 894 | /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ |
892 | if (!pmif->mediabay) { | 895 | if (!on_media_bay(pmif)) { |
893 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); | 896 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); |
894 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); | 897 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); |
895 | msleep(10); | 898 | msleep(10); |
@@ -950,13 +953,11 @@ static void pmac_ide_init_dev(ide_drive_t *drive) | |||
950 | pmac_ide_hwif_t *pmif = | 953 | pmac_ide_hwif_t *pmif = |
951 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | 954 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); |
952 | 955 | ||
953 | if (pmif->mediabay) { | 956 | if (on_media_bay(pmif)) { |
954 | #ifdef CONFIG_PMAC_MEDIABAY | 957 | if (check_media_bay(pmif->mdev->media_bay) == MB_CD) { |
955 | if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) { | ||
956 | drive->dev_flags &= ~IDE_DFLAG_NOPROBE; | 958 | drive->dev_flags &= ~IDE_DFLAG_NOPROBE; |
957 | return; | 959 | return; |
958 | } | 960 | } |
959 | #endif | ||
960 | drive->dev_flags |= IDE_DFLAG_NOPROBE; | 961 | drive->dev_flags |= IDE_DFLAG_NOPROBE; |
961 | } | 962 | } |
962 | } | 963 | } |
@@ -1072,26 +1073,23 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, | |||
1072 | writel(KAUAI_FCR_UATA_MAGIC | | 1073 | writel(KAUAI_FCR_UATA_MAGIC | |
1073 | KAUAI_FCR_UATA_RESET_N | | 1074 | KAUAI_FCR_UATA_RESET_N | |
1074 | KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr); | 1075 | KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr); |
1075 | |||
1076 | pmif->mediabay = 0; | ||
1077 | 1076 | ||
1078 | /* Make sure we have sane timings */ | 1077 | /* Make sure we have sane timings */ |
1079 | sanitize_timings(pmif); | 1078 | sanitize_timings(pmif); |
1080 | 1079 | ||
1080 | /* If we are on a media bay, wait for it to settle and lock it */ | ||
1081 | if (pmif->mdev) | ||
1082 | lock_media_bay(pmif->mdev->media_bay); | ||
1083 | |||
1081 | host = ide_host_alloc(&d, hws, 1); | 1084 | host = ide_host_alloc(&d, hws, 1); |
1082 | if (host == NULL) | 1085 | if (host == NULL) { |
1083 | return -ENOMEM; | 1086 | rc = -ENOMEM; |
1084 | hwif = host->ports[0]; | 1087 | goto bail; |
1088 | } | ||
1089 | hwif = pmif->hwif = host->ports[0]; | ||
1085 | 1090 | ||
1086 | #ifndef CONFIG_PPC64 | 1091 | if (on_media_bay(pmif)) { |
1087 | /* XXX FIXME: Media bay stuff need re-organizing */ | 1092 | /* Fixup bus ID for media bay */ |
1088 | if (np->parent && np->parent->name | ||
1089 | && strcasecmp(np->parent->name, "media-bay") == 0) { | ||
1090 | #ifdef CONFIG_PMAC_MEDIABAY | ||
1091 | media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, | ||
1092 | hwif); | ||
1093 | #endif /* CONFIG_PMAC_MEDIABAY */ | ||
1094 | pmif->mediabay = 1; | ||
1095 | if (!bidp) | 1093 | if (!bidp) |
1096 | pmif->aapl_bus_id = 1; | 1094 | pmif->aapl_bus_id = 1; |
1097 | } else if (pmif->kind == controller_ohare) { | 1095 | } else if (pmif->kind == controller_ohare) { |
@@ -1100,9 +1098,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, | |||
1100 | * units, I keep the old way | 1098 | * units, I keep the old way |
1101 | */ | 1099 | */ |
1102 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); | 1100 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); |
1103 | } else | 1101 | } else { |
1104 | #endif | ||
1105 | { | ||
1106 | /* This is necessary to enable IDE when net-booting */ | 1102 | /* This is necessary to enable IDE when net-booting */ |
1107 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); | 1103 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); |
1108 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); | 1104 | ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); |
@@ -1112,17 +1108,21 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, | |||
1112 | } | 1108 | } |
1113 | 1109 | ||
1114 | printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " | 1110 | printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " |
1115 | "bus ID %d%s, irq %d\n", model_name[pmif->kind], | 1111 | "bus ID %d%s, irq %d\n", model_name[pmif->kind], |
1116 | pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, | 1112 | pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, |
1117 | pmif->mediabay ? " (mediabay)" : "", hw->irq); | 1113 | on_media_bay(pmif) ? " (mediabay)" : "", hw->irq); |
1118 | 1114 | ||
1119 | rc = ide_host_register(host, &d, hws); | 1115 | rc = ide_host_register(host, &d, hws); |
1120 | if (rc) { | 1116 | if (rc) |
1121 | ide_host_free(host); | 1117 | pmif->hwif = NULL; |
1122 | return rc; | ||
1123 | } | ||
1124 | 1118 | ||
1125 | return 0; | 1119 | if (pmif->mdev) |
1120 | unlock_media_bay(pmif->mdev->media_bay); | ||
1121 | |||
1122 | bail: | ||
1123 | if (rc && host) | ||
1124 | ide_host_free(host); | ||
1125 | return rc; | ||
1126 | } | 1126 | } |
1127 | 1127 | ||
1128 | static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) | 1128 | static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) |
@@ -1362,6 +1362,25 @@ pmac_ide_pci_resume(struct pci_dev *pdev) | |||
1362 | return rc; | 1362 | return rc; |
1363 | } | 1363 | } |
1364 | 1364 | ||
1365 | #ifdef CONFIG_PMAC_MEDIABAY | ||
1366 | static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state) | ||
1367 | { | ||
1368 | pmac_ide_hwif_t *pmif = | ||
1369 | (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); | ||
1370 | |||
1371 | switch(mb_state) { | ||
1372 | case MB_CD: | ||
1373 | if (!pmif->hwif->present) | ||
1374 | ide_port_scan(pmif->hwif); | ||
1375 | break; | ||
1376 | default: | ||
1377 | if (pmif->hwif->present) | ||
1378 | ide_port_unregister_devices(pmif->hwif); | ||
1379 | } | ||
1380 | } | ||
1381 | #endif /* CONFIG_PMAC_MEDIABAY */ | ||
1382 | |||
1383 | |||
1365 | static struct of_device_id pmac_ide_macio_match[] = | 1384 | static struct of_device_id pmac_ide_macio_match[] = |
1366 | { | 1385 | { |
1367 | { | 1386 | { |
@@ -1386,6 +1405,9 @@ static struct macio_driver pmac_ide_macio_driver = | |||
1386 | .probe = pmac_ide_macio_attach, | 1405 | .probe = pmac_ide_macio_attach, |
1387 | .suspend = pmac_ide_macio_suspend, | 1406 | .suspend = pmac_ide_macio_suspend, |
1388 | .resume = pmac_ide_macio_resume, | 1407 | .resume = pmac_ide_macio_resume, |
1408 | #ifdef CONFIG_PMAC_MEDIABAY | ||
1409 | .mediabay_event = pmac_ide_macio_mb_event, | ||
1410 | #endif | ||
1389 | }; | 1411 | }; |
1390 | 1412 | ||
1391 | static const struct pci_device_id pmac_ide_pci_match[] = { | 1413 | static const struct pci_device_id pmac_ide_pci_match[] = { |
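With the media-bay handling moved out of the IDE driver's poll loop, probe now brackets host setup with the bay lock so a swap cannot race registration, and hotplug is delivered through the new .mediabay_event callback shown above. A condensed sketch of that probe-time bracket; do_port_setup() is a hypothetical stand-in for the real alloc/register steps:

	if (pmif->mdev)
		lock_media_bay(pmif->mdev->media_bay);	/* hold bay state stable */

	rc = do_port_setup(pmif);			/* ide_host_alloc() + ide_host_register() */

	if (pmif->mdev)
		unlock_media_bay(pmif->mdev->media_bay);	/* resume bay state machine */

Keeping the bay locked until registration completes is what lets the later mediabay_event handler assume pmif->hwif is either fully set up or NULL.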
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 588a5b0bc4b5..26a303a1d1ab 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c | |||
@@ -379,6 +379,11 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, | |||
379 | dev->ofdev.dev.parent = parent; | 379 | dev->ofdev.dev.parent = parent; |
380 | dev->ofdev.dev.bus = &macio_bus_type; | 380 | dev->ofdev.dev.bus = &macio_bus_type; |
381 | dev->ofdev.dev.release = macio_release_dev; | 381 | dev->ofdev.dev.release = macio_release_dev; |
382 | dev->ofdev.dev.dma_parms = &dev->dma_parms; | ||
383 | |||
384 | /* Standard DMA parameters */ | ||
385 | dma_set_max_seg_size(&dev->ofdev.dev, 65536); | ||
386 | dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff); | ||
382 | 387 | ||
383 | #ifdef CONFIG_PCI | 388 | #ifdef CONFIG_PCI |
384 | /* Set the DMA ops to the ones from the PCI device, this could be | 389 | /* Set the DMA ops to the ones from the PCI device, this could be |
@@ -538,6 +543,42 @@ void macio_unregister_driver(struct macio_driver *drv) | |||
538 | driver_unregister(&drv->driver); | 543 | driver_unregister(&drv->driver); |
539 | } | 544 | } |
540 | 545 | ||
546 | /* Managed MacIO resources */ | ||
547 | struct macio_devres { | ||
548 | u32 res_mask; | ||
549 | }; | ||
550 | |||
551 | static void maciom_release(struct device *gendev, void *res) | ||
552 | { | ||
553 | struct macio_dev *dev = to_macio_device(gendev); | ||
554 | struct macio_devres *dr = res; | ||
555 | int i, max; | ||
556 | |||
557 | max = min(dev->n_resources, 32); | ||
558 | for (i = 0; i < max; i++) { | ||
559 | if (dr->res_mask & (1 << i)) | ||
560 | macio_release_resource(dev, i); | ||
561 | } | ||
562 | } | ||
563 | |||
564 | int macio_enable_devres(struct macio_dev *dev) | ||
565 | { | ||
566 | struct macio_devres *dr; | ||
567 | |||
568 | dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); | ||
569 | if (!dr) { | ||
570 | dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL); | ||
571 | if (!dr) | ||
572 | return -ENOMEM; | ||
573 | } | ||
574 | return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL; | ||
575 | } | ||
576 | |||
577 | static struct macio_devres * find_macio_dr(struct macio_dev *dev) | ||
578 | { | ||
579 | return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); | ||
580 | } | ||
581 | |||
541 | /** | 582 | /** |
542 | * macio_request_resource - Request an MMIO resource | 583 | * macio_request_resource - Request an MMIO resource |
543 | * @dev: pointer to the device holding the resource | 584 | * @dev: pointer to the device holding the resource |
@@ -555,6 +596,8 @@ void macio_unregister_driver(struct macio_driver *drv) | |||
555 | int macio_request_resource(struct macio_dev *dev, int resource_no, | 596 | int macio_request_resource(struct macio_dev *dev, int resource_no, |
556 | const char *name) | 597 | const char *name) |
557 | { | 598 | { |
599 | struct macio_devres *dr = find_macio_dr(dev); | ||
600 | |||
558 | if (macio_resource_len(dev, resource_no) == 0) | 601 | if (macio_resource_len(dev, resource_no) == 0) |
559 | return 0; | 602 | return 0; |
560 | 603 | ||
@@ -562,6 +605,9 @@ int macio_request_resource(struct macio_dev *dev, int resource_no, | |||
562 | macio_resource_len(dev, resource_no), | 605 | macio_resource_len(dev, resource_no), |
563 | name)) | 606 | name)) |
564 | goto err_out; | 607 | goto err_out; |
608 | |||
609 | if (dr && resource_no < 32) | ||
610 | dr->res_mask |= 1 << resource_no; | ||
565 | 611 | ||
566 | return 0; | 612 | return 0; |
567 | 613 | ||
@@ -582,10 +628,14 @@ err_out: | |||
582 | */ | 628 | */ |
583 | void macio_release_resource(struct macio_dev *dev, int resource_no) | 629 | void macio_release_resource(struct macio_dev *dev, int resource_no) |
584 | { | 630 | { |
631 | struct macio_devres *dr = find_macio_dr(dev); | ||
632 | |||
585 | if (macio_resource_len(dev, resource_no) == 0) | 633 | if (macio_resource_len(dev, resource_no) == 0) |
586 | return; | 634 | return; |
587 | release_mem_region(macio_resource_start(dev, resource_no), | 635 | release_mem_region(macio_resource_start(dev, resource_no), |
588 | macio_resource_len(dev, resource_no)); | 636 | macio_resource_len(dev, resource_no)); |
637 | if (dr && resource_no < 32) | ||
638 | dr->res_mask &= ~(1 << resource_no); | ||
589 | } | 639 | } |
590 | 640 | ||
591 | /** | 641 | /** |
@@ -744,3 +794,5 @@ EXPORT_SYMBOL(macio_request_resource); | |||
744 | EXPORT_SYMBOL(macio_release_resource); | 794 | EXPORT_SYMBOL(macio_release_resource); |
745 | EXPORT_SYMBOL(macio_request_resources); | 795 | EXPORT_SYMBOL(macio_request_resources); |
746 | EXPORT_SYMBOL(macio_release_resources); | 796 | EXPORT_SYMBOL(macio_release_resources); |
797 | EXPORT_SYMBOL(macio_enable_devres); | ||
798 | |||
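The new macio_enable_devres() opts a device into managed resource tracking: once enabled, every macio_request_resource() is recorded in the per-device mask and released automatically when the driver unbinds. A hedged sketch of a probe routine using it; the foo names are hypothetical:

static int foo_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int rc;

	rc = macio_enable_devres(mdev);
	if (rc < 0)
		return rc;

	/* Recorded in the devres mask, released automatically on unbind */
	rc = macio_request_resource(mdev, 0, "foo-regs");
	if (rc)
		return rc;

	/* ... ioremap the region, register the device, etc. ... */
	return 0;
}

Note also that macio_add_one_device() now sets default DMA segment parameters (64 KiB max segment, 32-bit boundary) for every macio child, so drivers need not set them individually.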
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 029ad8ce8a7e..08002b88f342 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c | |||
@@ -33,15 +33,6 @@ | |||
33 | #include <linux/adb.h> | 33 | #include <linux/adb.h> |
34 | #include <linux/pmu.h> | 34 | #include <linux/pmu.h> |
35 | 35 | ||
36 | |||
37 | #define MB_DEBUG | ||
38 | |||
39 | #ifdef MB_DEBUG | ||
40 | #define MBDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg) | ||
41 | #else | ||
42 | #define MBDBG(fmt, arg...) do { } while (0) | ||
43 | #endif | ||
44 | |||
45 | #define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2)) | 36 | #define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2)) |
46 | #define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r)) | 37 | #define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r)) |
47 | 38 | ||
@@ -76,28 +67,14 @@ struct media_bay_info { | |||
76 | int index; | 67 | int index; |
77 | int cached_gpio; | 68 | int cached_gpio; |
78 | int sleeping; | 69 | int sleeping; |
70 | int user_lock; | ||
79 | struct mutex lock; | 71 | struct mutex lock; |
80 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
81 | ide_hwif_t *cd_port; | ||
82 | void __iomem *cd_base; | ||
83 | int cd_irq; | ||
84 | int cd_retry; | ||
85 | #endif | ||
86 | #if defined(CONFIG_BLK_DEV_IDE_PMAC) | ||
87 | int cd_index; | ||
88 | #endif | ||
89 | }; | 72 | }; |
90 | 73 | ||
91 | #define MAX_BAYS 2 | 74 | #define MAX_BAYS 2 |
92 | 75 | ||
93 | static struct media_bay_info media_bays[MAX_BAYS]; | 76 | static struct media_bay_info media_bays[MAX_BAYS]; |
94 | int media_bay_count = 0; | 77 | static int media_bay_count = 0; |
95 | |||
96 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
97 | /* check the busy bit in the media-bay ide interface | ||
98 | (assumes the media-bay contains an ide device) */ | ||
99 | #define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0) | ||
100 | #endif | ||
101 | 78 | ||
102 | /* | 79 | /* |
103 | * Wait that number of ms between each step in normal polling mode | 80 | * Wait that number of ms between each step in normal polling mode |
@@ -130,21 +107,11 @@ int media_bay_count = 0; | |||
130 | 107 | ||
131 | /* | 108 | /* |
132 | * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted | 109 | * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted |
133 | * (or until the device is ready) before waiting for busy bit to disappear | 110 | * (or until the device is ready) before calling into the driver |
134 | */ | 111 | */ |
135 | #define MB_IDE_WAIT 1000 | 112 | #define MB_IDE_WAIT 1000 |
136 | 113 | ||
137 | /* | 114 | /* |
138 | * Timeout waiting for busy bit of an IDE device to go down | ||
139 | */ | ||
140 | #define MB_IDE_TIMEOUT 5000 | ||
141 | |||
142 | /* | ||
143 | * Max retries of the full power up/down sequence for an IDE device | ||
144 | */ | ||
145 | #define MAX_CD_RETRIES 3 | ||
146 | |||
147 | /* | ||
148 | * States of a media bay | 115 | * States of a media bay |
149 | */ | 116 | */ |
150 | enum { | 117 | enum { |
@@ -153,7 +120,6 @@ enum { | |||
153 | mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */ | 120 | mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */ |
154 | mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */ | 121 | mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */ |
155 | mb_ide_resetting, /* IDE reset bit unset, waiting MB_IDE_WAIT */ | 122 | mb_ide_resetting, /* IDE reset bit unset, waiting MB_IDE_WAIT */ |
156 | mb_ide_waiting, /* Waiting for BUSY bit to go away until MB_IDE_TIMEOUT */ | ||
157 | mb_up, /* Media bay full */ | 123 | mb_up, /* Media bay full */ |
158 | mb_powering_down /* Powering down (avoid too fast down/up) */ | 124 | mb_powering_down /* Powering down (avoid too fast down/up) */ |
159 | }; | 125 | }; |
@@ -373,12 +339,12 @@ static inline void set_mb_power(struct media_bay_info* bay, int onoff) | |||
373 | if (onoff) { | 339 | if (onoff) { |
374 | bay->ops->power(bay, 1); | 340 | bay->ops->power(bay, 1); |
375 | bay->state = mb_powering_up; | 341 | bay->state = mb_powering_up; |
376 | MBDBG("mediabay%d: powering up\n", bay->index); | 342 | pr_debug("mediabay%d: powering up\n", bay->index); |
377 | } else { | 343 | } else { |
378 | /* Make sure everything is powered down & disabled */ | 344 | /* Make sure everything is powered down & disabled */ |
379 | bay->ops->power(bay, 0); | 345 | bay->ops->power(bay, 0); |
380 | bay->state = mb_powering_down; | 346 | bay->state = mb_powering_down; |
381 | MBDBG("mediabay%d: powering down\n", bay->index); | 347 | pr_debug("mediabay%d: powering down\n", bay->index); |
382 | } | 348 | } |
383 | bay->timer = msecs_to_jiffies(MB_POWER_DELAY); | 349 | bay->timer = msecs_to_jiffies(MB_POWER_DELAY); |
384 | } | 350 | } |
@@ -387,107 +353,118 @@ static void poll_media_bay(struct media_bay_info* bay) | |||
387 | { | 353 | { |
388 | int id = bay->ops->content(bay); | 354 | int id = bay->ops->content(bay); |
389 | 355 | ||
390 | if (id == bay->last_value) { | 356 | static char *mb_content_types[] = { |
391 | if (id != bay->content_id) { | 357 | "a floppy drive", |
392 | bay->value_count += msecs_to_jiffies(MB_POLL_DELAY); | 358 | "a floppy drive", |
393 | if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) { | 359 | "an unsupported audio device", |
394 | /* If the device type changes without going thru | 360 | "an ATA device", |
395 | * "MB_NO", we force a pass by "MB_NO" to make sure | 361 | "an unsupported PCI device", |
396 | * things are properly reset | 362 | "an unknown device", |
397 | */ | 363 | }; |
398 | if ((id != MB_NO) && (bay->content_id != MB_NO)) { | 364 | |
399 | id = MB_NO; | 365 | if (id != bay->last_value) { |
400 | MBDBG("mediabay%d: forcing MB_NO\n", bay->index); | ||
401 | } | ||
402 | MBDBG("mediabay%d: switching to %d\n", bay->index, id); | ||
403 | set_mb_power(bay, id != MB_NO); | ||
404 | bay->content_id = id; | ||
405 | if (id == MB_NO) { | ||
406 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
407 | bay->cd_retry = 0; | ||
408 | #endif | ||
409 | printk(KERN_INFO "media bay %d is empty\n", bay->index); | ||
410 | } | ||
411 | } | ||
412 | } | ||
413 | } else { | ||
414 | bay->last_value = id; | 366 | bay->last_value = id; |
415 | bay->value_count = 0; | 367 | bay->value_count = 0; |
368 | return; | ||
369 | } | ||
370 | if (id == bay->content_id) | ||
371 | return; | ||
372 | |||
373 | bay->value_count += msecs_to_jiffies(MB_POLL_DELAY); | ||
374 | if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) { | ||
375 | /* If the device type changes without going thru | ||
376 | * "MB_NO", we force a pass by "MB_NO" to make sure | ||
377 | * things are properly reset | ||
378 | */ | ||
379 | if ((id != MB_NO) && (bay->content_id != MB_NO)) { | ||
380 | id = MB_NO; | ||
381 | pr_debug("mediabay%d: forcing MB_NO\n", bay->index); | ||
382 | } | ||
383 | pr_debug("mediabay%d: switching to %d\n", bay->index, id); | ||
384 | set_mb_power(bay, id != MB_NO); | ||
385 | bay->content_id = id; | ||
386 | if (id >= MB_NO || id < 0) | ||
387 | printk(KERN_INFO "mediabay%d: Bay is now empty\n", bay->index); | ||
388 | else | ||
389 | printk(KERN_INFO "mediabay%d: Bay contains %s\n", | ||
390 | bay->index, mb_content_types[id]); | ||
416 | } | 391 | } |
417 | } | 392 | } |
418 | 393 | ||
419 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | 394 | int check_media_bay(struct macio_dev *baydev) |
420 | int check_media_bay(struct device_node *which_bay, int what) | ||
421 | { | 395 | { |
422 | int i; | 396 | struct media_bay_info* bay; |
397 | int id; | ||
423 | 398 | ||
424 | for (i=0; i<media_bay_count; i++) | 399 | if (baydev == NULL) |
425 | if (media_bays[i].mdev && which_bay == media_bays[i].mdev->ofdev.node) { | 400 | return MB_NO; |
426 | if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up) | 401 | |
427 | return 0; | 402 | /* This returns an instant snapshot, not locking, sine |
428 | media_bays[i].cd_index = -1; | 403 | /* This returns an instant snapshot, not locking, since |
429 | return -EINVAL; | 404 | * fuzzyness of the result if called at the wrong time is |
430 | } | 405 | * fuzziness of the result if called at the wrong time is |
431 | return -ENODEV; | 406 | */ |
407 | bay = macio_get_drvdata(baydev); | ||
408 | if (bay == NULL) | ||
409 | return MB_NO; | ||
410 | id = bay->content_id; | ||
411 | if (bay->state != mb_up) | ||
412 | return MB_NO; | ||
413 | if (id == MB_FD1) | ||
414 | return MB_FD; | ||
415 | return id; | ||
432 | } | 416 | } |
433 | EXPORT_SYMBOL(check_media_bay); | 417 | EXPORT_SYMBOL_GPL(check_media_bay); |
434 | 418 | ||
435 | int check_media_bay_by_base(unsigned long base, int what) | 419 | void lock_media_bay(struct macio_dev *baydev) |
436 | { | 420 | { |
437 | int i; | 421 | struct media_bay_info* bay; |
438 | |||
439 | for (i=0; i<media_bay_count; i++) | ||
440 | if (media_bays[i].mdev && base == (unsigned long) media_bays[i].cd_base) { | ||
441 | if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up) | ||
442 | return 0; | ||
443 | media_bays[i].cd_index = -1; | ||
444 | return -EINVAL; | ||
445 | } | ||
446 | 422 | ||
447 | return -ENODEV; | 423 | if (baydev == NULL) |
424 | return; | ||
425 | bay = macio_get_drvdata(baydev); | ||
426 | if (bay == NULL) | ||
427 | return; | ||
428 | mutex_lock(&bay->lock); | ||
429 | bay->user_lock = 1; | ||
448 | } | 430 | } |
449 | EXPORT_SYMBOL_GPL(check_media_bay_by_base); | 431 | EXPORT_SYMBOL_GPL(lock_media_bay); |
450 | 432 | ||
451 | int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, | 433 | void unlock_media_bay(struct macio_dev *baydev) |
452 | int irq, ide_hwif_t *hwif) | ||
453 | { | 434 | { |
454 | int i; | 435 | struct media_bay_info* bay; |
455 | 436 | ||
456 | for (i=0; i<media_bay_count; i++) { | 437 | if (baydev == NULL) |
457 | struct media_bay_info* bay = &media_bays[i]; | 438 | return; |
458 | 439 | bay = macio_get_drvdata(baydev); | |
459 | if (bay->mdev && which_bay == bay->mdev->ofdev.node) { | 440 | if (bay == NULL) |
460 | int timeout = 5000, index = hwif->index; | 441 | return; |
461 | 442 | if (bay->user_lock) { | |
462 | mutex_lock(&bay->lock); | 443 | bay->user_lock = 0; |
463 | 444 | mutex_unlock(&bay->lock); | |
464 | bay->cd_port = hwif; | ||
465 | bay->cd_base = (void __iomem *) base; | ||
466 | bay->cd_irq = irq; | ||
467 | |||
468 | if ((MB_CD != bay->content_id) || bay->state != mb_up) { | ||
469 | mutex_unlock(&bay->lock); | ||
470 | return 0; | ||
471 | } | ||
472 | printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i); | ||
473 | do { | ||
474 | if (MB_IDE_READY(i)) { | ||
475 | bay->cd_index = index; | ||
476 | mutex_unlock(&bay->lock); | ||
477 | return 0; | ||
478 | } | ||
479 | mdelay(1); | ||
480 | } while(--timeout); | ||
481 | printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i); | ||
482 | mutex_unlock(&bay->lock); | ||
483 | return -ENODEV; | ||
484 | } | ||
485 | } | 445 | } |
446 | } | ||
447 | EXPORT_SYMBOL_GPL(unlock_media_bay); | ||
486 | 448 | ||
487 | return -ENODEV; | 449 | static int mb_broadcast_hotplug(struct device *dev, void *data) |
450 | { | ||
451 | struct media_bay_info* bay = data; | ||
452 | struct macio_dev *mdev; | ||
453 | struct macio_driver *drv; | ||
454 | int state; | ||
455 | |||
456 | if (dev->bus != &macio_bus_type) | ||
457 | return 0; | ||
458 | |||
459 | state = bay->state == mb_up ? bay->content_id : MB_NO; | ||
460 | if (state == MB_FD1) | ||
461 | state = MB_FD; | ||
462 | mdev = to_macio_device(dev); | ||
463 | drv = to_macio_driver(dev->driver); | ||
464 | if (dev->driver && drv->mediabay_event) | ||
465 | drv->mediabay_event(mdev, state); | ||
466 | return 0; | ||
488 | } | 467 | } |
489 | EXPORT_SYMBOL_GPL(media_bay_set_ide_infos); | ||
490 | #endif /* CONFIG_BLK_DEV_IDE_PMAC */ | ||
491 | 468 | ||
492 | static void media_bay_step(int i) | 469 | static void media_bay_step(int i) |
493 | { | 470 | { |
@@ -497,8 +474,8 @@ static void media_bay_step(int i) | |||
497 | if (bay->state != mb_powering_down) | 474 | if (bay->state != mb_powering_down) |
498 | poll_media_bay(bay); | 475 | poll_media_bay(bay); |
499 | 476 | ||
500 | /* If timer expired or polling IDE busy, run state machine */ | 477 | /* If timer expired run state machine */ |
501 | if ((bay->state != mb_ide_waiting) && (bay->timer != 0)) { | 478 | if (bay->timer != 0) { |
502 | bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); | 479 | bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); |
503 | if (bay->timer > 0) | 480 | if (bay->timer > 0) |
504 | return; | 481 | return; |
@@ -508,100 +485,50 @@ static void media_bay_step(int i) | |||
508 | switch(bay->state) { | 485 | switch(bay->state) { |
509 | case mb_powering_up: | 486 | case mb_powering_up: |
510 | if (bay->ops->setup_bus(bay, bay->last_value) < 0) { | 487 | if (bay->ops->setup_bus(bay, bay->last_value) < 0) { |
511 | MBDBG("mediabay%d: device not supported (kind:%d)\n", i, bay->content_id); | 488 | pr_debug("mediabay%d: device not supported (kind:%d)\n", |
489 | i, bay->content_id); | ||
512 | set_mb_power(bay, 0); | 490 | set_mb_power(bay, 0); |
513 | break; | 491 | break; |
514 | } | 492 | } |
515 | bay->timer = msecs_to_jiffies(MB_RESET_DELAY); | 493 | bay->timer = msecs_to_jiffies(MB_RESET_DELAY); |
516 | bay->state = mb_enabling_bay; | 494 | bay->state = mb_enabling_bay; |
517 | MBDBG("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); | 495 | pr_debug("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); |
518 | break; | 496 | break; |
519 | case mb_enabling_bay: | 497 | case mb_enabling_bay: |
520 | bay->ops->un_reset(bay); | 498 | bay->ops->un_reset(bay); |
521 | bay->timer = msecs_to_jiffies(MB_SETUP_DELAY); | 499 | bay->timer = msecs_to_jiffies(MB_SETUP_DELAY); |
522 | bay->state = mb_resetting; | 500 | bay->state = mb_resetting; |
523 | MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id); | 501 | pr_debug("mediabay%d: releasing bay reset (kind:%d)\n", |
502 | i, bay->content_id); | ||
524 | break; | 503 | break; |
525 | case mb_resetting: | 504 | case mb_resetting: |
526 | if (bay->content_id != MB_CD) { | 505 | if (bay->content_id != MB_CD) { |
527 | MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); | 506 | pr_debug("mediabay%d: bay is up (kind:%d)\n", i, |
507 | bay->content_id); | ||
528 | bay->state = mb_up; | 508 | bay->state = mb_up; |
509 | device_for_each_child(&bay->mdev->ofdev.dev, | ||
510 | bay, mb_broadcast_hotplug); | ||
529 | break; | 511 | break; |
530 | } | 512 | } |
531 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | 513 | pr_debug("mediabay%d: releasing ATA reset (kind:%d)\n", |
532 | MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id); | 514 | i, bay->content_id); |
533 | bay->ops->un_reset_ide(bay); | 515 | bay->ops->un_reset_ide(bay); |
534 | bay->timer = msecs_to_jiffies(MB_IDE_WAIT); | 516 | bay->timer = msecs_to_jiffies(MB_IDE_WAIT); |
535 | bay->state = mb_ide_resetting; | 517 | bay->state = mb_ide_resetting; |
536 | #else | ||
537 | printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i); | ||
538 | set_mb_power(bay, 0); | ||
539 | #endif /* CONFIG_BLK_DEV_IDE_PMAC */ | ||
540 | break; | 518 | break; |
541 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | 519 | |
542 | case mb_ide_resetting: | 520 | case mb_ide_resetting: |
543 | bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT); | 521 | pr_debug("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); |
544 | bay->state = mb_ide_waiting; | 522 | bay->state = mb_up; |
545 | MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id); | 523 | device_for_each_child(&bay->mdev->ofdev.dev, |
524 | bay, mb_broadcast_hotplug); | ||
546 | break; | 525 | break; |
547 | case mb_ide_waiting: | 526 | |
548 | if (bay->cd_base == NULL) { | ||
549 | bay->timer = 0; | ||
550 | bay->state = mb_up; | ||
551 | MBDBG("mediabay%d: up before IDE init\n", i); | ||
552 | break; | ||
553 | } else if (MB_IDE_READY(i)) { | ||
554 | bay->timer = 0; | ||
555 | bay->state = mb_up; | ||
556 | if (bay->cd_index < 0) { | ||
557 | printk("mediabay %d, registering IDE...\n", i); | ||
558 | pmu_suspend(); | ||
559 | ide_port_scan(bay->cd_port); | ||
560 | if (bay->cd_port->present) | ||
561 | bay->cd_index = bay->cd_port->index; | ||
562 | pmu_resume(); | ||
563 | } | ||
564 | if (bay->cd_index == -1) { | ||
565 | /* We eventually do a retry */ | ||
566 | bay->cd_retry++; | ||
567 | printk("IDE register error\n"); | ||
568 | set_mb_power(bay, 0); | ||
569 | } else { | ||
570 | printk(KERN_DEBUG "media-bay %d is ide%d\n", i, bay->cd_index); | ||
571 | MBDBG("mediabay %d IDE ready\n", i); | ||
572 | } | ||
573 | break; | ||
574 | } else if (bay->timer > 0) | ||
575 | bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); | ||
576 | if (bay->timer <= 0) { | ||
577 | printk("\nIDE Timeout in bay %d !, IDE state is: 0x%02x\n", | ||
578 | i, readb(bay->cd_base + 0x70)); | ||
579 | MBDBG("mediabay%d: nIDE Timeout !\n", i); | ||
580 | set_mb_power(bay, 0); | ||
581 | bay->timer = 0; | ||
582 | } | ||
583 | break; | ||
584 | #endif /* CONFIG_BLK_DEV_IDE_PMAC */ | ||
585 | case mb_powering_down: | 527 | case mb_powering_down: |
586 | bay->state = mb_empty; | 528 | bay->state = mb_empty; |
587 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | 529 | device_for_each_child(&bay->mdev->ofdev.dev, |
588 | if (bay->cd_index >= 0) { | 530 | bay, mb_broadcast_hotplug); |
589 | printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i, | 531 | pr_debug("mediabay%d: end of power down\n", i); |
590 | bay->cd_index); | ||
591 | ide_port_unregister_devices(bay->cd_port); | ||
592 | bay->cd_index = -1; | ||
593 | } | ||
594 | if (bay->cd_retry) { | ||
595 | if (bay->cd_retry > MAX_CD_RETRIES) { | ||
596 | /* Should add an error sound (sort of beep in dmasound) */ | ||
597 | printk("\nmedia-bay %d, IDE device badly inserted or unrecognised\n", i); | ||
598 | } else { | ||
599 | /* Force a new power down/up sequence */ | ||
600 | bay->content_id = MB_NO; | ||
601 | } | ||
602 | } | ||
603 | #endif /* CONFIG_BLK_DEV_IDE_PMAC */ | ||
604 | MBDBG("mediabay%d: end of power down\n", i); | ||
605 | break; | 532 | break; |
606 | } | 533 | } |
607 | } | 534 | } |
@@ -676,11 +603,6 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de | |||
676 | bay->last_value = bay->ops->content(bay); | 603 | bay->last_value = bay->ops->content(bay); |
677 | bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); | 604 | bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); |
678 | bay->state = mb_empty; | 605 | bay->state = mb_empty; |
679 | do { | ||
680 | msleep(MB_POLL_DELAY); | ||
681 | media_bay_step(i); | ||
682 | } while((bay->state != mb_empty) && | ||
683 | (bay->state != mb_up)); | ||
684 | 606 | ||
685 | /* Mark us ready by filling our mdev data */ | 607 | /* Mark us ready by filling our mdev data */ |
686 | macio_set_drvdata(mdev, bay); | 608 | macio_set_drvdata(mdev, bay); |
@@ -725,7 +647,7 @@ static int media_bay_resume(struct macio_dev *mdev) | |||
725 | set_mb_power(bay, 0); | 647 | set_mb_power(bay, 0); |
726 | msleep(MB_POWER_DELAY); | 648 | msleep(MB_POWER_DELAY); |
727 | if (bay->ops->content(bay) != bay->content_id) { | 649 | if (bay->ops->content(bay) != bay->content_id) { |
728 | printk("mediabay%d: content changed during sleep...\n", bay->index); | 650 | printk("mediabay%d: Content changed during sleep...\n", bay->index); |
729 | mutex_unlock(&bay->lock); | 651 | mutex_unlock(&bay->lock); |
730 | return 0; | 652 | return 0; |
731 | } | 653 | } |
@@ -733,9 +655,6 @@ static int media_bay_resume(struct macio_dev *mdev) | |||
733 | bay->last_value = bay->content_id; | 655 | bay->last_value = bay->content_id; |
734 | bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); | 656 | bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); |
735 | bay->timer = msecs_to_jiffies(MB_POWER_DELAY); | 657 | bay->timer = msecs_to_jiffies(MB_POWER_DELAY); |
736 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
737 | bay->cd_retry = 0; | ||
738 | #endif | ||
739 | do { | 658 | do { |
740 | msleep(MB_POLL_DELAY); | 659 | msleep(MB_POLL_DELAY); |
741 | media_bay_step(bay->index); | 660 | media_bay_step(bay->index); |
@@ -823,9 +742,6 @@ static int __init media_bay_init(void) | |||
823 | for (i=0; i<MAX_BAYS; i++) { | 742 | for (i=0; i<MAX_BAYS; i++) { |
824 | memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info)); | 743 | memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info)); |
825 | media_bays[i].content_id = -1; | 744 | media_bays[i].content_id = -1; |
826 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
827 | media_bays[i].cd_index = -1; | ||
828 | #endif | ||
829 | } | 745 | } |
830 | if (!machine_is(powermac)) | 746 | if (!machine_is(powermac)) |
831 | return 0; | 747 | return 0; |
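The reworked interface is keyed on the bay's macio_dev instead of IDE register bases: clients call check_media_bay() for a snapshot of the bay content, bracket critical sections with lock_media_bay()/unlock_media_bay(), and receive hotplug notification through the macio_driver .mediabay_event callback that mb_broadcast_hotplug() invokes for each child device. A hedged client-side sketch; my_rescan() is hypothetical:

static void my_check_bay(struct macio_dev *mdev)
{
	lock_media_bay(mdev->media_bay);	/* keep the bay state machine from switching under us */
	if (check_media_bay(mdev->media_bay) == MB_CD)
		my_rescan(mdev);		/* an ATA device is present and powered up */
	unlock_media_bay(mdev->media_bay);
}

As the comment in check_media_bay() notes, it deliberately takes no lock, so it may also be called from a mediabay_event handler, which already runs under the bay mutex.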
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 95b676a19be7..5ff47ba7f2d0 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c | |||
@@ -79,6 +79,7 @@ struct thermostat { | |||
79 | u8 limits[3]; | 79 | u8 limits[3]; |
80 | int last_speed[2]; | 80 | int last_speed[2]; |
81 | int last_var[2]; | 81 | int last_var[2]; |
82 | int pwm_inv[2]; | ||
82 | }; | 83 | }; |
83 | 84 | ||
84 | static enum {ADT7460, ADT7467} therm_type; | 85 | static enum {ADT7460, ADT7467} therm_type; |
@@ -229,19 +230,23 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan) | |||
229 | 230 | ||
230 | if (speed >= 0) { | 231 | if (speed >= 0) { |
231 | manual = read_reg(th, MANUAL_MODE[fan]); | 232 | manual = read_reg(th, MANUAL_MODE[fan]); |
233 | manual &= ~INVERT_MASK; | ||
232 | write_reg(th, MANUAL_MODE[fan], | 234 | write_reg(th, MANUAL_MODE[fan], |
233 | (manual|MANUAL_MASK) & (~INVERT_MASK)); | 235 | manual | MANUAL_MASK | th->pwm_inv[fan]); |
234 | write_reg(th, FAN_SPD_SET[fan], speed); | 236 | write_reg(th, FAN_SPD_SET[fan], speed); |
235 | } else { | 237 | } else { |
236 | /* back to automatic */ | 238 | /* back to automatic */ |
237 | if(therm_type == ADT7460) { | 239 | if(therm_type == ADT7460) { |
238 | manual = read_reg(th, | 240 | manual = read_reg(th, |
239 | MANUAL_MODE[fan]) & (~MANUAL_MASK); | 241 | MANUAL_MODE[fan]) & (~MANUAL_MASK); |
240 | 242 | manual &= ~INVERT_MASK; | |
243 | manual |= th->pwm_inv[fan]; | ||
241 | write_reg(th, | 244 | write_reg(th, |
242 | MANUAL_MODE[fan], manual|REM_CONTROL[fan]); | 245 | MANUAL_MODE[fan], manual|REM_CONTROL[fan]); |
243 | } else { | 246 | } else { |
244 | manual = read_reg(th, MANUAL_MODE[fan]); | 247 | manual = read_reg(th, MANUAL_MODE[fan]); |
248 | manual &= ~INVERT_MASK; | ||
249 | manual |= th->pwm_inv[fan]; | ||
245 | write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); | 250 | write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); |
246 | } | 251 | } |
247 | } | 252 | } |
@@ -418,6 +423,10 @@ static int probe_thermostat(struct i2c_client *client, | |||
418 | 423 | ||
419 | thermostat = th; | 424 | thermostat = th; |
420 | 425 | ||
426 | /* record invert bit status because fw can corrupt it after suspend */ | ||
427 | th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK; | ||
428 | th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK; | ||
429 | |||
421 | /* be sure to really write fan speed the first time */ | 430 | /* be sure to really write fan speed the first time */ |
422 | th->last_speed[0] = -2; | 431 | th->last_speed[0] = -2; |
423 | th->last_speed[1] = -2; | 432 | th->last_speed[1] = -2; |
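The probe-time snapshot of the INVERT bit exists because the manual-speed path previously cleared the PWM polarity bit unconditionally, and firmware may also flip it across suspend; every MANUAL_MODE write now restores the cached value. A condensed sketch of that read-modify-write, using the driver's own read_reg()/write_reg() helpers:

	u8 manual = read_reg(th, MANUAL_MODE[fan]);

	manual &= ~INVERT_MASK;		/* drop whatever polarity is currently set   */
	manual |= th->pwm_inv[fan];	/* restore the polarity sampled at probe time */
	write_reg(th, MANUAL_MODE[fan], manual | MANUAL_MASK);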
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 6f308a4757ee..db379c381432 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/pm.h> | 37 | #include <linux/pm.h> |
38 | #include <linux/proc_fs.h> | 38 | #include <linux/proc_fs.h> |
39 | #include <linux/seq_file.h> | ||
39 | #include <linux/init.h> | 40 | #include <linux/init.h> |
40 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
41 | #include <linux/device.h> | 42 | #include <linux/device.h> |
@@ -186,17 +187,11 @@ static int init_pmu(void); | |||
186 | static void pmu_start(void); | 187 | static void pmu_start(void); |
187 | static irqreturn_t via_pmu_interrupt(int irq, void *arg); | 188 | static irqreturn_t via_pmu_interrupt(int irq, void *arg); |
188 | static irqreturn_t gpio1_interrupt(int irq, void *arg); | 189 | static irqreturn_t gpio1_interrupt(int irq, void *arg); |
189 | static int proc_get_info(char *page, char **start, off_t off, | 190 | static const struct file_operations pmu_info_proc_fops; |
190 | int count, int *eof, void *data); | 191 | static const struct file_operations pmu_irqstats_proc_fops; |
191 | static int proc_get_irqstats(char *page, char **start, off_t off, | ||
192 | int count, int *eof, void *data); | ||
193 | static void pmu_pass_intr(unsigned char *data, int len); | 192 | static void pmu_pass_intr(unsigned char *data, int len); |
194 | static int proc_get_batt(char *page, char **start, off_t off, | 193 | static const struct file_operations pmu_battery_proc_fops; |
195 | int count, int *eof, void *data); | 194 | static const struct file_operations pmu_options_proc_fops; |
196 | static int proc_read_options(char *page, char **start, off_t off, | ||
197 | int count, int *eof, void *data); | ||
198 | static int proc_write_options(struct file *file, const char __user *buffer, | ||
199 | unsigned long count, void *data); | ||
200 | 195 | ||
201 | #ifdef CONFIG_ADB | 196 | #ifdef CONFIG_ADB |
202 | struct adb_driver via_pmu_driver = { | 197 | struct adb_driver via_pmu_driver = { |
@@ -507,19 +502,15 @@ static int __init via_pmu_dev_init(void) | |||
507 | for (i=0; i<pmu_battery_count; i++) { | 502 | for (i=0; i<pmu_battery_count; i++) { |
508 | char title[16]; | 503 | char title[16]; |
509 | sprintf(title, "battery_%ld", i); | 504 | sprintf(title, "battery_%ld", i); |
510 | proc_pmu_batt[i] = create_proc_read_entry(title, 0, proc_pmu_root, | 505 | proc_pmu_batt[i] = proc_create_data(title, 0, proc_pmu_root, |
511 | proc_get_batt, (void *)i); | 506 | &pmu_battery_proc_fops, (void *)i); |
512 | } | 507 | } |
513 | 508 | ||
514 | proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root, | 509 | proc_pmu_info = proc_create("info", 0, proc_pmu_root, &pmu_info_proc_fops); |
515 | proc_get_info, NULL); | 510 | proc_pmu_irqstats = proc_create("interrupts", 0, proc_pmu_root, |
516 | proc_pmu_irqstats = create_proc_read_entry("interrupts", 0, proc_pmu_root, | 511 | &pmu_irqstats_proc_fops); |
517 | proc_get_irqstats, NULL); | 512 | proc_pmu_options = proc_create("options", 0600, proc_pmu_root, |
518 | proc_pmu_options = create_proc_entry("options", 0600, proc_pmu_root); | 513 | &pmu_options_proc_fops); |
519 | if (proc_pmu_options) { | ||
520 | proc_pmu_options->read_proc = proc_read_options; | ||
521 | proc_pmu_options->write_proc = proc_write_options; | ||
522 | } | ||
523 | } | 514 | } |
524 | return 0; | 515 | return 0; |
525 | } | 516 | } |
@@ -799,27 +790,33 @@ query_battery_state(void) | |||
799 | 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); | 790 | 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); |
800 | } | 791 | } |
801 | 792 | ||
802 | static int | 793 | static int pmu_info_proc_show(struct seq_file *m, void *v) |
803 | proc_get_info(char *page, char **start, off_t off, | ||
804 | int count, int *eof, void *data) | ||
805 | { | 794 | { |
806 | char* p = page; | 795 | seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION); |
807 | 796 | seq_printf(m, "PMU firmware version : %02x\n", pmu_version); | |
808 | p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION); | 797 | seq_printf(m, "AC Power : %d\n", |
809 | p += sprintf(p, "PMU firmware version : %02x\n", pmu_version); | ||
810 | p += sprintf(p, "AC Power : %d\n", | ||
811 | ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); | 798 | ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); |
812 | p += sprintf(p, "Battery count : %d\n", pmu_battery_count); | 799 | seq_printf(m, "Battery count : %d\n", pmu_battery_count); |
800 | |||
801 | return 0; | ||
802 | } | ||
813 | 803 | ||
814 | return p - page; | 804 | static int pmu_info_proc_open(struct inode *inode, struct file *file) |
805 | { | ||
806 | return single_open(file, pmu_info_proc_show, NULL); | ||
815 | } | 807 | } |
816 | 808 | ||
817 | static int | 809 | static const struct file_operations pmu_info_proc_fops = { |
818 | proc_get_irqstats(char *page, char **start, off_t off, | 810 | .owner = THIS_MODULE, |
819 | int count, int *eof, void *data) | 811 | .open = pmu_info_proc_open, |
812 | .read = seq_read, | ||
813 | .llseek = seq_lseek, | ||
814 | .release = single_release, | ||
815 | }; | ||
816 | |||
817 | static int pmu_irqstats_proc_show(struct seq_file *m, void *v) | ||
820 | { | 818 | { |
821 | int i; | 819 | int i; |
822 | char* p = page; | ||
823 | static const char *irq_names[] = { | 820 | static const char *irq_names[] = { |
824 | "Total CB1 triggered events", | 821 | "Total CB1 triggered events", |
825 | "Total GPIO1 triggered events", | 822 | "Total GPIO1 triggered events", |
@@ -835,60 +832,76 @@ proc_get_irqstats(char *page, char **start, off_t off, | |||
835 | }; | 832 | }; |
836 | 833 | ||
837 | for (i=0; i<11; i++) { | 834 | for (i=0; i<11; i++) { |
838 | p += sprintf(p, " %2u: %10u (%s)\n", | 835 | seq_printf(m, " %2u: %10u (%s)\n", |
839 | i, pmu_irq_stats[i], irq_names[i]); | 836 | i, pmu_irq_stats[i], irq_names[i]); |
840 | } | 837 | } |
841 | return p - page; | 838 | return 0; |
842 | } | 839 | } |
843 | 840 | ||
844 | static int | 841 | static int pmu_irqstats_proc_open(struct inode *inode, struct file *file) |
845 | proc_get_batt(char *page, char **start, off_t off, | ||
846 | int count, int *eof, void *data) | ||
847 | { | 842 | { |
848 | long batnum = (long)data; | 843 | return single_open(file, pmu_irqstats_proc_show, NULL); |
849 | char *p = page; | 844 | } |
845 | |||
846 | static const struct file_operations pmu_irqstats_proc_fops = { | ||
847 | .owner = THIS_MODULE, | ||
848 | .open = pmu_irqstats_proc_open, | ||
849 | .read = seq_read, | ||
850 | .llseek = seq_lseek, | ||
851 | .release = single_release, | ||
852 | }; | ||
853 | |||
854 | static int pmu_battery_proc_show(struct seq_file *m, void *v) | ||
855 | { | ||
856 | long batnum = (long)m->private; | ||
850 | 857 | ||
851 | p += sprintf(p, "\n"); | 858 | seq_putc(m, '\n'); |
852 | p += sprintf(p, "flags : %08x\n", | 859 | seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags); |
853 | pmu_batteries[batnum].flags); | 860 | seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge); |
854 | p += sprintf(p, "charge : %d\n", | 861 | seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge); |
855 | pmu_batteries[batnum].charge); | 862 | seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage); |
856 | p += sprintf(p, "max_charge : %d\n", | 863 | seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage); |
857 | pmu_batteries[batnum].max_charge); | 864 | seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining); |
858 | p += sprintf(p, "current : %d\n", | 865 | return 0; |
859 | pmu_batteries[batnum].amperage); | ||
860 | p += sprintf(p, "voltage : %d\n", | ||
861 | pmu_batteries[batnum].voltage); | ||
862 | p += sprintf(p, "time rem. : %d\n", | ||
863 | pmu_batteries[batnum].time_remaining); | ||
864 | |||
865 | return p - page; | ||
866 | } | 866 | } |
867 | 867 | ||
868 | static int | 868 | static int pmu_battery_proc_open(struct inode *inode, struct file *file) |
869 | proc_read_options(char *page, char **start, off_t off, | ||
870 | int count, int *eof, void *data) | ||
871 | { | 869 | { |
872 | char *p = page; | 870 | return single_open(file, pmu_battery_proc_show, PDE(inode)->data); |
871 | } | ||
873 | 872 | ||
873 | static const struct file_operations pmu_battery_proc_fops = { | ||
874 | .owner = THIS_MODULE, | ||
875 | .open = pmu_battery_proc_open, | ||
876 | .read = seq_read, | ||
877 | .llseek = seq_lseek, | ||
878 | .release = single_release, | ||
879 | }; | ||
880 | |||
881 | static int pmu_options_proc_show(struct seq_file *m, void *v) | ||
882 | { | ||
874 | #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) | 883 | #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) |
875 | if (pmu_kind == PMU_KEYLARGO_BASED && | 884 | if (pmu_kind == PMU_KEYLARGO_BASED && |
876 | pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) | 885 | pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) |
877 | p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup); | 886 | seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup); |
878 | #endif | 887 | #endif |
879 | if (pmu_kind == PMU_KEYLARGO_BASED) | 888 | if (pmu_kind == PMU_KEYLARGO_BASED) |
880 | p += sprintf(p, "server_mode=%d\n", option_server_mode); | 889 | seq_printf(m, "server_mode=%d\n", option_server_mode); |
881 | 890 | ||
882 | return p - page; | 891 | return 0; |
883 | } | 892 | } |
884 | 893 | ||
885 | static int | 894 | static int pmu_options_proc_open(struct inode *inode, struct file *file) |
886 | proc_write_options(struct file *file, const char __user *buffer, | 895 | { |
887 | unsigned long count, void *data) | 896 | return single_open(file, pmu_options_proc_show, NULL); |
897 | } | ||
898 | |||
899 | static ssize_t pmu_options_proc_write(struct file *file, | ||
900 | const char __user *buffer, size_t count, loff_t *pos) | ||
888 | { | 901 | { |
889 | char tmp[33]; | 902 | char tmp[33]; |
890 | char *label, *val; | 903 | char *label, *val; |
891 | unsigned long fcount = count; | 904 | size_t fcount = count; |
892 | 905 | ||
893 | if (!count) | 906 | if (!count) |
894 | return -EINVAL; | 907 | return -EINVAL; |
@@ -927,6 +940,15 @@ proc_write_options(struct file *file, const char __user *buffer, | |||
927 | return fcount; | 940 | return fcount; |
928 | } | 941 | } |
929 | 942 | ||
943 | static const struct file_operations pmu_options_proc_fops = { | ||
944 | .owner = THIS_MODULE, | ||
945 | .open = pmu_options_proc_open, | ||
946 | .read = seq_read, | ||
947 | .llseek = seq_lseek, | ||
948 | .release = single_release, | ||
949 | .write = pmu_options_proc_write, | ||
950 | }; | ||
951 | |||
930 | #ifdef CONFIG_ADB | 952 | #ifdef CONFIG_ADB |
931 | /* Send an ADB command */ | 953 | /* Send an ADB command */ |
932 | static int pmu_send_request(struct adb_request *req, int sync) | 954 | static int pmu_send_request(struct adb_request *req, int sync) |
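The conversion follows the standard seq_file idiom: a *_show() routine that emits into the seq_file, an open() wrapping single_open(), and a file_operations built from seq_read/seq_lseek/single_release (plus a .write method for the options file). A minimal sketch of the pattern for a hypothetical entry; per-entry data travels through proc_create_data() and PDE(inode)->data exactly as the battery files do:

static int foo_proc_show(struct seq_file *m, void *v)
{
	/* m->private carries the pointer passed to single_open() */
	seq_printf(m, "value : %ld\n", (long)m->private);
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, PDE(inode)->data);
}

static const struct file_operations foo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registration: proc_create_data("foo", 0, parent, &foo_proc_fops, data); */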
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c index 961fa0e7c2cf..6c68b9e5f5c4 100644 --- a/drivers/macintosh/windfarm_smu_controls.c +++ b/drivers/macintosh/windfarm_smu_controls.c | |||
@@ -202,6 +202,8 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node, | |||
202 | fct->ctrl.name = "cpu-front-fan-1"; | 202 | fct->ctrl.name = "cpu-front-fan-1"; |
203 | else if (!strcmp(l, "CPU A PUMP")) | 203 | else if (!strcmp(l, "CPU A PUMP")) |
204 | fct->ctrl.name = "cpu-pump-0"; | 204 | fct->ctrl.name = "cpu-pump-0"; |
205 | else if (!strcmp(l, "CPU B PUMP")) | ||
206 | fct->ctrl.name = "cpu-pump-1"; | ||
205 | else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || | 207 | else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || |
206 | !strcmp(l, "EXPANSION SLOTS INTAKE")) | 208 | !strcmp(l, "EXPANSION SLOTS INTAKE")) |
207 | fct->ctrl.name = "slots-fan"; | 209 | fct->ctrl.name = "slots-fan"; |
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 0c44d560bf1a..0c7a63c1f12f 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/mmc/core.h> | 22 | #include <linux/mmc/core.h> |
23 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
24 | 24 | ||
25 | MODULE_LICENSE("GPL"); | ||
26 | |||
25 | enum { | 27 | enum { |
26 | CD_GPIO = 0, | 28 | CD_GPIO = 0, |
27 | WP_GPIO, | 29 | WP_GPIO, |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 94058c62620a..28fce65b8594 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -133,6 +133,14 @@ config SPI_LM70_LLP | |||
133 | which interfaces to an LM70 temperature sensor using | 133 | which interfaces to an LM70 temperature sensor using |
134 | a parallel port. | 134 | a parallel port. |
135 | 135 | ||
136 | config SPI_MPC52xx | ||
137 | tristate "Freescale MPC52xx SPI (non-PSC) controller support" | ||
138 | depends on PPC_MPC52xx && SPI | ||
139 | select SPI_MASTER_OF | ||
140 | help | ||
141 | This driver supports the MPC52xx SPI controller in master SPI | ||
142 | mode. | ||
143 | |||
136 | config SPI_MPC52xx_PSC | 144 | config SPI_MPC52xx_PSC |
137 | tristate "Freescale MPC52xx PSC SPI controller" | 145 | tristate "Freescale MPC52xx PSC SPI controller" |
138 | depends on PPC_MPC52xx && EXPERIMENTAL | 146 | depends on PPC_MPC52xx && EXPERIMENTAL |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 21a118269cac..e3f092a9afa5 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o | |||
25 | obj-$(CONFIG_SPI_ORION) += orion_spi.o | 25 | obj-$(CONFIG_SPI_ORION) += orion_spi.o |
26 | obj-$(CONFIG_SPI_PL022) += amba-pl022.o | 26 | obj-$(CONFIG_SPI_PL022) += amba-pl022.o |
27 | obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o | 27 | obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o |
28 | obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o | ||
28 | obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o | 29 | obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o |
29 | obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o | 30 | obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o |
30 | obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o | 31 | obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o |
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 1b74d5ca03f3..f50c81df336a 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/of_platform.h> | 19 | #include <linux/of_platform.h> |
20 | #include <linux/of_spi.h> | ||
20 | #include <linux/workqueue.h> | 21 | #include <linux/workqueue.h> |
21 | #include <linux/completion.h> | 22 | #include <linux/completion.h> |
22 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -313,11 +314,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | |||
313 | struct mpc52xx_psc __iomem *psc = mps->psc; | 314 | struct mpc52xx_psc __iomem *psc = mps->psc; |
314 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | 315 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; |
315 | u32 mclken_div; | 316 | u32 mclken_div; |
316 | int ret = 0; | 317 | int ret; |
317 | 318 | ||
318 | /* default sysclk is 512MHz */ | 319 | /* default sysclk is 512MHz */ |
319 | mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; | 320 | mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; |
320 | mpc52xx_set_psc_clkdiv(psc_id, mclken_div); | 321 | ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div); |
322 | if (ret) | ||
323 | return ret; | ||
321 | 324 | ||
322 | /* Reset the PSC into a known state */ | 325 | /* Reset the PSC into a known state */ |
323 | out_8(&psc->command, MPC52xx_PSC_RST_RX); | 326 | out_8(&psc->command, MPC52xx_PSC_RST_RX); |
@@ -341,7 +344,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | |||
341 | 344 | ||
342 | mps->bits_per_word = 8; | 345 | mps->bits_per_word = 8; |
343 | 346 | ||
344 | return ret; | 347 | return 0; |
345 | } | 348 | } |
346 | 349 | ||
347 | static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) | 350 | static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) |
@@ -410,8 +413,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
410 | goto free_master; | 413 | goto free_master; |
411 | 414 | ||
412 | ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); | 415 | ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); |
413 | if (ret < 0) | 416 | if (ret < 0) { |
417 | dev_err(dev, "can't configure PSC! Is it capable of SPI?\n"); | ||
414 | goto free_irq; | 418 | goto free_irq; |
419 | } | ||
415 | 420 | ||
416 | spin_lock_init(&mps->lock); | 421 | spin_lock_init(&mps->lock); |
417 | init_completion(&mps->done); | 422 | init_completion(&mps->done); |
@@ -464,10 +469,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, | |||
464 | const u32 *regaddr_p; | 469 | const u32 *regaddr_p; |
465 | u64 regaddr64, size64; | 470 | u64 regaddr64, size64; |
466 | s16 id = -1; | 471 | s16 id = -1; |
472 | int rc; | ||
467 | 473 | ||
468 | regaddr_p = of_get_address(op->node, 0, &size64, NULL); | 474 | regaddr_p = of_get_address(op->node, 0, &size64, NULL); |
469 | if (!regaddr_p) { | 475 | if (!regaddr_p) { |
470 | printk(KERN_ERR "Invalid PSC address\n"); | 476 | dev_err(&op->dev, "Invalid PSC address\n"); |
471 | return -EINVAL; | 477 | return -EINVAL; |
472 | } | 478 | } |
473 | regaddr64 = of_translate_address(op->node, regaddr_p); | 479 | regaddr64 = of_translate_address(op->node, regaddr_p); |
@@ -478,15 +484,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, | |||
478 | 484 | ||
479 | psc_nump = of_get_property(op->node, "cell-index", NULL); | 485 | psc_nump = of_get_property(op->node, "cell-index", NULL); |
480 | if (!psc_nump || *psc_nump > 5) { | 486 | if (!psc_nump || *psc_nump > 5) { |
481 | printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid " | 487 | dev_err(&op->dev, "Invalid cell-index property\n"); |
482 | "cell-index property\n", op->node->full_name); | ||
483 | return -EINVAL; | 488 | return -EINVAL; |
484 | } | 489 | } |
485 | id = *psc_nump + 1; | 490 | id = *psc_nump + 1; |
486 | } | 491 | } |
487 | 492 | ||
488 | return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, | 493 | rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, |
489 | irq_of_parse_and_map(op->node, 0), id); | 494 | irq_of_parse_and_map(op->node, 0), id); |
495 | if (rc == 0) | ||
496 | of_register_spi_devices(dev_get_drvdata(&op->dev), op->node); | ||
497 | |||
498 | return rc; | ||
490 | } | 499 | } |
491 | 500 | ||
492 | static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) | 501 | static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) |
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c new file mode 100644 index 000000000000..ef8379b2c172 --- /dev/null +++ b/drivers/spi/mpc52xx_spi.c | |||
@@ -0,0 +1,520 @@ | |||
1 | /* | ||
2 | * MPC52xx SPI bus driver. | ||
3 | * | ||
4 | * Copyright (C) 2008 Secret Lab Technologies Ltd. | ||
5 | * | ||
6 | * This file is released under the GPLv2 | ||
7 | * | ||
8 | * This is the driver for the MPC5200's dedicated SPI controller. | ||
9 | * | ||
10 | * Note: this driver does not support the MPC5200 PSC in SPI mode. For | ||
11 | * that driver see drivers/spi/mpc52xx_psc_spi.c | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/of_platform.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/spi/spi.h> | ||
21 | #include <linux/spi/mpc52xx_spi.h> | ||
22 | #include <linux/of_spi.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <asm/time.h> | ||
25 | #include <asm/mpc52xx.h> | ||
26 | |||
27 | MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); | ||
28 | MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver"); | ||
29 | MODULE_LICENSE("GPL"); | ||
30 | |||
31 | /* Register offsets */ | ||
32 | #define SPI_CTRL1 0x00 | ||
33 | #define SPI_CTRL1_SPIE (1 << 7) | ||
34 | #define SPI_CTRL1_SPE (1 << 6) | ||
35 | #define SPI_CTRL1_MSTR (1 << 4) | ||
36 | #define SPI_CTRL1_CPOL (1 << 3) | ||
37 | #define SPI_CTRL1_CPHA (1 << 2) | ||
38 | #define SPI_CTRL1_SSOE (1 << 1) | ||
39 | #define SPI_CTRL1_LSBFE (1 << 0) | ||
40 | |||
41 | #define SPI_CTRL2 0x01 | ||
42 | #define SPI_BRR 0x04 | ||
43 | |||
44 | #define SPI_STATUS 0x05 | ||
45 | #define SPI_STATUS_SPIF (1 << 7) | ||
46 | #define SPI_STATUS_WCOL (1 << 6) | ||
47 | #define SPI_STATUS_MODF (1 << 4) | ||
48 | |||
49 | #define SPI_DATA 0x09 | ||
50 | #define SPI_PORTDATA 0x0d | ||
51 | #define SPI_DATADIR 0x10 | ||
52 | |||
53 | /* FSM state return values */ | ||
54 | #define FSM_STOP 0 /* Nothing more for the state machine to */ | ||
55 | /* do. If something interesting happens */ | ||
56 | /* then an IRQ will be received */ | ||
57 | #define FSM_POLL 1 /* need to poll for completion, an IRQ is */ | ||
58 | /* not expected */ | ||
59 | #define FSM_CONTINUE 2 /* Keep iterating the state machine */ | ||
60 | |||
61 | /* Driver internal data */ | ||
62 | struct mpc52xx_spi { | ||
63 | struct spi_master *master; | ||
64 | u32 sysclk; | ||
65 | void __iomem *regs; | ||
66 | int irq0; /* MODF irq */ | ||
67 | int irq1; /* SPIF irq */ | ||
68 | int ipb_freq; | ||
69 | |||
70 | /* Statistics */ | ||
71 | int msg_count; | ||
72 | int wcol_count; | ||
73 | int wcol_ticks; | ||
74 | u32 wcol_tx_timestamp; | ||
75 | int modf_count; | ||
76 | int byte_count; | ||
77 | |||
78 | struct list_head queue; /* queue of pending messages */ | ||
79 | spinlock_t lock; | ||
80 | struct work_struct work; | ||
81 | |||
82 | |||
83 | /* Details of current transfer (length, and buffer pointers) */ | ||
84 | struct spi_message *message; /* current message */ | ||
85 | struct spi_transfer *transfer; /* current transfer */ | ||
86 | int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data); | ||
87 | int len; | ||
88 | int timestamp; | ||
89 | u8 *rx_buf; | ||
90 | const u8 *tx_buf; | ||
91 | int cs_change; | ||
92 | }; | ||
93 | |||
94 | /* | ||
95 | * CS control function | ||
96 | */ | ||
97 | static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value) | ||
98 | { | ||
99 | out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08); | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Start a new transfer. This is called both by the idle state | ||
104 | * for the first transfer in a message, and by the wait state when the | ||
105 | * previous transfer in a message is complete. | ||
106 | */ | ||
107 | static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms) | ||
108 | { | ||
109 | ms->rx_buf = ms->transfer->rx_buf; | ||
110 | ms->tx_buf = ms->transfer->tx_buf; | ||
111 | ms->len = ms->transfer->len; | ||
112 | |||
113 | /* Activate the chip select */ | ||
114 | if (ms->cs_change) | ||
115 | mpc52xx_spi_chipsel(ms, 1); | ||
116 | ms->cs_change = ms->transfer->cs_change; | ||
117 | |||
118 | /* Write out the first byte */ | ||
119 | ms->wcol_tx_timestamp = get_tbl(); | ||
120 | if (ms->tx_buf) | ||
121 | out_8(ms->regs + SPI_DATA, *ms->tx_buf++); | ||
122 | else | ||
123 | out_8(ms->regs + SPI_DATA, 0); | ||
124 | } | ||
125 | |||
126 | /* Forward declaration of state handlers */ | ||
127 | static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, | ||
128 | u8 status, u8 data); | ||
129 | static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, | ||
130 | u8 status, u8 data); | ||
131 | |||
132 | /* | ||
133 | * IDLE state | ||
134 | * | ||
135 | * No transfers are in progress; if another transfer is pending then retrieve | ||
136 | * it and kick it off. Otherwise, stop processing the state machine | ||
137 | */ | ||
138 | static int | ||
139 | mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) | ||
140 | { | ||
141 | struct spi_device *spi; | ||
142 | int spr, sppr; | ||
143 | u8 ctrl1; | ||
144 | |||
145 | if (status && (irq != NO_IRQ)) | ||
146 | dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", | ||
147 | status); | ||
148 | |||
149 | /* Check if there is another transfer waiting. */ | ||
150 | if (list_empty(&ms->queue)) | ||
151 | return FSM_STOP; | ||
152 | |||
153 | /* get the head of the queue */ | ||
154 | ms->message = list_first_entry(&ms->queue, struct spi_message, queue); | ||
155 | list_del_init(&ms->message->queue); | ||
156 | |||
157 | /* Setup the controller parameters */ | ||
158 | ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; | ||
159 | spi = ms->message->spi; | ||
160 | if (spi->mode & SPI_CPHA) | ||
161 | ctrl1 |= SPI_CTRL1_CPHA; | ||
162 | if (spi->mode & SPI_CPOL) | ||
163 | ctrl1 |= SPI_CTRL1_CPOL; | ||
164 | if (spi->mode & SPI_LSB_FIRST) | ||
165 | ctrl1 |= SPI_CTRL1_LSBFE; | ||
166 | out_8(ms->regs + SPI_CTRL1, ctrl1); | ||
167 | |||
168 | /* Setup the controller speed */ | ||
169 | /* minimum divider is '2'. Also, add '1' to force rounding the | ||
170 | * divider up. */ | ||
171 | sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1; | ||
172 | spr = 0; | ||
173 | if (sppr < 1) | ||
174 | sppr = 1; | ||
175 | while (((sppr - 1) & ~0x7) != 0) { | ||
176 | sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */ | ||
177 | spr++; | ||
178 | } | ||
179 | sppr--; /* sppr quantity in register is offset by 1 */ | ||
180 | if (spr > 7) { | ||
181 | /* Don't overrun limits of SPI baudrate register */ | ||
182 | spr = 7; | ||
183 | sppr = 7; | ||
184 | } | ||
185 | out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */ | ||
186 | |||
187 | ms->cs_change = 1; | ||
188 | ms->transfer = container_of(ms->message->transfers.next, | ||
189 | struct spi_transfer, transfer_list); | ||
190 | |||
191 | mpc52xx_spi_start_transfer(ms); | ||
192 | ms->state = mpc52xx_spi_fsmstate_transfer; | ||
193 | |||
194 | return FSM_CONTINUE; | ||
195 | } | ||
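For reference, the SPI_BRR value programmed above reduces to the following standalone calculation, shown here as a minimal user-space sketch; the 66 MHz IPB clock and 1 MHz device speed are illustrative values, not taken from this patch. The comment above notes that the minimum divider is 2, which matches a hardware divisor of (SPPR+1) * 2^(SPR+1); the loop keeps halving the candidate prescaler until it fits in the 3-bit SPPR field:

    #include <stdio.h>

    /* Mirror of the SPPR/SPR selection in mpc52xx_spi_fsmstate_idle() above. */
    static unsigned char mpc52xx_spi_brr(unsigned int ipb_freq, unsigned int speed_hz)
    {
        int sppr, spr = 0;

        sppr = ((ipb_freq / speed_hz) + 1) >> 1; /* half the target divisor, rounded up */
        if (sppr < 1)
            sppr = 1;
        while (((sppr - 1) & ~0x7) != 0) {       /* shrink until SPPR fits in 3 bits */
            sppr = (sppr + 1) >> 1;
            spr++;
        }
        sppr--;                                  /* register field is offset by one */
        if (spr > 7)                             /* clamp to the slowest possible rate */
            spr = sppr = 7;
        return sppr << 4 | spr;
    }

    int main(void)
    {
        /* 66 MHz IPB, 1 MHz requested: returns 0x43, i.e. divide by 5 * 2^4 = 80 (825 kHz) */
        printf("BRR = 0x%02x\n", mpc52xx_spi_brr(66000000, 1000000));
        return 0;
    }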
196 | |||
197 | /* | ||
198 | * TRANSFER state | ||
199 | * | ||
200 | * In the middle of a transfer. If the SPI core has completed processing | ||
201 | * a byte, then read out the received data and write out the next byte | ||
202 | * (unless this transfer is finished; in which case go on to the wait | ||
203 | * state) | ||
204 | */ | ||
205 | static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, | ||
206 | u8 status, u8 data) | ||
207 | { | ||
208 | if (!status) | ||
209 | return ms->irq0 ? FSM_STOP : FSM_POLL; | ||
210 | |||
211 | if (status & SPI_STATUS_WCOL) { | ||
212 | /* The SPI controller is stoopid. At slower speeds, it may | ||
213 | * raise the SPIF flag before the state machine is actually | ||
214 | * finished, which causes a collision (internal to the state | ||
215 | * machine only). The manual recommends inserting a delay | ||
216 | * between receiving the interrupt and sending the next byte, | ||
217 | * but it can also be worked around simply by retrying the | ||
218 | * transfer which is what we do here. */ | ||
219 | ms->wcol_count++; | ||
220 | ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp; | ||
221 | ms->wcol_tx_timestamp = get_tbl(); | ||
222 | data = 0; | ||
223 | if (ms->tx_buf) | ||
224 | data = *(ms->tx_buf-1); | ||
225 | out_8(ms->regs + SPI_DATA, data); /* try again */ | ||
226 | return FSM_CONTINUE; | ||
227 | } else if (status & SPI_STATUS_MODF) { | ||
228 | ms->modf_count++; | ||
229 | dev_err(&ms->master->dev, "mode fault\n"); | ||
230 | mpc52xx_spi_chipsel(ms, 0); | ||
231 | ms->message->status = -EIO; | ||
232 | ms->message->complete(ms->message->context); | ||
233 | ms->state = mpc52xx_spi_fsmstate_idle; | ||
234 | return FSM_CONTINUE; | ||
235 | } | ||
236 | |||
237 | /* Read data out of the spi device */ | ||
238 | ms->byte_count++; | ||
239 | if (ms->rx_buf) | ||
240 | *ms->rx_buf++ = data; | ||
241 | |||
242 | /* Is the transfer complete? */ | ||
243 | ms->len--; | ||
244 | if (ms->len == 0) { | ||
245 | ms->timestamp = get_tbl(); | ||
246 | ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec; | ||
247 | ms->state = mpc52xx_spi_fsmstate_wait; | ||
248 | return FSM_CONTINUE; | ||
249 | } | ||
250 | |||
251 | /* Write out the next byte */ | ||
252 | ms->wcol_tx_timestamp = get_tbl(); | ||
253 | if (ms->tx_buf) | ||
254 | out_8(ms->regs + SPI_DATA, *ms->tx_buf++); | ||
255 | else | ||
256 | out_8(ms->regs + SPI_DATA, 0); | ||
257 | |||
258 | return FSM_CONTINUE; | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * WAIT state | ||
263 | * | ||
264 | * A transfer has completed; need to wait for the delay period to complete | ||
265 | * before starting the next transfer | ||
266 | */ | ||
267 | static int | ||
268 | mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) | ||
269 | { | ||
270 | if (status && irq) | ||
271 | dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", | ||
272 | status); | ||
273 | |||
274 | if (((int)get_tbl()) - ms->timestamp < 0) | ||
275 | return FSM_POLL; | ||
276 | |||
277 | ms->message->actual_length += ms->transfer->len; | ||
278 | |||
279 | /* Check if there is another transfer in this message. If there | ||
280 | * isn't, then deactivate CS, notify sender, and drop back to idle | ||
281 | * to start the next message. */ | ||
282 | if (ms->transfer->transfer_list.next == &ms->message->transfers) { | ||
283 | ms->msg_count++; | ||
284 | mpc52xx_spi_chipsel(ms, 0); | ||
285 | ms->message->status = 0; | ||
286 | ms->message->complete(ms->message->context); | ||
287 | ms->state = mpc52xx_spi_fsmstate_idle; | ||
288 | return FSM_CONTINUE; | ||
289 | } | ||
290 | |||
291 | /* There is another transfer; kick it off */ | ||
292 | |||
293 | if (ms->cs_change) | ||
294 | mpc52xx_spi_chipsel(ms, 0); | ||
295 | |||
296 | ms->transfer = container_of(ms->transfer->transfer_list.next, | ||
297 | struct spi_transfer, transfer_list); | ||
298 | mpc52xx_spi_start_transfer(ms); | ||
299 | ms->state = mpc52xx_spi_fsmstate_transfer; | ||
300 | return FSM_CONTINUE; | ||
301 | } | ||
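The time check above uses the usual wrap-safe idiom for a free-running counter: compare the signed difference between "now" and the deadline rather than the raw values. A minimal sketch of the same pattern, with get_tbl() and ms->timestamp replaced by plain 32-bit arguments; this stays correct as long as the delay is well below half the counter period, which delay_usecs easily is:

    #include <stdbool.h>
    #include <stdint.h>

    /* True once "now" has passed "deadline", even if the counter wrapped in between. */
    static bool deadline_reached(uint32_t now, uint32_t deadline)
    {
        return (int32_t)(now - deadline) >= 0;
    }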
302 | |||
303 | /** | ||
304 | * mpc52xx_spi_fsm_process - Finite State Machine iteration function | ||
305 | * @irq: irq number that triggered the FSM or 0 for polling | ||
306 | * @ms: pointer to mpc52xx_spi driver data | ||
307 | */ | ||
308 | static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms) | ||
309 | { | ||
310 | int rc = FSM_CONTINUE; | ||
311 | u8 status, data; | ||
312 | |||
313 | while (rc == FSM_CONTINUE) { | ||
314 | /* Interrupt cleared by read of STATUS followed by | ||
315 | * read of DATA registers */ | ||
316 | status = in_8(ms->regs + SPI_STATUS); | ||
317 | data = in_8(ms->regs + SPI_DATA); | ||
318 | rc = ms->state(irq, ms, status, data); | ||
319 | } | ||
320 | |||
321 | if (rc == FSM_POLL) | ||
322 | schedule_work(&ms->work); | ||
323 | } | ||
324 | |||
325 | /** | ||
326 | * mpc52xx_spi_irq - IRQ handler | ||
327 | */ | ||
328 | static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms) | ||
329 | { | ||
330 | struct mpc52xx_spi *ms = _ms; | ||
331 | spin_lock(&ms->lock); | ||
332 | mpc52xx_spi_fsm_process(irq, ms); | ||
333 | spin_unlock(&ms->lock); | ||
334 | return IRQ_HANDLED; | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * mpc52xx_spi_wq - Workqueue function for polling the state machine | ||
339 | */ | ||
340 | static void mpc52xx_spi_wq(struct work_struct *work) | ||
341 | { | ||
342 | struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); | ||
343 | unsigned long flags; | ||
344 | |||
345 | spin_lock_irqsave(&ms->lock, flags); | ||
346 | mpc52xx_spi_fsm_process(0, ms); | ||
347 | spin_unlock_irqrestore(&ms->lock, flags); | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * spi_master ops | ||
352 | */ | ||
353 | |||
354 | static int mpc52xx_spi_setup(struct spi_device *spi) | ||
355 | { | ||
356 | if (spi->bits_per_word % 8) | ||
357 | return -EINVAL; | ||
358 | |||
359 | if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) | ||
360 | return -EINVAL; | ||
361 | |||
362 | if (spi->chip_select >= spi->master->num_chipselect) | ||
363 | return -EINVAL; | ||
364 | |||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) | ||
369 | { | ||
370 | struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master); | ||
371 | unsigned long flags; | ||
372 | |||
373 | m->actual_length = 0; | ||
374 | m->status = -EINPROGRESS; | ||
375 | |||
376 | spin_lock_irqsave(&ms->lock, flags); | ||
377 | list_add_tail(&m->queue, &ms->queue); | ||
378 | spin_unlock_irqrestore(&ms->lock, flags); | ||
379 | schedule_work(&ms->work); | ||
380 | |||
381 | return 0; | ||
382 | } | ||
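A message reaches this queue through the ordinary SPI core entry points, which call master->transfer(). A minimal sketch of a protocol driver submitting a one-byte write; the spi_device pointer and the DMA-safe buffer are assumptions for illustration:

    #include <linux/spi/spi.h>

    static int my_send_byte(struct spi_device *spi, const u8 *buf /* DMA-safe, 1 byte */)
    {
        struct spi_transfer t = {
            .tx_buf = buf,
            .len    = 1,
        };
        struct spi_message m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spi_sync(spi, &m); /* the core hands "m" to mpc52xx_spi_transfer() */
    }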
383 | |||
384 | /* | ||
385 | * OF Platform Bus Binding | ||
386 | */ | ||
387 | static int __devinit mpc52xx_spi_probe(struct of_device *op, | ||
388 | const struct of_device_id *match) | ||
389 | { | ||
390 | struct spi_master *master; | ||
391 | struct mpc52xx_spi *ms; | ||
392 | void __iomem *regs; | ||
393 | int rc; | ||
394 | |||
395 | /* MMIO registers */ | ||
396 | dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); | ||
397 | regs = of_iomap(op->node, 0); | ||
398 | if (!regs) | ||
399 | return -ENODEV; | ||
400 | |||
401 | /* initialize the device */ | ||
402 | out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR); | ||
403 | out_8(regs + SPI_CTRL2, 0x0); | ||
404 | out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ | ||
405 | out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ | ||
406 | |||
407 | /* Clear the status register and re-read it to check for a MODF | ||
408 | * failure. This driver cannot currently handle multiple masters | ||
409 | * on the SPI bus. This fault will also occur if the SPI signals | ||
410 | * are not connected to any pins (port_config setting) */ | ||
411 | in_8(regs + SPI_STATUS); | ||
412 | in_8(regs + SPI_DATA); | ||
413 | if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { | ||
414 | dev_err(&op->dev, "mode fault; is port_config correct?\n"); | ||
415 | rc = -EIO; | ||
416 | goto err_init; | ||
417 | } | ||
418 | |||
419 | dev_dbg(&op->dev, "allocating spi_master struct\n"); | ||
420 | master = spi_alloc_master(&op->dev, sizeof *ms); | ||
421 | if (!master) { | ||
422 | rc = -ENOMEM; | ||
423 | goto err_alloc; | ||
424 | } | ||
425 | master->bus_num = -1; | ||
426 | master->num_chipselect = 1; | ||
427 | master->setup = mpc52xx_spi_setup; | ||
428 | master->transfer = mpc52xx_spi_transfer; | ||
429 | dev_set_drvdata(&op->dev, master); | ||
430 | |||
431 | ms = spi_master_get_devdata(master); | ||
432 | ms->master = master; | ||
433 | ms->regs = regs; | ||
434 | ms->irq0 = irq_of_parse_and_map(op->node, 0); | ||
435 | ms->irq1 = irq_of_parse_and_map(op->node, 1); | ||
436 | ms->state = mpc52xx_spi_fsmstate_idle; | ||
437 | ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node); | ||
438 | spin_lock_init(&ms->lock); | ||
439 | INIT_LIST_HEAD(&ms->queue); | ||
440 | INIT_WORK(&ms->work, mpc52xx_spi_wq); | ||
441 | |||
442 | /* Decide if interrupts can be used */ | ||
443 | if (ms->irq0 && ms->irq1) { | ||
444 | rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, | ||
445 | "mpc5200-spi-modf", ms); | ||
446 | rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, | ||
447 | "mpc5200-spi-spiF", ms); | ||
448 | if (rc) { | ||
449 | free_irq(ms->irq0, ms); | ||
450 | free_irq(ms->irq1, ms); | ||
451 | ms->irq0 = ms->irq1 = 0; | ||
452 | } | ||
453 | } else { | ||
454 | /* operate in polled mode */ | ||
455 | ms->irq0 = ms->irq1 = 0; | ||
456 | } | ||
457 | |||
458 | if (!ms->irq0) | ||
459 | dev_info(&op->dev, "using polled mode\n"); | ||
460 | |||
461 | dev_dbg(&op->dev, "registering spi_master struct\n"); | ||
462 | rc = spi_register_master(master); | ||
463 | if (rc) | ||
464 | goto err_register; | ||
465 | |||
466 | of_register_spi_devices(master, op->node); | ||
467 | dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); | ||
468 | |||
469 | return rc; | ||
470 | |||
471 | err_register: | ||
472 | dev_err(&ms->master->dev, "initialization failed\n"); | ||
473 | spi_master_put(master); | ||
474 | err_alloc: | ||
475 | err_init: | ||
476 | iounmap(regs); | ||
477 | return rc; | ||
478 | } | ||
479 | |||
480 | static int __devexit mpc52xx_spi_remove(struct of_device *op) | ||
481 | { | ||
482 | struct spi_master *master = dev_get_drvdata(&op->dev); | ||
483 | struct mpc52xx_spi *ms = spi_master_get_devdata(master); | ||
484 | |||
485 | free_irq(ms->irq0, ms); | ||
486 | free_irq(ms->irq1, ms); | ||
487 | |||
488 | spi_unregister_master(master); | ||
489 | spi_master_put(master); | ||
490 | iounmap(ms->regs); | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static struct of_device_id mpc52xx_spi_match[] __devinitdata = { | ||
496 | { .compatible = "fsl,mpc5200-spi", }, | ||
497 | {} | ||
498 | }; | ||
499 | MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); | ||
500 | |||
501 | static struct of_platform_driver mpc52xx_spi_of_driver = { | ||
502 | .owner = THIS_MODULE, | ||
503 | .name = "mpc52xx-spi", | ||
504 | .match_table = mpc52xx_spi_match, | ||
505 | .probe = mpc52xx_spi_probe, | ||
506 | .remove = __exit_p(mpc52xx_spi_remove), | ||
507 | }; | ||
508 | |||
509 | static int __init mpc52xx_spi_init(void) | ||
510 | { | ||
511 | return of_register_platform_driver(&mpc52xx_spi_of_driver); | ||
512 | } | ||
513 | module_init(mpc52xx_spi_init); | ||
514 | |||
515 | static void __exit mpc52xx_spi_exit(void) | ||
516 | { | ||
517 | of_unregister_platform_driver(&mpc52xx_spi_of_driver); | ||
518 | } | ||
519 | module_exit(mpc52xx_spi_exit); | ||
520 | |||
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 46b8c5c2f45e..5a143b9f6361 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c | |||
@@ -148,7 +148,8 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, | |||
148 | { | 148 | { |
149 | u8 bits_per_word; | 149 | u8 bits_per_word; |
150 | 150 | ||
151 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; | 151 | bits_per_word = (t && t->bits_per_word) |
152 | ? t->bits_per_word : spi->bits_per_word; | ||
152 | if (bits_per_word != 8) { | 153 | if (bits_per_word != 8) { |
153 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", | 154 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", |
154 | __func__, bits_per_word); | 155 | __func__, bits_per_word); |
diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 4d8c54c23dd7..b043ac83c412 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c | |||
@@ -282,8 +282,17 @@ static int offb_set_par(struct fb_info *info) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | static void offb_destroy(struct fb_info *info) | ||
286 | { | ||
287 | if (info->screen_base) | ||
288 | iounmap(info->screen_base); | ||
289 | release_mem_region(info->aperture_base, info->aperture_size); | ||
290 | framebuffer_release(info); | ||
291 | } | ||
292 | |||
285 | static struct fb_ops offb_ops = { | 293 | static struct fb_ops offb_ops = { |
286 | .owner = THIS_MODULE, | 294 | .owner = THIS_MODULE, |
295 | .fb_destroy = offb_destroy, | ||
287 | .fb_setcolreg = offb_setcolreg, | 296 | .fb_setcolreg = offb_setcolreg, |
288 | .fb_set_par = offb_set_par, | 297 | .fb_set_par = offb_set_par, |
289 | .fb_blank = offb_blank, | 298 | .fb_blank = offb_blank, |
@@ -482,10 +491,14 @@ static void __init offb_init_fb(const char *name, const char *full_name, | |||
482 | var->sync = 0; | 491 | var->sync = 0; |
483 | var->vmode = FB_VMODE_NONINTERLACED; | 492 | var->vmode = FB_VMODE_NONINTERLACED; |
484 | 493 | ||
494 | /* set offb aperture size for generic probing */ | ||
495 | info->aperture_base = address; | ||
496 | info->aperture_size = fix->smem_len; | ||
497 | |||
485 | info->fbops = &offb_ops; | 498 | info->fbops = &offb_ops; |
486 | info->screen_base = ioremap(address, fix->smem_len); | 499 | info->screen_base = ioremap(address, fix->smem_len); |
487 | info->pseudo_palette = (void *) (info + 1); | 500 | info->pseudo_palette = (void *) (info + 1); |
488 | info->flags = FBINFO_DEFAULT | foreign_endian; | 501 | info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian; |
489 | 502 | ||
490 | fb_alloc_cmap(&info->cmap, 256, 0); | 503 | fb_alloc_cmap(&info->cmap, 256, 0); |
491 | 504 | ||
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 3711b888d482..d958b76430a2 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -861,8 +861,10 @@ config GEF_WDT | |||
861 | Watchdog timer found in a number of GE Fanuc single board computers. | 861 | Watchdog timer found in a number of GE Fanuc single board computers. |
862 | 862 | ||
863 | config MPC5200_WDT | 863 | config MPC5200_WDT |
864 | tristate "MPC5200 Watchdog Timer" | 864 | bool "MPC52xx Watchdog Timer" |
865 | depends on PPC_MPC52xx | 865 | depends on PPC_MPC52xx |
866 | help | ||
867 | Use General Purpose Timer (GPT) 0 on the MPC5200 as the watchdog. | ||
866 | 868 | ||
867 | config 8xxx_WDT | 869 | config 8xxx_WDT |
868 | tristate "MPC8xxx Platform Watchdog Timer" | 870 | tristate "MPC8xxx Platform Watchdog Timer" |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 699199b1baa6..89c045dc468e 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -118,7 +118,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o | |||
118 | 118 | ||
119 | # POWERPC Architecture | 119 | # POWERPC Architecture |
120 | obj-$(CONFIG_GEF_WDT) += gef_wdt.o | 120 | obj-$(CONFIG_GEF_WDT) += gef_wdt.o |
121 | obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o | ||
122 | obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o | 121 | obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o |
123 | obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o | 122 | obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o |
124 | obj-$(CONFIG_PIKA_WDT) += pika_wdt.o | 123 | obj-$(CONFIG_PIKA_WDT) += pika_wdt.o |
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c deleted file mode 100644 index fa9c47ce0ae7..000000000000 --- a/drivers/watchdog/mpc5200_wdt.c +++ /dev/null | |||
@@ -1,293 +0,0 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/miscdevice.h> | ||
4 | #include <linux/watchdog.h> | ||
5 | #include <linux/io.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/of_platform.h> | ||
8 | #include <linux/uaccess.h> | ||
9 | #include <asm/mpc52xx.h> | ||
10 | |||
11 | |||
12 | #define GPT_MODE_WDT (1 << 15) | ||
13 | #define GPT_MODE_CE (1 << 12) | ||
14 | #define GPT_MODE_MS_TIMER (0x4) | ||
15 | |||
16 | |||
17 | struct mpc5200_wdt { | ||
18 | unsigned count; /* timer ticks before watchdog kicks in */ | ||
19 | long ipb_freq; | ||
20 | struct miscdevice miscdev; | ||
21 | struct resource mem; | ||
22 | struct mpc52xx_gpt __iomem *regs; | ||
23 | spinlock_t io_lock; | ||
24 | }; | ||
25 | |||
26 | /* is_active stores wether or not the /dev/watchdog device is opened */ | ||
27 | static unsigned long is_active; | ||
28 | |||
29 | /* misc devices don't provide a way, to get back to 'dev' or 'miscdev' from | ||
30 | * file operations, which sucks. But there can be max 1 watchdog anyway, so... | ||
31 | */ | ||
32 | static struct mpc5200_wdt *wdt_global; | ||
33 | |||
34 | |||
35 | /* helper to calculate timeout in timer counts */ | ||
36 | static void mpc5200_wdt_set_timeout(struct mpc5200_wdt *wdt, int timeout) | ||
37 | { | ||
38 | /* use biggest prescaler of 64k */ | ||
39 | wdt->count = (wdt->ipb_freq + 0xffff) / 0x10000 * timeout; | ||
40 | |||
41 | if (wdt->count > 0xffff) | ||
42 | wdt->count = 0xffff; | ||
43 | } | ||
44 | /* return timeout in seconds (calculated from timer count) */ | ||
45 | static int mpc5200_wdt_get_timeout(struct mpc5200_wdt *wdt) | ||
46 | { | ||
47 | return wdt->count * 0x10000 / wdt->ipb_freq; | ||
48 | } | ||
49 | |||
50 | |||
51 | /* watchdog operations */ | ||
52 | static int mpc5200_wdt_start(struct mpc5200_wdt *wdt) | ||
53 | { | ||
54 | spin_lock(&wdt->io_lock); | ||
55 | /* disable */ | ||
56 | out_be32(&wdt->regs->mode, 0); | ||
57 | /* set timeout, with maximum prescaler */ | ||
58 | out_be32(&wdt->regs->count, 0x0 | wdt->count); | ||
59 | /* enable watchdog */ | ||
60 | out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT | | ||
61 | GPT_MODE_MS_TIMER); | ||
62 | spin_unlock(&wdt->io_lock); | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt) | ||
67 | { | ||
68 | spin_lock(&wdt->io_lock); | ||
69 | /* writing A5 to OCPW resets the watchdog */ | ||
70 | out_be32(&wdt->regs->mode, 0xA5000000 | | ||
71 | (0xffffff & in_be32(&wdt->regs->mode))); | ||
72 | spin_unlock(&wdt->io_lock); | ||
73 | return 0; | ||
74 | } | ||
75 | static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt) | ||
76 | { | ||
77 | spin_lock(&wdt->io_lock); | ||
78 | /* disable */ | ||
79 | out_be32(&wdt->regs->mode, 0); | ||
80 | spin_unlock(&wdt->io_lock); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | |||
85 | /* file operations */ | ||
86 | static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data, | ||
87 | size_t len, loff_t *ppos) | ||
88 | { | ||
89 | struct mpc5200_wdt *wdt = file->private_data; | ||
90 | mpc5200_wdt_ping(wdt); | ||
91 | return 0; | ||
92 | } | ||
93 | static struct watchdog_info mpc5200_wdt_info = { | ||
94 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | ||
95 | .identity = "mpc5200 watchdog on GPT0", | ||
96 | }; | ||
97 | static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd, | ||
98 | unsigned long arg) | ||
99 | { | ||
100 | struct mpc5200_wdt *wdt = file->private_data; | ||
101 | int __user *data = (int __user *)arg; | ||
102 | int timeout; | ||
103 | int ret = 0; | ||
104 | |||
105 | switch (cmd) { | ||
106 | case WDIOC_GETSUPPORT: | ||
107 | ret = copy_to_user(data, &mpc5200_wdt_info, | ||
108 | sizeof(mpc5200_wdt_info)); | ||
109 | if (ret) | ||
110 | ret = -EFAULT; | ||
111 | break; | ||
112 | |||
113 | case WDIOC_GETSTATUS: | ||
114 | case WDIOC_GETBOOTSTATUS: | ||
115 | ret = put_user(0, data); | ||
116 | break; | ||
117 | |||
118 | case WDIOC_KEEPALIVE: | ||
119 | mpc5200_wdt_ping(wdt); | ||
120 | break; | ||
121 | |||
122 | case WDIOC_SETTIMEOUT: | ||
123 | ret = get_user(timeout, data); | ||
124 | if (ret) | ||
125 | break; | ||
126 | mpc5200_wdt_set_timeout(wdt, timeout); | ||
127 | mpc5200_wdt_start(wdt); | ||
128 | /* fall through and return the timeout */ | ||
129 | |||
130 | case WDIOC_GETTIMEOUT: | ||
131 | timeout = mpc5200_wdt_get_timeout(wdt); | ||
132 | ret = put_user(timeout, data); | ||
133 | break; | ||
134 | |||
135 | default: | ||
136 | ret = -ENOTTY; | ||
137 | } | ||
138 | return ret; | ||
139 | } | ||
140 | |||
141 | static int mpc5200_wdt_open(struct inode *inode, struct file *file) | ||
142 | { | ||
143 | /* /dev/watchdog can only be opened once */ | ||
144 | if (test_and_set_bit(0, &is_active)) | ||
145 | return -EBUSY; | ||
146 | |||
147 | /* Set and activate the watchdog */ | ||
148 | mpc5200_wdt_set_timeout(wdt_global, 30); | ||
149 | mpc5200_wdt_start(wdt_global); | ||
150 | file->private_data = wdt_global; | ||
151 | return nonseekable_open(inode, file); | ||
152 | } | ||
153 | static int mpc5200_wdt_release(struct inode *inode, struct file *file) | ||
154 | { | ||
155 | #if WATCHDOG_NOWAYOUT == 0 | ||
156 | struct mpc5200_wdt *wdt = file->private_data; | ||
157 | mpc5200_wdt_stop(wdt); | ||
158 | wdt->count = 0; /* == disabled */ | ||
159 | #endif | ||
160 | clear_bit(0, &is_active); | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static const struct file_operations mpc5200_wdt_fops = { | ||
165 | .owner = THIS_MODULE, | ||
166 | .write = mpc5200_wdt_write, | ||
167 | .unlocked_ioctl = mpc5200_wdt_ioctl, | ||
168 | .open = mpc5200_wdt_open, | ||
169 | .release = mpc5200_wdt_release, | ||
170 | }; | ||
171 | |||
172 | /* module operations */ | ||
173 | static int mpc5200_wdt_probe(struct of_device *op, | ||
174 | const struct of_device_id *match) | ||
175 | { | ||
176 | struct mpc5200_wdt *wdt; | ||
177 | int err; | ||
178 | const void *has_wdt; | ||
179 | int size; | ||
180 | |||
181 | has_wdt = of_get_property(op->node, "has-wdt", NULL); | ||
182 | if (!has_wdt) | ||
183 | has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL); | ||
184 | if (!has_wdt) | ||
185 | return -ENODEV; | ||
186 | |||
187 | wdt = kzalloc(sizeof(*wdt), GFP_KERNEL); | ||
188 | if (!wdt) | ||
189 | return -ENOMEM; | ||
190 | |||
191 | wdt->ipb_freq = mpc5xxx_get_bus_frequency(op->node); | ||
192 | |||
193 | err = of_address_to_resource(op->node, 0, &wdt->mem); | ||
194 | if (err) | ||
195 | goto out_free; | ||
196 | size = wdt->mem.end - wdt->mem.start + 1; | ||
197 | if (!request_mem_region(wdt->mem.start, size, "mpc5200_wdt")) { | ||
198 | err = -ENODEV; | ||
199 | goto out_free; | ||
200 | } | ||
201 | wdt->regs = ioremap(wdt->mem.start, size); | ||
202 | if (!wdt->regs) { | ||
203 | err = -ENODEV; | ||
204 | goto out_release; | ||
205 | } | ||
206 | |||
207 | dev_set_drvdata(&op->dev, wdt); | ||
208 | spin_lock_init(&wdt->io_lock); | ||
209 | |||
210 | wdt->miscdev = (struct miscdevice) { | ||
211 | .minor = WATCHDOG_MINOR, | ||
212 | .name = "watchdog", | ||
213 | .fops = &mpc5200_wdt_fops, | ||
214 | .parent = &op->dev, | ||
215 | }; | ||
216 | wdt_global = wdt; | ||
217 | err = misc_register(&wdt->miscdev); | ||
218 | if (!err) | ||
219 | return 0; | ||
220 | |||
221 | iounmap(wdt->regs); | ||
222 | out_release: | ||
223 | release_mem_region(wdt->mem.start, size); | ||
224 | out_free: | ||
225 | kfree(wdt); | ||
226 | return err; | ||
227 | } | ||
228 | |||
229 | static int mpc5200_wdt_remove(struct of_device *op) | ||
230 | { | ||
231 | struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); | ||
232 | |||
233 | mpc5200_wdt_stop(wdt); | ||
234 | misc_deregister(&wdt->miscdev); | ||
235 | iounmap(wdt->regs); | ||
236 | release_mem_region(wdt->mem.start, wdt->mem.end - wdt->mem.start + 1); | ||
237 | kfree(wdt); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | static int mpc5200_wdt_suspend(struct of_device *op, pm_message_t state) | ||
242 | { | ||
243 | struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); | ||
244 | mpc5200_wdt_stop(wdt); | ||
245 | return 0; | ||
246 | } | ||
247 | static int mpc5200_wdt_resume(struct of_device *op) | ||
248 | { | ||
249 | struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); | ||
250 | if (wdt->count) | ||
251 | mpc5200_wdt_start(wdt); | ||
252 | return 0; | ||
253 | } | ||
254 | static int mpc5200_wdt_shutdown(struct of_device *op) | ||
255 | { | ||
256 | struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); | ||
257 | mpc5200_wdt_stop(wdt); | ||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | static struct of_device_id mpc5200_wdt_match[] = { | ||
262 | { .compatible = "mpc5200-gpt", }, | ||
263 | { .compatible = "fsl,mpc5200-gpt", }, | ||
264 | {}, | ||
265 | }; | ||
266 | static struct of_platform_driver mpc5200_wdt_driver = { | ||
267 | .owner = THIS_MODULE, | ||
268 | .name = "mpc5200-gpt-wdt", | ||
269 | .match_table = mpc5200_wdt_match, | ||
270 | .probe = mpc5200_wdt_probe, | ||
271 | .remove = mpc5200_wdt_remove, | ||
272 | .suspend = mpc5200_wdt_suspend, | ||
273 | .resume = mpc5200_wdt_resume, | ||
274 | .shutdown = mpc5200_wdt_shutdown, | ||
275 | }; | ||
276 | |||
277 | |||
278 | static int __init mpc5200_wdt_init(void) | ||
279 | { | ||
280 | return of_register_platform_driver(&mpc5200_wdt_driver); | ||
281 | } | ||
282 | |||
283 | static void __exit mpc5200_wdt_exit(void) | ||
284 | { | ||
285 | of_unregister_platform_driver(&mpc5200_wdt_driver); | ||
286 | } | ||
287 | |||
288 | module_init(mpc5200_wdt_init); | ||
289 | module_exit(mpc5200_wdt_exit); | ||
290 | |||
291 | MODULE_AUTHOR("Domen Puncer <domen.puncer@telargo.com>"); | ||
292 | MODULE_LICENSE("Dual BSD/GPL"); | ||
293 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 47536197ffdd..e287863ac053 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); | |||
43 | 43 | ||
44 | #ifdef CONFIG_HOTPLUG_CPU | 44 | #ifdef CONFIG_HOTPLUG_CPU |
45 | extern void unregister_cpu(struct cpu *cpu); | 45 | extern void unregister_cpu(struct cpu *cpu); |
46 | extern ssize_t arch_cpu_probe(const char *, size_t); | ||
47 | extern ssize_t arch_cpu_release(const char *, size_t); | ||
46 | #endif | 48 | #endif |
47 | struct notifier_block; | 49 | struct notifier_block; |
48 | 50 | ||
@@ -115,6 +117,19 @@ extern void put_online_cpus(void); | |||
115 | #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) | 117 | #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) |
116 | int cpu_down(unsigned int cpu); | 118 | int cpu_down(unsigned int cpu); |
117 | 119 | ||
120 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
121 | extern void cpu_hotplug_driver_lock(void); | ||
122 | extern void cpu_hotplug_driver_unlock(void); | ||
123 | #else | ||
124 | static inline void cpu_hotplug_driver_lock(void) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline void cpu_hotplug_driver_unlock(void) | ||
129 | { | ||
130 | } | ||
131 | #endif | ||
132 | |||
118 | #else /* CONFIG_HOTPLUG_CPU */ | 133 | #else /* CONFIG_HOTPLUG_CPU */ |
119 | 134 | ||
120 | #define get_online_cpus() do { } while (0) | 135 | #define get_online_cpus() do { } while (0) |
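The lock/unlock pair above is intended to bracket the architecture code behind the new /sys/devices/system/cpu/probe and release files. A sketch of what an arch_cpu_probe() implementation might look like; the input parsing and the add_new_cpu() helper are hypothetical, and only the function signature and the cpu_hotplug_driver_lock()/unlock() pair come from this patch:

    #include <linux/cpu.h>
    #include <linux/kernel.h>

    ssize_t arch_cpu_probe(const char *buf, size_t count)
    {
        unsigned long id;
        int rc;

        rc = strict_strtoul(buf, 0, &id); /* the format written to "probe" is arch specific */
        if (rc)
            return rc;

        cpu_hotplug_driver_lock();        /* serialize against other hotplug operations */
        rc = add_new_cpu(id);             /* hypothetical arch helper that brings up the CPU */
        cpu_hotplug_driver_unlock();

        return rc ? rc : count;
    }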
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f8f8900fc5ec..caf6173bd2e8 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -436,6 +436,9 @@ struct kvm_ioeventfd { | |||
436 | #endif | 436 | #endif |
437 | #define KVM_CAP_IOEVENTFD 36 | 437 | #define KVM_CAP_IOEVENTFD 36 |
438 | #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 | 438 | #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 |
439 | /* KVM upstream has more features, but we synched this number. | ||
440 | Linux, please remove this comment on rebase. */ | ||
441 | #define KVM_CAP_PPC_SEGSTATE 43 | ||
439 | 442 | ||
440 | #ifdef KVM_CAP_IRQ_ROUTING | 443 | #ifdef KVM_CAP_IRQ_ROUTING |
441 | 444 | ||
diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h new file mode 100644 index 000000000000..d1004cf09241 --- /dev/null +++ b/include/linux/spi/mpc52xx_spi.h | |||
@@ -0,0 +1,10 @@ | |||
1 | |||
2 | #ifndef INCLUDE_MPC5200_SPI_H | ||
3 | #define INCLUDE_MPC5200_SPI_H | ||
4 | |||
5 | extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master, | ||
6 | void (*hook)(struct spi_message *m, | ||
7 | void *context), | ||
8 | void *hook_context); | ||
9 | |||
10 | #endif | ||
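A sketch of how board support code could use the hook declared above, for instance to steer an external mux before each message goes out on the bus. The GPIO number, my_board_premessage() and my_board_setup_spi() are assumptions for illustration; only mpc52xx_spi_set_premessage_hook() and its argument order come from this header:

    #include <linux/gpio.h>
    #include <linux/spi/spi.h>
    #include <linux/spi/mpc52xx_spi.h>

    #define MY_BOARD_MUX_GPIO 42 /* hypothetical board-specific GPIO */

    static void my_board_premessage(struct spi_message *m, void *context)
    {
        int gpio = (int)(long)context;

        /* route the external mux to the slave this message is addressed to */
        gpio_set_value(gpio, m->spi->chip_select);
    }

    static void my_board_setup_spi(struct spi_master *master)
    {
        mpc52xx_spi_set_premessage_hook(master, my_board_premessage,
                                        (void *)(long)MY_BOARD_MUX_GPIO);
    }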