aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/sysdev
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/axonram.c1
-rw-r--r--arch/powerpc/sysdev/cpm_common.c158
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c13
-rw-r--r--arch/powerpc/sysdev/qe_lib/Kconfig27
-rw-r--r--arch/powerpc/sysdev/qe_lib/Makefile10
-rw-r--r--arch/powerpc/sysdev/qe_lib/gpio.c317
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c706
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c502
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.h103
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c192
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc.c212
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_fast.c363
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c374
-rw-r--r--arch/powerpc/sysdev/qe_lib/usb.c56
16 files changed, 15 insertions, 3022 deletions
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5b492a6438ff..bd6bd729969c 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
26obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o 26obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
27obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o 27obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
28obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 28obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
29obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
30mv64x60-$(CONFIG_PCI) += mv64x60_pci.o 29mv64x60-$(CONFIG_PCI) += mv64x60_pci.o
31obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \ 30obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
32 mv64x60_udbg.o 31 mv64x60_udbg.o
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 7a399b4d60a0..c713b349d967 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -313,6 +313,7 @@ static const struct of_device_id axon_ram_device_id[] = {
313 }, 313 },
314 {} 314 {}
315}; 315};
316MODULE_DEVICE_TABLE(of, axon_ram_device_id);
316 317
317static struct platform_driver axon_ram_driver = { 318static struct platform_driver axon_ram_driver = {
318 .probe = axon_ram_probe, 319 .probe = axon_ram_probe,
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index e00a5ee58fd7..9d32465eddb1 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -27,8 +27,8 @@
27 27
28#include <asm/udbg.h> 28#include <asm/udbg.h>
29#include <asm/io.h> 29#include <asm/io.h>
30#include <asm/rheap.h>
31#include <asm/cpm.h> 30#include <asm/cpm.h>
31#include <soc/fsl/qe/qe.h>
32 32
33#include <mm/mmu_decl.h> 33#include <mm/mmu_decl.h>
34 34
@@ -65,162 +65,6 @@ void __init udbg_init_cpm(void)
65} 65}
66#endif 66#endif
67 67
68static spinlock_t cpm_muram_lock;
69static rh_block_t cpm_boot_muram_rh_block[16];
70static rh_info_t cpm_muram_info;
71static u8 __iomem *muram_vbase;
72static phys_addr_t muram_pbase;
73
74/* Max address size we deal with */
75#define OF_MAX_ADDR_CELLS 4
76
77int cpm_muram_init(void)
78{
79 struct device_node *np;
80 struct resource r;
81 u32 zero[OF_MAX_ADDR_CELLS] = {};
82 resource_size_t max = 0;
83 int i = 0;
84 int ret = 0;
85
86 if (muram_pbase)
87 return 0;
88
89 spin_lock_init(&cpm_muram_lock);
90 /* initialize the info header */
91 rh_init(&cpm_muram_info, 1,
92 sizeof(cpm_boot_muram_rh_block) /
93 sizeof(cpm_boot_muram_rh_block[0]),
94 cpm_boot_muram_rh_block);
95
96 np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
97 if (!np) {
98 /* try legacy bindings */
99 np = of_find_node_by_name(NULL, "data-only");
100 if (!np) {
101 printk(KERN_ERR "Cannot find CPM muram data node");
102 ret = -ENODEV;
103 goto out;
104 }
105 }
106
107 muram_pbase = of_translate_address(np, zero);
108 if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
109 printk(KERN_ERR "Cannot translate zero through CPM muram node");
110 ret = -ENODEV;
111 goto out;
112 }
113
114 while (of_address_to_resource(np, i++, &r) == 0) {
115 if (r.end > max)
116 max = r.end;
117
118 rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
119 resource_size(&r));
120 }
121
122 muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
123 if (!muram_vbase) {
124 printk(KERN_ERR "Cannot map CPM muram");
125 ret = -ENOMEM;
126 }
127
128out:
129 of_node_put(np);
130 return ret;
131}
132
133/**
134 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
135 * @size: number of bytes to allocate
136 * @align: requested alignment, in bytes
137 *
138 * This function returns an offset into the muram area.
139 * Use cpm_dpram_addr() to get the virtual address of the area.
140 * Use cpm_muram_free() to free the allocation.
141 */
142unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
143{
144 unsigned long start;
145 unsigned long flags;
146
147 spin_lock_irqsave(&cpm_muram_lock, flags);
148 cpm_muram_info.alignment = align;
149 start = rh_alloc(&cpm_muram_info, size, "commproc");
150 if (!IS_ERR_VALUE(start))
151 memset_io(cpm_muram_addr(start), 0, size);
152 spin_unlock_irqrestore(&cpm_muram_lock, flags);
153
154 return start;
155}
156EXPORT_SYMBOL(cpm_muram_alloc);
157
158/**
159 * cpm_muram_free - free a chunk of multi-user ram
160 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
161 */
162int cpm_muram_free(unsigned long offset)
163{
164 int ret;
165 unsigned long flags;
166
167 spin_lock_irqsave(&cpm_muram_lock, flags);
168 ret = rh_free(&cpm_muram_info, offset);
169 spin_unlock_irqrestore(&cpm_muram_lock, flags);
170
171 return ret;
172}
173EXPORT_SYMBOL(cpm_muram_free);
174
175/**
176 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
177 * @offset: the offset into the muram area to reserve
178 * @size: the number of bytes to reserve
179 *
180 * This function returns "start" on success, -ENOMEM on failure.
181 * Use cpm_dpram_addr() to get the virtual address of the area.
182 * Use cpm_muram_free() to free the allocation.
183 */
184unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
185{
186 unsigned long start;
187 unsigned long flags;
188
189 spin_lock_irqsave(&cpm_muram_lock, flags);
190 cpm_muram_info.alignment = 1;
191 start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
192 spin_unlock_irqrestore(&cpm_muram_lock, flags);
193
194 return start;
195}
196EXPORT_SYMBOL(cpm_muram_alloc_fixed);
197
198/**
199 * cpm_muram_addr - turn a muram offset into a virtual address
200 * @offset: muram offset to convert
201 */
202void __iomem *cpm_muram_addr(unsigned long offset)
203{
204 return muram_vbase + offset;
205}
206EXPORT_SYMBOL(cpm_muram_addr);
207
208unsigned long cpm_muram_offset(void __iomem *addr)
209{
210 return addr - (void __iomem *)muram_vbase;
211}
212EXPORT_SYMBOL(cpm_muram_offset);
213
214/**
215 * cpm_muram_dma - turn a muram virtual address into a DMA address
216 * @offset: virtual address from cpm_muram_addr() to convert
217 */
218dma_addr_t cpm_muram_dma(void __iomem *addr)
219{
220 return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
221}
222EXPORT_SYMBOL(cpm_muram_dma);
223
224#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) 68#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
225 69
226struct cpm2_ioports { 70struct cpm2_ioports {
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 38138cf8d33e..47f781059eeb 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -243,8 +243,6 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
243 if (status & LTESR_CS) 243 if (status & LTESR_CS)
244 dev_err(ctrl->dev, "Chip select error: " 244 dev_err(ctrl->dev, "Chip select error: "
245 "LTESR 0x%08X\n", status); 245 "LTESR 0x%08X\n", status);
246 if (status & LTESR_UPM)
247 ;
248 if (status & LTESR_FCT) { 246 if (status & LTESR_FCT) {
249 dev_err(ctrl->dev, "FCM command time-out: " 247 dev_err(ctrl->dev, "FCM command time-out: "
250 "LTESR 0x%08X\n", status); 248 "LTESR 0x%08X\n", status);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a1ac80b3041a..c69e88e91459 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -218,6 +218,19 @@ static void setup_pci_atmu(struct pci_controller *hose)
218 */ 218 */
219 setup_inbound = !is_kdump(); 219 setup_inbound = !is_kdump();
220 220
221 if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
222 /*
223 * BSC9132 Rev1.0 has an issue where all the PEX inbound
224 * windows have implemented the default target value as 0xf
225 * for CCSR space.In all Freescale legacy devices the target
226 * of 0xf is reserved for local memory space. 9132 Rev1.0
227 * now has local mempry space mapped to target 0x0 instead of
228 * 0xf. Hence adding a workaround to remove the target 0xf
229 * defined for memory space from Inbound window attributes.
230 */
231 piwar &= ~PIWAR_TGI_LOCAL;
232 }
233
221 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { 234 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
222 if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) { 235 if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
223 win_idx = 2; 236 win_idx = 2;
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
deleted file mode 100644
index 3c251993bacd..000000000000
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
1#
2# QE Communication options
3#
4
5config UCC_SLOW
6 bool
7 default y if SERIAL_QE
8 help
9 This option provides qe_lib support to UCC slow
10 protocols: UART, BISYNC, QMC
11
12config UCC_FAST
13 bool
14 default y if UCC_GETH
15 help
16 This option provides qe_lib support to UCC fast
17 protocols: HDLC, Ethernet, ATM, transparent
18
19config UCC
20 bool
21 default y if UCC_FAST || UCC_SLOW
22
23config QE_USB
24 bool
25 default y if USB_FSL_QE
26 help
27 QE USB Controller support
diff --git a/arch/powerpc/sysdev/qe_lib/Makefile b/arch/powerpc/sysdev/qe_lib/Makefile
deleted file mode 100644
index f1855c185291..000000000000
--- a/arch/powerpc/sysdev/qe_lib/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1#
2# Makefile for the linux ppc-specific parts of QE
3#
4obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o
5
6obj-$(CONFIG_UCC) += ucc.o
7obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
8obj-$(CONFIG_UCC_FAST) += ucc_fast.o
9obj-$(CONFIG_QE_USB) += usb.o
10obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
deleted file mode 100644
index 521e67a49dc4..000000000000
--- a/arch/powerpc/sysdev/qe_lib/gpio.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * QUICC Engine GPIOs
3 *
4 * Copyright (c) MontaVista Software, Inc. 2008.
5 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/spinlock.h>
17#include <linux/err.h>
18#include <linux/io.h>
19#include <linux/of.h>
20#include <linux/of_gpio.h>
21#include <linux/gpio.h>
22#include <linux/slab.h>
23#include <linux/export.h>
24#include <asm/qe.h>
25
26struct qe_gpio_chip {
27 struct of_mm_gpio_chip mm_gc;
28 spinlock_t lock;
29
30 unsigned long pin_flags[QE_PIO_PINS];
31#define QE_PIN_REQUESTED 0
32
33 /* shadowed data register to clear/set bits safely */
34 u32 cpdata;
35
36 /* saved_regs used to restore dedicated functions */
37 struct qe_pio_regs saved_regs;
38};
39
40static inline struct qe_gpio_chip *
41to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
42{
43 return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
44}
45
46static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
47{
48 struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
49 struct qe_pio_regs __iomem *regs = mm_gc->regs;
50
51 qe_gc->cpdata = in_be32(&regs->cpdata);
52 qe_gc->saved_regs.cpdata = qe_gc->cpdata;
53 qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
54 qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
55 qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
56 qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
57 qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
58}
59
60static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
61{
62 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
63 struct qe_pio_regs __iomem *regs = mm_gc->regs;
64 u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
65
66 return in_be32(&regs->cpdata) & pin_mask;
67}
68
69static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
70{
71 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
72 struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
73 struct qe_pio_regs __iomem *regs = mm_gc->regs;
74 unsigned long flags;
75 u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
76
77 spin_lock_irqsave(&qe_gc->lock, flags);
78
79 if (val)
80 qe_gc->cpdata |= pin_mask;
81 else
82 qe_gc->cpdata &= ~pin_mask;
83
84 out_be32(&regs->cpdata, qe_gc->cpdata);
85
86 spin_unlock_irqrestore(&qe_gc->lock, flags);
87}
88
89static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
90{
91 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
92 struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
93 unsigned long flags;
94
95 spin_lock_irqsave(&qe_gc->lock, flags);
96
97 __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
98
99 spin_unlock_irqrestore(&qe_gc->lock, flags);
100
101 return 0;
102}
103
104static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
105{
106 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
107 struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
108 unsigned long flags;
109
110 qe_gpio_set(gc, gpio, val);
111
112 spin_lock_irqsave(&qe_gc->lock, flags);
113
114 __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
115
116 spin_unlock_irqrestore(&qe_gc->lock, flags);
117
118 return 0;
119}
120
121struct qe_pin {
122 /*
123 * The qe_gpio_chip name is unfortunate, we should change that to
124 * something like qe_pio_controller. Someday.
125 */
126 struct qe_gpio_chip *controller;
127 int num;
128};
129
130/**
131 * qe_pin_request - Request a QE pin
132 * @np: device node to get a pin from
133 * @index: index of a pin in the device tree
134 * Context: non-atomic
135 *
136 * This function return qe_pin so that you could use it with the rest of
137 * the QE Pin Multiplexing API.
138 */
139struct qe_pin *qe_pin_request(struct device_node *np, int index)
140{
141 struct qe_pin *qe_pin;
142 struct gpio_chip *gc;
143 struct of_mm_gpio_chip *mm_gc;
144 struct qe_gpio_chip *qe_gc;
145 int err;
146 unsigned long flags;
147
148 qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
149 if (!qe_pin) {
150 pr_debug("%s: can't allocate memory\n", __func__);
151 return ERR_PTR(-ENOMEM);
152 }
153
154 err = of_get_gpio(np, index);
155 if (err < 0)
156 goto err0;
157 gc = gpio_to_chip(err);
158 if (WARN_ON(!gc))
159 goto err0;
160
161 if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
162 pr_debug("%s: tried to get a non-qe pin\n", __func__);
163 err = -EINVAL;
164 goto err0;
165 }
166
167 mm_gc = to_of_mm_gpio_chip(gc);
168 qe_gc = to_qe_gpio_chip(mm_gc);
169
170 spin_lock_irqsave(&qe_gc->lock, flags);
171
172 err -= gc->base;
173 if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
174 qe_pin->controller = qe_gc;
175 qe_pin->num = err;
176 err = 0;
177 } else {
178 err = -EBUSY;
179 }
180
181 spin_unlock_irqrestore(&qe_gc->lock, flags);
182
183 if (!err)
184 return qe_pin;
185err0:
186 kfree(qe_pin);
187 pr_debug("%s failed with status %d\n", __func__, err);
188 return ERR_PTR(err);
189}
190EXPORT_SYMBOL(qe_pin_request);
191
192/**
193 * qe_pin_free - Free a pin
194 * @qe_pin: pointer to the qe_pin structure
195 * Context: any
196 *
197 * This function frees the qe_pin structure and makes a pin available
198 * for further qe_pin_request() calls.
199 */
200void qe_pin_free(struct qe_pin *qe_pin)
201{
202 struct qe_gpio_chip *qe_gc = qe_pin->controller;
203 unsigned long flags;
204 const int pin = qe_pin->num;
205
206 spin_lock_irqsave(&qe_gc->lock, flags);
207 test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
208 spin_unlock_irqrestore(&qe_gc->lock, flags);
209
210 kfree(qe_pin);
211}
212EXPORT_SYMBOL(qe_pin_free);
213
214/**
215 * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
216 * @qe_pin: pointer to the qe_pin structure
217 * Context: any
218 *
219 * This function resets a pin to a dedicated peripheral function that
220 * has been set up by the firmware.
221 */
222void qe_pin_set_dedicated(struct qe_pin *qe_pin)
223{
224 struct qe_gpio_chip *qe_gc = qe_pin->controller;
225 struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
226 struct qe_pio_regs *sregs = &qe_gc->saved_regs;
227 int pin = qe_pin->num;
228 u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
229 u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
230 bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
231 unsigned long flags;
232
233 spin_lock_irqsave(&qe_gc->lock, flags);
234
235 if (second_reg) {
236 clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
237 clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
238 } else {
239 clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
240 clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
241 }
242
243 if (sregs->cpdata & mask1)
244 qe_gc->cpdata |= mask1;
245 else
246 qe_gc->cpdata &= ~mask1;
247
248 out_be32(&regs->cpdata, qe_gc->cpdata);
249 clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
250
251 spin_unlock_irqrestore(&qe_gc->lock, flags);
252}
253EXPORT_SYMBOL(qe_pin_set_dedicated);
254
255/**
256 * qe_pin_set_gpio - Set a pin to the GPIO mode
257 * @qe_pin: pointer to the qe_pin structure
258 * Context: any
259 *
260 * This function sets a pin to the GPIO mode.
261 */
262void qe_pin_set_gpio(struct qe_pin *qe_pin)
263{
264 struct qe_gpio_chip *qe_gc = qe_pin->controller;
265 struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
266 unsigned long flags;
267
268 spin_lock_irqsave(&qe_gc->lock, flags);
269
270 /* Let's make it input by default, GPIO API is able to change that. */
271 __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
272
273 spin_unlock_irqrestore(&qe_gc->lock, flags);
274}
275EXPORT_SYMBOL(qe_pin_set_gpio);
276
277static int __init qe_add_gpiochips(void)
278{
279 struct device_node *np;
280
281 for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
282 int ret;
283 struct qe_gpio_chip *qe_gc;
284 struct of_mm_gpio_chip *mm_gc;
285 struct gpio_chip *gc;
286
287 qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
288 if (!qe_gc) {
289 ret = -ENOMEM;
290 goto err;
291 }
292
293 spin_lock_init(&qe_gc->lock);
294
295 mm_gc = &qe_gc->mm_gc;
296 gc = &mm_gc->gc;
297
298 mm_gc->save_regs = qe_gpio_save_regs;
299 gc->ngpio = QE_PIO_PINS;
300 gc->direction_input = qe_gpio_dir_in;
301 gc->direction_output = qe_gpio_dir_out;
302 gc->get = qe_gpio_get;
303 gc->set = qe_gpio_set;
304
305 ret = of_mm_gpiochip_add(np, mm_gc);
306 if (ret)
307 goto err;
308 continue;
309err:
310 pr_err("%s: registration failed with status %d\n",
311 np->full_name, ret);
312 kfree(qe_gc);
313 /* try others anyway */
314 }
315 return 0;
316}
317arch_initcall(qe_add_gpiochips);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
deleted file mode 100644
index c2518cdb7ddb..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ /dev/null
@@ -1,706 +0,0 @@
1/*
2 * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
7 *
8 * Description:
9 * General Purpose functions for the global management of the
10 * QUICC Engine (QE).
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/param.h>
21#include <linux/string.h>
22#include <linux/spinlock.h>
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/delay.h>
27#include <linux/ioport.h>
28#include <linux/crc32.h>
29#include <linux/mod_devicetable.h>
30#include <linux/of_platform.h>
31#include <asm/irq.h>
32#include <asm/page.h>
33#include <asm/pgtable.h>
34#include <asm/immap_qe.h>
35#include <asm/qe.h>
36#include <asm/prom.h>
37#include <asm/rheap.h>
38
39static void qe_snums_init(void);
40static int qe_sdma_init(void);
41
42static DEFINE_SPINLOCK(qe_lock);
43DEFINE_SPINLOCK(cmxgcr_lock);
44EXPORT_SYMBOL(cmxgcr_lock);
45
46/* QE snum state */
47enum qe_snum_state {
48 QE_SNUM_STATE_USED,
49 QE_SNUM_STATE_FREE
50};
51
52/* QE snum */
53struct qe_snum {
54 u8 num;
55 enum qe_snum_state state;
56};
57
58/* We allocate this here because it is used almost exclusively for
59 * the communication processor devices.
60 */
61struct qe_immap __iomem *qe_immr;
62EXPORT_SYMBOL(qe_immr);
63
64static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
65static unsigned int qe_num_of_snum;
66
67static phys_addr_t qebase = -1;
68
69phys_addr_t get_qe_base(void)
70{
71 struct device_node *qe;
72 int size;
73 const u32 *prop;
74
75 if (qebase != -1)
76 return qebase;
77
78 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
79 if (!qe) {
80 qe = of_find_node_by_type(NULL, "qe");
81 if (!qe)
82 return qebase;
83 }
84
85 prop = of_get_property(qe, "reg", &size);
86 if (prop && size >= sizeof(*prop))
87 qebase = of_translate_address(qe, prop);
88 of_node_put(qe);
89
90 return qebase;
91}
92
93EXPORT_SYMBOL(get_qe_base);
94
95void qe_reset(void)
96{
97 if (qe_immr == NULL)
98 qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
99
100 qe_snums_init();
101
102 qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
103 QE_CR_PROTOCOL_UNSPECIFIED, 0);
104
105 /* Reclaim the MURAM memory for our use. */
106 qe_muram_init();
107
108 if (qe_sdma_init())
109 panic("sdma init failed!");
110}
111
112int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
113{
114 unsigned long flags;
115 u8 mcn_shift = 0, dev_shift = 0;
116 u32 ret;
117
118 spin_lock_irqsave(&qe_lock, flags);
119 if (cmd == QE_RESET) {
120 out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
121 } else {
122 if (cmd == QE_ASSIGN_PAGE) {
123 /* Here device is the SNUM, not sub-block */
124 dev_shift = QE_CR_SNUM_SHIFT;
125 } else if (cmd == QE_ASSIGN_RISC) {
126 /* Here device is the SNUM, and mcnProtocol is
127 * e_QeCmdRiscAssignment value */
128 dev_shift = QE_CR_SNUM_SHIFT;
129 mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
130 } else {
131 if (device == QE_CR_SUBBLOCK_USB)
132 mcn_shift = QE_CR_MCN_USB_SHIFT;
133 else
134 mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
135 }
136
137 out_be32(&qe_immr->cp.cecdr, cmd_input);
138 out_be32(&qe_immr->cp.cecr,
139 (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
140 mcn_protocol << mcn_shift));
141 }
142
143 /* wait for the QE_CR_FLG to clear */
144 ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
145 100, 0);
146 /* On timeout (e.g. failure), the expression will be false (ret == 0),
147 otherwise it will be true (ret == 1). */
148 spin_unlock_irqrestore(&qe_lock, flags);
149
150 return ret == 1;
151}
152EXPORT_SYMBOL(qe_issue_cmd);
153
154/* Set a baud rate generator. This needs lots of work. There are
155 * 16 BRGs, which can be connected to the QE channels or output
156 * as clocks. The BRGs are in two different block of internal
157 * memory mapped space.
158 * The BRG clock is the QE clock divided by 2.
159 * It was set up long ago during the initial boot phase and is
160 * is given to us.
161 * Baud rate clocks are zero-based in the driver code (as that maps
162 * to port numbers). Documentation uses 1-based numbering.
163 */
164static unsigned int brg_clk = 0;
165
166unsigned int qe_get_brg_clk(void)
167{
168 struct device_node *qe;
169 int size;
170 const u32 *prop;
171
172 if (brg_clk)
173 return brg_clk;
174
175 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
176 if (!qe) {
177 qe = of_find_node_by_type(NULL, "qe");
178 if (!qe)
179 return brg_clk;
180 }
181
182 prop = of_get_property(qe, "brg-frequency", &size);
183 if (prop && size == sizeof(*prop))
184 brg_clk = *prop;
185
186 of_node_put(qe);
187
188 return brg_clk;
189}
190EXPORT_SYMBOL(qe_get_brg_clk);
191
192/* Program the BRG to the given sampling rate and multiplier
193 *
194 * @brg: the BRG, QE_BRG1 - QE_BRG16
195 * @rate: the desired sampling rate
196 * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
197 * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
198 * then 'multiplier' should be 8.
199 */
200int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
201{
202 u32 divisor, tempval;
203 u32 div16 = 0;
204
205 if ((brg < QE_BRG1) || (brg > QE_BRG16))
206 return -EINVAL;
207
208 divisor = qe_get_brg_clk() / (rate * multiplier);
209
210 if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
211 div16 = QE_BRGC_DIV16;
212 divisor /= 16;
213 }
214
215 /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
216 that the BRG divisor must be even if you're not using divide-by-16
217 mode. */
218 if (!div16 && (divisor & 1) && (divisor > 3))
219 divisor++;
220
221 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
222 QE_BRGC_ENABLE | div16;
223
224 out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
225
226 return 0;
227}
228EXPORT_SYMBOL(qe_setbrg);
229
230/* Convert a string to a QE clock source enum
231 *
232 * This function takes a string, typically from a property in the device
233 * tree, and returns the corresponding "enum qe_clock" value.
234*/
235enum qe_clock qe_clock_source(const char *source)
236{
237 unsigned int i;
238
239 if (strcasecmp(source, "none") == 0)
240 return QE_CLK_NONE;
241
242 if (strncasecmp(source, "brg", 3) == 0) {
243 i = simple_strtoul(source + 3, NULL, 10);
244 if ((i >= 1) && (i <= 16))
245 return (QE_BRG1 - 1) + i;
246 else
247 return QE_CLK_DUMMY;
248 }
249
250 if (strncasecmp(source, "clk", 3) == 0) {
251 i = simple_strtoul(source + 3, NULL, 10);
252 if ((i >= 1) && (i <= 24))
253 return (QE_CLK1 - 1) + i;
254 else
255 return QE_CLK_DUMMY;
256 }
257
258 return QE_CLK_DUMMY;
259}
260EXPORT_SYMBOL(qe_clock_source);
261
262/* Initialize SNUMs (thread serial numbers) according to
263 * QE Module Control chapter, SNUM table
264 */
265static void qe_snums_init(void)
266{
267 int i;
268 static const u8 snum_init_76[] = {
269 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
270 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
271 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
272 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
273 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
274 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
275 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
276 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
277 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
278 0xF4, 0xF5, 0xFC, 0xFD,
279 };
280 static const u8 snum_init_46[] = {
281 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
282 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
283 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
284 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
285 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
286 0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
287 };
288 static const u8 *snum_init;
289
290 qe_num_of_snum = qe_get_num_of_snums();
291
292 if (qe_num_of_snum == 76)
293 snum_init = snum_init_76;
294 else
295 snum_init = snum_init_46;
296
297 for (i = 0; i < qe_num_of_snum; i++) {
298 snums[i].num = snum_init[i];
299 snums[i].state = QE_SNUM_STATE_FREE;
300 }
301}
302
303int qe_get_snum(void)
304{
305 unsigned long flags;
306 int snum = -EBUSY;
307 int i;
308
309 spin_lock_irqsave(&qe_lock, flags);
310 for (i = 0; i < qe_num_of_snum; i++) {
311 if (snums[i].state == QE_SNUM_STATE_FREE) {
312 snums[i].state = QE_SNUM_STATE_USED;
313 snum = snums[i].num;
314 break;
315 }
316 }
317 spin_unlock_irqrestore(&qe_lock, flags);
318
319 return snum;
320}
321EXPORT_SYMBOL(qe_get_snum);
322
323void qe_put_snum(u8 snum)
324{
325 int i;
326
327 for (i = 0; i < qe_num_of_snum; i++) {
328 if (snums[i].num == snum) {
329 snums[i].state = QE_SNUM_STATE_FREE;
330 break;
331 }
332 }
333}
334EXPORT_SYMBOL(qe_put_snum);
335
336static int qe_sdma_init(void)
337{
338 struct sdma __iomem *sdma = &qe_immr->sdma;
339 static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
340
341 if (!sdma)
342 return -ENODEV;
343
344 /* allocate 2 internal temporary buffers (512 bytes size each) for
345 * the SDMA */
346 if (IS_ERR_VALUE(sdma_buf_offset)) {
347 sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
348 if (IS_ERR_VALUE(sdma_buf_offset))
349 return -ENOMEM;
350 }
351
352 out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
353 out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
354 (0x1 << QE_SDMR_CEN_SHIFT)));
355
356 return 0;
357}
358
359/* The maximum number of RISCs we support */
360#define MAX_QE_RISC 4
361
362/* Firmware information stored here for qe_get_firmware_info() */
363static struct qe_firmware_info qe_firmware_info;
364
365/*
366 * Set to 1 if QE firmware has been uploaded, and therefore
367 * qe_firmware_info contains valid data.
368 */
369static int qe_firmware_uploaded;
370
371/*
372 * Upload a QE microcode
373 *
374 * This function is a worker function for qe_upload_firmware(). It does
375 * the actual uploading of the microcode.
376 */
377static void qe_upload_microcode(const void *base,
378 const struct qe_microcode *ucode)
379{
380 const __be32 *code = base + be32_to_cpu(ucode->code_offset);
381 unsigned int i;
382
383 if (ucode->major || ucode->minor || ucode->revision)
384 printk(KERN_INFO "qe-firmware: "
385 "uploading microcode '%s' version %u.%u.%u\n",
386 ucode->id, ucode->major, ucode->minor, ucode->revision);
387 else
388 printk(KERN_INFO "qe-firmware: "
389 "uploading microcode '%s'\n", ucode->id);
390
391 /* Use auto-increment */
392 out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
393 QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
394
395 for (i = 0; i < be32_to_cpu(ucode->count); i++)
396 out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
397
398 /* Set I-RAM Ready Register */
399 out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
400}
401
402/*
403 * Upload a microcode to the I-RAM at a specific address.
404 *
405 * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
406 * uploading.
407 *
408 * Currently, only version 1 is supported, so the 'version' field must be
409 * set to 1.
410 *
411 * The SOC model and revision are not validated, they are only displayed for
412 * informational purposes.
413 *
414 * 'calc_size' is the calculated size, in bytes, of the firmware structure and
415 * all of the microcode structures, minus the CRC.
416 *
417 * 'length' is the size that the structure says it is, including the CRC.
418 */
419int qe_upload_firmware(const struct qe_firmware *firmware)
420{
421 unsigned int i;
422 unsigned int j;
423 u32 crc;
424 size_t calc_size = sizeof(struct qe_firmware);
425 size_t length;
426 const struct qe_header *hdr;
427
428 if (!firmware) {
429 printk(KERN_ERR "qe-firmware: invalid pointer\n");
430 return -EINVAL;
431 }
432
433 hdr = &firmware->header;
434 length = be32_to_cpu(hdr->length);
435
436 /* Check the magic */
437 if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
438 (hdr->magic[2] != 'F')) {
439 printk(KERN_ERR "qe-firmware: not a microcode\n");
440 return -EPERM;
441 }
442
443 /* Check the version */
444 if (hdr->version != 1) {
445 printk(KERN_ERR "qe-firmware: unsupported version\n");
446 return -EPERM;
447 }
448
449 /* Validate some of the fields */
450 if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
451 printk(KERN_ERR "qe-firmware: invalid data\n");
452 return -EINVAL;
453 }
454
455 /* Validate the length and check if there's a CRC */
456 calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
457
458 for (i = 0; i < firmware->count; i++)
459 /*
460 * For situations where the second RISC uses the same microcode
461 * as the first, the 'code_offset' and 'count' fields will be
462 * zero, so it's okay to add those.
463 */
464 calc_size += sizeof(__be32) *
465 be32_to_cpu(firmware->microcode[i].count);
466
467 /* Validate the length */
468 if (length != calc_size + sizeof(__be32)) {
469 printk(KERN_ERR "qe-firmware: invalid length\n");
470 return -EPERM;
471 }
472
473 /* Validate the CRC */
474 crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
475 if (crc != crc32(0, firmware, calc_size)) {
476 printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
477 return -EIO;
478 }
479
480 /*
481 * If the microcode calls for it, split the I-RAM.
482 */
483 if (!firmware->split)
484 setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
485
486 if (firmware->soc.model)
487 printk(KERN_INFO
488 "qe-firmware: firmware '%s' for %u V%u.%u\n",
489 firmware->id, be16_to_cpu(firmware->soc.model),
490 firmware->soc.major, firmware->soc.minor);
491 else
492 printk(KERN_INFO "qe-firmware: firmware '%s'\n",
493 firmware->id);
494
495 /*
496 * The QE only supports one microcode per RISC, so clear out all the
497 * saved microcode information and put in the new.
498 */
499 memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
500 strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
501 qe_firmware_info.extended_modes = firmware->extended_modes;
502 memcpy(qe_firmware_info.vtraps, firmware->vtraps,
503 sizeof(firmware->vtraps));
504
505 /* Loop through each microcode. */
506 for (i = 0; i < firmware->count; i++) {
507 const struct qe_microcode *ucode = &firmware->microcode[i];
508
509 /* Upload a microcode if it's present */
510 if (ucode->code_offset)
511 qe_upload_microcode(firmware, ucode);
512
513 /* Program the traps for this processor */
514 for (j = 0; j < 16; j++) {
515 u32 trap = be32_to_cpu(ucode->traps[j]);
516
517 if (trap)
518 out_be32(&qe_immr->rsp[i].tibcr[j], trap);
519 }
520
521 /* Enable traps */
522 out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
523 }
524
525 qe_firmware_uploaded = 1;
526
527 return 0;
528}
529EXPORT_SYMBOL(qe_upload_firmware);
530
531/*
532 * Get info on the currently-loaded firmware
533 *
534 * This function also checks the device tree to see if the boot loader has
535 * uploaded a firmware already.
536 */
537struct qe_firmware_info *qe_get_firmware_info(void)
538{
539 static int initialized;
540 struct property *prop;
541 struct device_node *qe;
542 struct device_node *fw = NULL;
543 const char *sprop;
544 unsigned int i;
545
546 /*
547 * If we haven't checked yet, and a driver hasn't uploaded a firmware
548 * yet, then check the device tree for information.
549 */
550 if (qe_firmware_uploaded)
551 return &qe_firmware_info;
552
553 if (initialized)
554 return NULL;
555
556 initialized = 1;
557
558 /*
559 * Newer device trees have an "fsl,qe" compatible property for the QE
560 * node, but we still need to support older device trees.
561 */
562 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
563 if (!qe) {
564 qe = of_find_node_by_type(NULL, "qe");
565 if (!qe)
566 return NULL;
567 }
568
569 /* Find the 'firmware' child node */
570 for_each_child_of_node(qe, fw) {
571 if (strcmp(fw->name, "firmware") == 0)
572 break;
573 }
574
575 of_node_put(qe);
576
577 /* Did we find the 'firmware' node? */
578 if (!fw)
579 return NULL;
580
581 qe_firmware_uploaded = 1;
582
583 /* Copy the data into qe_firmware_info*/
584 sprop = of_get_property(fw, "id", NULL);
585 if (sprop)
586 strlcpy(qe_firmware_info.id, sprop,
587 sizeof(qe_firmware_info.id));
588
589 prop = of_find_property(fw, "extended-modes", NULL);
590 if (prop && (prop->length == sizeof(u64))) {
591 const u64 *iprop = prop->value;
592
593 qe_firmware_info.extended_modes = *iprop;
594 }
595
596 prop = of_find_property(fw, "virtual-traps", NULL);
597 if (prop && (prop->length == 32)) {
598 const u32 *iprop = prop->value;
599
600 for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
601 qe_firmware_info.vtraps[i] = iprop[i];
602 }
603
604 of_node_put(fw);
605
606 return &qe_firmware_info;
607}
608EXPORT_SYMBOL(qe_get_firmware_info);
609
610unsigned int qe_get_num_of_risc(void)
611{
612 struct device_node *qe;
613 int size;
614 unsigned int num_of_risc = 0;
615 const u32 *prop;
616
617 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
618 if (!qe) {
619 /* Older devices trees did not have an "fsl,qe"
620 * compatible property, so we need to look for
621 * the QE node by name.
622 */
623 qe = of_find_node_by_type(NULL, "qe");
624 if (!qe)
625 return num_of_risc;
626 }
627
628 prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
629 if (prop && size == sizeof(*prop))
630 num_of_risc = *prop;
631
632 of_node_put(qe);
633
634 return num_of_risc;
635}
636EXPORT_SYMBOL(qe_get_num_of_risc);
637
638unsigned int qe_get_num_of_snums(void)
639{
640 struct device_node *qe;
641 int size;
642 unsigned int num_of_snums;
643 const u32 *prop;
644
645 num_of_snums = 28; /* The default number of snum for threads is 28 */
646 qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
647 if (!qe) {
648 /* Older devices trees did not have an "fsl,qe"
649 * compatible property, so we need to look for
650 * the QE node by name.
651 */
652 qe = of_find_node_by_type(NULL, "qe");
653 if (!qe)
654 return num_of_snums;
655 }
656
657 prop = of_get_property(qe, "fsl,qe-num-snums", &size);
658 if (prop && size == sizeof(*prop)) {
659 num_of_snums = *prop;
660 if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
661 /* No QE ever has fewer than 28 SNUMs */
662 pr_err("QE: number of snum is invalid\n");
663 of_node_put(qe);
664 return -EINVAL;
665 }
666 }
667
668 of_node_put(qe);
669
670 return num_of_snums;
671}
672EXPORT_SYMBOL(qe_get_num_of_snums);
673
#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
/* Re-initialize the QE on resume if it lost state while asleep. */
static int qe_resume(struct platform_device *ofdev)
{
	if (!qe_alive_during_sleep())
		qe_reset();
	return 0;
}

/* Nothing to do at probe time; the driver exists only for its resume hook. */
static int qe_probe(struct platform_device *ofdev)
{
	return 0;
}

static const struct of_device_id qe_ids[] = {
	{ .compatible = "fsl,qe", },
	{ },
};

static struct platform_driver qe_driver = {
	.driver = {
		.name = "fsl-qe",
		.of_match_table = qe_ids,
	},
	.probe = qe_probe,
	.resume = qe_resume,
};

static int __init qe_drv_init(void)
{
	return platform_driver_register(&qe_driver);
}
device_initcall(qe_drv_init);
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
deleted file mode 100644
index ef36f16f9f6f..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ /dev/null
@@ -1,502 +0,0 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/qe_ic.c
3 *
4 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
5 *
6 * Author: Li Yang <leoli@freescale.com>
7 * Based on code from Shlomi Gridish <gridish@freescale.com>
8 *
9 * QUICC ENGINE Interrupt Controller
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/reboot.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/sched.h>
24#include <linux/signal.h>
25#include <linux/device.h>
26#include <linux/spinlock.h>
27#include <asm/irq.h>
28#include <asm/io.h>
29#include <asm/prom.h>
30#include <asm/qe_ic.h>
31
32#include "qe_ic.h"
33
34static DEFINE_RAW_SPINLOCK(qe_ic_lock);
35
36static struct qe_ic_info qe_ic_info[] = {
37 [1] = {
38 .mask = 0x00008000,
39 .mask_reg = QEIC_CIMR,
40 .pri_code = 0,
41 .pri_reg = QEIC_CIPWCC,
42 },
43 [2] = {
44 .mask = 0x00004000,
45 .mask_reg = QEIC_CIMR,
46 .pri_code = 1,
47 .pri_reg = QEIC_CIPWCC,
48 },
49 [3] = {
50 .mask = 0x00002000,
51 .mask_reg = QEIC_CIMR,
52 .pri_code = 2,
53 .pri_reg = QEIC_CIPWCC,
54 },
55 [10] = {
56 .mask = 0x00000040,
57 .mask_reg = QEIC_CIMR,
58 .pri_code = 1,
59 .pri_reg = QEIC_CIPZCC,
60 },
61 [11] = {
62 .mask = 0x00000020,
63 .mask_reg = QEIC_CIMR,
64 .pri_code = 2,
65 .pri_reg = QEIC_CIPZCC,
66 },
67 [12] = {
68 .mask = 0x00000010,
69 .mask_reg = QEIC_CIMR,
70 .pri_code = 3,
71 .pri_reg = QEIC_CIPZCC,
72 },
73 [13] = {
74 .mask = 0x00000008,
75 .mask_reg = QEIC_CIMR,
76 .pri_code = 4,
77 .pri_reg = QEIC_CIPZCC,
78 },
79 [14] = {
80 .mask = 0x00000004,
81 .mask_reg = QEIC_CIMR,
82 .pri_code = 5,
83 .pri_reg = QEIC_CIPZCC,
84 },
85 [15] = {
86 .mask = 0x00000002,
87 .mask_reg = QEIC_CIMR,
88 .pri_code = 6,
89 .pri_reg = QEIC_CIPZCC,
90 },
91 [20] = {
92 .mask = 0x10000000,
93 .mask_reg = QEIC_CRIMR,
94 .pri_code = 3,
95 .pri_reg = QEIC_CIPRTA,
96 },
97 [25] = {
98 .mask = 0x00800000,
99 .mask_reg = QEIC_CRIMR,
100 .pri_code = 0,
101 .pri_reg = QEIC_CIPRTB,
102 },
103 [26] = {
104 .mask = 0x00400000,
105 .mask_reg = QEIC_CRIMR,
106 .pri_code = 1,
107 .pri_reg = QEIC_CIPRTB,
108 },
109 [27] = {
110 .mask = 0x00200000,
111 .mask_reg = QEIC_CRIMR,
112 .pri_code = 2,
113 .pri_reg = QEIC_CIPRTB,
114 },
115 [28] = {
116 .mask = 0x00100000,
117 .mask_reg = QEIC_CRIMR,
118 .pri_code = 3,
119 .pri_reg = QEIC_CIPRTB,
120 },
121 [32] = {
122 .mask = 0x80000000,
123 .mask_reg = QEIC_CIMR,
124 .pri_code = 0,
125 .pri_reg = QEIC_CIPXCC,
126 },
127 [33] = {
128 .mask = 0x40000000,
129 .mask_reg = QEIC_CIMR,
130 .pri_code = 1,
131 .pri_reg = QEIC_CIPXCC,
132 },
133 [34] = {
134 .mask = 0x20000000,
135 .mask_reg = QEIC_CIMR,
136 .pri_code = 2,
137 .pri_reg = QEIC_CIPXCC,
138 },
139 [35] = {
140 .mask = 0x10000000,
141 .mask_reg = QEIC_CIMR,
142 .pri_code = 3,
143 .pri_reg = QEIC_CIPXCC,
144 },
145 [36] = {
146 .mask = 0x08000000,
147 .mask_reg = QEIC_CIMR,
148 .pri_code = 4,
149 .pri_reg = QEIC_CIPXCC,
150 },
151 [40] = {
152 .mask = 0x00800000,
153 .mask_reg = QEIC_CIMR,
154 .pri_code = 0,
155 .pri_reg = QEIC_CIPYCC,
156 },
157 [41] = {
158 .mask = 0x00400000,
159 .mask_reg = QEIC_CIMR,
160 .pri_code = 1,
161 .pri_reg = QEIC_CIPYCC,
162 },
163 [42] = {
164 .mask = 0x00200000,
165 .mask_reg = QEIC_CIMR,
166 .pri_code = 2,
167 .pri_reg = QEIC_CIPYCC,
168 },
169 [43] = {
170 .mask = 0x00100000,
171 .mask_reg = QEIC_CIMR,
172 .pri_code = 3,
173 .pri_reg = QEIC_CIPYCC,
174 },
175};
176
177static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
178{
179 return in_be32(base + (reg >> 2));
180}
181
182static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
183 u32 value)
184{
185 out_be32(base + (reg >> 2), value);
186}
187
188static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
189{
190 return irq_get_chip_data(virq);
191}
192
193static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
194{
195 return irq_data_get_irq_chip_data(d);
196}
197
198static void qe_ic_unmask_irq(struct irq_data *d)
199{
200 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
201 unsigned int src = irqd_to_hwirq(d);
202 unsigned long flags;
203 u32 temp;
204
205 raw_spin_lock_irqsave(&qe_ic_lock, flags);
206
207 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
208 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
209 temp | qe_ic_info[src].mask);
210
211 raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
212}
213
214static void qe_ic_mask_irq(struct irq_data *d)
215{
216 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
217 unsigned int src = irqd_to_hwirq(d);
218 unsigned long flags;
219 u32 temp;
220
221 raw_spin_lock_irqsave(&qe_ic_lock, flags);
222
223 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
224 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
225 temp & ~qe_ic_info[src].mask);
226
227 /* Flush the above write before enabling interrupts; otherwise,
228 * spurious interrupts will sometimes happen. To be 100% sure
229 * that the write has reached the device before interrupts are
230 * enabled, the mask register would have to be read back; however,
231 * this is not required for correctness, only to avoid wasting
232 * time on a large number of spurious interrupts. In testing,
233 * a sync reduced the observed spurious interrupts to zero.
234 */
235 mb();
236
237 raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
238}
239
240static struct irq_chip qe_ic_irq_chip = {
241 .name = "QEIC",
242 .irq_unmask = qe_ic_unmask_irq,
243 .irq_mask = qe_ic_mask_irq,
244 .irq_mask_ack = qe_ic_mask_irq,
245};
246
247static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
248 enum irq_domain_bus_token bus_token)
249{
250 /* Exact match, unless qe_ic node is NULL */
251 struct device_node *of_node = irq_domain_get_of_node(h);
252 return of_node == NULL || of_node == node;
253}
254
255static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
256 irq_hw_number_t hw)
257{
258 struct qe_ic *qe_ic = h->host_data;
259 struct irq_chip *chip;
260
261 if (qe_ic_info[hw].mask == 0) {
262 printk(KERN_ERR "Can't map reserved IRQ\n");
263 return -EINVAL;
264 }
265 /* Default chip */
266 chip = &qe_ic->hc_irq;
267
268 irq_set_chip_data(virq, qe_ic);
269 irq_set_status_flags(virq, IRQ_LEVEL);
270
271 irq_set_chip_and_handler(virq, chip, handle_level_irq);
272
273 return 0;
274}
275
276static const struct irq_domain_ops qe_ic_host_ops = {
277 .match = qe_ic_host_match,
278 .map = qe_ic_host_map,
279 .xlate = irq_domain_xlate_onetwocell,
280};
281
282/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
283unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
284{
285 int irq;
286
287 BUG_ON(qe_ic == NULL);
288
289 /* get the interrupt source vector. */
290 irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
291
292 if (irq == 0)
293 return NO_IRQ;
294
295 return irq_linear_revmap(qe_ic->irqhost, irq);
296}
297
298/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
299unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
300{
301 int irq;
302
303 BUG_ON(qe_ic == NULL);
304
305 /* get the interrupt source vector. */
306 irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
307
308 if (irq == 0)
309 return NO_IRQ;
310
311 return irq_linear_revmap(qe_ic->irqhost, irq);
312}
313
314void __init qe_ic_init(struct device_node *node, unsigned int flags,
315 void (*low_handler)(struct irq_desc *desc),
316 void (*high_handler)(struct irq_desc *desc))
317{
318 struct qe_ic *qe_ic;
319 struct resource res;
320 u32 temp = 0, ret, high_active = 0;
321
322 ret = of_address_to_resource(node, 0, &res);
323 if (ret)
324 return;
325
326 qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
327 if (qe_ic == NULL)
328 return;
329
330 qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
331 &qe_ic_host_ops, qe_ic);
332 if (qe_ic->irqhost == NULL) {
333 kfree(qe_ic);
334 return;
335 }
336
337 qe_ic->regs = ioremap(res.start, resource_size(&res));
338
339 qe_ic->hc_irq = qe_ic_irq_chip;
340
341 qe_ic->virq_high = irq_of_parse_and_map(node, 0);
342 qe_ic->virq_low = irq_of_parse_and_map(node, 1);
343
344 if (qe_ic->virq_low == NO_IRQ) {
345 printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
346 kfree(qe_ic);
347 return;
348 }
349
350 /* default priority scheme is grouped. If spread mode is */
351 /* required, configure cicr accordingly. */
352 if (flags & QE_IC_SPREADMODE_GRP_W)
353 temp |= CICR_GWCC;
354 if (flags & QE_IC_SPREADMODE_GRP_X)
355 temp |= CICR_GXCC;
356 if (flags & QE_IC_SPREADMODE_GRP_Y)
357 temp |= CICR_GYCC;
358 if (flags & QE_IC_SPREADMODE_GRP_Z)
359 temp |= CICR_GZCC;
360 if (flags & QE_IC_SPREADMODE_GRP_RISCA)
361 temp |= CICR_GRTA;
362 if (flags & QE_IC_SPREADMODE_GRP_RISCB)
363 temp |= CICR_GRTB;
364
365 /* choose destination signal for highest priority interrupt */
366 if (flags & QE_IC_HIGH_SIGNAL) {
367 temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
368 high_active = 1;
369 }
370
371 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
372
373 irq_set_handler_data(qe_ic->virq_low, qe_ic);
374 irq_set_chained_handler(qe_ic->virq_low, low_handler);
375
376 if (qe_ic->virq_high != NO_IRQ &&
377 qe_ic->virq_high != qe_ic->virq_low) {
378 irq_set_handler_data(qe_ic->virq_high, qe_ic);
379 irq_set_chained_handler(qe_ic->virq_high, high_handler);
380 }
381}
382
383void qe_ic_set_highest_priority(unsigned int virq, int high)
384{
385 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
386 unsigned int src = virq_to_hw(virq);
387 u32 temp = 0;
388
389 temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
390
391 temp &= ~CICR_HP_MASK;
392 temp |= src << CICR_HP_SHIFT;
393
394 temp &= ~CICR_HPIT_MASK;
395 temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
396
397 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
398}
399
400/* Set Priority level within its group, from 1 to 8 */
401int qe_ic_set_priority(unsigned int virq, unsigned int priority)
402{
403 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
404 unsigned int src = virq_to_hw(virq);
405 u32 temp;
406
407 if (priority > 8 || priority == 0)
408 return -EINVAL;
409 if (src > 127)
410 return -EINVAL;
411 if (qe_ic_info[src].pri_reg == 0)
412 return -EINVAL;
413
414 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
415
416 if (priority < 4) {
417 temp &= ~(0x7 << (32 - priority * 3));
418 temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
419 } else {
420 temp &= ~(0x7 << (24 - priority * 3));
421 temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
422 }
423
424 qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
425
426 return 0;
427}
428
429/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
430int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
431{
432 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
433 unsigned int src = virq_to_hw(virq);
434 u32 temp, control_reg = QEIC_CICNR, shift = 0;
435
436 if (priority > 2 || priority == 0)
437 return -EINVAL;
438
439 switch (qe_ic_info[src].pri_reg) {
440 case QEIC_CIPZCC:
441 shift = CICNR_ZCC1T_SHIFT;
442 break;
443 case QEIC_CIPWCC:
444 shift = CICNR_WCC1T_SHIFT;
445 break;
446 case QEIC_CIPYCC:
447 shift = CICNR_YCC1T_SHIFT;
448 break;
449 case QEIC_CIPXCC:
450 shift = CICNR_XCC1T_SHIFT;
451 break;
452 case QEIC_CIPRTA:
453 shift = CRICR_RTA1T_SHIFT;
454 control_reg = QEIC_CRICR;
455 break;
456 case QEIC_CIPRTB:
457 shift = CRICR_RTB1T_SHIFT;
458 control_reg = QEIC_CRICR;
459 break;
460 default:
461 return -EINVAL;
462 }
463
464 shift += (2 - priority) * 2;
465 temp = qe_ic_read(qe_ic->regs, control_reg);
466 temp &= ~(SIGNAL_MASK << shift);
467 temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
468 qe_ic_write(qe_ic->regs, control_reg, temp);
469
470 return 0;
471}
472
473static struct bus_type qe_ic_subsys = {
474 .name = "qe_ic",
475 .dev_name = "qe_ic",
476};
477
478static struct device device_qe_ic = {
479 .id = 0,
480 .bus = &qe_ic_subsys,
481};
482
483static int __init init_qe_ic_sysfs(void)
484{
485 int rc;
486
487 printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
488
489 rc = subsys_system_register(&qe_ic_subsys, NULL);
490 if (rc) {
491 printk(KERN_ERR "Failed registering qe_ic sys class\n");
492 return -ENODEV;
493 }
494 rc = device_register(&device_qe_ic);
495 if (rc) {
496 printk(KERN_ERR "Failed registering qe_ic sys device\n");
497 return -ENODEV;
498 }
499 return 0;
500}
501
502subsys_initcall(init_qe_ic_sysfs);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
deleted file mode 100644
index efef7ab9b753..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/qe_ic.h
3 *
4 * QUICC ENGINE Interrupt Controller Header
5 *
6 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
7 *
8 * Author: Li Yang <leoli@freescale.com>
9 * Based on code from Shlomi Gridish <gridish@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#ifndef _POWERPC_SYSDEV_QE_IC_H
17#define _POWERPC_SYSDEV_QE_IC_H
18
19#include <asm/qe_ic.h>
20
21#define NR_QE_IC_INTS 64
22
23/* QE IC registers offset */
24#define QEIC_CICR 0x00
25#define QEIC_CIVEC 0x04
26#define QEIC_CRIPNR 0x08
27#define QEIC_CIPNR 0x0c
28#define QEIC_CIPXCC 0x10
29#define QEIC_CIPYCC 0x14
30#define QEIC_CIPWCC 0x18
31#define QEIC_CIPZCC 0x1c
32#define QEIC_CIMR 0x20
33#define QEIC_CRIMR 0x24
34#define QEIC_CICNR 0x28
35#define QEIC_CIPRTA 0x30
36#define QEIC_CIPRTB 0x34
37#define QEIC_CRICR 0x3c
38#define QEIC_CHIVEC 0x60
39
40/* Interrupt priority registers */
41#define CIPCC_SHIFT_PRI0 29
42#define CIPCC_SHIFT_PRI1 26
43#define CIPCC_SHIFT_PRI2 23
44#define CIPCC_SHIFT_PRI3 20
45#define CIPCC_SHIFT_PRI4 13
46#define CIPCC_SHIFT_PRI5 10
47#define CIPCC_SHIFT_PRI6 7
48#define CIPCC_SHIFT_PRI7 4
49
50/* CICR priority modes */
51#define CICR_GWCC 0x00040000
52#define CICR_GXCC 0x00020000
53#define CICR_GYCC 0x00010000
54#define CICR_GZCC 0x00080000
55#define CICR_GRTA 0x00200000
56#define CICR_GRTB 0x00400000
57#define CICR_HPIT_SHIFT 8
58#define CICR_HPIT_MASK 0x00000300
59#define CICR_HP_SHIFT 24
60#define CICR_HP_MASK 0x3f000000
61
62/* CICNR */
63#define CICNR_WCC1T_SHIFT 20
64#define CICNR_ZCC1T_SHIFT 28
65#define CICNR_YCC1T_SHIFT 12
66#define CICNR_XCC1T_SHIFT 4
67
68/* CRICR */
69#define CRICR_RTA1T_SHIFT 20
70#define CRICR_RTB1T_SHIFT 28
71
72/* Signal indicator */
73#define SIGNAL_MASK 3
74#define SIGNAL_HIGH 2
75#define SIGNAL_LOW 0
76
77struct qe_ic {
78 /* Control registers offset */
79 volatile u32 __iomem *regs;
80
81 /* The remapper for this QEIC */
82 struct irq_domain *irqhost;
83
84 /* The "linux" controller struct */
85 struct irq_chip hc_irq;
86
87 /* VIRQ numbers of QE high/low irqs */
88 unsigned int virq_high;
89 unsigned int virq_low;
90};
91
92/*
93 * QE interrupt controller internal structure
94 */
95struct qe_ic_info {
96 u32 mask; /* location of this source at the QIMR register. */
97 u32 mask_reg; /* Mask register offset */
98 u8 pri_code; /* for grouped interrupts sources - the interrupt
99 code as appears at the group priority register */
100 u32 pri_reg; /* Group priority register offset */
101};
102
103#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
deleted file mode 100644
index 7ea0174f6d3d..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ /dev/null
@@ -1,192 +0,0 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/qe_io.c
3 *
4 * QE Parallel I/O ports configuration routines
5 *
6 * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
7 *
8 * Author: Li Yang <LeoLi@freescale.com>
9 * Based on code from Shlomi Gridish <gridish@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17#include <linux/stddef.h>
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/module.h>
21#include <linux/ioport.h>
22
23#include <asm/io.h>
24#include <asm/qe.h>
25#include <asm/prom.h>
26#include <sysdev/fsl_soc.h>
27
28#undef DEBUG
29
30static struct qe_pio_regs __iomem *par_io;
31static int num_par_io_ports = 0;
32
33int par_io_init(struct device_node *np)
34{
35 struct resource res;
36 int ret;
37 const u32 *num_ports;
38
39 /* Map Parallel I/O ports registers */
40 ret = of_address_to_resource(np, 0, &res);
41 if (ret)
42 return ret;
43 par_io = ioremap(res.start, resource_size(&res));
44
45 num_ports = of_get_property(np, "num-ports", NULL);
46 if (num_ports)
47 num_par_io_ports = *num_ports;
48
49 return 0;
50}
51
52void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
53 int open_drain, int assignment, int has_irq)
54{
55 u32 pin_mask1bit;
56 u32 pin_mask2bits;
57 u32 new_mask2bits;
58 u32 tmp_val;
59
60 /* calculate pin location for single and 2 bits information */
61 pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
62
63 /* Set open drain, if required */
64 tmp_val = in_be32(&par_io->cpodr);
65 if (open_drain)
66 out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
67 else
68 out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
69
70 /* define direction */
71 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
72 in_be32(&par_io->cpdir2) :
73 in_be32(&par_io->cpdir1);
74
75 /* get all bits mask for 2 bit per port */
76 pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
77 (pin % (QE_PIO_PINS / 2) + 1) * 2));
78
79 /* Get the final mask we need for the right definition */
80 new_mask2bits = (u32) (dir << (QE_PIO_PINS -
81 (pin % (QE_PIO_PINS / 2) + 1) * 2));
82
83 /* clear and set 2 bits mask */
84 if (pin > (QE_PIO_PINS / 2) - 1) {
85 out_be32(&par_io->cpdir2,
86 ~pin_mask2bits & tmp_val);
87 tmp_val &= ~pin_mask2bits;
88 out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
89 } else {
90 out_be32(&par_io->cpdir1,
91 ~pin_mask2bits & tmp_val);
92 tmp_val &= ~pin_mask2bits;
93 out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
94 }
95 /* define pin assignment */
96 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
97 in_be32(&par_io->cppar2) :
98 in_be32(&par_io->cppar1);
99
100 new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
101 (pin % (QE_PIO_PINS / 2) + 1) * 2));
102 /* clear and set 2 bits mask */
103 if (pin > (QE_PIO_PINS / 2) - 1) {
104 out_be32(&par_io->cppar2,
105 ~pin_mask2bits & tmp_val);
106 tmp_val &= ~pin_mask2bits;
107 out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
108 } else {
109 out_be32(&par_io->cppar1,
110 ~pin_mask2bits & tmp_val);
111 tmp_val &= ~pin_mask2bits;
112 out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
113 }
114}
115EXPORT_SYMBOL(__par_io_config_pin);
116
117int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
118 int assignment, int has_irq)
119{
120 if (!par_io || port >= num_par_io_ports)
121 return -EINVAL;
122
123 __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
124 has_irq);
125 return 0;
126}
127EXPORT_SYMBOL(par_io_config_pin);
128
129int par_io_data_set(u8 port, u8 pin, u8 val)
130{
131 u32 pin_mask, tmp_val;
132
133 if (port >= num_par_io_ports)
134 return -EINVAL;
135 if (pin >= QE_PIO_PINS)
136 return -EINVAL;
137 /* calculate pin location */
138 pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
139
140 tmp_val = in_be32(&par_io[port].cpdata);
141
142 if (val == 0) /* clear */
143 out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
144 else /* set */
145 out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
146
147 return 0;
148}
149EXPORT_SYMBOL(par_io_data_set);
150
151int par_io_of_config(struct device_node *np)
152{
153 struct device_node *pio;
154 const phandle *ph;
155 int pio_map_len;
156 const unsigned int *pio_map;
157
158 if (par_io == NULL) {
159 printk(KERN_ERR "par_io not initialized\n");
160 return -1;
161 }
162
163 ph = of_get_property(np, "pio-handle", NULL);
164 if (ph == NULL) {
165 printk(KERN_ERR "pio-handle not available\n");
166 return -1;
167 }
168
169 pio = of_find_node_by_phandle(*ph);
170
171 pio_map = of_get_property(pio, "pio-map", &pio_map_len);
172 if (pio_map == NULL) {
173 printk(KERN_ERR "pio-map is not set!\n");
174 return -1;
175 }
176 pio_map_len /= sizeof(unsigned int);
177 if ((pio_map_len % 6) != 0) {
178 printk(KERN_ERR "pio-map format wrong!\n");
179 return -1;
180 }
181
182 while (pio_map_len > 0) {
183 par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
184 (int) pio_map[2], (int) pio_map[3],
185 (int) pio_map[4], (int) pio_map[5]);
186 pio_map += 6;
187 pio_map_len -= 6;
188 }
189 of_node_put(pio);
190 return 0;
191}
192EXPORT_SYMBOL(par_io_of_config);
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
deleted file mode 100644
index 621575b7e84a..000000000000
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ /dev/null
@@ -1,212 +0,0 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/ucc.c
3 *
4 * QE UCC API Set - UCC specific routines implementations.
5 *
6 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
7 *
8 * Authors: Shlomi Gridish <gridish@freescale.com>
9 * Li Yang <leoli@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/stddef.h>
19#include <linux/spinlock.h>
20#include <linux/export.h>
21
22#include <asm/irq.h>
23#include <asm/io.h>
24#include <asm/immap_qe.h>
25#include <asm/qe.h>
26#include <asm/ucc.h>
27
28int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
29{
30 unsigned long flags;
31
32 if (ucc_num > UCC_MAX_NUM - 1)
33 return -EINVAL;
34
35 spin_lock_irqsave(&cmxgcr_lock, flags);
36 clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
37 ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
38 spin_unlock_irqrestore(&cmxgcr_lock, flags);
39
40 return 0;
41}
42EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
43
44/* Configure the UCC to either Slow or Fast.
45 *
46 * A given UCC can be figured to support either "slow" devices (e.g. UART)
47 * or "fast" devices (e.g. Ethernet).
48 *
49 * 'ucc_num' is the UCC number, from 0 - 7.
50 *
51 * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
52 * must always be set to 1.
53 */
54int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
55{
56 u8 __iomem *guemr;
57
58 /* The GUEMR register is at the same location for both slow and fast
59 devices, so we just use uccX.slow.guemr. */
60 switch (ucc_num) {
61 case 0: guemr = &qe_immr->ucc1.slow.guemr;
62 break;
63 case 1: guemr = &qe_immr->ucc2.slow.guemr;
64 break;
65 case 2: guemr = &qe_immr->ucc3.slow.guemr;
66 break;
67 case 3: guemr = &qe_immr->ucc4.slow.guemr;
68 break;
69 case 4: guemr = &qe_immr->ucc5.slow.guemr;
70 break;
71 case 5: guemr = &qe_immr->ucc6.slow.guemr;
72 break;
73 case 6: guemr = &qe_immr->ucc7.slow.guemr;
74 break;
75 case 7: guemr = &qe_immr->ucc8.slow.guemr;
76 break;
77 default:
78 return -EINVAL;
79 }
80
81 clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
82 UCC_GUEMR_SET_RESERVED3 | speed);
83
84 return 0;
85}
86
87static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
88 unsigned int *reg_num, unsigned int *shift)
89{
90 unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
91
92 *reg_num = cmx + 1;
93 *cmxucr = &qe_immr->qmx.cmxucr[cmx];
94 *shift = 16 - 8 * (ucc_num & 2);
95}
96
97int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
98{
99 __be32 __iomem *cmxucr;
100 unsigned int reg_num;
101 unsigned int shift;
102
103 /* check if the UCC number is in range. */
104 if (ucc_num > UCC_MAX_NUM - 1)
105 return -EINVAL;
106
107 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
108
109 if (set)
110 setbits32(cmxucr, mask << shift);
111 else
112 clrbits32(cmxucr, mask << shift);
113
114 return 0;
115}
116
117int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
118 enum comm_dir mode)
119{
120 __be32 __iomem *cmxucr;
121 unsigned int reg_num;
122 unsigned int shift;
123 u32 clock_bits = 0;
124
125 /* check if the UCC number is in range. */
126 if (ucc_num > UCC_MAX_NUM - 1)
127 return -EINVAL;
128
129 /* The communications direction must be RX or TX */
130 if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
131 return -EINVAL;
132
133 get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
134
135 switch (reg_num) {
136 case 1:
137 switch (clock) {
138 case QE_BRG1: clock_bits = 1; break;
139 case QE_BRG2: clock_bits = 2; break;
140 case QE_BRG7: clock_bits = 3; break;
141 case QE_BRG8: clock_bits = 4; break;
142 case QE_CLK9: clock_bits = 5; break;
143 case QE_CLK10: clock_bits = 6; break;
144 case QE_CLK11: clock_bits = 7; break;
145 case QE_CLK12: clock_bits = 8; break;
146 case QE_CLK15: clock_bits = 9; break;
147 case QE_CLK16: clock_bits = 10; break;
148 default: break;
149 }
150 break;
151 case 2:
152 switch (clock) {
153 case QE_BRG5: clock_bits = 1; break;
154 case QE_BRG6: clock_bits = 2; break;
155 case QE_BRG7: clock_bits = 3; break;
156 case QE_BRG8: clock_bits = 4; break;
157 case QE_CLK13: clock_bits = 5; break;
158 case QE_CLK14: clock_bits = 6; break;
159 case QE_CLK19: clock_bits = 7; break;
160 case QE_CLK20: clock_bits = 8; break;
161 case QE_CLK15: clock_bits = 9; break;
162 case QE_CLK16: clock_bits = 10; break;
163 default: break;
164 }
165 break;
166 case 3:
167 switch (clock) {
168 case QE_BRG9: clock_bits = 1; break;
169 case QE_BRG10: clock_bits = 2; break;
170 case QE_BRG15: clock_bits = 3; break;
171 case QE_BRG16: clock_bits = 4; break;
172 case QE_CLK3: clock_bits = 5; break;
173 case QE_CLK4: clock_bits = 6; break;
174 case QE_CLK17: clock_bits = 7; break;
175 case QE_CLK18: clock_bits = 8; break;
176 case QE_CLK7: clock_bits = 9; break;
177 case QE_CLK8: clock_bits = 10; break;
178 case QE_CLK16: clock_bits = 11; break;
179 default: break;
180 }
181 break;
182 case 4:
183 switch (clock) {
184 case QE_BRG13: clock_bits = 1; break;
185 case QE_BRG14: clock_bits = 2; break;
186 case QE_BRG15: clock_bits = 3; break;
187 case QE_BRG16: clock_bits = 4; break;
188 case QE_CLK5: clock_bits = 5; break;
189 case QE_CLK6: clock_bits = 6; break;
190 case QE_CLK21: clock_bits = 7; break;
191 case QE_CLK22: clock_bits = 8; break;
192 case QE_CLK7: clock_bits = 9; break;
193 case QE_CLK8: clock_bits = 10; break;
194 case QE_CLK16: clock_bits = 11; break;
195 default: break;
196 }
197 break;
198 default: break;
199 }
200
201 /* Check for invalid combination of clock and UCC number */
202 if (!clock_bits)
203 return -ENOENT;
204
205 if (mode == COMM_DIR_RX)
206 shift += 4;
207
208 clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
209 clock_bits << shift);
210
211 return 0;
212}
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
deleted file mode 100644
index 65aaf15032ae..000000000000
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ /dev/null
@@ -1,363 +0,0 @@
1/*
2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * QE UCC Fast API Set - UCC Fast specific routines implementations.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/stddef.h>
19#include <linux/interrupt.h>
20#include <linux/err.h>
21#include <linux/export.h>
22
23#include <asm/io.h>
24#include <asm/immap_qe.h>
25#include <asm/qe.h>
26
27#include <asm/ucc.h>
28#include <asm/ucc_fast.h>
29
30void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
31{
32 printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
33 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
34
35 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
36 &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
37 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
38 &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
39 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
40 &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
41 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
42 &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
43 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
44 &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
45 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
46 &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
47 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
48 &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
49 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
50 &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
51 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
52 &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
53 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
54 &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
55 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
56 &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
57 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
58 &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
59 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
60 &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
61 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
62 &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
63 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
64 &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
65 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
66 &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
67 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
68 &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
69 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
70 &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
71}
72EXPORT_SYMBOL(ucc_fast_dump_regs);
73
74u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
75{
76 switch (uccf_num) {
77 case 0: return QE_CR_SUBBLOCK_UCCFAST1;
78 case 1: return QE_CR_SUBBLOCK_UCCFAST2;
79 case 2: return QE_CR_SUBBLOCK_UCCFAST3;
80 case 3: return QE_CR_SUBBLOCK_UCCFAST4;
81 case 4: return QE_CR_SUBBLOCK_UCCFAST5;
82 case 5: return QE_CR_SUBBLOCK_UCCFAST6;
83 case 6: return QE_CR_SUBBLOCK_UCCFAST7;
84 case 7: return QE_CR_SUBBLOCK_UCCFAST8;
85 default: return QE_CR_SUBBLOCK_INVALID;
86 }
87}
88EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
89
90void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
91{
92 out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
93}
94EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
95
96void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
97{
98 struct ucc_fast __iomem *uf_regs;
99 u32 gumr;
100
101 uf_regs = uccf->uf_regs;
102
103 /* Enable reception and/or transmission on this UCC. */
104 gumr = in_be32(&uf_regs->gumr);
105 if (mode & COMM_DIR_TX) {
106 gumr |= UCC_FAST_GUMR_ENT;
107 uccf->enabled_tx = 1;
108 }
109 if (mode & COMM_DIR_RX) {
110 gumr |= UCC_FAST_GUMR_ENR;
111 uccf->enabled_rx = 1;
112 }
113 out_be32(&uf_regs->gumr, gumr);
114}
115EXPORT_SYMBOL(ucc_fast_enable);
116
117void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
118{
119 struct ucc_fast __iomem *uf_regs;
120 u32 gumr;
121
122 uf_regs = uccf->uf_regs;
123
124 /* Disable reception and/or transmission on this UCC. */
125 gumr = in_be32(&uf_regs->gumr);
126 if (mode & COMM_DIR_TX) {
127 gumr &= ~UCC_FAST_GUMR_ENT;
128 uccf->enabled_tx = 0;
129 }
130 if (mode & COMM_DIR_RX) {
131 gumr &= ~UCC_FAST_GUMR_ENR;
132 uccf->enabled_rx = 0;
133 }
134 out_be32(&uf_regs->gumr, gumr);
135}
136EXPORT_SYMBOL(ucc_fast_disable);
137
138int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
139{
140 struct ucc_fast_private *uccf;
141 struct ucc_fast __iomem *uf_regs;
142 u32 gumr;
143 int ret;
144
145 if (!uf_info)
146 return -EINVAL;
147
148 /* check if the UCC port number is in range. */
149 if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
150 printk(KERN_ERR "%s: illegal UCC number\n", __func__);
151 return -EINVAL;
152 }
153
154 /* Check that 'max_rx_buf_length' is properly aligned (4). */
155 if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
156 printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
157 __func__);
158 return -EINVAL;
159 }
160
161 /* Validate Virtual Fifo register values */
162 if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
163 printk(KERN_ERR "%s: urfs is too small\n", __func__);
164 return -EINVAL;
165 }
166
167 if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
168 printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
169 return -EINVAL;
170 }
171
172 if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
173 printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
174 return -EINVAL;
175 }
176
177 if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
178 printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
179 return -EINVAL;
180 }
181
182 if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
183 printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
184 return -EINVAL;
185 }
186
187 if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
188 printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
189 return -EINVAL;
190 }
191
192 if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
193 printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
194 return -EINVAL;
195 }
196
197 uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
198 if (!uccf) {
199 printk(KERN_ERR "%s: Cannot allocate private data\n",
200 __func__);
201 return -ENOMEM;
202 }
203
204 /* Fill fast UCC structure */
205 uccf->uf_info = uf_info;
206 /* Set the PHY base address */
207 uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
208 if (uccf->uf_regs == NULL) {
209 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
210 kfree(uccf);
211 return -ENOMEM;
212 }
213
214 uccf->enabled_tx = 0;
215 uccf->enabled_rx = 0;
216 uccf->stopped_tx = 0;
217 uccf->stopped_rx = 0;
218 uf_regs = uccf->uf_regs;
219 uccf->p_ucce = &uf_regs->ucce;
220 uccf->p_uccm = &uf_regs->uccm;
221#ifdef CONFIG_UGETH_TX_ON_DEMAND
222 uccf->p_utodr = &uf_regs->utodr;
223#endif
224#ifdef STATISTICS
225 uccf->tx_frames = 0;
226 uccf->rx_frames = 0;
227 uccf->rx_discarded = 0;
228#endif /* STATISTICS */
229
230 /* Set UCC to fast type */
231 ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
232 if (ret) {
233 printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
234 ucc_fast_free(uccf);
235 return ret;
236 }
237
238 uccf->mrblr = uf_info->max_rx_buf_length;
239
240 /* Set GUMR */
241 /* For more details see the hardware spec. */
242 gumr = uf_info->ttx_trx;
243 if (uf_info->tci)
244 gumr |= UCC_FAST_GUMR_TCI;
245 if (uf_info->cdp)
246 gumr |= UCC_FAST_GUMR_CDP;
247 if (uf_info->ctsp)
248 gumr |= UCC_FAST_GUMR_CTSP;
249 if (uf_info->cds)
250 gumr |= UCC_FAST_GUMR_CDS;
251 if (uf_info->ctss)
252 gumr |= UCC_FAST_GUMR_CTSS;
253 if (uf_info->txsy)
254 gumr |= UCC_FAST_GUMR_TXSY;
255 if (uf_info->rsyn)
256 gumr |= UCC_FAST_GUMR_RSYN;
257 gumr |= uf_info->synl;
258 if (uf_info->rtsm)
259 gumr |= UCC_FAST_GUMR_RTSM;
260 gumr |= uf_info->renc;
261 if (uf_info->revd)
262 gumr |= UCC_FAST_GUMR_REVD;
263 gumr |= uf_info->tenc;
264 gumr |= uf_info->tcrc;
265 gumr |= uf_info->mode;
266 out_be32(&uf_regs->gumr, gumr);
267
268 /* Allocate memory for Tx Virtual Fifo */
269 uccf->ucc_fast_tx_virtual_fifo_base_offset =
270 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
271 if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
272 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
273 __func__);
274 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
275 ucc_fast_free(uccf);
276 return -ENOMEM;
277 }
278
279 /* Allocate memory for Rx Virtual Fifo */
280 uccf->ucc_fast_rx_virtual_fifo_base_offset =
281 qe_muram_alloc(uf_info->urfs +
282 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
283 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
284 if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
285 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
286 __func__);
287 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
288 ucc_fast_free(uccf);
289 return -ENOMEM;
290 }
291
292 /* Set Virtual Fifo registers */
293 out_be16(&uf_regs->urfs, uf_info->urfs);
294 out_be16(&uf_regs->urfet, uf_info->urfet);
295 out_be16(&uf_regs->urfset, uf_info->urfset);
296 out_be16(&uf_regs->utfs, uf_info->utfs);
297 out_be16(&uf_regs->utfet, uf_info->utfet);
298 out_be16(&uf_regs->utftt, uf_info->utftt);
299 /* utfb, urfb are offsets from MURAM base */
300 out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
301 out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
302
303 /* Mux clocking */
304 /* Grant Support */
305 ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
306 /* Breakpoint Support */
307 ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
308 /* Set Tsa or NMSI mode. */
309 ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
310 /* If NMSI (not Tsa), set Tx and Rx clock. */
311 if (!uf_info->tsa) {
312 /* Rx clock routing */
313 if ((uf_info->rx_clock != QE_CLK_NONE) &&
314 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
315 COMM_DIR_RX)) {
316 printk(KERN_ERR "%s: illegal value for RX clock\n",
317 __func__);
318 ucc_fast_free(uccf);
319 return -EINVAL;
320 }
321 /* Tx clock routing */
322 if ((uf_info->tx_clock != QE_CLK_NONE) &&
323 ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
324 COMM_DIR_TX)) {
325 printk(KERN_ERR "%s: illegal value for TX clock\n",
326 __func__);
327 ucc_fast_free(uccf);
328 return -EINVAL;
329 }
330 }
331
332 /* Set interrupt mask register at UCC level. */
333 out_be32(&uf_regs->uccm, uf_info->uccm_mask);
334
335 /* First, clear anything pending at UCC level,
336 * otherwise, old garbage may come through
337 * as soon as the dam is opened. */
338
339 /* Writing '1' clears */
340 out_be32(&uf_regs->ucce, 0xffffffff);
341
342 *uccf_ret = uccf;
343 return 0;
344}
345EXPORT_SYMBOL(ucc_fast_init);
346
347void ucc_fast_free(struct ucc_fast_private * uccf)
348{
349 if (!uccf)
350 return;
351
352 if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
353 qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
354
355 if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
356 qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
357
358 if (uccf->uf_regs)
359 iounmap(uccf->uf_regs);
360
361 kfree(uccf);
362}
363EXPORT_SYMBOL(ucc_fast_free);
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
deleted file mode 100644
index 5f91628209eb..000000000000
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ /dev/null
@@ -1,374 +0,0 @@
1/*
2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * QE UCC Slow API Set - UCC Slow specific routines implementations.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/stddef.h>
19#include <linux/interrupt.h>
20#include <linux/err.h>
21#include <linux/export.h>
22
23#include <asm/io.h>
24#include <asm/immap_qe.h>
25#include <asm/qe.h>
26
27#include <asm/ucc.h>
28#include <asm/ucc_slow.h>
29
30u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
31{
32 switch (uccs_num) {
33 case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
34 case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
35 case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
36 case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
37 case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
38 case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
39 case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
40 case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
41 default: return QE_CR_SUBBLOCK_INVALID;
42 }
43}
44EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
45
46void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
47{
48 struct ucc_slow_info *us_info = uccs->us_info;
49 u32 id;
50
51 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
52 qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
53 QE_CR_PROTOCOL_UNSPECIFIED, 0);
54}
55EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
56
57void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
58{
59 struct ucc_slow_info *us_info = uccs->us_info;
60 u32 id;
61
62 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
63 qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
64}
65EXPORT_SYMBOL(ucc_slow_stop_tx);
66
67void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
68{
69 struct ucc_slow_info *us_info = uccs->us_info;
70 u32 id;
71
72 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
73 qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
74}
75EXPORT_SYMBOL(ucc_slow_restart_tx);
76
77void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
78{
79 struct ucc_slow *us_regs;
80 u32 gumr_l;
81
82 us_regs = uccs->us_regs;
83
84 /* Enable reception and/or transmission on this UCC. */
85 gumr_l = in_be32(&us_regs->gumr_l);
86 if (mode & COMM_DIR_TX) {
87 gumr_l |= UCC_SLOW_GUMR_L_ENT;
88 uccs->enabled_tx = 1;
89 }
90 if (mode & COMM_DIR_RX) {
91 gumr_l |= UCC_SLOW_GUMR_L_ENR;
92 uccs->enabled_rx = 1;
93 }
94 out_be32(&us_regs->gumr_l, gumr_l);
95}
96EXPORT_SYMBOL(ucc_slow_enable);
97
98void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
99{
100 struct ucc_slow *us_regs;
101 u32 gumr_l;
102
103 us_regs = uccs->us_regs;
104
105 /* Disable reception and/or transmission on this UCC. */
106 gumr_l = in_be32(&us_regs->gumr_l);
107 if (mode & COMM_DIR_TX) {
108 gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
109 uccs->enabled_tx = 0;
110 }
111 if (mode & COMM_DIR_RX) {
112 gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
113 uccs->enabled_rx = 0;
114 }
115 out_be32(&us_regs->gumr_l, gumr_l);
116}
117EXPORT_SYMBOL(ucc_slow_disable);
118
119/* Initialize the UCC for Slow operations
120 *
121 * The caller should initialize the following us_info
122 */
123int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
124{
125 struct ucc_slow_private *uccs;
126 u32 i;
127 struct ucc_slow __iomem *us_regs;
128 u32 gumr;
129 struct qe_bd *bd;
130 u32 id;
131 u32 command;
132 int ret = 0;
133
134 if (!us_info)
135 return -EINVAL;
136
137 /* check if the UCC port number is in range. */
138 if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
139 printk(KERN_ERR "%s: illegal UCC number\n", __func__);
140 return -EINVAL;
141 }
142
143 /*
144 * Set mrblr
145 * Check that 'max_rx_buf_length' is properly aligned (4), unless
146 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
147 * case when QE accepts 32 bits at a time.
148 */
149 if ((!us_info->rfw) &&
150 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
151 printk(KERN_ERR "max_rx_buf_length not aligned.\n");
152 return -EINVAL;
153 }
154
155 uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
156 if (!uccs) {
157 printk(KERN_ERR "%s: Cannot allocate private data\n",
158 __func__);
159 return -ENOMEM;
160 }
161
162 /* Fill slow UCC structure */
163 uccs->us_info = us_info;
164 /* Set the PHY base address */
165 uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
166 if (uccs->us_regs == NULL) {
167 printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
168 kfree(uccs);
169 return -ENOMEM;
170 }
171
172 uccs->saved_uccm = 0;
173 uccs->p_rx_frame = 0;
174 us_regs = uccs->us_regs;
175 uccs->p_ucce = (u16 *) & (us_regs->ucce);
176 uccs->p_uccm = (u16 *) & (us_regs->uccm);
177#ifdef STATISTICS
178 uccs->rx_frames = 0;
179 uccs->tx_frames = 0;
180 uccs->rx_discarded = 0;
181#endif /* STATISTICS */
182
183 /* Get PRAM base */
184 uccs->us_pram_offset =
185 qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
186 if (IS_ERR_VALUE(uccs->us_pram_offset)) {
187 printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
188 ucc_slow_free(uccs);
189 return -ENOMEM;
190 }
191 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
192 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
193 uccs->us_pram_offset);
194
195 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
196
197 /* Set UCC to slow type */
198 ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
199 if (ret) {
200 printk(KERN_ERR "%s: cannot set UCC type", __func__);
201 ucc_slow_free(uccs);
202 return ret;
203 }
204
205 out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
206
207 INIT_LIST_HEAD(&uccs->confQ);
208
209 /* Allocate BDs. */
210 uccs->rx_base_offset =
211 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
212 QE_ALIGNMENT_OF_BD);
213 if (IS_ERR_VALUE(uccs->rx_base_offset)) {
214 printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
215 us_info->rx_bd_ring_len);
216 uccs->rx_base_offset = 0;
217 ucc_slow_free(uccs);
218 return -ENOMEM;
219 }
220
221 uccs->tx_base_offset =
222 qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
223 QE_ALIGNMENT_OF_BD);
224 if (IS_ERR_VALUE(uccs->tx_base_offset)) {
225 printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
226 uccs->tx_base_offset = 0;
227 ucc_slow_free(uccs);
228 return -ENOMEM;
229 }
230
231 /* Init Tx bds */
232 bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
233 for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
234 /* clear bd buffer */
235 out_be32(&bd->buf, 0);
236 /* set bd status and length */
237 out_be32((u32 *) bd, 0);
238 bd++;
239 }
240 /* for last BD set Wrap bit */
241 out_be32(&bd->buf, 0);
242 out_be32((u32 *) bd, cpu_to_be32(T_W));
243
244 /* Init Rx bds */
245 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
246 for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
247 /* set bd status and length */
248 out_be32((u32*)bd, 0);
249 /* clear bd buffer */
250 out_be32(&bd->buf, 0);
251 bd++;
252 }
253 /* for last BD set Wrap bit */
254 out_be32((u32*)bd, cpu_to_be32(R_W));
255 out_be32(&bd->buf, 0);
256
257 /* Set GUMR (For more details see the hardware spec.). */
258 /* gumr_h */
259 gumr = us_info->tcrc;
260 if (us_info->cdp)
261 gumr |= UCC_SLOW_GUMR_H_CDP;
262 if (us_info->ctsp)
263 gumr |= UCC_SLOW_GUMR_H_CTSP;
264 if (us_info->cds)
265 gumr |= UCC_SLOW_GUMR_H_CDS;
266 if (us_info->ctss)
267 gumr |= UCC_SLOW_GUMR_H_CTSS;
268 if (us_info->tfl)
269 gumr |= UCC_SLOW_GUMR_H_TFL;
270 if (us_info->rfw)
271 gumr |= UCC_SLOW_GUMR_H_RFW;
272 if (us_info->txsy)
273 gumr |= UCC_SLOW_GUMR_H_TXSY;
274 if (us_info->rtsm)
275 gumr |= UCC_SLOW_GUMR_H_RTSM;
276 out_be32(&us_regs->gumr_h, gumr);
277
278 /* gumr_l */
279 gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
280 us_info->diag | us_info->mode;
281 if (us_info->tci)
282 gumr |= UCC_SLOW_GUMR_L_TCI;
283 if (us_info->rinv)
284 gumr |= UCC_SLOW_GUMR_L_RINV;
285 if (us_info->tinv)
286 gumr |= UCC_SLOW_GUMR_L_TINV;
287 if (us_info->tend)
288 gumr |= UCC_SLOW_GUMR_L_TEND;
289 out_be32(&us_regs->gumr_l, gumr);
290
291 /* Function code registers */
292
293 /* if the data is in cachable memory, the 'global' */
294 /* in the function code should be set. */
295 uccs->us_pram->tbmr = UCC_BMR_BO_BE;
296 uccs->us_pram->rbmr = UCC_BMR_BO_BE;
297
298 /* rbase, tbase are offsets from MURAM base */
299 out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
300 out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
301
302 /* Mux clocking */
303 /* Grant Support */
304 ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
305 /* Breakpoint Support */
306 ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
307 /* Set Tsa or NMSI mode. */
308 ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
309 /* If NMSI (not Tsa), set Tx and Rx clock. */
310 if (!us_info->tsa) {
311 /* Rx clock routing */
312 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
313 COMM_DIR_RX)) {
314 printk(KERN_ERR "%s: illegal value for RX clock\n",
315 __func__);
316 ucc_slow_free(uccs);
317 return -EINVAL;
318 }
319 /* Tx clock routing */
320 if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
321 COMM_DIR_TX)) {
322 printk(KERN_ERR "%s: illegal value for TX clock\n",
323 __func__);
324 ucc_slow_free(uccs);
325 return -EINVAL;
326 }
327 }
328
329 /* Set interrupt mask register at UCC level. */
330 out_be16(&us_regs->uccm, us_info->uccm_mask);
331
332 /* First, clear anything pending at UCC level,
333 * otherwise, old garbage may come through
334 * as soon as the dam is opened. */
335
336 /* Writing '1' clears */
337 out_be16(&us_regs->ucce, 0xffff);
338
339 /* Issue QE Init command */
340 if (us_info->init_tx && us_info->init_rx)
341 command = QE_INIT_TX_RX;
342 else if (us_info->init_tx)
343 command = QE_INIT_TX;
344 else
345 command = QE_INIT_RX; /* We know at least one is TRUE */
346
347 qe_issue_cmd(command, id, us_info->protocol, 0);
348
349 *uccs_ret = uccs;
350 return 0;
351}
352EXPORT_SYMBOL(ucc_slow_init);
353
354void ucc_slow_free(struct ucc_slow_private * uccs)
355{
356 if (!uccs)
357 return;
358
359 if (uccs->rx_base_offset)
360 qe_muram_free(uccs->rx_base_offset);
361
362 if (uccs->tx_base_offset)
363 qe_muram_free(uccs->tx_base_offset);
364
365 if (uccs->us_pram)
366 qe_muram_free(uccs->us_pram_offset);
367
368 if (uccs->us_regs)
369 iounmap(uccs->us_regs);
370
371 kfree(uccs);
372}
373EXPORT_SYMBOL(ucc_slow_free);
374
diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c
deleted file mode 100644
index 27f23bd15eb6..000000000000
--- a/arch/powerpc/sysdev/qe_lib/usb.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * QE USB routines
3 *
4 * Copyright 2006 Freescale Semiconductor, Inc.
5 * Shlomi Gridish <gridish@freescale.com>
6 * Jerry Huang <Chang-Ming.Huang@freescale.com>
7 * Copyright (c) MontaVista Software, Inc. 2008.
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/export.h>
19#include <linux/io.h>
20#include <asm/immap_qe.h>
21#include <asm/qe.h>
22
23int qe_usb_clock_set(enum qe_clock clk, int rate)
24{
25 struct qe_mux __iomem *mux = &qe_immr->qmx;
26 unsigned long flags;
27 u32 val;
28
29 switch (clk) {
30 case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break;
31 case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break;
32 case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break;
33 case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break;
34 case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
35 case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
36 case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
37 case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
38 case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break;
39 case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
40 default:
41 pr_err("%s: requested unknown clock %d\n", __func__, clk);
42 return -EINVAL;
43 }
44
45 if (qe_clock_is_brg(clk))
46 qe_setbrg(clk, rate, 1);
47
48 spin_lock_irqsave(&cmxgcr_lock, flags);
49
50 clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
51
52 spin_unlock_irqrestore(&cmxgcr_lock, flags);
53
54 return 0;
55}
56EXPORT_SYMBOL(qe_usb_clock_set);