path: root/drivers
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-02-27 19:19:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-27 19:19:22 -0500
commit		f1dd6ad599732fc89f36fdd65a2c2cf3c63a8711 (patch)
tree		5092207128e47cba99dc0fe373fff6a36f4cb4b8 /drivers
parent		8d37a371b6869920e6c40c495c68eabba1ef3909 (diff)
parent		e10b234b3c4e255d3300a486c4ac15b43253ac6d (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (141 commits)
  MIPS: Alchemy: defconfig updates
  MIPS: Alchemy: Fix Au1100 ethernet build failure
  MIPS: Alchemy: Repair db1500/bosporus builds
  MIPS: ARC: Cleanup unused definitions from sgialib.h
  MIPS: Cobalt: convert legacy port addresses to GT-64111 bus addresses
  MIPS: Alchemy: use 36bit addresses for PCMCIA resources.
  MIPS: Cobalt: Fix theoretical port aliasing issue
  MIPS: Use ALIGN(x, bytes) instead of __ALIGN_MASK(x, bytes - 1)
  MIPS: Crazy spinlock speed test.
  MIPS: Optimize spinlocks.
  MIPS: Alchemy: devboard PM needs to save CPLD registers.
  MIPS: PowerTV: Eliminate duplicate opcode definition macros
  MIPS: Lemote 2F: Move printks out of port_access_lock.
  MIPS: PNX833x: Convert IRQ controller locks to raw spinlocks.
  MIPS: Octeon: Replace spinlock with raw_spinlocks in dma-octeon.c.
  MIPS: Octeon: Replace rwlocks in irq_chip handlers with raw_spinlocks.
  MIPS: Octeon: Convert octeon_irq_msi_lock to raw spinlock.
  MIPS: Loongson: Remove pointless sample_lock from oprofile code.
  MIPS: SNI: Convert sni_rm200_i8259A_lock to raw spinlock.
  MIPS: i8259: Convert IRQ controller lock to raw spinlock.
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/i2c/busses/Kconfig                 |  10
-rw-r--r--  drivers/i2c/busses/Makefile                |   1
-rw-r--r--  drivers/i2c/busses/i2c-octeon.c            | 651
-rw-r--r--  drivers/ide/au1xxx-ide.c                   |  21
-rw-r--r--  drivers/mmc/host/au1xmmc.c                 |  12
-rw-r--r--  drivers/mtd/maps/Kconfig                   |   6
-rw-r--r--  drivers/mtd/maps/Makefile                  |   1
-rw-r--r--  drivers/mtd/maps/alchemy-flash.c           | 166
-rw-r--r--  drivers/mtd/nand/au1550nd.c                |   4
-rw-r--r--  drivers/net/au1000_eth.c                   | 441
-rw-r--r--  drivers/net/au1000_eth.h                   |   9
-rw-r--r--  drivers/net/cpmac.c                        |  10
-rw-r--r--  drivers/net/irda/au1k_ir.c                 |  14
-rw-r--r--  drivers/pcmcia/Kconfig                     |  21
-rw-r--r--  drivers/pcmcia/Makefile                    |  12
-rw-r--r--  drivers/pcmcia/au1000_db1x00.c             | 305
-rw-r--r--  drivers/pcmcia/au1000_generic.c            |  10
-rw-r--r--  drivers/pcmcia/au1000_generic.h            |  18
-rw-r--r--  drivers/pcmcia/au1000_pb1x00.c             | 119
-rw-r--r--  drivers/pcmcia/au1000_xxs1500.c            | 188
-rw-r--r--  drivers/pcmcia/db1xxx_ss.c                 | 623
-rw-r--r--  drivers/pcmcia/xxs1500_ss.c                | 350
-rw-r--r--  drivers/serial/8250.c                      |  15
-rw-r--r--  drivers/spi/au1550_spi.c                   |   6
-rw-r--r--  drivers/staging/octeon/Makefile            |   1
-rw-r--r--  drivers/staging/octeon/ethernet-defines.h  |  34
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c     |   6
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.h     |   1
-rw-r--r--  drivers/staging/octeon/ethernet-mem.c      | 124
-rw-r--r--  drivers/staging/octeon/ethernet-proc.c     | 144
-rw-r--r--  drivers/staging/octeon/ethernet-proc.h     |  29
-rw-r--r--  drivers/staging/octeon/ethernet-rgmii.c    |  56
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c       | 384
-rw-r--r--  drivers/staging/octeon/ethernet-rx.h       |  25
-rw-r--r--  drivers/staging/octeon/ethernet-sgmii.c    |   1
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c      |   1
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c       | 441
-rw-r--r--  drivers/staging/octeon/ethernet-tx.h       |  29
-rw-r--r--  drivers/staging/octeon/ethernet-util.h     |  13
-rw-r--r--  drivers/staging/octeon/ethernet-xaui.c     |   1
-rw-r--r--  drivers/staging/octeon/ethernet.c          | 254
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h   |  58
-rw-r--r--  drivers/staging/sm7xx/smtc2d.c             |   2
-rw-r--r--  drivers/staging/sm7xx/smtc2d.h             |   2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.c             |   2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.h             |   2
-rw-r--r--  drivers/watchdog/ar7_wdt.c                 |  18
47 files changed, 2682 insertions, 1959 deletions
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5f318ce29770..737f05200b1d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,6 +564,16 @@ config I2C_VERSATILE
 	  This driver can also be built as a module. If so, the module
 	  will be called i2c-versatile.
 
+config I2C_OCTEON
+	tristate "Cavium OCTEON I2C bus support"
+	depends on CPU_CAVIUM_OCTEON
+	help
+	  Say yes if you want to support the I2C serial bus on Cavium
+	  OCTEON SOC.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called i2c-octeon.
+
 comment "External I2C/SMBus adapter drivers"
 
 config I2C_PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 302c551977bb..c2c4ea1908d8 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
 obj-$(CONFIG_I2C_SIMTEC)	+= i2c-simtec.o
 obj-$(CONFIG_I2C_STU300)	+= i2c-stu300.o
 obj-$(CONFIG_I2C_VERSATILE)	+= i2c-versatile.o
+obj-$(CONFIG_I2C_OCTEON)	+= i2c-octeon.o
 
 # External I2C/SMBus adapter drivers
 obj-$(CONFIG_I2C_PARPORT)	+= i2c-parport.o
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
new file mode 100644
index 000000000000..60375504fa49
--- /dev/null
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -0,0 +1,651 @@
1/*
2 * (C) Copyright 2009-2010
3 * Nokia Siemens Networks, michael.lawnick.ext@nsn.com
4 *
5 * Portions Copyright (C) 2010 Cavium Networks, Inc.
6 *
7 * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17#include <linux/init.h>
18
19#include <linux/io.h>
20#include <linux/i2c.h>
21#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/platform_device.h>
24
25#include <asm/octeon/octeon.h>
26
27#define DRV_NAME "i2c-octeon"
28
29/* The previous out-of-tree version was implicitly version 1.0. */
30#define DRV_VERSION "2.0"
31
32/* register offsets */
33#define SW_TWSI 0x00
34#define TWSI_INT 0x10
35
36/* Controller command patterns */
37#define SW_TWSI_V 0x8000000000000000ull
38#define SW_TWSI_EOP_TWSI_DATA 0x0C00000100000000ull
39#define SW_TWSI_EOP_TWSI_CTL 0x0C00000200000000ull
40#define SW_TWSI_EOP_TWSI_CLKCTL 0x0C00000300000000ull
41#define SW_TWSI_EOP_TWSI_STAT 0x0C00000300000000ull
42#define SW_TWSI_EOP_TWSI_RST 0x0C00000700000000ull
43#define SW_TWSI_OP_TWSI_CLK 0x0800000000000000ull
44#define SW_TWSI_R 0x0100000000000000ull
45
46/* Controller command and status bits */
47#define TWSI_CTL_CE 0x80
48#define TWSI_CTL_ENAB 0x40
49#define TWSI_CTL_STA 0x20
50#define TWSI_CTL_STP 0x10
51#define TWSI_CTL_IFLG 0x08
52#define TWSI_CTL_AAK 0x04
53
54/* Some status values */
55#define STAT_START 0x08
56#define STAT_RSTART 0x10
57#define STAT_TXADDR_ACK 0x18
58#define STAT_TXDATA_ACK 0x28
59#define STAT_RXADDR_ACK 0x40
60#define STAT_RXDATA_ACK 0x50
61#define STAT_IDLE 0xF8
62
63struct octeon_i2c {
64 wait_queue_head_t queue;
65 struct i2c_adapter adap;
66 int irq;
67 int twsi_freq;
68 int sys_freq;
69 resource_size_t twsi_phys;
70 void __iomem *twsi_base;
71 resource_size_t regsize;
72 struct device *dev;
73};
74
75/**
76 * octeon_i2c_write_sw - write an I2C core register.
77 * @i2c: The struct octeon_i2c.
78 * @eop_reg: Register selector.
79 * @data: Value to be written.
80 *
81 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
82 */
83static void octeon_i2c_write_sw(struct octeon_i2c *i2c,
84 u64 eop_reg,
85 u8 data)
86{
87 u64 tmp;
88
89 __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI);
90 do {
91 tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
92 } while ((tmp & SW_TWSI_V) != 0);
93}
94
95/**
 96 * octeon_i2c_read_sw - read an I2C core register.
97 * @i2c: The struct octeon_i2c.
98 * @eop_reg: Register selector.
99 *
100 * Returns the data.
101 *
102 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
103 */
104static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
105{
106 u64 tmp;
107
108 __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI);
109 do {
110 tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
111 } while ((tmp & SW_TWSI_V) != 0);
112
113 return tmp & 0xFF;
114}
115
116/**
117 * octeon_i2c_write_int - write the TWSI_INT register
118 * @i2c: The struct octeon_i2c.
119 * @data: Value to be written.
120 */
121static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
122{
123 u64 tmp;
124
125 __raw_writeq(data, i2c->twsi_base + TWSI_INT);
126 tmp = __raw_readq(i2c->twsi_base + TWSI_INT);
127}
128
129/**
130 * octeon_i2c_int_enable - enable the TS interrupt.
131 * @i2c: The struct octeon_i2c.
132 *
 133 * The interrupt will be asserted whenever the SW_TWSI_EOP_TWSI_STAT
 134 * register contains a state other than STAT_IDLE.
135 */
136static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
137{
138 octeon_i2c_write_int(i2c, 0x40);
139}
140
141/**
142 * octeon_i2c_int_disable - disable the TS interrupt.
143 * @i2c: The struct octeon_i2c.
144 */
145static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
146{
147 octeon_i2c_write_int(i2c, 0);
148}
149
150/**
151 * octeon_i2c_unblock - unblock the bus.
152 * @i2c: The struct octeon_i2c.
153 *
154 * If there was a reset while a device was driving 0 to bus,
155 * bus is blocked. We toggle it free manually by some clock
156 * cycles and send a stop.
157 */
158static void octeon_i2c_unblock(struct octeon_i2c *i2c)
159{
160 int i;
161
162 dev_dbg(i2c->dev, "%s\n", __func__);
163 for (i = 0; i < 9; i++) {
164 octeon_i2c_write_int(i2c, 0x0);
165 udelay(5);
166 octeon_i2c_write_int(i2c, 0x200);
167 udelay(5);
168 }
169 octeon_i2c_write_int(i2c, 0x300);
170 udelay(5);
171 octeon_i2c_write_int(i2c, 0x100);
172 udelay(5);
173 octeon_i2c_write_int(i2c, 0x0);
174}
175
176/**
177 * octeon_i2c_isr - the interrupt service routine.
178 * @int: The irq, unused.
179 * @dev_id: Our struct octeon_i2c.
180 */
181static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
182{
183 struct octeon_i2c *i2c = dev_id;
184
185 octeon_i2c_int_disable(i2c);
186 wake_up_interruptible(&i2c->queue);
187
188 return IRQ_HANDLED;
189}
190
191
192static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
193{
194 return (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_CTL) & TWSI_CTL_IFLG) != 0;
195}
196
197/**
198 * octeon_i2c_wait - wait for the IFLG to be set.
199 * @i2c: The struct octeon_i2c.
200 *
201 * Returns 0 on success, otherwise a negative errno.
202 */
203static int octeon_i2c_wait(struct octeon_i2c *i2c)
204{
205 int result;
206
207 octeon_i2c_int_enable(i2c);
208
209 result = wait_event_interruptible_timeout(i2c->queue,
210 octeon_i2c_test_iflg(i2c),
211 i2c->adap.timeout);
212
213 octeon_i2c_int_disable(i2c);
214
215 if (result < 0) {
216 dev_dbg(i2c->dev, "%s: wait interrupted\n", __func__);
217 return result;
218 } else if (result == 0) {
219 dev_dbg(i2c->dev, "%s: timeout\n", __func__);
 220 return -ETIMEDOUT;
221 }
222
223 return 0;
224}
225
226/**
227 * octeon_i2c_start - send START to the bus.
228 * @i2c: The struct octeon_i2c.
229 *
230 * Returns 0 on success, otherwise a negative errno.
231 */
232static int octeon_i2c_start(struct octeon_i2c *i2c)
233{
234 u8 data;
235 int result;
236
237 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
238 TWSI_CTL_ENAB | TWSI_CTL_STA);
239
240 result = octeon_i2c_wait(i2c);
241 if (result) {
242 if (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT) == STAT_IDLE) {
243 /*
 244 * Controller refused to send start flag.
 245 * Maybe a client is holding SDA low - let's
 246 * try to free it.
247 */
248 octeon_i2c_unblock(i2c);
249 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
250 TWSI_CTL_ENAB | TWSI_CTL_STA);
251
252 result = octeon_i2c_wait(i2c);
253 }
254 if (result)
255 return result;
256 }
257
258 data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
259 if ((data != STAT_START) && (data != STAT_RSTART)) {
260 dev_err(i2c->dev, "%s: bad status (0x%x)\n", __func__, data);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267/**
268 * octeon_i2c_stop - send STOP to the bus.
269 * @i2c: The struct octeon_i2c.
270 *
271 * Returns 0 on success, otherwise a negative errno.
272 */
273static int octeon_i2c_stop(struct octeon_i2c *i2c)
274{
275 u8 data;
276
277 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
278 TWSI_CTL_ENAB | TWSI_CTL_STP);
279
280 data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
281
282 if (data != STAT_IDLE) {
283 dev_err(i2c->dev, "%s: bad status(0x%x)\n", __func__, data);
284 return -EIO;
285 }
286 return 0;
287}
288
289/**
290 * octeon_i2c_write - send data to the bus.
291 * @i2c: The struct octeon_i2c.
292 * @target: Target address.
293 * @data: Pointer to the data to be sent.
294 * @length: Length of the data.
295 *
296 * The address is sent over the bus, then the data.
297 *
298 * Returns 0 on success, otherwise a negative errno.
299 */
300static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
301 const u8 *data, int length)
302{
303 int i, result;
304 u8 tmp;
305
306 result = octeon_i2c_start(i2c);
307 if (result)
308 return result;
309
310 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, target << 1);
311 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
312
313 result = octeon_i2c_wait(i2c);
314 if (result)
315 return result;
316
317 for (i = 0; i < length; i++) {
318 tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
319 if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) {
320 dev_err(i2c->dev,
321 "%s: bad status before write (0x%x)\n",
322 __func__, tmp);
323 return -EIO;
324 }
325
326 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, data[i]);
327 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
328
329 result = octeon_i2c_wait(i2c);
330 if (result)
331 return result;
332 }
333
334 return 0;
335}
336
337/**
338 * octeon_i2c_read - receive data from the bus.
339 * @i2c: The struct octeon_i2c.
340 * @target: Target address.
 341 * @data: Pointer to the location to store the data.
342 * @length: Length of the data.
343 *
344 * The address is sent over the bus, then the data is read.
345 *
346 * Returns 0 on success, otherwise a negative errno.
347 */
348static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
349 u8 *data, int length)
350{
351 int i, result;
352 u8 tmp;
353
354 if (length < 1)
355 return -EINVAL;
356
357 result = octeon_i2c_start(i2c);
358 if (result)
359 return result;
360
361 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target<<1) | 1);
362 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
363
364 result = octeon_i2c_wait(i2c);
365 if (result)
366 return result;
367
368 for (i = 0; i < length; i++) {
369 tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
370 if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) {
371 dev_err(i2c->dev,
372 "%s: bad status before read (0x%x)\n",
373 __func__, tmp);
374 return -EIO;
375 }
376
377 if (i+1 < length)
378 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
379 TWSI_CTL_ENAB | TWSI_CTL_AAK);
380 else
381 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
382 TWSI_CTL_ENAB);
383
384 result = octeon_i2c_wait(i2c);
385 if (result)
386 return result;
387
388 data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA);
389 }
390 return 0;
391}
392
393/**
394 * octeon_i2c_xfer - The driver's master_xfer function.
395 * @adap: Pointer to the i2c_adapter structure.
396 * @msgs: Pointer to the messages to be processed.
397 * @num: Length of the MSGS array.
398 *
399 * Returns the number of messages processed, or a negative errno on
400 * failure.
401 */
402static int octeon_i2c_xfer(struct i2c_adapter *adap,
403 struct i2c_msg *msgs,
404 int num)
405{
406 struct i2c_msg *pmsg;
407 int i;
408 int ret = 0;
409 struct octeon_i2c *i2c = i2c_get_adapdata(adap);
410
411 for (i = 0; ret == 0 && i < num; i++) {
412 pmsg = &msgs[i];
413 dev_dbg(i2c->dev,
414 "Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n",
415 pmsg->flags & I2C_M_RD ? "read" : "write",
416 pmsg->len, pmsg->addr, i + 1, num);
417 if (pmsg->flags & I2C_M_RD)
418 ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
419 pmsg->len);
420 else
421 ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
422 pmsg->len);
423 }
424 octeon_i2c_stop(i2c);
425
426 return (ret != 0) ? ret : num;
427}
428
429static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
430{
431 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
432}
433
434static const struct i2c_algorithm octeon_i2c_algo = {
435 .master_xfer = octeon_i2c_xfer,
436 .functionality = octeon_i2c_functionality,
437};
438
439static struct i2c_adapter octeon_i2c_ops = {
440 .owner = THIS_MODULE,
441 .name = "OCTEON adapter",
442 .algo = &octeon_i2c_algo,
443 .timeout = 2,
444};
445
446/**
447 * octeon_i2c_setclock - Calculate and set clock divisors.
448 */
449static int __init octeon_i2c_setclock(struct octeon_i2c *i2c)
450{
451 int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
452 int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
453
454 for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
455 /*
456 * An mdiv value of less than 2 seems to not work well
457 * with ds1337 RTCs, so we constrain it to larger
458 * values.
459 */
460 for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
461 /*
462 * For given ndiv and mdiv values check the
463 * two closest thp values.
464 */
465 tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
466 tclk *= (1 << ndiv_idx);
467 thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
468 for (inc = 0; inc <= 1; inc++) {
469 thp_idx = thp_base + inc;
470 if (thp_idx < 5 || thp_idx > 0xff)
471 continue;
472
473 foscl = i2c->sys_freq / (2 * (thp_idx + 1));
474 foscl = foscl / (1 << ndiv_idx);
475 foscl = foscl / (mdiv_idx + 1) / 10;
476 diff = abs(foscl - i2c->twsi_freq);
477 if (diff < delta_hz) {
478 delta_hz = diff;
479 thp = thp_idx;
480 mdiv = mdiv_idx;
481 ndiv = ndiv_idx;
482 }
483 }
484 }
485 }
486 octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp);
487 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
488
489 return 0;
490}
491
492static int __init octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
493{
494 u8 status;
495 int tries;
496
497 /* disable high level controller, enable bus access */
498 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
499
500 /* reset controller */
501 octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_RST, 0);
502
503 for (tries = 10; tries; tries--) {
504 udelay(1);
505 status = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
506 if (status == STAT_IDLE)
507 return 0;
508 }
509 dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n", __func__, status);
510 return -EIO;
511}
512
513static int __devinit octeon_i2c_probe(struct platform_device *pdev)
514{
515 int irq, result = 0;
516 struct octeon_i2c *i2c;
517 struct octeon_i2c_data *i2c_data;
518 struct resource *res_mem;
519
520 /* All adaptors have an irq. */
521 irq = platform_get_irq(pdev, 0);
522 if (irq < 0)
523 return irq;
524
525 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
526 if (!i2c) {
527 dev_err(&pdev->dev, "kzalloc failed\n");
528 result = -ENOMEM;
529 goto out;
530 }
531 i2c->dev = &pdev->dev;
532 i2c_data = pdev->dev.platform_data;
533
534 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
535
536 if (res_mem == NULL) {
537 dev_err(i2c->dev, "found no memory resource\n");
538 result = -ENXIO;
539 goto fail_region;
540 }
541
542 if (i2c_data == NULL) {
543 dev_err(i2c->dev, "no I2C frequency data\n");
544 result = -ENXIO;
545 goto fail_region;
546 }
547
548 i2c->twsi_phys = res_mem->start;
549 i2c->regsize = resource_size(res_mem);
550 i2c->twsi_freq = i2c_data->i2c_freq;
551 i2c->sys_freq = i2c_data->sys_freq;
552
553 if (!request_mem_region(i2c->twsi_phys, i2c->regsize, res_mem->name)) {
554 dev_err(i2c->dev, "request_mem_region failed\n");
555 goto fail_region;
556 }
557 i2c->twsi_base = ioremap(i2c->twsi_phys, i2c->regsize);
558
559 init_waitqueue_head(&i2c->queue);
560
561 i2c->irq = irq;
562
563 result = request_irq(i2c->irq, octeon_i2c_isr, 0, DRV_NAME, i2c);
564 if (result < 0) {
565 dev_err(i2c->dev, "failed to attach interrupt\n");
566 goto fail_irq;
567 }
568
569 result = octeon_i2c_initlowlevel(i2c);
570 if (result) {
571 dev_err(i2c->dev, "init low level failed\n");
572 goto fail_add;
573 }
574
575 result = octeon_i2c_setclock(i2c);
576 if (result) {
577 dev_err(i2c->dev, "clock init failed\n");
578 goto fail_add;
579 }
580
581 i2c->adap = octeon_i2c_ops;
582 i2c->adap.dev.parent = &pdev->dev;
583 i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
584 i2c_set_adapdata(&i2c->adap, i2c);
585 platform_set_drvdata(pdev, i2c);
586
587 result = i2c_add_numbered_adapter(&i2c->adap);
588 if (result < 0) {
589 dev_err(i2c->dev, "failed to add adapter\n");
590 goto fail_add;
591 }
592
593 dev_info(i2c->dev, "version %s\n", DRV_VERSION);
594
595 return result;
596
597fail_add:
598 platform_set_drvdata(pdev, NULL);
599 free_irq(i2c->irq, i2c);
600fail_irq:
601 iounmap(i2c->twsi_base);
602 release_mem_region(i2c->twsi_phys, i2c->regsize);
603fail_region:
604 kfree(i2c);
605out:
606 return result;
607};
608
609static int __devexit octeon_i2c_remove(struct platform_device *pdev)
610{
611 struct octeon_i2c *i2c = platform_get_drvdata(pdev);
612
613 i2c_del_adapter(&i2c->adap);
614 platform_set_drvdata(pdev, NULL);
615 free_irq(i2c->irq, i2c);
616 iounmap(i2c->twsi_base);
617 release_mem_region(i2c->twsi_phys, i2c->regsize);
618 kfree(i2c);
619 return 0;
620};
621
622static struct platform_driver octeon_i2c_driver = {
623 .probe = octeon_i2c_probe,
624 .remove = __devexit_p(octeon_i2c_remove),
625 .driver = {
626 .owner = THIS_MODULE,
627 .name = DRV_NAME,
628 },
629};
630
631static int __init octeon_i2c_init(void)
632{
633 int rv;
634
635 rv = platform_driver_register(&octeon_i2c_driver);
636 return rv;
637}
638
639static void __exit octeon_i2c_exit(void)
640{
641 platform_driver_unregister(&octeon_i2c_driver);
642}
643
644MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
645MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
646MODULE_LICENSE("GPL");
647MODULE_VERSION(DRV_VERSION);
648MODULE_ALIAS("platform:" DRV_NAME);
649
650module_init(octeon_i2c_init);
651module_exit(octeon_i2c_exit);
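
The adapter binds to a platform device named "i2c-octeon" (DRV_NAME) and takes its bus and input clock rates from a struct octeon_i2c_data, whose i2c_freq and sys_freq fields are read in octeon_i2c_probe(). A minimal board-side registration sketch follows; the CSR base address, region size and IRQ number are placeholders, not the real OCTEON values, and the platform-data struct is assumed to come from <asm/octeon/octeon.h>.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/octeon/octeon.h>		/* struct octeon_i2c_data (assumed) */

static struct resource octeon_i2c_resources[] = {
	{
		.start	= 0x1180000001000ull,	/* placeholder TWSI CSR base */
		.end	= 0x1180000001000ull + 0x200 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 45,			/* placeholder TWSI IRQ */
		.end	= 45,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct octeon_i2c_data octeon_i2c_data = {
	.sys_freq	= 800000000,	/* input clock feeding the divider logic */
	.i2c_freq	= 100000,	/* desired SCL rate in Hz */
};

static struct platform_device octeon_i2c_device = {
	.name			= "i2c-octeon",
	.id			= 0,
	.num_resources		= ARRAY_SIZE(octeon_i2c_resources),
	.resource		= octeon_i2c_resources,
	.dev.platform_data	= &octeon_i2c_data,
};

/* SoC setup code would then call platform_device_register(&octeon_i2c_device). */
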
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 87cef0c440ad..349a67bf1a36 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -56,8 +56,8 @@ static inline void auide_insw(unsigned long port, void *addr, u32 count)
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
 
-	if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
-			   DDMA_FLAGS_NOIE)) {
+	if (!au1xxx_dbdma_put_dest(ahwif->rx_chan, virt_to_phys(addr),
+				   count << 1, DDMA_FLAGS_NOIE)) {
 		printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
 		return;
 	}
@@ -74,8 +74,8 @@ static inline void auide_outsw(unsigned long port, void *addr, u32 count)
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
 
-	if(!put_source_flags(ahwif->tx_chan, (void*)addr,
-			     count << 1, DDMA_FLAGS_NOIE)) {
+	if (!au1xxx_dbdma_put_source(ahwif->tx_chan, virt_to_phys(addr),
+				     count << 1, DDMA_FLAGS_NOIE)) {
 		printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
 		return;
 	}
@@ -246,17 +246,14 @@ static int auide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
 		flags = DDMA_FLAGS_NOIE;
 
 		if (iswrite) {
-			if(!put_source_flags(ahwif->tx_chan,
-					     (void*) sg_virt(sg),
-					     tc, flags)) {
+			if (!au1xxx_dbdma_put_source(ahwif->tx_chan,
+					sg_phys(sg), tc, flags)) {
 				printk(KERN_ERR "%s failed %d\n",
 				       __func__, __LINE__);
 			}
-		} else
-		{
-			if(!put_dest_flags(ahwif->rx_chan,
-					   (void*) sg_virt(sg),
-					   tc, flags)) {
+		} else {
+			if (!au1xxx_dbdma_put_dest(ahwif->rx_chan,
+					sg_phys(sg), tc, flags)) {
 				printk(KERN_ERR "%s failed %d\n",
 				       __func__, __LINE__);
 			}
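
The hunks above switch from the old put_source_flags()/put_dest_flags() helpers, which took kernel virtual addresses, to au1xxx_dbdma_put_source()/au1xxx_dbdma_put_dest(), which take physical addresses (via virt_to_phys() or sg_phys()) plus a byte count and flags. A rough sketch of the new calling convention for a single receive buffer; the channel handle and buffer are placeholders and the exact prototypes are assumed to live in au1xxx_dbdma.h.

#include <asm/io.h>				/* virt_to_phys() */
#include <asm/mach-au1x00/au1xxx_dbdma.h>	/* DBDMA API (assumed header path) */

/* queue one buffer on an Alchemy DBDMA receive channel */
static int queue_rx_buffer(u32 chan, void *buf, int nbytes)
{
	/* the put helpers return 0 when no free descriptor is available */
	if (!au1xxx_dbdma_put_dest(chan, virt_to_phys(buf),
				   nbytes, DDMA_FLAGS_NOIE))
		return -EBUSY;

	au1xxx_dbdma_start(chan);	/* kick the channel */
	return 0;
}
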
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index d3f55615c099..57b21198828f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -650,11 +650,11 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,
 		flags = DDMA_FLAGS_IE;
 
 		if (host->flags & HOST_F_XMIT) {
-			ret = au1xxx_dbdma_put_source_flags(channel,
-				(void *)sg_virt(sg), len, flags);
+			ret = au1xxx_dbdma_put_source(channel,
+				sg_phys(sg), len, flags);
 		} else {
-			ret = au1xxx_dbdma_put_dest_flags(channel,
-				(void *)sg_virt(sg), len, flags);
+			ret = au1xxx_dbdma_put_dest(channel,
+				sg_phys(sg), len, flags);
 		}
 
 		if (!ret)
@@ -1017,6 +1017,10 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
 	} else
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
+	/* platform may not be able to use all advertised caps */
+	if (host->platdata)
+		mmc->caps &= ~(host->platdata->mask_host_caps);
+
 	tasklet_init(&host->data_task, au1xmmc_tasklet_data,
 		     (unsigned long)host);
 
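
The second hunk lets board code strip host capabilities that the wiring cannot actually provide, via a mask_host_caps field in the MMC platform data. A hedged sketch; only the mask_host_caps field is taken from the hunk above, while the au1xmmc_platform_data struct name and its header are assumptions.

#include <linux/mmc/host.h>
#include <asm/mach-au1x00/au1100_mmc.h>	/* struct au1xmmc_platform_data (assumed) */

/* e.g. a board that routes only 4 data lines masks off the 8-bit capability */
static struct au1xmmc_platform_data db1200_mmc_platdata = {
	.mask_host_caps	= MMC_CAP_8_BIT_DATA,
};
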
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 2de0cc823d60..2bb03a8b9ef1 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -251,12 +251,6 @@ config MTD_NETtel
 	help
 	  Support for flash chips on NETtel/SecureEdge/SnapGear boards.
 
-config MTD_ALCHEMY
-	tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support"
-	depends on SOC_AU1X00 && MTD_PARTITIONS && MTD_CFI
-	help
-	  Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
-
 config MTD_DILNETPC
 	tristate "CFI Flash device mapped on DIL/Net PC"
 	depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ce315214ff2b..a44919f3f3d2 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
 obj-$(CONFIG_MTD_DBOX2)		+= dbox2-flash.o
 obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
 obj-$(CONFIG_MTD_PCI)		+= pci.o
-obj-$(CONFIG_MTD_ALCHEMY)	+= alchemy-flash.o
 obj-$(CONFIG_MTD_AUTCPU12)	+= autcpu12-nvram.o
 obj-$(CONFIG_MTD_EDB7312)	+= edb7312.o
 obj-$(CONFIG_MTD_IMPA7)		+= impa7.o
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
deleted file mode 100644
index 845ad4f2a542..000000000000
--- a/drivers/mtd/maps/alchemy-flash.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * Flash memory access on AMD Alchemy evaluation boards
3 *
4 * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com>
5 */
6
7#include <linux/init.h>
8#include <linux/module.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/map.h>
14#include <linux/mtd/partitions.h>
15
16#include <asm/io.h>
17
18#ifdef CONFIG_MIPS_PB1000
19#define BOARD_MAP_NAME "Pb1000 Flash"
20#define BOARD_FLASH_SIZE 0x00800000 /* 8MB */
21#define BOARD_FLASH_WIDTH 4 /* 32-bits */
22#endif
23
24#ifdef CONFIG_MIPS_PB1500
25#define BOARD_MAP_NAME "Pb1500 Flash"
26#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
27#define BOARD_FLASH_WIDTH 4 /* 32-bits */
28#endif
29
30#ifdef CONFIG_MIPS_PB1100
31#define BOARD_MAP_NAME "Pb1100 Flash"
32#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
33#define BOARD_FLASH_WIDTH 4 /* 32-bits */
34#endif
35
36#ifdef CONFIG_MIPS_PB1550
37#define BOARD_MAP_NAME "Pb1550 Flash"
38#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
39#define BOARD_FLASH_WIDTH 4 /* 32-bits */
40#endif
41
42#ifdef CONFIG_MIPS_PB1200
43#define BOARD_MAP_NAME "Pb1200 Flash"
44#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
45#define BOARD_FLASH_WIDTH 2 /* 16-bits */
46#endif
47
48#ifdef CONFIG_MIPS_DB1000
49#define BOARD_MAP_NAME "Db1000 Flash"
50#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
51#define BOARD_FLASH_WIDTH 4 /* 32-bits */
52#endif
53
54#ifdef CONFIG_MIPS_DB1500
55#define BOARD_MAP_NAME "Db1500 Flash"
56#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
57#define BOARD_FLASH_WIDTH 4 /* 32-bits */
58#endif
59
60#ifdef CONFIG_MIPS_DB1100
61#define BOARD_MAP_NAME "Db1100 Flash"
62#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
63#define BOARD_FLASH_WIDTH 4 /* 32-bits */
64#endif
65
66#ifdef CONFIG_MIPS_DB1550
67#define BOARD_MAP_NAME "Db1550 Flash"
68#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
69#define BOARD_FLASH_WIDTH 4 /* 32-bits */
70#endif
71
72#ifdef CONFIG_MIPS_DB1200
73#define BOARD_MAP_NAME "Db1200 Flash"
74#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
75#define BOARD_FLASH_WIDTH 2 /* 16-bits */
76#endif
77
78#ifdef CONFIG_MIPS_BOSPORUS
79#define BOARD_MAP_NAME "Bosporus Flash"
80#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
81#define BOARD_FLASH_WIDTH 2 /* 16-bits */
82#endif
83
84#ifdef CONFIG_MIPS_MIRAGE
85#define BOARD_MAP_NAME "Mirage Flash"
86#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
87#define BOARD_FLASH_WIDTH 4 /* 32-bits */
88#define USE_LOCAL_ACCESSORS /* why? */
89#endif
90
91static struct map_info alchemy_map = {
92 .name = BOARD_MAP_NAME,
93};
94
95static struct mtd_partition alchemy_partitions[] = {
96 {
97 .name = "User FS",
98 .size = BOARD_FLASH_SIZE - 0x00400000,
99 .offset = 0x0000000
100 },{
101 .name = "YAMON",
102 .size = 0x0100000,
103 .offset = MTDPART_OFS_APPEND,
104 .mask_flags = MTD_WRITEABLE
105 },{
106 .name = "raw kernel",
107 .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
108 .offset = MTDPART_OFS_APPEND,
109 }
110};
111
112static struct mtd_info *mymtd;
113
114static int __init alchemy_mtd_init(void)
115{
116 struct mtd_partition *parts;
117 int nb_parts = 0;
118 unsigned long window_addr;
119 unsigned long window_size;
120
121 /* Default flash buswidth */
122 alchemy_map.bankwidth = BOARD_FLASH_WIDTH;
123
124 window_addr = 0x20000000 - BOARD_FLASH_SIZE;
125 window_size = BOARD_FLASH_SIZE;
126
127 /*
128 * Static partition definition selection
129 */
130 parts = alchemy_partitions;
131 nb_parts = ARRAY_SIZE(alchemy_partitions);
132 alchemy_map.size = window_size;
133
134 /*
135 * Now let's probe for the actual flash. Do it here since
136 * specific machine settings might have been set above.
137 */
138 printk(KERN_NOTICE BOARD_MAP_NAME ": probing %d-bit flash bus\n",
139 alchemy_map.bankwidth*8);
140 alchemy_map.virt = ioremap(window_addr, window_size);
141 mymtd = do_map_probe("cfi_probe", &alchemy_map);
142 if (!mymtd) {
143 iounmap(alchemy_map.virt);
144 return -ENXIO;
145 }
146 mymtd->owner = THIS_MODULE;
147
148 add_mtd_partitions(mymtd, parts, nb_parts);
149 return 0;
150}
151
152static void __exit alchemy_mtd_cleanup(void)
153{
154 if (mymtd) {
155 del_mtd_partitions(mymtd);
156 map_destroy(mymtd);
157 iounmap(alchemy_map.virt);
158 }
159}
160
161module_init(alchemy_mtd_init);
162module_exit(alchemy_mtd_cleanup);
163
164MODULE_AUTHOR("Embedded Alley Solutions, Inc");
165MODULE_DESCRIPTION(BOARD_MAP_NAME " MTD driver");
166MODULE_LICENSE("GPL");
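
With the static alchemy-flash map driver removed, the boards presumably describe their NOR window through the generic physmap-flash driver instead; a sketch under that assumption, reusing the deleted driver's Db1x00-style window and partition layout purely as placeholder values.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>

static struct mtd_partition db1x00_flash_parts[] = {
	{
		.name	= "User FS",
		.size	= 0x02000000 - 0x00400000,
		.offset	= 0,
	}, {
		.name	= "YAMON",
		.size	= 0x00100000,
		.offset	= MTDPART_OFS_APPEND,
		.mask_flags = MTD_WRITEABLE,
	}, {
		.name	= "raw kernel",
		.size	= 0x00300000 - 0x00040000,	/* last 256KB is yamon env */
		.offset	= MTDPART_OFS_APPEND,
	},
};

static struct physmap_flash_data db1x00_flash_data = {
	.width		= 4,			/* 32-bit flash bus */
	.parts		= db1x00_flash_parts,
	.nr_parts	= ARRAY_SIZE(db1x00_flash_parts),
};

static struct resource db1x00_flash_resource = {
	.start	= 0x20000000 - 0x02000000,	/* window ends at the 512MB boundary, as before */
	.end	= 0x20000000 - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device db1x00_flash_device = {
	.name			= "physmap-flash",
	.id			= 0,
	.resource		= &db1x00_flash_resource,
	.num_resources		= 1,
	.dev.platform_data	= &db1x00_flash_data,
};
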
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 92c334ff4508..43d46e424040 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -19,6 +19,7 @@
 #include <asm/io.h>
 
 #include <asm/mach-au1x00/au1xxx.h>
+#include <asm/mach-db1x00/bcsr.h>
 
 /*
  * MTD structure for NAND controller
@@ -475,7 +476,8 @@ static int __init au1xxx_nand_init(void)
 	/* set gpio206 high */
 	au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR);
 
-	boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr->status >> 6) & 0x1);
+	boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
+
 	switch (boot_swapboot) {
 	case 0:
 	case 2:
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6bac04603a88..6e5a68ecde09 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -55,6 +55,7 @@
55#include <linux/delay.h> 55#include <linux/delay.h>
56#include <linux/crc32.h> 56#include <linux/crc32.h>
57#include <linux/phy.h> 57#include <linux/phy.h>
58#include <linux/platform_device.h>
58 59
59#include <asm/cpu.h> 60#include <asm/cpu.h>
60#include <asm/mipsregs.h> 61#include <asm/mipsregs.h>
@@ -63,6 +64,7 @@
63#include <asm/processor.h> 64#include <asm/processor.h>
64 65
65#include <au1000.h> 66#include <au1000.h>
67#include <au1xxx_eth.h>
66#include <prom.h> 68#include <prom.h>
67 69
68#include "au1000_eth.h" 70#include "au1000_eth.h"
@@ -112,15 +114,15 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
112 * 114 *
113 * PHY detection algorithm 115 * PHY detection algorithm
114 * 116 *
115 * If AU1XXX_PHY_STATIC_CONFIG is undefined, the PHY setup is 117 * If phy_static_config is undefined, the PHY setup is
116 * autodetected: 118 * autodetected:
117 * 119 *
118 * mii_probe() first searches the current MAC's MII bus for a PHY, 120 * mii_probe() first searches the current MAC's MII bus for a PHY,
119 * selecting the first (or last, if AU1XXX_PHY_SEARCH_HIGHEST_ADDR is 121 * selecting the first (or last, if phy_search_highest_addr is
120 * defined) PHY address not already claimed by another netdev. 122 * defined) PHY address not already claimed by another netdev.
121 * 123 *
122 * If nothing was found that way when searching for the 2nd ethernet 124 * If nothing was found that way when searching for the 2nd ethernet
123 * controller's PHY and AU1XXX_PHY1_SEARCH_ON_MAC0 is defined, then 125 * controller's PHY and phy1_search_mac0 is defined, then
124 * the first MII bus is searched as well for an unclaimed PHY; this is 126 * the first MII bus is searched as well for an unclaimed PHY; this is
125 * needed in case of a dual-PHY accessible only through the MAC0's MII 127 * needed in case of a dual-PHY accessible only through the MAC0's MII
126 * bus. 128 * bus.
@@ -129,9 +131,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
129 * controller is not registered to the network subsystem. 131 * controller is not registered to the network subsystem.
130 */ 132 */
131 133
132/* autodetection defaults */ 134/* autodetection defaults: phy1_search_mac0 */
133#undef AU1XXX_PHY_SEARCH_HIGHEST_ADDR
134#define AU1XXX_PHY1_SEARCH_ON_MAC0
135 135
136/* static PHY setup 136/* static PHY setup
137 * 137 *
@@ -148,29 +148,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
148 * specific irq-map 148 * specific irq-map
149 */ 149 */
150 150
151#if defined(CONFIG_MIPS_BOSPORUS)
152/*
153 * Micrel/Kendin 5 port switch attached to MAC0,
154 * MAC0 is associated with PHY address 5 (== WAN port)
155 * MAC1 is not associated with any PHY, since it's connected directly
156 * to the switch.
157 * no interrupts are used
158 */
159# define AU1XXX_PHY_STATIC_CONFIG
160
161# define AU1XXX_PHY0_ADDR 5
162# define AU1XXX_PHY0_BUSID 0
163# undef AU1XXX_PHY0_IRQ
164
165# undef AU1XXX_PHY1_ADDR
166# undef AU1XXX_PHY1_BUSID
167# undef AU1XXX_PHY1_IRQ
168#endif
169
170#if defined(AU1XXX_PHY0_BUSID) && (AU1XXX_PHY0_BUSID > 0)
171# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
172#endif
173
174static void enable_mac(struct net_device *dev, int force_reset) 151static void enable_mac(struct net_device *dev, int force_reset)
175{ 152{
176 unsigned long flags; 153 unsigned long flags;
@@ -390,67 +367,55 @@ static int mii_probe (struct net_device *dev)
390 struct au1000_private *const aup = netdev_priv(dev); 367 struct au1000_private *const aup = netdev_priv(dev);
391 struct phy_device *phydev = NULL; 368 struct phy_device *phydev = NULL;
392 369
393#if defined(AU1XXX_PHY_STATIC_CONFIG) 370 if (aup->phy_static_config) {
394 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); 371 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
395 372
396 if(aup->mac_id == 0) { /* get PHY0 */ 373 if (aup->phy_addr)
397# if defined(AU1XXX_PHY0_ADDR) 374 phydev = aup->mii_bus->phy_map[aup->phy_addr];
398 phydev = au_macs[AU1XXX_PHY0_BUSID]->mii_bus->phy_map[AU1XXX_PHY0_ADDR]; 375 else
399# else 376 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
400 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n", 377 dev->name);
401 dev->name);
402 return 0;
403# endif /* defined(AU1XXX_PHY0_ADDR) */
404 } else if (aup->mac_id == 1) { /* get PHY1 */
405# if defined(AU1XXX_PHY1_ADDR)
406 phydev = au_macs[AU1XXX_PHY1_BUSID]->mii_bus->phy_map[AU1XXX_PHY1_ADDR];
407# else
408 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
409 dev->name);
410 return 0; 378 return 0;
411# endif /* defined(AU1XXX_PHY1_ADDR) */ 379 } else {
412 } 380 int phy_addr;
413 381
414#else /* defined(AU1XXX_PHY_STATIC_CONFIG) */ 382 /* find the first (lowest address) PHY on the current MAC's MII bus */
415 int phy_addr; 383 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
416 384 if (aup->mii_bus->phy_map[phy_addr]) {
417 /* find the first (lowest address) PHY on the current MAC's MII bus */ 385 phydev = aup->mii_bus->phy_map[phy_addr];
418 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) 386 if (!aup->phy_search_highest_addr)
419 if (aup->mii_bus->phy_map[phy_addr]) { 387 break; /* break out with first one found */
420 phydev = aup->mii_bus->phy_map[phy_addr]; 388 }
421# if !defined(AU1XXX_PHY_SEARCH_HIGHEST_ADDR)
422 break; /* break out with first one found */
423# endif
424 }
425 389
426# if defined(AU1XXX_PHY1_SEARCH_ON_MAC0) 390 if (aup->phy1_search_mac0) {
427 /* try harder to find a PHY */ 391 /* try harder to find a PHY */
428 if (!phydev && (aup->mac_id == 1)) { 392 if (!phydev && (aup->mac_id == 1)) {
429 /* no PHY found, maybe we have a dual PHY? */ 393 /* no PHY found, maybe we have a dual PHY? */
430 printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, " 394 printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
431 "let's see if it's attached to MAC0...\n"); 395 "let's see if it's attached to MAC0...\n");
432 396
433 BUG_ON(!au_macs[0]); 397 /* find the first (lowest address) non-attached PHY on
398 * the MAC0 MII bus */
399 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
400 struct phy_device *const tmp_phydev =
401 aup->mii_bus->phy_map[phy_addr];
434 402
435 /* find the first (lowest address) non-attached PHY on 403 if (aup->mac_id == 1)
436 * the MAC0 MII bus */ 404 break;
437 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
438 struct phy_device *const tmp_phydev =
439 au_macs[0]->mii_bus->phy_map[phy_addr];
440 405
441 if (!tmp_phydev) 406 if (!tmp_phydev)
442 continue; /* no PHY here... */ 407 continue; /* no PHY here... */
443 408
444 if (tmp_phydev->attached_dev) 409 if (tmp_phydev->attached_dev)
445 continue; /* already claimed by MAC0 */ 410 continue; /* already claimed by MAC0 */
446 411
447 phydev = tmp_phydev; 412 phydev = tmp_phydev;
448 break; /* found it */ 413 break; /* found it */
414 }
415 }
449 } 416 }
450 } 417 }
451# endif /* defined(AU1XXX_PHY1_SEARCH_OTHER_BUS) */
452 418
453#endif /* defined(AU1XXX_PHY_STATIC_CONFIG) */
454 if (!phydev) { 419 if (!phydev) {
455 printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name); 420 printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
456 return -1; 421 return -1;
@@ -578,31 +543,6 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
578 } 543 }
579} 544}
580 545
581static struct {
582 u32 base_addr;
583 u32 macen_addr;
584 int irq;
585 struct net_device *dev;
586} iflist[2] = {
587#ifdef CONFIG_SOC_AU1000
588 {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
589 {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
590#endif
591#ifdef CONFIG_SOC_AU1100
592 {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
593#endif
594#ifdef CONFIG_SOC_AU1500
595 {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
596 {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
597#endif
598#ifdef CONFIG_SOC_AU1550
599 {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
600 {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
601#endif
602};
603
604static int num_ifs;
605
606/* 546/*
607 * ethtool operations 547 * ethtool operations
608 */ 548 */
@@ -711,7 +651,6 @@ static int au1000_init(struct net_device *dev)
711 651
712static inline void update_rx_stats(struct net_device *dev, u32 status) 652static inline void update_rx_stats(struct net_device *dev, u32 status)
713{ 653{
714 struct au1000_private *aup = netdev_priv(dev);
715 struct net_device_stats *ps = &dev->stats; 654 struct net_device_stats *ps = &dev->stats;
716 655
717 ps->rx_packets++; 656 ps->rx_packets++;
@@ -969,7 +908,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
969 } 908 }
970 909
971 pDB = aup->tx_db_inuse[aup->tx_head]; 910 pDB = aup->tx_db_inuse[aup->tx_head];
972 skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); 911 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
973 if (skb->len < ETH_ZLEN) { 912 if (skb->len < ETH_ZLEN) {
974 for (i=skb->len; i<ETH_ZLEN; i++) { 913 for (i=skb->len; i<ETH_ZLEN; i++) {
975 ((char *)pDB->vaddr)[i] = 0; 914 ((char *)pDB->vaddr)[i] = 0;
@@ -1058,53 +997,59 @@ static const struct net_device_ops au1000_netdev_ops = {
1058 .ndo_change_mtu = eth_change_mtu, 997 .ndo_change_mtu = eth_change_mtu,
1059}; 998};
1060 999
1061static struct net_device * au1000_probe(int port_num) 1000static int __devinit au1000_probe(struct platform_device *pdev)
1062{ 1001{
1063 static unsigned version_printed = 0; 1002 static unsigned version_printed = 0;
1064 struct au1000_private *aup = NULL; 1003 struct au1000_private *aup = NULL;
1004 struct au1000_eth_platform_data *pd;
1065 struct net_device *dev = NULL; 1005 struct net_device *dev = NULL;
1066 db_dest_t *pDB, *pDBfree; 1006 db_dest_t *pDB, *pDBfree;
1007 int irq, i, err = 0;
1008 struct resource *base, *macen;
1067 char ethaddr[6]; 1009 char ethaddr[6];
1068 int irq, i, err;
1069 u32 base, macen;
1070 1010
1071 if (port_num >= NUM_ETH_INTERFACES) 1011 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1072 return NULL; 1012 if (!base) {
1013 printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
1014 err = -ENODEV;
1015 goto out;
1016 }
1073 1017
1074 base = CPHYSADDR(iflist[port_num].base_addr ); 1018 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1075 macen = CPHYSADDR(iflist[port_num].macen_addr); 1019 if (!macen) {
1076 irq = iflist[port_num].irq; 1020 printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
1021 err = -ENODEV;
1022 goto out;
1023 }
1077 1024
1078 if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") || 1025 irq = platform_get_irq(pdev, 0);
1079 !request_mem_region(macen, 4, "Au1x00 ENET")) 1026 if (irq < 0) {
1080 return NULL; 1027 printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
1028 err = -ENODEV;
1029 goto out;
1030 }
1081 1031
1082 if (version_printed++ == 0) 1032 if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
1083 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); 1033 printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
1034 err = -ENXIO;
1035 goto out;
1036 }
1037
1038 if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
1039 printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
1040 err = -ENXIO;
1041 goto err_request;
1042 }
1084 1043
1085 dev = alloc_etherdev(sizeof(struct au1000_private)); 1044 dev = alloc_etherdev(sizeof(struct au1000_private));
1086 if (!dev) { 1045 if (!dev) {
1087 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); 1046 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
1088 return NULL; 1047 err = -ENOMEM;
1048 goto err_alloc;
1089 } 1049 }
1090 1050
1091 dev->base_addr = base; 1051 SET_NETDEV_DEV(dev, &pdev->dev);
1092 dev->irq = irq; 1052 platform_set_drvdata(pdev, dev);
1093 dev->netdev_ops = &au1000_netdev_ops;
1094 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1095 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1096
1097 err = register_netdev(dev);
1098 if (err != 0) {
1099 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1100 DRV_NAME, err);
1101 free_netdev(dev);
1102 return NULL;
1103 }
1104
1105 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
1106 dev->name, base, irq);
1107
1108 aup = netdev_priv(dev); 1053 aup = netdev_priv(dev);
1109 1054
1110 spin_lock_init(&aup->lock); 1055 spin_lock_init(&aup->lock);
@@ -1115,21 +1060,29 @@ static struct net_device * au1000_probe(int port_num)
1115 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1060 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1116 &aup->dma_addr, 0); 1061 &aup->dma_addr, 0);
1117 if (!aup->vaddr) { 1062 if (!aup->vaddr) {
1118 free_netdev(dev); 1063 printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
1119 release_mem_region( base, MAC_IOSIZE); 1064 err = -ENOMEM;
1120 release_mem_region(macen, 4); 1065 goto err_vaddr;
1121 return NULL;
1122 } 1066 }
1123 1067
1124 /* aup->mac is the base address of the MAC's registers */ 1068 /* aup->mac is the base address of the MAC's registers */
1125 aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr; 1069 aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
1070 if (!aup->mac) {
1071 printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
1072 err = -ENXIO;
1073 goto err_remap1;
1074 }
1126 1075
1127 /* Setup some variables for quick register address access */ 1076 /* Setup some variables for quick register address access */
1128 aup->enable = (volatile u32 *)iflist[port_num].macen_addr; 1077 aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
1129 aup->mac_id = port_num; 1078 if (!aup->enable) {
1130 au_macs[port_num] = aup; 1079 printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
1080 err = -ENXIO;
1081 goto err_remap2;
1082 }
1083 aup->mac_id = pdev->id;
1131 1084
1132 if (port_num == 0) { 1085 if (pdev->id == 0) {
1133 if (prom_get_ethernet_addr(ethaddr) == 0) 1086 if (prom_get_ethernet_addr(ethaddr) == 0)
1134 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); 1087 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1135 else { 1088 else {
@@ -1139,7 +1092,7 @@ static struct net_device * au1000_probe(int port_num)
1139 } 1092 }
1140 1093
1141 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); 1094 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1142 } else if (port_num == 1) 1095 } else if (pdev->id == 1)
1143 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); 1096 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1144 1097
1145 /* 1098 /*
@@ -1147,14 +1100,37 @@ static struct net_device * au1000_probe(int port_num)
1147 * to match those that are printed on their stickers 1100 * to match those that are printed on their stickers
1148 */ 1101 */
1149 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); 1102 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1150 dev->dev_addr[5] += port_num; 1103 dev->dev_addr[5] += pdev->id;
1151 1104
1152 *aup->enable = 0; 1105 *aup->enable = 0;
1153 aup->mac_enabled = 0; 1106 aup->mac_enabled = 0;
1154 1107
1108 pd = pdev->dev.platform_data;
1109 if (!pd) {
1110 printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
1111 aup->phy1_search_mac0 = 1;
1112 } else {
1113 aup->phy_static_config = pd->phy_static_config;
1114 aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1115 aup->phy1_search_mac0 = pd->phy1_search_mac0;
1116 aup->phy_addr = pd->phy_addr;
1117 aup->phy_busid = pd->phy_busid;
1118 aup->phy_irq = pd->phy_irq;
1119 }
1120
1121 if (aup->phy_busid && aup->phy_busid > 0) {
1122 printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
1123 "bus not supported yet\n");
1124 err = -ENODEV;
1125 goto err_mdiobus_alloc;
1126 }
1127
1155 aup->mii_bus = mdiobus_alloc(); 1128 aup->mii_bus = mdiobus_alloc();
1156 if (aup->mii_bus == NULL) 1129 if (aup->mii_bus == NULL) {
1157 goto err_out; 1130 printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
1131 err = -ENOMEM;
1132 goto err_mdiobus_alloc;
1133 }
1158 1134
1159 aup->mii_bus->priv = dev; 1135 aup->mii_bus->priv = dev;
1160 aup->mii_bus->read = au1000_mdiobus_read; 1136 aup->mii_bus->read = au1000_mdiobus_read;
@@ -1168,23 +1144,19 @@ static struct net_device * au1000_probe(int port_num)
1168 1144
1169 for(i = 0; i < PHY_MAX_ADDR; ++i) 1145 for(i = 0; i < PHY_MAX_ADDR; ++i)
1170 aup->mii_bus->irq[i] = PHY_POLL; 1146 aup->mii_bus->irq[i] = PHY_POLL;
1171
1172 /* if known, set corresponding PHY IRQs */ 1147 /* if known, set corresponding PHY IRQs */
1173#if defined(AU1XXX_PHY_STATIC_CONFIG) 1148 if (aup->phy_static_config)
1174# if defined(AU1XXX_PHY0_IRQ) 1149 if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1175 if (AU1XXX_PHY0_BUSID == aup->mac_id) 1150 aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1176 aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; 1151
1177# endif 1152 err = mdiobus_register(aup->mii_bus);
1178# if defined(AU1XXX_PHY1_IRQ) 1153 if (err) {
1179 if (AU1XXX_PHY1_BUSID == aup->mac_id) 1154 printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
1180 aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; 1155 goto err_mdiobus_reg;
1181# endif 1156 }
1182#endif
1183 mdiobus_register(aup->mii_bus);
1184 1157
1185 if (mii_probe(dev) != 0) { 1158 if (mii_probe(dev) != 0)
1186 goto err_out; 1159 goto err_out;
1187 }
1188 1160
1189 pDBfree = NULL; 1161 pDBfree = NULL;
1190 /* setup the data buffer descriptors and attach a buffer to each one */ 1162 /* setup the data buffer descriptors and attach a buffer to each one */
@@ -1216,19 +1188,35 @@ static struct net_device * au1000_probe(int port_num)
1216 aup->tx_db_inuse[i] = pDB; 1188 aup->tx_db_inuse[i] = pDB;
1217 } 1189 }
1218 1190
1191 dev->base_addr = base->start;
1192 dev->irq = irq;
1193 dev->netdev_ops = &au1000_netdev_ops;
1194 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1195 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1196
1219 /* 1197 /*
1220 * The boot code uses the ethernet controller, so reset it to start 1198 * The boot code uses the ethernet controller, so reset it to start
1221 * fresh. au1000_init() expects that the device is in reset state. 1199 * fresh. au1000_init() expects that the device is in reset state.
1222 */ 1200 */
1223 reset_mac(dev); 1201 reset_mac(dev);
1224 1202
1225 return dev; 1203 err = register_netdev(dev);
1204 if (err) {
1205 printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
1206 dev->name);
1207 goto err_out;
1208 }
1209
1210 printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1211 dev->name, (unsigned long)base->start, irq);
1212 if (version_printed++ == 0)
1213 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1214
1215 return 0;
1226 1216
1227err_out: 1217err_out:
1228 if (aup->mii_bus != NULL) { 1218 if (aup->mii_bus != NULL)
1229 mdiobus_unregister(aup->mii_bus); 1219 mdiobus_unregister(aup->mii_bus);
1230 mdiobus_free(aup->mii_bus);
1231 }
1232 1220
1233 /* here we should have a valid dev plus aup-> register addresses 1221 /* here we should have a valid dev plus aup-> register addresses
1234 * so we can reset the mac properly.*/ 1222 * so we can reset the mac properly.*/
@@ -1242,67 +1230,84 @@ err_out:
1242 if (aup->tx_db_inuse[i]) 1230 if (aup->tx_db_inuse[i])
1243 ReleaseDB(aup, aup->tx_db_inuse[i]); 1231 ReleaseDB(aup, aup->tx_db_inuse[i]);
1244 } 1232 }
1233err_mdiobus_reg:
1234 mdiobus_free(aup->mii_bus);
1235err_mdiobus_alloc:
1236 iounmap(aup->enable);
1237err_remap2:
1238 iounmap(aup->mac);
1239err_remap1:
1245 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), 1240 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1246 (void *)aup->vaddr, aup->dma_addr); 1241 (void *)aup->vaddr, aup->dma_addr);
1247 unregister_netdev(dev); 1242err_vaddr:
1248 free_netdev(dev); 1243 free_netdev(dev);
1249 release_mem_region( base, MAC_IOSIZE); 1244err_alloc:
1250 release_mem_region(macen, 4); 1245 release_mem_region(macen->start, resource_size(macen));
1251 return NULL; 1246err_request:
1247 release_mem_region(base->start, resource_size(base));
1248out:
1249 return err;
1252} 1250}
1253 1251
1254/* 1252static int __devexit au1000_remove(struct platform_device *pdev)
1255 * Setup the base address and interrupt of the Au1xxx ethernet macs
1256 * based on cpu type and whether the interface is enabled in sys_pinfunc
1257 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1258 */
1259static int __init au1000_init_module(void)
1260{ 1253{
1261 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4); 1254 struct net_device *dev = platform_get_drvdata(pdev);
1262 struct net_device *dev; 1255 struct au1000_private *aup = netdev_priv(dev);
1263 int i, found_one = 0; 1256 int i;
1257 struct resource *base, *macen;
1264 1258
1265 num_ifs = NUM_ETH_INTERFACES - ni; 1259 platform_set_drvdata(pdev, NULL);
1260
1261 unregister_netdev(dev);
1262 mdiobus_unregister(aup->mii_bus);
1263 mdiobus_free(aup->mii_bus);
1264
1265 for (i = 0; i < NUM_RX_DMA; i++)
1266 if (aup->rx_db_inuse[i])
1267 ReleaseDB(aup, aup->rx_db_inuse[i]);
1268
1269 for (i = 0; i < NUM_TX_DMA; i++)
1270 if (aup->tx_db_inuse[i])
1271 ReleaseDB(aup, aup->tx_db_inuse[i]);
1272
1273 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1274 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1275 (void *)aup->vaddr, aup->dma_addr);
1276
1277 iounmap(aup->mac);
1278 iounmap(aup->enable);
1279
1280 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1281 release_mem_region(base->start, resource_size(base));
1282
1283 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1284 release_mem_region(macen->start, resource_size(macen));
1285
1286 free_netdev(dev);
1266 1287
1267 for(i = 0; i < num_ifs; i++) {
1268 dev = au1000_probe(i);
1269 iflist[i].dev = dev;
1270 if (dev)
1271 found_one++;
1272 }
1273 if (!found_one)
1274 return -ENODEV;
1275 return 0; 1288 return 0;
1276} 1289}
1277 1290
1278static void __exit au1000_cleanup_module(void) 1291static struct platform_driver au1000_eth_driver = {
1292 .probe = au1000_probe,
1293 .remove = __devexit_p(au1000_remove),
1294 .driver = {
1295 .name = "au1000-eth",
1296 .owner = THIS_MODULE,
1297 },
1298};
1299MODULE_ALIAS("platform:au1000-eth");
1300
1301
1302static int __init au1000_init_module(void)
1303{
1304 return platform_driver_register(&au1000_eth_driver);
1305}
1306
1307static void __exit au1000_exit_module(void)
1279{ 1308{
1280 int i, j; 1309 platform_driver_unregister(&au1000_eth_driver);
1281 struct net_device *dev;
1282 struct au1000_private *aup;
1283
1284 for (i = 0; i < num_ifs; i++) {
1285 dev = iflist[i].dev;
1286 if (dev) {
1287 aup = netdev_priv(dev);
1288 unregister_netdev(dev);
1289 mdiobus_unregister(aup->mii_bus);
1290 mdiobus_free(aup->mii_bus);
1291 for (j = 0; j < NUM_RX_DMA; j++)
1292 if (aup->rx_db_inuse[j])
1293 ReleaseDB(aup, aup->rx_db_inuse[j]);
1294 for (j = 0; j < NUM_TX_DMA; j++)
1295 if (aup->tx_db_inuse[j])
1296 ReleaseDB(aup, aup->tx_db_inuse[j]);
1297 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1298 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1299 (void *)aup->vaddr, aup->dma_addr);
1300 release_mem_region(dev->base_addr, MAC_IOSIZE);
1301 release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
1302 free_netdev(dev);
1303 }
1304 }
1305} 1310}
1306 1311
1307module_init(au1000_init_module); 1312module_init(au1000_init_module);
1308module_exit(au1000_cleanup_module); 1313module_exit(au1000_exit_module);
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index 824ecd5ff3a8..f9d29a29b8fd 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -108,6 +108,15 @@ struct au1000_private {
108 struct phy_device *phy_dev; 108 struct phy_device *phy_dev;
109 struct mii_bus *mii_bus; 109 struct mii_bus *mii_bus;
110 110
111 /* PHY configuration */
112 int phy_static_config;
113 int phy_search_highest_addr;
114 int phy1_search_mac0;
115
116 int phy_addr;
117 int phy_busid;
118 int phy_irq;
119
111 /* These variables are just for quick access to certain regs addresses. */ 120 /* These variables are just for quick access to certain regs addresses. */
112 volatile mac_reg_t *mac; /* mac registers */ 121 volatile mac_reg_t *mac; /* mac registers */
113 volatile u32 *enable; /* address of MAC Enable Register */ 122 volatile u32 *enable; /* address of MAC Enable Register */
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 8d0be26f94e3..bf2072e54200 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -36,6 +36,7 @@
36#include <linux/phy_fixed.h> 36#include <linux/phy_fixed.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/clk.h>
39#include <asm/gpio.h> 40#include <asm/gpio.h>
40#include <asm/atomic.h> 41#include <asm/atomic.h>
41 42
@@ -294,9 +295,16 @@ static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
294 295
295static int cpmac_mdio_reset(struct mii_bus *bus) 296static int cpmac_mdio_reset(struct mii_bus *bus)
296{ 297{
298 struct clk *cpmac_clk;
299
300 cpmac_clk = clk_get(&bus->dev, "cpmac");
301 if (IS_ERR(cpmac_clk)) {
302 printk(KERN_ERR "unable to get cpmac clock\n");
303 return -1;
304 }
297 ar7_device_reset(AR7_RESET_BIT_MDIO); 305 ar7_device_reset(AR7_RESET_BIT_MDIO);
298 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | 306 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
299 MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1)); 307 MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
300 return 0; 308 return 0;
301} 309}
302 310
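For reference, a minimal sketch of the divider arithmetic the clk_get_rate() based hunk above performs; the 125 MHz rate is only an assumed example for the "cpmac" clock, not a value stated in this patch.

	struct clk *cpmac_clk = clk_get(&bus->dev, "cpmac");	/* as in the hunk above */
	unsigned long rate = clk_get_rate(cpmac_clk);		/* assume this returns 125000000 */
	unsigned int div = rate / 2200000 - 1;			/* 125000000 / 2200000 = 56, so div = 55 */
	/* MDIOC_CLKDIV(div) then keeps the MDIO bus clock near the ~2.2 MHz target. */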
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 9b2eebdbb25b..b5cbd39d0685 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -36,6 +36,7 @@
36#include <asm/pb1000.h> 36#include <asm/pb1000.h>
37#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 37#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
38#include <asm/db1x00.h> 38#include <asm/db1x00.h>
39#include <asm/mach-db1x00/bcsr.h>
39#else 40#else
40#error au1k_ir: unsupported board 41#error au1k_ir: unsupported board
41#endif 42#endif
@@ -66,10 +67,6 @@ static char version[] __devinitdata =
66 67
67#define RUN_AT(x) (jiffies + (x)) 68#define RUN_AT(x) (jiffies + (x))
68 69
69#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
70static BCSR * const bcsr = (BCSR *)0xAE000000;
71#endif
72
73static DEFINE_SPINLOCK(ir_lock); 70static DEFINE_SPINLOCK(ir_lock);
74 71
75/* 72/*
@@ -282,9 +279,8 @@ static int au1k_irda_net_init(struct net_device *dev)
282 279
283#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 280#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
284 /* power on */ 281 /* power on */
285 bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK; 282 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
286 bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL; 283 BCSR_RESETS_IRDA_MODE_FULL);
287 au_sync();
288#endif 284#endif
289 285
290 return 0; 286 return 0;
@@ -720,14 +716,14 @@ au1k_irda_set_speed(struct net_device *dev, int speed)
720 716
721 if (speed == 4000000) { 717 if (speed == 4000000) {
722#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 718#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
723 bcsr->resets |= BCSR_RESETS_FIR_SEL; 719 bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
724#else /* Pb1000 and Pb1100 */ 720#else /* Pb1000 and Pb1100 */
725 writel(1<<13, CPLD_AUX1); 721 writel(1<<13, CPLD_AUX1);
726#endif 722#endif
727 } 723 }
728 else { 724 else {
729#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 725#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
730 bcsr->resets &= ~BCSR_RESETS_FIR_SEL; 726 bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
731#else /* Pb1000 and Pb1100 */ 727#else /* Pb1000 and Pb1100 */
732 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1); 728 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
733#endif 729#endif
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index e8f35dac2d51..0a6601c76809 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -173,6 +173,27 @@ config PCMCIA_AU1X00
173 tristate "Au1x00 pcmcia support" 173 tristate "Au1x00 pcmcia support"
174 depends on SOC_AU1X00 && PCMCIA 174 depends on SOC_AU1X00 && PCMCIA
175 175
176config PCMCIA_ALCHEMY_DEVBOARD
177 tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
178 depends on SOC_AU1X00 && PCMCIA
179 select 64BIT_PHYS_ADDR
180 help
181	  Enable this driver if you want PCMCIA support on your Alchemy
182	  Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200 boards.
183 NOT suitable for the PB1000!
184
185 This driver is also available as a module called db1xxx_ss.ko
186
187config PCMCIA_XXS1500
188 tristate "MyCable XXS1500 PCMCIA socket support"
189 depends on PCMCIA && MIPS_XXS1500
190 select 64BIT_PHYS_ADDR
191 help
192 Support for the PCMCIA/CF socket interface on MyCable XXS1500
193 systems.
194
195 This driver is also available as a module called xxs1500_ss.ko
196
176config PCMCIA_BCM63XX 197config PCMCIA_BCM63XX
177 tristate "bcm63xx pcmcia support" 198 tristate "bcm63xx pcmcia support"
178 depends on BCM63XX && PCMCIA 199 depends on BCM63XX && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 3c83f68c803a..381b031d9d75 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -35,18 +35,10 @@ obj-$(CONFIG_OMAP_CF) += omap_cf.o
35obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o 35obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
36obj-$(CONFIG_AT91_CF) += at91_cf.o 36obj-$(CONFIG_AT91_CF) += at91_cf.o
37obj-$(CONFIG_ELECTRA_CF) += electra_cf.o 37obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
38obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
38 39
39au1x00_ss-y += au1000_generic.o 40au1x00_ss-y += au1000_generic.o
40au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o 41au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
41au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o
42au1x00_ss-$(CONFIG_MIPS_PB1200) += au1000_db1x00.o
43au1x00_ss-$(CONFIG_MIPS_PB1500) += au1000_pb1x00.o
44au1x00_ss-$(CONFIG_MIPS_DB1000) += au1000_db1x00.o
45au1x00_ss-$(CONFIG_MIPS_DB1100) += au1000_db1x00.o
46au1x00_ss-$(CONFIG_MIPS_DB1200) += au1000_db1x00.o
47au1x00_ss-$(CONFIG_MIPS_DB1500) += au1000_db1x00.o
48au1x00_ss-$(CONFIG_MIPS_DB1550) += au1000_db1x00.o
49au1x00_ss-$(CONFIG_MIPS_XXS1500) += au1000_xxs1500.o
50 42
51sa1111_cs-y += sa1111_generic.o 43sa1111_cs-y += sa1111_generic.o
52sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o 44sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o
@@ -76,3 +68,5 @@ pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 68pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
77 69
78obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y) 70obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
71
72obj-$(CONFIG_PCMCIA_XXS1500) += xxs1500_ss.o
diff --git a/drivers/pcmcia/au1000_db1x00.c b/drivers/pcmcia/au1000_db1x00.c
deleted file mode 100644
index c78d77fd7e3b..000000000000
--- a/drivers/pcmcia/au1000_db1x00.c
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 *
3 * Alchemy Semi Db1x00 boards specific pcmcia routines.
4 *
5 * Copyright 2002 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc.
7 * ppopov@mvista.com or source@mvista.com
8 *
9 * Copyright 2004 Pete Popov, updated the driver to 2.6.
10 * Followed the sa11xx API and largely copied many of the hardware
11 * independent functions.
12 *
13 * ########################################################################
14 *
15 * This program is free software; you can distribute it and/or modify it
16 * under the terms of the GNU General Public License (Version 2) as
17 * published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope it will be useful, but WITHOUT
20 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 * for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
27 *
28 * ########################################################################
29 *
30 *
31 */
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/errno.h>
36#include <linux/interrupt.h>
37#include <linux/device.h>
38#include <linux/init.h>
39
40#include <asm/irq.h>
41#include <asm/signal.h>
42#include <asm/mach-au1x00/au1000.h>
43
44#if defined(CONFIG_MIPS_DB1200)
45 #include <db1200.h>
46#elif defined(CONFIG_MIPS_PB1200)
47 #include <pb1200.h>
48#else
49 #include <asm/mach-db1x00/db1x00.h>
50 static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
51#endif
52
53#include "au1000_generic.h"
54
55#if 0
56#define debug(x,args...) printk(KERN_DEBUG "%s: " x, __func__ , ##args)
57#else
58#define debug(x,args...)
59#endif
60
61
62struct au1000_pcmcia_socket au1000_pcmcia_socket[PCMCIA_NUM_SOCKS];
63extern int au1x00_pcmcia_socket_probe(struct device *, struct pcmcia_low_level *, int, int);
64
65static int db1x00_pcmcia_hw_init(struct au1000_pcmcia_socket *skt)
66{
67#ifdef CONFIG_MIPS_DB1550
68 skt->irq = skt->nr ? AU1000_GPIO_5 : AU1000_GPIO_3;
69#elif defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
70 skt->irq = skt->nr ? BOARD_PC1_INT : BOARD_PC0_INT;
71#else
72 skt->irq = skt->nr ? AU1000_GPIO_5 : AU1000_GPIO_2;
73#endif
74 return 0;
75}
76
77static void db1x00_pcmcia_shutdown(struct au1000_pcmcia_socket *skt)
78{
79 bcsr->pcmcia = 0; /* turn off power */
80 au_sync_delay(2);
81}
82
83static void
84db1x00_pcmcia_socket_state(struct au1000_pcmcia_socket *skt, struct pcmcia_state *state)
85{
86 u32 inserted;
87 unsigned char vs;
88
89 state->ready = 0;
90 state->vs_Xv = 0;
91 state->vs_3v = 0;
92 state->detect = 0;
93
94 switch (skt->nr) {
95 case 0:
96 vs = bcsr->status & 0x3;
97#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
98 inserted = BOARD_CARD_INSERTED(0);
99#else
100 inserted = !(bcsr->status & (1<<4));
101#endif
102 break;
103 case 1:
104 vs = (bcsr->status & 0xC)>>2;
105#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
106 inserted = BOARD_CARD_INSERTED(1);
107#else
108 inserted = !(bcsr->status & (1<<5));
109#endif
110 break;
111 default:/* should never happen */
112 return;
113 }
114
115 if (inserted)
116 debug("db1x00 socket %d: inserted %d, vs %d pcmcia %x\n",
117 skt->nr, inserted, vs, bcsr->pcmcia);
118
119 if (inserted) {
120 switch (vs) {
121 case 0:
122 case 2:
123 state->vs_3v=1;
124 break;
125 case 3: /* 5V */
126 break;
127 default:
128 /* return without setting 'detect' */
129 printk(KERN_ERR "db1x00 bad VS (%d)\n",
130 vs);
131 }
132 state->detect = 1;
133 state->ready = 1;
134 }
135 else {
136 /* if the card was previously inserted and then ejected,
137 * we should turn off power to it
138 */
139 if ((skt->nr == 0) && (bcsr->pcmcia & BCSR_PCMCIA_PC0RST)) {
140 bcsr->pcmcia &= ~(BCSR_PCMCIA_PC0RST |
141 BCSR_PCMCIA_PC0DRVEN |
142 BCSR_PCMCIA_PC0VPP |
143 BCSR_PCMCIA_PC0VCC);
144 au_sync_delay(10);
145 }
146 else if ((skt->nr == 1) && bcsr->pcmcia & BCSR_PCMCIA_PC1RST) {
147 bcsr->pcmcia &= ~(BCSR_PCMCIA_PC1RST |
148 BCSR_PCMCIA_PC1DRVEN |
149 BCSR_PCMCIA_PC1VPP |
150 BCSR_PCMCIA_PC1VCC);
151 au_sync_delay(10);
152 }
153 }
154
155 state->bvd1=1;
156 state->bvd2=1;
157 state->wrprot=0;
158}
159
160static int
161db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_state_t *state)
162{
163 u16 pwr;
164 int sock = skt->nr;
165
166 debug("config_skt %d Vcc %dV Vpp %dV, reset %d\n",
167 sock, state->Vcc, state->Vpp,
168 state->flags & SS_RESET);
169
170 /* pcmcia reg was set to zero at init time. Be careful when
171 * initializing a socket not to wipe out the settings of the
172 * other socket.
173 */
174 pwr = bcsr->pcmcia;
175 pwr &= ~(0xf << sock*8); /* clear voltage settings */
176
177 state->Vpp = 0;
178 switch(state->Vcc){
179 case 0: /* Vcc 0 */
180 pwr |= SET_VCC_VPP(0,0,sock);
181 break;
182 case 50: /* Vcc 5V */
183 switch(state->Vpp) {
184 case 0:
185 pwr |= SET_VCC_VPP(2,0,sock);
186 break;
187 case 50:
188 pwr |= SET_VCC_VPP(2,1,sock);
189 break;
190 case 12:
191 pwr |= SET_VCC_VPP(2,2,sock);
192 break;
193 case 33:
194 default:
195 pwr |= SET_VCC_VPP(0,0,sock);
196 printk("%s: bad Vcc/Vpp (%d:%d)\n",
197 __func__,
198 state->Vcc,
199 state->Vpp);
200 break;
201 }
202 break;
203 case 33: /* Vcc 3.3V */
204 switch(state->Vpp) {
205 case 0:
206 pwr |= SET_VCC_VPP(1,0,sock);
207 break;
208 case 12:
209 pwr |= SET_VCC_VPP(1,2,sock);
210 break;
211 case 33:
212 pwr |= SET_VCC_VPP(1,1,sock);
213 break;
214 case 50:
215 default:
216 pwr |= SET_VCC_VPP(0,0,sock);
217 printk("%s: bad Vcc/Vpp (%d:%d)\n",
218 __func__,
219 state->Vcc,
220 state->Vpp);
221 break;
222 }
223 break;
224 default: /* what's this ? */
225 pwr |= SET_VCC_VPP(0,0,sock);
226 printk(KERN_ERR "%s: bad Vcc %d\n",
227 __func__, state->Vcc);
228 break;
229 }
230
231 bcsr->pcmcia = pwr;
232 au_sync_delay(300);
233
234 if (sock == 0) {
235 if (!(state->flags & SS_RESET)) {
236 pwr |= BCSR_PCMCIA_PC0DRVEN;
237 bcsr->pcmcia = pwr;
238 au_sync_delay(300);
239 pwr |= BCSR_PCMCIA_PC0RST;
240 bcsr->pcmcia = pwr;
241 au_sync_delay(100);
242 }
243 else {
244 pwr &= ~(BCSR_PCMCIA_PC0RST | BCSR_PCMCIA_PC0DRVEN);
245 bcsr->pcmcia = pwr;
246 au_sync_delay(100);
247 }
248 }
249 else {
250 if (!(state->flags & SS_RESET)) {
251 pwr |= BCSR_PCMCIA_PC1DRVEN;
252 bcsr->pcmcia = pwr;
253 au_sync_delay(300);
254 pwr |= BCSR_PCMCIA_PC1RST;
255 bcsr->pcmcia = pwr;
256 au_sync_delay(100);
257 }
258 else {
259 pwr &= ~(BCSR_PCMCIA_PC1RST | BCSR_PCMCIA_PC1DRVEN);
260 bcsr->pcmcia = pwr;
261 au_sync_delay(100);
262 }
263 }
264 return 0;
265}
266
267/*
268 * Enable card status IRQs on (re-)initialisation. This can
269 * be called at initialisation, power management event, or
270 * pcmcia event.
271 */
272void db1x00_socket_init(struct au1000_pcmcia_socket *skt)
273{
274 /* nothing to do for now */
275}
276
277/*
278 * Disable card status IRQs and PCMCIA bus on suspend.
279 */
280void db1x00_socket_suspend(struct au1000_pcmcia_socket *skt)
281{
282 /* nothing to do for now */
283}
284
285struct pcmcia_low_level db1x00_pcmcia_ops = {
286 .owner = THIS_MODULE,
287
288 .hw_init = db1x00_pcmcia_hw_init,
289 .hw_shutdown = db1x00_pcmcia_shutdown,
290
291 .socket_state = db1x00_pcmcia_socket_state,
292 .configure_socket = db1x00_pcmcia_configure_socket,
293
294 .socket_init = db1x00_socket_init,
295 .socket_suspend = db1x00_socket_suspend
296};
297
298int au1x_board_init(struct device *dev)
299{
300 int ret = -ENODEV;
301 bcsr->pcmcia = 0; /* turn off power, if it's not already off */
302 au_sync_delay(2);
303 ret = au1x00_pcmcia_socket_probe(dev, &db1x00_pcmcia_ops, 0, 2);
304 return ret;
305}
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index 02088704ac2c..171c8a654887 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -405,18 +405,16 @@ int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops,
405 skt->virt_io = (void *) 405 skt->virt_io = (void *)
406 (ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) - 406 (ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) -
407 (u32)mips_io_port_base); 407 (u32)mips_io_port_base);
408 skt->phys_attr = AU1X_SOCK0_PSEUDO_PHYS_ATTR; 408 skt->phys_attr = AU1X_SOCK0_PHYS_ATTR;
409 skt->phys_mem = AU1X_SOCK0_PSEUDO_PHYS_MEM; 409 skt->phys_mem = AU1X_SOCK0_PHYS_MEM;
410 } 410 }
411#ifndef CONFIG_MIPS_XXS1500
412 else { 411 else {
413 skt->virt_io = (void *) 412 skt->virt_io = (void *)
414 (ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) - 413 (ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) -
415 (u32)mips_io_port_base); 414 (u32)mips_io_port_base);
416 skt->phys_attr = AU1X_SOCK1_PSEUDO_PHYS_ATTR; 415 skt->phys_attr = AU1X_SOCK1_PHYS_ATTR;
417 skt->phys_mem = AU1X_SOCK1_PSEUDO_PHYS_MEM; 416 skt->phys_mem = AU1X_SOCK1_PHYS_MEM;
418 } 417 }
419#endif
420 pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io; 418 pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io;
421 ret = ops->hw_init(skt); 419 ret = ops->hw_init(skt);
422 420
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index 13a4fbc58711..a324d329dea6 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -36,30 +36,14 @@
36#define AU1X_SOCK0_IO 0xF00000000ULL 36#define AU1X_SOCK0_IO 0xF00000000ULL
37#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL 37#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL
38#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL 38#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL
39/* pseudo 32 bit phys addresses, which get fixed up to the
40 * real 36 bit address in fixup_bigphys_addr() */
41#define AU1X_SOCK0_PSEUDO_PHYS_ATTR 0xF4000000
42#define AU1X_SOCK0_PSEUDO_PHYS_MEM 0xF8000000
43 39
44/* pcmcia socket 1 needs external glue logic so the memory map 40/* pcmcia socket 1 needs external glue logic so the memory map
45 * differs from board to board. 41 * differs from board to board.
46 */ 42 */
47#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || \ 43#if defined(CONFIG_MIPS_PB1000)
48 defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || \
49 defined(CONFIG_MIPS_PB1200)
50#define AU1X_SOCK1_IO 0xF08000000ULL 44#define AU1X_SOCK1_IO 0xF08000000ULL
51#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL 45#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
52#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL 46#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
53#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000
54#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000
55#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || \
56 defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || \
57 defined(CONFIG_MIPS_DB1200)
58#define AU1X_SOCK1_IO 0xF04000000ULL
59#define AU1X_SOCK1_PHYS_ATTR 0xF44000000ULL
60#define AU1X_SOCK1_PHYS_MEM 0xF84000000ULL
61#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000
62#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000
63#endif 47#endif
64 48
65struct pcmcia_state { 49struct pcmcia_state {
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index b1984ed72d1d..5a979cb8f3e6 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * Alchemy Semi Pb1x00 boards specific pcmcia routines. 3 * Alchemy Semi Pb1000 boards specific pcmcia routines.
4 * 4 *
5 * Copyright 2002 MontaVista Software Inc. 5 * Copyright 2002 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc. 6 * Author: MontaVista Software, Inc.
@@ -46,20 +46,11 @@
46 46
47#define debug(fmt, arg...) do { } while (0) 47#define debug(fmt, arg...) do { } while (0)
48 48
49#ifdef CONFIG_MIPS_PB1000
50#include <asm/pb1000.h> 49#include <asm/pb1000.h>
51#define PCMCIA_IRQ AU1000_GPIO_15 50#define PCMCIA_IRQ AU1000_GPIO_15
52#elif defined (CONFIG_MIPS_PB1500)
53#include <asm/pb1500.h>
54#define PCMCIA_IRQ AU1500_GPIO_203
55#elif defined (CONFIG_MIPS_PB1100)
56#include <asm/pb1100.h>
57#define PCMCIA_IRQ AU1000_GPIO_11
58#endif
59 51
60static int pb1x00_pcmcia_init(struct pcmcia_init *init) 52static int pb1x00_pcmcia_init(struct pcmcia_init *init)
61{ 53{
62#ifdef CONFIG_MIPS_PB1000
63 u16 pcr; 54 u16 pcr;
64 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST; 55 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
65 56
@@ -74,21 +65,10 @@ static int pb1x00_pcmcia_init(struct pcmcia_init *init)
74 au_sync_delay(20); 65 au_sync_delay(20);
75 66
76 return PCMCIA_NUM_SOCKS; 67 return PCMCIA_NUM_SOCKS;
77
78#else /* fixme -- take care of the Pb1500 at some point */
79
80 u16 pcr;
81 pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf; /* turn off power */
82 pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
83 au_writew(pcr, PCMCIA_BOARD_REG);
84 au_sync_delay(500);
85 return PCMCIA_NUM_SOCKS;
86#endif
87} 68}
88 69
89static int pb1x00_pcmcia_shutdown(void) 70static int pb1x00_pcmcia_shutdown(void)
90{ 71{
91#ifdef CONFIG_MIPS_PB1000
92 u16 pcr; 72 u16 pcr;
93 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST; 73 pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
94 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0); 74 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
@@ -96,14 +76,6 @@ static int pb1x00_pcmcia_shutdown(void)
96 au_writel(pcr, PB1000_PCR); 76 au_writel(pcr, PB1000_PCR);
97 au_sync_delay(20); 77 au_sync_delay(20);
98 return 0; 78 return 0;
99#else
100 u16 pcr;
101 pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf; /* turn off power */
102 pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
103 au_writew(pcr, PCMCIA_BOARD_REG);
104 au_sync_delay(2);
105 return 0;
106#endif
107} 79}
108 80
109static int 81static int
@@ -112,21 +84,11 @@ pb1x00_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
112 u32 inserted0, inserted1; 84 u32 inserted0, inserted1;
113 u16 vs0, vs1; 85 u16 vs0, vs1;
114 86
115#ifdef CONFIG_MIPS_PB1000
116 vs0 = vs1 = (u16)au_readl(PB1000_ACR1); 87 vs0 = vs1 = (u16)au_readl(PB1000_ACR1);
117 inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2)); 88 inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2));
118 inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2)); 89 inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2));
119 vs0 = (vs0 >> 4) & 0x3; 90 vs0 = (vs0 >> 4) & 0x3;
120 vs1 = (vs1 >> 12) & 0x3; 91 vs1 = (vs1 >> 12) & 0x3;
121#else
122 vs0 = (au_readw(BOARD_STATUS_REG) >> 4) & 0x3;
123#ifdef CONFIG_MIPS_PB1500
124 inserted0 = !((au_readl(GPIO2_PINSTATE) >> 1) & 0x1); /* gpio 201 */
125#else /* Pb1100 */
126 inserted0 = !((au_readl(SYS_PINSTATERD) >> 9) & 0x1); /* gpio 9 */
127#endif
128 inserted1 = 0;
129#endif
130 92
131 state->ready = 0; 93 state->ready = 0;
132 state->vs_Xv = 0; 94 state->vs_Xv = 0;
@@ -203,7 +165,6 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
203 165
204 if(configure->sock > PCMCIA_MAX_SOCK) return -1; 166 if(configure->sock > PCMCIA_MAX_SOCK) return -1;
205 167
206#ifdef CONFIG_MIPS_PB1000
207 pcr = au_readl(PB1000_PCR); 168 pcr = au_readl(PB1000_PCR);
208 169
209 if (configure->sock == 0) { 170 if (configure->sock == 0) {
@@ -323,84 +284,6 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
323 au_writel(pcr, PB1000_PCR); 284 au_writel(pcr, PB1000_PCR);
324 au_sync_delay(300); 285 au_sync_delay(300);
325 286
326#else
327
328 pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf;
329
330 debug("Vcc %dV Vpp %dV, pcr %x, reset %d\n",
331 configure->vcc, configure->vpp, pcr, configure->reset);
332
333
334 switch(configure->vcc){
335 case 0: /* Vcc 0 */
336 pcr |= SET_VCC_VPP(0,0);
337 break;
338 case 50: /* Vcc 5V */
339 switch(configure->vpp) {
340 case 0:
341 pcr |= SET_VCC_VPP(2,0);
342 break;
343 case 50:
344 pcr |= SET_VCC_VPP(2,1);
345 break;
346 case 12:
347 pcr |= SET_VCC_VPP(2,2);
348 break;
349 case 33:
350 default:
351 pcr |= SET_VCC_VPP(0,0);
352 printk("%s: bad Vcc/Vpp (%d:%d)\n",
353 __func__,
354 configure->vcc,
355 configure->vpp);
356 break;
357 }
358 break;
359 case 33: /* Vcc 3.3V */
360 switch(configure->vpp) {
361 case 0:
362 pcr |= SET_VCC_VPP(1,0);
363 break;
364 case 12:
365 pcr |= SET_VCC_VPP(1,2);
366 break;
367 case 33:
368 pcr |= SET_VCC_VPP(1,1);
369 break;
370 case 50:
371 default:
372 pcr |= SET_VCC_VPP(0,0);
373 printk("%s: bad Vcc/Vpp (%d:%d)\n",
374 __func__,
375 configure->vcc,
376 configure->vpp);
377 break;
378 }
379 break;
380 default: /* what's this ? */
381 pcr |= SET_VCC_VPP(0,0);
382 printk(KERN_ERR "%s: bad Vcc %d\n",
383 __func__, configure->vcc);
384 break;
385 }
386
387 au_writew(pcr, PCMCIA_BOARD_REG);
388 au_sync_delay(300);
389
390 if (!configure->reset) {
391 pcr |= PC_DRV_EN;
392 au_writew(pcr, PCMCIA_BOARD_REG);
393 au_sync_delay(100);
394 pcr |= PC_DEASSERT_RST;
395 au_writew(pcr, PCMCIA_BOARD_REG);
396 au_sync_delay(100);
397 }
398 else {
399 pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
400 au_writew(pcr, PCMCIA_BOARD_REG);
401 au_sync_delay(100);
402 }
403#endif
404 return 0; 287 return 0;
405} 288}
406 289
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
deleted file mode 100644
index b43d47b50819..000000000000
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ /dev/null
@@ -1,188 +0,0 @@
1/*
2 *
3 * MyCable board specific pcmcia routines.
4 *
5 * Copyright 2003 MontaVista Software Inc.
6 * Author: Pete Popov, MontaVista Software, Inc.
7 * ppopov@mvista.com or source@mvista.com
8 *
9 * ########################################################################
10 *
11 * This program is free software; you can distribute it and/or modify it
12 * under the terms of the GNU General Public License (Version 2) as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 *
24 * ########################################################################
25 *
26 *
27 */
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/ioport.h>
32#include <linux/kernel.h>
33#include <linux/timer.h>
34#include <linux/mm.h>
35#include <linux/proc_fs.h>
36#include <linux/types.h>
37
38#include <pcmcia/cs_types.h>
39#include <pcmcia/cs.h>
40#include <pcmcia/ss.h>
41#include <pcmcia/cistpl.h>
42#include <pcmcia/bus_ops.h>
43
44#include <asm/io.h>
45#include <asm/irq.h>
46#include <asm/system.h>
47
48#include <asm/au1000.h>
49#include <asm/au1000_pcmcia.h>
50
51#define PCMCIA_MAX_SOCK 0
52#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
53#define PCMCIA_IRQ AU1000_GPIO_4
54
55#if 0
56#define DEBUG(x, args...) printk(__func__ ": " x, ##args)
57#else
58#define DEBUG(x,args...)
59#endif
60
61static int xxs1500_pcmcia_init(struct pcmcia_init *init)
62{
63 return PCMCIA_NUM_SOCKS;
64}
65
66static int xxs1500_pcmcia_shutdown(void)
67{
68 /* turn off power */
69 au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
70 GPIO2_OUTPUT);
71 au_sync_delay(100);
72
73 /* assert reset */
74 au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
75 GPIO2_OUTPUT);
76 au_sync_delay(100);
77 return 0;
78}
79
80
81static int
82xxs1500_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
83{
84 u32 inserted; u32 vs;
85 unsigned long gpio, gpio2;
86
87 if(sock > PCMCIA_MAX_SOCK) return -1;
88
89 gpio = au_readl(SYS_PINSTATERD);
90 gpio2 = au_readl(GPIO2_PINSTATE);
91
92 vs = gpio2 & ((1<<8) | (1<<9));
93 inserted = (!(gpio & 0x1) && !(gpio & 0x2));
94
95 state->ready = 0;
96 state->vs_Xv = 0;
97 state->vs_3v = 0;
98 state->detect = 0;
99
100 if (inserted) {
101 switch (vs) {
102 case 0:
103 case 1:
104 case 2:
105 state->vs_3v=1;
106 break;
107 case 3: /* 5V */
108 default:
109 /* return without setting 'detect' */
110 printk(KERN_ERR "au1x00_cs: unsupported VS\n",
111 vs);
112 return;
113 }
114 state->detect = 1;
115 }
116
117 if (state->detect) {
118 state->ready = 1;
119 }
120
121 state->bvd1= gpio2 & (1<<10);
122 state->bvd2 = gpio2 & (1<<11);
123 state->wrprot=0;
124 return 1;
125}
126
127
128static int xxs1500_pcmcia_get_irq_info(struct pcmcia_irq_info *info)
129{
130
131 if(info->sock > PCMCIA_MAX_SOCK) return -1;
132 info->irq = PCMCIA_IRQ;
133 return 0;
134}
135
136
137static int
138xxs1500_pcmcia_configure_socket(const struct pcmcia_configure *configure)
139{
140
141 if(configure->sock > PCMCIA_MAX_SOCK) return -1;
142
143 DEBUG("Vcc %dV Vpp %dV, reset %d\n",
144 configure->vcc, configure->vpp, configure->reset);
145
146 switch(configure->vcc){
147 case 33: /* Vcc 3.3V */
148 /* turn on power */
149 DEBUG("turn on power\n");
150 au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<14))|(1<<30),
151 GPIO2_OUTPUT);
152 au_sync_delay(100);
153 break;
154 case 50: /* Vcc 5V */
155 default: /* what's this ? */
156 printk(KERN_ERR "au1x00_cs: unsupported VCC\n");
157 case 0: /* Vcc 0 */
158 /* turn off power */
159 au_sync_delay(100);
160 au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
161 GPIO2_OUTPUT);
162 break;
163 }
164
165 if (!configure->reset) {
166 DEBUG("deassert reset\n");
167 au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<4))|(1<<20),
168 GPIO2_OUTPUT);
169 au_sync_delay(100);
170 au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<5))|(1<<21),
171 GPIO2_OUTPUT);
172 }
173 else {
174 DEBUG("assert reset\n");
175 au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
176 GPIO2_OUTPUT);
177 }
178 au_sync_delay(100);
179 return 0;
180}
181
182struct pcmcia_low_level xxs1500_pcmcia_ops = {
183 xxs1500_pcmcia_init,
184 xxs1500_pcmcia_shutdown,
185 xxs1500_pcmcia_socket_state,
186 xxs1500_pcmcia_get_irq_info,
187 xxs1500_pcmcia_configure_socket
188};
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
new file mode 100644
index 000000000000..3889cf07d6ce
--- /dev/null
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -0,0 +1,623 @@
1/*
2 * PCMCIA socket code for the Alchemy Db1xxx/Pb1xxx boards.
3 *
4 * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
5 *
6 */
7
8/* This is a fairly generic PCMCIA socket driver suitable for the
9 * following Alchemy Development boards:
10 * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200.
11 *
12 * The Db1000 is used as a reference: Per-socket card-, carddetect- and
13 * statuschange IRQs connected to SoC GPIOs, control and status register
14 * bits arranged in per-socket groups in an external PLD. All boards
15 * listed here use this layout, including bit positions and meanings.
16 * Of course there are exceptions in later boards:
17 *
18 * - Pb1100/Pb1500: single socket only; voltage key bits VS are
19 * at STATUS[5:4] (instead of STATUS[1:0]).
20 * - Au1200-based: additional card-eject irqs, irqs not gpios!
21 */
22
23#include <linux/delay.h>
24#include <linux/gpio.h>
25#include <linux/interrupt.h>
26#include <linux/pm.h>
27#include <linux/platform_device.h>
28#include <linux/resource.h>
29#include <linux/spinlock.h>
30
31#include <pcmcia/cs_types.h>
32#include <pcmcia/ss.h>
33
34#include <asm/mach-au1x00/au1000.h>
35#include <asm/mach-db1x00/bcsr.h>
36
37#define MEM_MAP_SIZE 0x400000
38#define IO_MAP_SIZE 0x1000
39
40struct db1x_pcmcia_sock {
41 struct pcmcia_socket socket;
42 int nr; /* socket number */
43 void *virt_io;
44
45 /* the "pseudo" addresses of the PCMCIA space. */
46 phys_addr_t phys_io;
47 phys_addr_t phys_attr;
48 phys_addr_t phys_mem;
49
50 /* previous flags for set_socket() */
51 unsigned int old_flags;
52
53 /* interrupt sources: linux irq numbers! */
54 int insert_irq; /* default carddetect irq */
55 int stschg_irq; /* card-status-change irq */
56 int card_irq; /* card irq */
57 int eject_irq; /* db1200/pb1200 have these */
58
59#define BOARD_TYPE_DEFAULT 0 /* most boards */
60#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
61#define BOARD_TYPE_PB1100 2 /* VS bits slightly different */
62 int board_type;
63};
64
65#define to_db1x_socket(x) container_of(x, struct db1x_pcmcia_sock, socket)
66
67/* DB/PB1200: check CPLD SIGSTATUS register bit 10/12 */
68static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
69{
70 unsigned short sigstat;
71
72 sigstat = bcsr_read(BCSR_SIGSTAT);
73 return sigstat & 1 << (8 + 2 * sock->nr);
74}
75
76/* carddetect gpio: low-active */
77static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
78{
79 return !gpio_get_value(irq_to_gpio(sock->insert_irq));
80}
81
82static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
83{
84 switch (sock->board_type) {
85 case BOARD_TYPE_DB1200:
86 return db1200_card_inserted(sock);
87 default:
88 return db1000_card_inserted(sock);
89 }
90}
91
92/* STSCHG tends to bounce heavily when cards are inserted/ejected.
93 * To avoid this, the interrupt is normally disabled and only enabled
94 * after reset to a card has been de-asserted.
95 */
96static inline void set_stschg(struct db1x_pcmcia_sock *sock, int en)
97{
98 if (sock->stschg_irq != -1) {
99 if (en)
100 enable_irq(sock->stschg_irq);
101 else
102 disable_irq(sock->stschg_irq);
103 }
104}
105
106static irqreturn_t db1000_pcmcia_cdirq(int irq, void *data)
107{
108 struct db1x_pcmcia_sock *sock = data;
109
110 pcmcia_parse_events(&sock->socket, SS_DETECT);
111
112 return IRQ_HANDLED;
113}
114
115static irqreturn_t db1000_pcmcia_stschgirq(int irq, void *data)
116{
117 struct db1x_pcmcia_sock *sock = data;
118
119 pcmcia_parse_events(&sock->socket, SS_STSCHG);
120
121 return IRQ_HANDLED;
122}
123
124static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data)
125{
126 struct db1x_pcmcia_sock *sock = data;
127
128 /* Db/Pb1200 have separate per-socket insertion and ejection
129 * interrupts which stay asserted as long as the card is
130 * inserted/missing. The one which caused us to be called
131 * needs to be disabled and the other one enabled.
132 */
133 if (irq == sock->insert_irq) {
134 disable_irq_nosync(sock->insert_irq);
135 enable_irq(sock->eject_irq);
136 } else {
137 disable_irq_nosync(sock->eject_irq);
138 enable_irq(sock->insert_irq);
139 }
140
141 pcmcia_parse_events(&sock->socket, SS_DETECT);
142
143 return IRQ_HANDLED;
144}
145
146static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
147{
148 int ret;
149 unsigned long flags;
150
151 if (sock->stschg_irq != -1) {
152 ret = request_irq(sock->stschg_irq, db1000_pcmcia_stschgirq,
153 0, "pcmcia_stschg", sock);
154 if (ret)
155 return ret;
156 }
157
158 /* Db/Pb1200 have separate per-socket insertion and ejection
159 * interrupts, which should show edge behaviour but don't.
160 * So interrupts are disabled until both insertion and
161 * ejection handler have been registered and the currently
162 * active one disabled.
163 */
164 if (sock->board_type == BOARD_TYPE_DB1200) {
165 local_irq_save(flags);
166
167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
168 IRQF_DISABLED, "pcmcia_insert", sock);
169 if (ret)
170 goto out1;
171
172 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
173 IRQF_DISABLED, "pcmcia_eject", sock);
174 if (ret) {
175 free_irq(sock->insert_irq, sock);
176 local_irq_restore(flags);
177 goto out1;
178 }
179
180 /* disable the currently active one */
181 if (db1200_card_inserted(sock))
182 disable_irq_nosync(sock->insert_irq);
183 else
184 disable_irq_nosync(sock->eject_irq);
185
186 local_irq_restore(flags);
187 } else {
188 /* all other (older) Db1x00 boards use a GPIO to show
189 * card detection status: use both-edge triggers.
190 */
191 set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH);
192 ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq,
193 0, "pcmcia_carddetect", sock);
194
195 if (ret)
196 goto out1;
197 }
198
199 return 0; /* all done */
200
201out1:
202 if (sock->stschg_irq != -1)
203 free_irq(sock->stschg_irq, sock);
204
205 return ret;
206}
207
208static void db1x_pcmcia_free_irqs(struct db1x_pcmcia_sock *sock)
209{
210 if (sock->stschg_irq != -1)
211 free_irq(sock->stschg_irq, sock);
212
213 free_irq(sock->insert_irq, sock);
214 if (sock->eject_irq != -1)
215 free_irq(sock->eject_irq, sock);
216}
217
218/*
219 * configure a PCMCIA socket on the Db1x00 series of boards (and
220 * compatibles).
221 *
222 * 2 external registers are involved:
223 * pcmcia_status (offset 0x04): bits [0:1/2:3]: read card voltage id
224 * pcmcia_control(offset 0x10):
 225 *	bits[3:2] set vcc for card
 226 *	bits[1:0] set vpp for card
227 * bit 4: enable data buffers
228 * bit 7: reset# for card
229 * add 8 for second socket.
230 */
231static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
232 struct socket_state_t *state)
233{
234 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
235 unsigned short cr_clr, cr_set;
236 unsigned int changed;
237 int v, p, ret;
238
239 /* card voltage setup */
240 cr_clr = (0xf << (sock->nr * 8)); /* clear voltage settings */
241 cr_set = 0;
242 v = p = ret = 0;
243
244 switch (state->Vcc) {
245 case 50:
246 ++v;
247 case 33:
248 ++v;
249 case 0:
250 break;
251 default:
252 printk(KERN_INFO "pcmcia%d unsupported Vcc %d\n",
253 sock->nr, state->Vcc);
254 }
255
256 switch (state->Vpp) {
257 case 12:
258 ++p;
259 case 33:
260 case 50:
261 ++p;
262 case 0:
263 break;
264 default:
265 printk(KERN_INFO "pcmcia%d unsupported Vpp %d\n",
266 sock->nr, state->Vpp);
267 }
268
269 /* sanity check: Vpp must be 0, 12, or Vcc */
270 if (((state->Vcc == 33) && (state->Vpp == 50)) ||
271 ((state->Vcc == 50) && (state->Vpp == 33))) {
272 printk(KERN_INFO "pcmcia%d bad Vcc/Vpp combo (%d %d)\n",
273 sock->nr, state->Vcc, state->Vpp);
274 v = p = 0;
275 ret = -EINVAL;
276 }
277
278 /* create new voltage code */
279 cr_set |= ((v << 2) | p) << (sock->nr * 8);
280
281 changed = state->flags ^ sock->old_flags;
282
283 if (changed & SS_RESET) {
284 if (state->flags & SS_RESET) {
285 set_stschg(sock, 0);
286 /* assert reset, disable io buffers */
287 cr_clr |= (1 << (7 + (sock->nr * 8)));
288 cr_clr |= (1 << (4 + (sock->nr * 8)));
289 } else {
290 /* de-assert reset, enable io buffers */
291 cr_set |= 1 << (7 + (sock->nr * 8));
292 cr_set |= 1 << (4 + (sock->nr * 8));
293 }
294 }
295
296 /* update PCMCIA configuration */
297 bcsr_mod(BCSR_PCMCIA, cr_clr, cr_set);
298
299 sock->old_flags = state->flags;
300
301 /* reset was taken away: give card time to initialize properly */
302 if ((changed & SS_RESET) && !(state->flags & SS_RESET)) {
303 msleep(500);
304 set_stschg(sock, 1);
305 }
306
307 return ret;
308}
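As a worked illustration of the register layout documented above db1x_pcmcia_configure() (Vcc code in bits [3:2], Vpp code in bits [1:0], buffer enable at bit 4, reset# at bit 7, all shifted by 8 for socket 1), here is how the masks come out for one assumed case; the numbers are examples only, not values taken from this patch.

	/* Hypothetical case: socket 1, Vcc = 3.3V (v = 1), Vpp = 0V (p = 0),
	 * reset de-asserted and the data buffers enabled.
	 */
	unsigned short nr = 1, v = 1, p = 0;
	unsigned short cr_clr = 0xf << (nr * 8);		/* 0x0f00: wipe the old voltage code */
	unsigned short cr_set = ((v << 2) | p) << (nr * 8);	/* 0x0400: new voltage code */
	cr_set |= 1 << (4 + nr * 8);				/* 0x1000: enable the data buffers */
	cr_set |= 1 << (7 + nr * 8);				/* 0x8000: release reset# */
	/* bcsr_mod(BCSR_PCMCIA, cr_clr, cr_set) then writes 0x9400 into
	 * socket 1's byte while leaving socket 0's settings untouched. */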
309
310/* VCC bits at [3:2]/[11:10] */
311#define GET_VCC(cr, socknr) \
312 ((((cr) >> 2) >> ((socknr) * 8)) & 3)
313
314/* VS bits at [0:1]/[3:2] */
315#define GET_VS(sr, socknr) \
316 (((sr) >> (2 * (socknr))) & 3)
317
318/* reset bits at [7]/[15] */
319#define GET_RESET(cr, socknr) \
320 ((cr) & (1 << (7 + (8 * (socknr)))))
321
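/* Continuing the assumed 0x9400 value from the configure example above
 * (illustrative only, not from this patch): for socket 1,
 * GET_VCC(0x9400, 1) = ((0x9400 >> 2) >> 8) & 3 = 1, i.e. power applied,
 * and GET_RESET(0x9400, 1) = 0x9400 & (1 << 15), which is non-zero, so
 * db1x_pcmcia_get_status() below reports SS_POWERON | SS_READY. */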
322static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
323 unsigned int *value)
324{
325 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
326 unsigned short cr, sr;
327 unsigned int status;
328
329 status = db1x_card_inserted(sock) ? SS_DETECT : 0;
330
331 cr = bcsr_read(BCSR_PCMCIA);
332 sr = bcsr_read(BCSR_STATUS);
333
334 /* PB1100/PB1500: voltage key bits are at [5:4] */
335 if (sock->board_type == BOARD_TYPE_PB1100)
336 sr >>= 4;
337
338 /* determine card type */
339 switch (GET_VS(sr, sock->nr)) {
340 case 0:
341 case 2:
342 status |= SS_3VCARD; /* 3V card */
343 case 3:
344 break; /* 5V card: set nothing */
345 default:
346 status |= SS_XVCARD; /* treated as unsupported in core */
347 }
348
349 /* if Vcc is not zero, we have applied power to a card */
350 status |= GET_VCC(cr, sock->nr) ? SS_POWERON : 0;
351
352 /* reset de-asserted? then we're ready */
353 status |= (GET_RESET(cr, sock->nr)) ? SS_READY : SS_RESET;
354
355 *value = status;
356
357 return 0;
358}
359
360static int db1x_pcmcia_sock_init(struct pcmcia_socket *skt)
361{
362 return 0;
363}
364
365static int db1x_pcmcia_sock_suspend(struct pcmcia_socket *skt)
366{
367 return 0;
368}
369
370static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
371 struct pccard_io_map *map)
372{
373 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
374
375 map->start = (u32)sock->virt_io;
376 map->stop = map->start + IO_MAP_SIZE;
377
378 return 0;
379}
380
381static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
382 struct pccard_mem_map *map)
383{
384 struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
385
386 if (map->flags & MAP_ATTRIB)
387 map->static_start = sock->phys_attr + map->card_start;
388 else
389 map->static_start = sock->phys_mem + map->card_start;
390
391 return 0;
392}
393
394static struct pccard_operations db1x_pcmcia_operations = {
395 .init = db1x_pcmcia_sock_init,
396 .suspend = db1x_pcmcia_sock_suspend,
397 .get_status = db1x_pcmcia_get_status,
398 .set_socket = db1x_pcmcia_configure,
399 .set_io_map = au1x00_pcmcia_set_io_map,
400 .set_mem_map = au1x00_pcmcia_set_mem_map,
401};
402
403static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
404{
405 struct db1x_pcmcia_sock *sock;
406 struct resource *r;
407 int ret, bid;
408
409 sock = kzalloc(sizeof(struct db1x_pcmcia_sock), GFP_KERNEL);
410 if (!sock)
411 return -ENOMEM;
412
413 sock->nr = pdev->id;
414
415 bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
416 switch (bid) {
417 case BCSR_WHOAMI_PB1500:
418 case BCSR_WHOAMI_PB1500R2:
419 case BCSR_WHOAMI_PB1100:
420 sock->board_type = BOARD_TYPE_PB1100;
421 break;
422 case BCSR_WHOAMI_DB1000 ... BCSR_WHOAMI_PB1550_SDR:
423 sock->board_type = BOARD_TYPE_DEFAULT;
424 break;
425 case BCSR_WHOAMI_PB1200 ... BCSR_WHOAMI_DB1200:
426 sock->board_type = BOARD_TYPE_DB1200;
427 break;
428 default:
429 printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
430 ret = -ENODEV;
431 goto out0;
432 };
433
434 /*
435 * gather resources necessary and optional nice-to-haves to
436 * operate a socket:
437 * This includes IRQs for Carddetection/ejection, the card
438 * itself and optional status change detection.
439 * Also, the memory areas covered by a socket. For these
440 * we require the 32bit "pseudo" addresses (see the au1000.h
441 * header for more information).
442 */
443
444 /* card: irq assigned to the card itself. */
445 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
446 sock->card_irq = r ? r->start : 0;
447
448 /* insert: irq which triggers on card insertion/ejection */
449 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
450 sock->insert_irq = r ? r->start : -1;
451
 452	/* stschg: irq which triggers on card status change (optional) */
453 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
454 sock->stschg_irq = r ? r->start : -1;
455
456 /* eject: irq which triggers on ejection (DB1200/PB1200 only) */
457 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "eject");
458 sock->eject_irq = r ? r->start : -1;
459
460 ret = -ENODEV;
461
462 /*
463 * pseudo-attr: The 32bit address of the PCMCIA attribute space
464 * for this socket (usually the 36bit address shifted 4 to the
465 * right).
466 */
467 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
468 if (!r) {
 469		printk(KERN_ERR "pcmcia%d has no 'pcmcia-attr' resource!\n",
470 sock->nr);
471 goto out0;
472 }
473 sock->phys_attr = r->start;
474
475 /*
476 * pseudo-mem: The 32bit address of the PCMCIA memory space for
477 * this socket (usually the 36bit address shifted 4 to the right)
478 */
479 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
480 if (!r) {
 481		printk(KERN_ERR "pcmcia%d has no 'pcmcia-mem' resource!\n",
482 sock->nr);
483 goto out0;
484 }
485 sock->phys_mem = r->start;
486
487 /*
488 * pseudo-io: The 32bit address of the PCMCIA IO space for this
489 * socket (usually the 36bit address shifted 4 to the right).
490 */
491 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
492 if (!r) {
 493		printk(KERN_ERR "pcmcia%d has no 'pcmcia-io' resource!\n",
494 sock->nr);
495 goto out0;
496 }
497 sock->phys_io = r->start;
498
499 /*
500 * PCMCIA client drivers use the inb/outb macros to access
501 * the IO registers. Since mips_io_port_base is added
502 * to the access address of the mips implementation of
503 * inb/outb, we need to subtract it here because we want
504 * to access the I/O or MEM address directly, without
505 * going through this "mips_io_port_base" mechanism.
506 */
507 sock->virt_io = (void *)(ioremap(sock->phys_io, IO_MAP_SIZE) -
508 mips_io_port_base);
509
510 if (!sock->virt_io) {
511 printk(KERN_ERR "pcmcia%d: cannot remap IO area\n",
512 sock->nr);
513 ret = -ENOMEM;
514 goto out0;
515 }
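	/* Illustrative arithmetic with assumed addresses (not from this patch):
	 * an inb(port) ends up reading from roughly
	 * mips_io_port_base + io_offset + port. With io_offset set to
	 * virt_io = ioremap(phys_io) - mips_io_port_base, the two base terms
	 * cancel; e.g. if ioremap() returned 0xC0000000 and mips_io_port_base
	 * were 0xA0000000, inb(0x3f8) would access 0xC00003F8 inside the
	 * remapped PCMCIA I/O window. */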
516
517 sock->socket.ops = &db1x_pcmcia_operations;
518 sock->socket.owner = THIS_MODULE;
519 sock->socket.pci_irq = sock->card_irq;
520 sock->socket.features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
521 sock->socket.map_size = MEM_MAP_SIZE;
522 sock->socket.io_offset = (unsigned long)sock->virt_io;
523 sock->socket.dev.parent = &pdev->dev;
524 sock->socket.resource_ops = &pccard_static_ops;
525
526 platform_set_drvdata(pdev, sock);
527
528 ret = db1x_pcmcia_setup_irqs(sock);
529 if (ret) {
530 printk(KERN_ERR "pcmcia%d cannot setup interrupts\n",
531 sock->nr);
532 goto out1;
533 }
534
535 set_stschg(sock, 0);
536
537 ret = pcmcia_register_socket(&sock->socket);
538 if (ret) {
539 printk(KERN_ERR "pcmcia%d failed to register\n", sock->nr);
540 goto out2;
541 }
542
543 printk(KERN_INFO "Alchemy Db/Pb1xxx pcmcia%d @ io/attr/mem %09llx"
544 "(%p) %09llx %09llx card/insert/stschg/eject irqs @ %d "
545 "%d %d %d\n", sock->nr, sock->phys_io, sock->virt_io,
546 sock->phys_attr, sock->phys_mem, sock->card_irq,
547 sock->insert_irq, sock->stschg_irq, sock->eject_irq);
548
549 return 0;
550
551out2:
552 db1x_pcmcia_free_irqs(sock);
553out1:
554 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
555out0:
556 kfree(sock);
557 return ret;
558}
559
560static int __devexit db1x_pcmcia_socket_remove(struct platform_device *pdev)
561{
562 struct db1x_pcmcia_sock *sock = platform_get_drvdata(pdev);
563
564 db1x_pcmcia_free_irqs(sock);
565 pcmcia_unregister_socket(&sock->socket);
566 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
567 kfree(sock);
568
569 return 0;
570}
571
572#ifdef CONFIG_PM
573static int db1x_pcmcia_suspend(struct device *dev)
574{
575 return pcmcia_socket_dev_suspend(dev);
576}
577
578static int db1x_pcmcia_resume(struct device *dev)
579{
580 return pcmcia_socket_dev_resume(dev);
581}
582
583static struct dev_pm_ops db1x_pcmcia_pmops = {
584 .resume = db1x_pcmcia_resume,
585 .suspend = db1x_pcmcia_suspend,
586 .thaw = db1x_pcmcia_resume,
587 .freeze = db1x_pcmcia_suspend,
588};
589
590#define DB1XXX_SS_PMOPS &db1x_pcmcia_pmops
591
592#else
593
594#define DB1XXX_SS_PMOPS NULL
595
596#endif
597
598static struct platform_driver db1x_pcmcia_socket_driver = {
599 .driver = {
600 .name = "db1xxx_pcmcia",
601 .owner = THIS_MODULE,
602 .pm = DB1XXX_SS_PMOPS
603 },
604 .probe = db1x_pcmcia_socket_probe,
605 .remove = __devexit_p(db1x_pcmcia_socket_remove),
606};
607
608int __init db1x_pcmcia_socket_load(void)
609{
610 return platform_driver_register(&db1x_pcmcia_socket_driver);
611}
612
613void __exit db1x_pcmcia_socket_unload(void)
614{
615 platform_driver_unregister(&db1x_pcmcia_socket_driver);
616}
617
618module_init(db1x_pcmcia_socket_load);
619module_exit(db1x_pcmcia_socket_unload);
620
621MODULE_LICENSE("GPL");
622MODULE_DESCRIPTION("PCMCIA Socket Services for Alchemy Db/Pb1x00 boards");
623MODULE_AUTHOR("Manuel Lauss");
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
new file mode 100644
index 000000000000..61560cd6e287
--- /dev/null
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -0,0 +1,350 @@
1/*
2 * PCMCIA socket code for the MyCable XXS1500 system.
3 *
4 * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
5 *
6 */
7
8#include <linux/delay.h>
9#include <linux/gpio.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/ioport.h>
13#include <linux/mm.h>
14#include <linux/platform_device.h>
15#include <linux/pm.h>
16#include <linux/resource.h>
17#include <linux/spinlock.h>
18
19#include <pcmcia/cs_types.h>
20#include <pcmcia/cs.h>
21#include <pcmcia/ss.h>
22#include <pcmcia/cistpl.h>
23
24#include <asm/irq.h>
25#include <asm/system.h>
26#include <asm/mach-au1x00/au1000.h>
27
28#define MEM_MAP_SIZE 0x400000
29#define IO_MAP_SIZE 0x1000
30
31
32/*
33 * 3.3V cards only; all interfacing is done via gpios:
34 *
35 * 0/1: carddetect (00 = card present, xx = huh)
36 * 4: card irq
37 * 204: reset (high-act)
38 * 205: buffer enable (low-act)
39 * 208/209: card voltage key (00,01,10,11)
40 * 210: battwarn
41 * 211: batdead
42 * 214: power (low-act)
43 */
44#define GPIO_CDA 0
45#define GPIO_CDB 1
46#define GPIO_CARDIRQ 4
47#define GPIO_RESET 204
48#define GPIO_OUTEN 205
49#define GPIO_VSL 208
50#define GPIO_VSH 209
51#define GPIO_BATTDEAD 210
52#define GPIO_BATTWARN 211
53#define GPIO_POWER 214
54
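/* Descriptive summary of the pin usage above (mirrors the logic in
 * xxs1500_pcmcia_get_status() further down): a card is present when both
 * carddetect GPIOs 0 and 1 read low; the voltage key is the two-bit value
 * GPIO209:GPIO208, where 00/01/10 identify a 3.3V card and 11 a 5V card
 * (unsupported here); power, reset and the data buffers are driven through
 * GPIO214, GPIO204 and GPIO205. */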
55struct xxs1500_pcmcia_sock {
56 struct pcmcia_socket socket;
57 void *virt_io;
58
59 phys_addr_t phys_io;
60 phys_addr_t phys_attr;
61 phys_addr_t phys_mem;
62
63 /* previous flags for set_socket() */
64 unsigned int old_flags;
65};
66
67#define to_xxs_socket(x) container_of(x, struct xxs1500_pcmcia_sock, socket)
68
69static irqreturn_t cdirq(int irq, void *data)
70{
71 struct xxs1500_pcmcia_sock *sock = data;
72
73 pcmcia_parse_events(&sock->socket, SS_DETECT);
74
75 return IRQ_HANDLED;
76}
77
78static int xxs1500_pcmcia_configure(struct pcmcia_socket *skt,
79 struct socket_state_t *state)
80{
81 struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
82 unsigned int changed;
83
84 /* power control */
85 switch (state->Vcc) {
86 case 0:
87 gpio_set_value(GPIO_POWER, 1); /* power off */
88 break;
89 case 33:
90 gpio_set_value(GPIO_POWER, 0); /* power on */
91 break;
92 case 50:
93 default:
94 return -EINVAL;
95 }
96
97 changed = state->flags ^ sock->old_flags;
98
99 if (changed & SS_RESET) {
100 if (state->flags & SS_RESET) {
101 gpio_set_value(GPIO_RESET, 1); /* assert reset */
102 gpio_set_value(GPIO_OUTEN, 1); /* buffers off */
103 } else {
104 gpio_set_value(GPIO_RESET, 0); /* deassert reset */
105 gpio_set_value(GPIO_OUTEN, 0); /* buffers on */
106 msleep(500);
107 }
108 }
109
110 sock->old_flags = state->flags;
111
112 return 0;
113}
114
115static int xxs1500_pcmcia_get_status(struct pcmcia_socket *skt,
116 unsigned int *value)
117{
118 unsigned int status;
119 int i;
120
121 status = 0;
122
123 /* check carddetects: GPIO[0:1] must both be low */
124 if (!gpio_get_value(GPIO_CDA) && !gpio_get_value(GPIO_CDB))
125 status |= SS_DETECT;
126
127 /* determine card voltage: GPIO[208:209] binary value */
128 i = (!!gpio_get_value(GPIO_VSL)) | ((!!gpio_get_value(GPIO_VSH)) << 1);
129
130 switch (i) {
131 case 0:
132 case 1:
133 case 2:
134 status |= SS_3VCARD; /* 3V card */
135 break;
136 case 3: /* 5V card, unsupported */
137 default:
138 status |= SS_XVCARD; /* treated as unsupported in core */
139 }
140
141 /* GPIO214: low active power switch */
142 status |= gpio_get_value(GPIO_POWER) ? 0 : SS_POWERON;
143
144 /* GPIO204: high-active reset line */
145 status |= gpio_get_value(GPIO_RESET) ? SS_RESET : SS_READY;
146
147 /* other stuff */
148 status |= gpio_get_value(GPIO_BATTDEAD) ? 0 : SS_BATDEAD;
149 status |= gpio_get_value(GPIO_BATTWARN) ? 0 : SS_BATWARN;
150
151 *value = status;
152
153 return 0;
154}
155
156static int xxs1500_pcmcia_sock_init(struct pcmcia_socket *skt)
157{
158 gpio_direction_input(GPIO_CDA);
159 gpio_direction_input(GPIO_CDB);
160 gpio_direction_input(GPIO_VSL);
161 gpio_direction_input(GPIO_VSH);
162 gpio_direction_input(GPIO_BATTDEAD);
163 gpio_direction_input(GPIO_BATTWARN);
164 gpio_direction_output(GPIO_RESET, 1); /* assert reset */
165 gpio_direction_output(GPIO_OUTEN, 1); /* disable buffers */
166 gpio_direction_output(GPIO_POWER, 1); /* power off */
167
168 return 0;
169}
170
171static int xxs1500_pcmcia_sock_suspend(struct pcmcia_socket *skt)
172{
173 return 0;
174}
175
176static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
177 struct pccard_io_map *map)
178{
179 struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
180
181 map->start = (u32)sock->virt_io;
182 map->stop = map->start + IO_MAP_SIZE;
183
184 return 0;
185}
186
187static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
188 struct pccard_mem_map *map)
189{
190 struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
191
192 if (map->flags & MAP_ATTRIB)
193 map->static_start = sock->phys_attr + map->card_start;
194 else
195 map->static_start = sock->phys_mem + map->card_start;
196
197 return 0;
198}
199
200static struct pccard_operations xxs1500_pcmcia_operations = {
201 .init = xxs1500_pcmcia_sock_init,
202 .suspend = xxs1500_pcmcia_sock_suspend,
203 .get_status = xxs1500_pcmcia_get_status,
204 .set_socket = xxs1500_pcmcia_configure,
205 .set_io_map = au1x00_pcmcia_set_io_map,
206 .set_mem_map = au1x00_pcmcia_set_mem_map,
207};
208
209static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
210{
211 struct xxs1500_pcmcia_sock *sock;
212 struct resource *r;
213 int ret, irq;
214
215 sock = kzalloc(sizeof(struct xxs1500_pcmcia_sock), GFP_KERNEL);
216 if (!sock)
217 return -ENOMEM;
218
219 ret = -ENODEV;
220
221 /*
222 * pseudo-attr: The 32bit address of the PCMCIA attribute space
223 * for this socket (usually the 36bit address shifted 4 to the
224 * right).
225 */
226 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
227 if (!r) {
228 dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
229 goto out0;
230 }
231 sock->phys_attr = r->start;
232
233 /*
234 * pseudo-mem: The 32bit address of the PCMCIA memory space for
235 * this socket (usually the 36bit address shifted 4 to the right)
236 */
237 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
238 if (!r) {
239 dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
240 goto out0;
241 }
242 sock->phys_mem = r->start;
243
244 /*
245 * pseudo-io: The 32bit address of the PCMCIA IO space for this
246 * socket (usually the 36bit address shifted 4 to the right).
247 */
248 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
249 if (!r) {
250 dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
251 goto out0;
252 }
253 sock->phys_io = r->start;
254
255
256 /*
257 * PCMCIA client drivers use the inb/outb macros to access
258 * the IO registers. Since mips_io_port_base is added
259 * to the access address of the mips implementation of
260 * inb/outb, we need to subtract it here because we want
261 * to access the I/O or MEM address directly, without
262 * going through this "mips_io_port_base" mechanism.
263 */
264 sock->virt_io = (void *)(ioremap(sock->phys_io, IO_MAP_SIZE) -
265 mips_io_port_base);
266
267 if (!sock->virt_io) {
268 dev_err(&pdev->dev, "cannot remap IO area\n");
269 ret = -ENOMEM;
270 goto out0;
271 }
272
273 sock->socket.ops = &xxs1500_pcmcia_operations;
274 sock->socket.owner = THIS_MODULE;
275 sock->socket.pci_irq = gpio_to_irq(GPIO_CARDIRQ);
276 sock->socket.features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
277 sock->socket.map_size = MEM_MAP_SIZE;
278 sock->socket.io_offset = (unsigned long)sock->virt_io;
279 sock->socket.dev.parent = &pdev->dev;
280 sock->socket.resource_ops = &pccard_static_ops;
281
282 platform_set_drvdata(pdev, sock);
283
284 /* setup carddetect irq: use one of the 2 GPIOs as an
285 * edge detector.
286 */
287 irq = gpio_to_irq(GPIO_CDA);
288 set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
289 ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock);
290 if (ret) {
291 dev_err(&pdev->dev, "cannot setup cd irq\n");
292 goto out1;
293 }
294
295 ret = pcmcia_register_socket(&sock->socket);
296 if (ret) {
297 dev_err(&pdev->dev, "failed to register\n");
298 goto out2;
299 }
300
301 printk(KERN_INFO "MyCable XXS1500 PCMCIA socket services\n");
302
303 return 0;
304
305out2:
306 free_irq(gpio_to_irq(GPIO_CDA), sock);
307out1:
308 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
309out0:
310 kfree(sock);
311 return ret;
312}
313
314static int __devexit xxs1500_pcmcia_remove(struct platform_device *pdev)
315{
316 struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev);
317
318 pcmcia_unregister_socket(&sock->socket);
319 free_irq(gpio_to_irq(GPIO_CDA), sock);
320 iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
321 kfree(sock);
322
323 return 0;
324}
325
326static struct platform_driver xxs1500_pcmcia_socket_driver = {
327 .driver = {
328 .name = "xxs1500_pcmcia",
329 .owner = THIS_MODULE,
330 },
331 .probe = xxs1500_pcmcia_probe,
332 .remove = __devexit_p(xxs1500_pcmcia_remove),
333};
334
335int __init xxs1500_pcmcia_socket_load(void)
336{
337 return platform_driver_register(&xxs1500_pcmcia_socket_driver);
338}
339
340void __exit xxs1500_pcmcia_socket_unload(void)
341{
342 platform_driver_unregister(&xxs1500_pcmcia_socket_driver);
343}
344
345module_init(xxs1500_pcmcia_socket_load);
346module_exit(xxs1500_pcmcia_socket_unload);
347
348MODULE_LICENSE("GPL");
349MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems");
350MODULE_AUTHOR("Manuel Lauss");
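The probe routine listed above stores ioremap(phys_io, IO_MAP_SIZE) minus mips_io_port_base as the socket's io_offset. A minimal sketch of why that works, assuming the usual MIPS convention that port I/O resolves to an access at mips_io_port_base + port (sketch_inb() is illustrative only, not a kernel API):

        extern unsigned long mips_io_port_base;

        /*
         * The PCMCIA core hands "io_offset + card port" to inb()/outb(),
         * which on MIPS dereference roughly mips_io_port_base + port.
         * With io_offset = ioremap(phys_io, size) - mips_io_port_base the
         * base cancels out and the access lands in the remapped window.
         */
        static inline unsigned char sketch_inb(unsigned long io_offset,
                                               unsigned long port)
        {
                return *(volatile unsigned char *)(mips_io_port_base +
                                                   io_offset + port);
        }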
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index e9b15c3746fa..a81ff7bc5fa1 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1217,12 +1217,6 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
1217 } 1217 }
1218#endif 1218#endif
1219 1219
1220#ifdef CONFIG_SERIAL_8250_AU1X00
1221 /* if access method is AU, it is a 16550 with a quirk */
1222 if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
1223 up->bugs |= UART_BUG_NOMSR;
1224#endif
1225
1226 serial_outp(up, UART_LCR, save_lcr); 1220 serial_outp(up, UART_LCR, save_lcr);
1227 1221
1228 if (up->capabilities != uart_config[up->port.type].flags) { 1222 if (up->capabilities != uart_config[up->port.type].flags) {
@@ -2428,7 +2422,7 @@ serial8250_pm(struct uart_port *port, unsigned int state,
2428static unsigned int serial8250_port_size(struct uart_8250_port *pt) 2422static unsigned int serial8250_port_size(struct uart_8250_port *pt)
2429{ 2423{
2430 if (pt->port.iotype == UPIO_AU) 2424 if (pt->port.iotype == UPIO_AU)
2431 return 0x100000; 2425 return 0x1000;
2432#ifdef CONFIG_ARCH_OMAP 2426#ifdef CONFIG_ARCH_OMAP
2433 if (is_omap_port(pt)) 2427 if (is_omap_port(pt))
2434 return 0x16 << pt->port.regshift; 2428 return 0x16 << pt->port.regshift;
@@ -2585,6 +2579,13 @@ static void serial8250_config_port(struct uart_port *port, int flags)
2585 2579
2586 if (flags & UART_CONFIG_TYPE) 2580 if (flags & UART_CONFIG_TYPE)
2587 autoconfig(up, probeflags); 2581 autoconfig(up, probeflags);
2582
2583#ifdef CONFIG_SERIAL_8250_AU1X00
2584 /* if access method is AU, it is a 16550 with a quirk */
2585 if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
2586 up->bugs |= UART_BUG_NOMSR;
2587#endif
2588
2588 if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) 2589 if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
2589 autoconfig_irq(up); 2590 autoconfig_irq(up);
2590 2591
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index cfd5ff9508fa..ba8ac4f599d3 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -412,11 +412,13 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
412 } 412 }
413 413
414 /* put buffers on the ring */ 414 /* put buffers on the ring */
415 res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len); 415 res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
416 t->len, DDMA_FLAGS_IE);
416 if (!res) 417 if (!res)
417 dev_err(hw->dev, "rx dma put dest error\n"); 418 dev_err(hw->dev, "rx dma put dest error\n");
418 419
419 res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len); 420 res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
421 t->len, DDMA_FLAGS_IE);
420 if (!res) 422 if (!res)
421 dev_err(hw->dev, "tx dma put source error\n"); 423 dev_err(hw->dev, "tx dma put source error\n");
422 424
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index c0a583cc2227..87447c102fa0 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -14,7 +14,6 @@ obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
14octeon-ethernet-objs := ethernet.o 14octeon-ethernet-objs := ethernet.o
15octeon-ethernet-objs += ethernet-mdio.o 15octeon-ethernet-objs += ethernet-mdio.o
16octeon-ethernet-objs += ethernet-mem.o 16octeon-ethernet-objs += ethernet-mem.o
17octeon-ethernet-objs += ethernet-proc.o
18octeon-ethernet-objs += ethernet-rgmii.o 17octeon-ethernet-objs += ethernet-rgmii.o
19octeon-ethernet-objs += ethernet-rx.o 18octeon-ethernet-objs += ethernet-rx.o
20octeon-ethernet-objs += ethernet-sgmii.o 19octeon-ethernet-objs += ethernet-sgmii.o
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index f13131b03c33..6a2cd50a17df 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -41,17 +41,10 @@
41 * Tells the driver to populate the packet buffers with kernel skbuffs. 41 * Tells the driver to populate the packet buffers with kernel skbuffs.
42 * This allows the driver to receive packets without copying them. It also 42 * This allows the driver to receive packets without copying them. It also
43 * means that 32bit userspace can't access the packet buffers. 43 * means that 32bit userspace can't access the packet buffers.
44 * USE_32BIT_SHARED
45 * This define tells the driver to allocate memory for buffers from the
46 * 32bit sahred region instead of the kernel memory space.
47 * USE_HW_TCPUDP_CHECKSUM 44 * USE_HW_TCPUDP_CHECKSUM
48 * Controls if the Octeon TCP/UDP checksum engine is used for packet 45 * Controls if the Octeon TCP/UDP checksum engine is used for packet
49 * output. If this is zero, the kernel will perform the checksum in 46 * output. If this is zero, the kernel will perform the checksum in
50 * software. 47 * software.
51 * USE_MULTICORE_RECEIVE
52 * Process receive interrupts on multiple cores. This spreads the network
53 * load across the first 8 processors. If ths is zero, only one core
54 * processes incomming packets.
55 * USE_ASYNC_IOBDMA 48 * USE_ASYNC_IOBDMA
56 * Use asynchronous IO access to hardware. This uses Octeon's asynchronous 49 * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
57 * IOBDMAs to issue IO accesses without stalling. Set this to zero 50 * IOBDMAs to issue IO accesses without stalling. Set this to zero
@@ -75,29 +68,15 @@
75#define CONFIG_CAVIUM_RESERVE32 0 68#define CONFIG_CAVIUM_RESERVE32 0
76#endif 69#endif
77 70
78#if CONFIG_CAVIUM_RESERVE32
79#define USE_32BIT_SHARED 1
80#define USE_SKBUFFS_IN_HW 0
81#define REUSE_SKBUFFS_WITHOUT_FREE 0
82#else
83#define USE_32BIT_SHARED 0
84#define USE_SKBUFFS_IN_HW 1 71#define USE_SKBUFFS_IN_HW 1
85#ifdef CONFIG_NETFILTER 72#ifdef CONFIG_NETFILTER
86#define REUSE_SKBUFFS_WITHOUT_FREE 0 73#define REUSE_SKBUFFS_WITHOUT_FREE 0
87#else 74#else
88#define REUSE_SKBUFFS_WITHOUT_FREE 1 75#define REUSE_SKBUFFS_WITHOUT_FREE 1
89#endif 76#endif
90#endif
91
92/* Max interrupts per second per core */
93#define INTERRUPT_LIMIT 10000
94 77
95/* Don't limit the number of interrupts */
96/*#define INTERRUPT_LIMIT 0 */
97#define USE_HW_TCPUDP_CHECKSUM 1 78#define USE_HW_TCPUDP_CHECKSUM 1
98 79
99#define USE_MULTICORE_RECEIVE 1
100
101/* Enable Random Early Dropping under load */ 80/* Enable Random Early Dropping under load */
102#define USE_RED 1 81#define USE_RED 1
103#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0) 82#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
@@ -115,21 +94,12 @@
115/* Use this to not have FPA frees control L2 */ 94/* Use this to not have FPA frees control L2 */
116/*#define DONT_WRITEBACK(x) 0 */ 95/*#define DONT_WRITEBACK(x) 0 */
117 96
118/* Maximum number of packets to process per interrupt. */
119#define MAX_RX_PACKETS 120
120/* Maximum number of SKBs to try to free per xmit packet. */ 97/* Maximum number of SKBs to try to free per xmit packet. */
121#define MAX_SKB_TO_FREE 10
122#define MAX_OUT_QUEUE_DEPTH 1000 98#define MAX_OUT_QUEUE_DEPTH 1000
123 99
124#ifndef CONFIG_SMP 100#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
125#undef USE_MULTICORE_RECEIVE 101#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))
126#define USE_MULTICORE_RECEIVE 0
127#endif
128
129#define IP_PROTOCOL_TCP 6
130#define IP_PROTOCOL_UDP 0x11
131 102
132#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
133#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1) 103#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
134 104
135 105
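The two FAU defines added above stack a pair of 32-bit counters at the very top of the FAU register space, back to back. A small self-contained sketch of the arithmetic, assuming an illustrative register-space size (the real CVMX_FAU_REG_END comes from the Octeon SDK headers):

        #include <stdio.h>
        #include <stdint.h>

        #define SKETCH_FAU_REG_END 2048u   /* illustrative value only */

        int main(void)
        {
                uint32_t total_tx_to_clean = SKETCH_FAU_REG_END - sizeof(uint32_t);
                uint32_t num_bufs_to_free  = total_tx_to_clean - sizeof(uint32_t);

                printf("FAU_TOTAL_TX_TO_CLEAN          = %u\n", total_tx_to_clean);
                printf("FAU_NUM_PACKET_BUFFERS_TO_FREE = %u\n", num_bufs_to_free);
                return 0;
        }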
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 05a5cc0f43ed..7e0be8d00dc3 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -96,11 +96,11 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
96}; 96};
97 97
98/** 98/**
99 * IOCTL support for PHY control 99 * cvm_oct_ioctl - IOCTL support for PHY control
100 *
101 * @dev: Device to change 100 * @dev: Device to change
102 * @rq: the request 101 * @rq: the request
103 * @cmd: the command 102 * @cmd: the command
103 *
104 * Returns Zero on success 104 * Returns Zero on success
105 */ 105 */
106int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 106int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -153,7 +153,7 @@ static void cvm_oct_adjust_link(struct net_device *dev)
153 153
154 154
155/** 155/**
156 * Setup the PHY 156 * cvm_oct_phy_setup_device - setup the PHY
157 * 157 *
158 * @dev: Device to setup 158 * @dev: Device to setup
159 * 159 *
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index 55d0614a7cd9..a417d4fce12c 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -32,7 +32,6 @@
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/seq_file.h> 35#include <linux/seq_file.h>
37#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
38#include <net/dst.h> 37#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index b595903e2af1..00cc91df6b46 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -26,8 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31 29
32#include <asm/octeon/octeon.h> 30#include <asm/octeon/octeon.h>
33 31
@@ -36,18 +34,19 @@
36#include "cvmx-fpa.h" 34#include "cvmx-fpa.h"
37 35
38/** 36/**
39 * Fill the supplied hardware pool with skbuffs 37 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
40 *
41 * @pool: Pool to allocate an skbuff for 38 * @pool: Pool to allocate an skbuff for
42 * @size: Size of the buffer needed for the pool 39 * @size: Size of the buffer needed for the pool
43 * @elements: Number of buffers to allocate 40 * @elements: Number of buffers to allocate
41 *
42 * Returns the actual number of buffers allocated.
44 */ 43 */
45static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) 44static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
46{ 45{
47 int freed = elements; 46 int freed = elements;
48 while (freed) { 47 while (freed) {
49 48
50 struct sk_buff *skb = dev_alloc_skb(size + 128); 49 struct sk_buff *skb = dev_alloc_skb(size + 256);
51 if (unlikely(skb == NULL)) { 50 if (unlikely(skb == NULL)) {
52 pr_warning 51 pr_warning
53 ("Failed to allocate skb for hardware pool %d\n", 52 ("Failed to allocate skb for hardware pool %d\n",
@@ -55,7 +54,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
55 break; 54 break;
56 } 55 }
57 56
58 skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f)); 57 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
59 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; 58 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
60 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); 59 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
61 freed--; 60 freed--;
@@ -64,8 +63,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
64} 63}
65 64
66/** 65/**
67 * Free the supplied hardware pool of skbuffs 66 * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
68 *
69 * @pool: Pool to allocate an skbuff for 67 * @pool: Pool to allocate an skbuff for
70 * @size: Size of the buffer needed for the pool 68 * @size: Size of the buffer needed for the pool
71 * @elements: Number of buffers to allocate 69 * @elements: Number of buffers to allocate
@@ -93,96 +91,76 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
93} 91}
94 92
95/** 93/**
96 * This function fills a hardware pool with memory. Depending 94 * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
97 * on the config defines, this memory might come from the
98 * kernel or global 32bit memory allocated with
99 * cvmx_bootmem_alloc.
100 *
101 * @pool: Pool to populate 95 * @pool: Pool to populate
102 * @size: Size of each buffer in the pool 96 * @size: Size of each buffer in the pool
103 * @elements: Number of buffers to allocate 97 * @elements: Number of buffers to allocate
98 *
99 * Returns the actual number of buffers allocated.
104 */ 100 */
105static int cvm_oct_fill_hw_memory(int pool, int size, int elements) 101static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
106{ 102{
107 char *memory; 103 char *memory;
104 char *fpa;
108 int freed = elements; 105 int freed = elements;
109 106
110 if (USE_32BIT_SHARED) { 107 while (freed) {
111 extern uint64_t octeon_reserve32_memory; 108 /*
112 109 * FPA memory must be 128 byte aligned. Since we are
113 memory = 110 * aligning we need to save the original pointer so we
114 cvmx_bootmem_alloc_range(elements * size, 128, 111 * can feed it to kfree when the memory is returned to
115 octeon_reserve32_memory, 112 * the kernel.
116 octeon_reserve32_memory + 113 *
117 (CONFIG_CAVIUM_RESERVE32 << 20) - 114 * We allocate an extra 256 bytes to allow for
118 1); 115 * alignment and space for the original pointer saved
119 if (memory == NULL) 116 * just before the block.
120 panic("Unable to allocate %u bytes for FPA pool %d\n", 117 */
121 elements * size, pool); 118 memory = kmalloc(size + 256, GFP_ATOMIC);
122 119 if (unlikely(memory == NULL)) {
123 pr_notice("Memory range %p - %p reserved for " 120 pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
124 "hardware\n", memory, 121 elements * size, pool);
125 memory + elements * size - 1); 122 break;
126
127 while (freed) {
128 cvmx_fpa_free(memory, pool, 0);
129 memory += size;
130 freed--;
131 }
132 } else {
133 while (freed) {
134 /* We need to force alignment to 128 bytes here */
135 memory = kmalloc(size + 127, GFP_ATOMIC);
136 if (unlikely(memory == NULL)) {
137 pr_warning("Unable to allocate %u bytes for "
138 "FPA pool %d\n",
139 elements * size, pool);
140 break;
141 }
142 memory = (char *)(((unsigned long)memory + 127) & -128);
143 cvmx_fpa_free(memory, pool, 0);
144 freed--;
145 } 123 }
124 fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
125 *((char **)fpa - 1) = memory;
126 cvmx_fpa_free(fpa, pool, 0);
127 freed--;
146 } 128 }
147 return elements - freed; 129 return elements - freed;
148} 130}
149 131
150/** 132/**
151 * Free memory previously allocated with cvm_oct_fill_hw_memory 133 * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
152 *
153 * @pool: FPA pool to free 134 * @pool: FPA pool to free
154 * @size: Size of each buffer in the pool 135 * @size: Size of each buffer in the pool
155 * @elements: Number of buffers that should be in the pool 136 * @elements: Number of buffers that should be in the pool
156 */ 137 */
157static void cvm_oct_free_hw_memory(int pool, int size, int elements) 138static void cvm_oct_free_hw_memory(int pool, int size, int elements)
158{ 139{
159 if (USE_32BIT_SHARED) { 140 char *memory;
160 pr_warning("Warning: 32 shared memory is not freeable\n"); 141 char *fpa;
161 } else { 142 do {
162 char *memory; 143 fpa = cvmx_fpa_alloc(pool);
163 do { 144 if (fpa) {
164 memory = cvmx_fpa_alloc(pool); 145 elements--;
165 if (memory) { 146 fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
166 elements--; 147 memory = *((char **)fpa - 1);
167 kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); 148 kfree(memory);
168 } 149 }
169 } while (memory); 150 } while (fpa);
170 151
171 if (elements < 0) 152 if (elements < 0)
172 pr_warning("Freeing of pool %u had too many " 153 pr_warning("Freeing of pool %u had too many buffers (%d)\n",
173 "buffers (%d)\n", 154 pool, elements);
174 pool, elements); 155 else if (elements > 0)
175 else if (elements > 0) 156 pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
176 pr_warning("Warning: Freeing of pool %u is " 157 pool, elements);
177 "missing %d buffers\n",
178 pool, elements);
179 }
180} 158}
181 159
182int cvm_oct_mem_fill_fpa(int pool, int size, int elements) 160int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
183{ 161{
184 int freed; 162 int freed;
185 if (USE_SKBUFFS_IN_HW) 163 if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
186 freed = cvm_oct_fill_hw_skbuff(pool, size, elements); 164 freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
187 else 165 else
188 freed = cvm_oct_fill_hw_memory(pool, size, elements); 166 freed = cvm_oct_fill_hw_memory(pool, size, elements);
@@ -191,7 +169,7 @@ int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
191 169
192void cvm_oct_mem_empty_fpa(int pool, int size, int elements) 170void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
193{ 171{
194 if (USE_SKBUFFS_IN_HW) 172 if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
195 cvm_oct_free_hw_skbuff(pool, size, elements); 173 cvm_oct_free_hw_skbuff(pool, size, elements);
196 else 174 else
197 cvm_oct_free_hw_memory(pool, size, elements); 175 cvm_oct_free_hw_memory(pool, size, elements);
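cvm_oct_fill_hw_memory() above over-allocates each buffer by 256 bytes, rounds the pointer up to a 128-byte boundary, and stores the original allocation just below the aligned block so cvm_oct_free_hw_memory() can recover and free it. A user-space sketch of the same trick, with malloc()/free() standing in for kmalloc()/kfree() and the FPA pool left out:

        #include <stdio.h>
        #include <stdlib.h>
        #include <stdint.h>

        static void *fill_one(size_t size)
        {
                char *memory = malloc(size + 256);
                char *fpa;

                if (!memory)
                        return NULL;
                /* round up to a 128-byte boundary */
                fpa = (char *)(((uintptr_t)memory + 256) & ~(uintptr_t)0x7f);
                /* remember the real allocation just below the aligned block */
                *((char **)fpa - 1) = memory;
                return fpa;
        }

        static void free_one(void *fpa)
        {
                char *memory = *((char **)fpa - 1);
                free(memory);
        }

        int main(void)
        {
                void *buf = fill_one(2048);

                if (!buf)
                        return 1;
                printf("aligned block %p (128-byte aligned: %d)\n",
                       buf, (int)(((uintptr_t)buf & 0x7f) == 0));
                free_one(buf);
                return 0;
        }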
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
deleted file mode 100644
index 16308d484d3b..000000000000
--- a/drivers/staging/octeon/ethernet-proc.c
+++ /dev/null
@@ -1,144 +0,0 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/seq_file.h>
29#include <linux/proc_fs.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "octeon-ethernet.h"
35#include "ethernet-defines.h"
36
37#include "cvmx-helper.h"
38#include "cvmx-pip.h"
39
40/**
41 * User is reading /proc/octeon_ethernet_stats
42 *
43 * @m:
44 * @v:
45 * Returns
46 */
47static int cvm_oct_stats_show(struct seq_file *m, void *v)
48{
49 struct octeon_ethernet *priv;
50 int port;
51
52 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
53
54 if (cvm_oct_device[port]) {
55 priv = netdev_priv(cvm_oct_device[port]);
56
57 seq_printf(m, "\nOcteon Port %d (%s)\n", port,
58 cvm_oct_device[port]->name);
59 seq_printf(m,
60 "rx_packets: %12lu\t"
61 "tx_packets: %12lu\n",
62 priv->stats.rx_packets,
63 priv->stats.tx_packets);
64 seq_printf(m,
65 "rx_bytes: %12lu\t"
66 "tx_bytes: %12lu\n",
67 priv->stats.rx_bytes, priv->stats.tx_bytes);
68 seq_printf(m,
69 "rx_errors: %12lu\t"
70 "tx_errors: %12lu\n",
71 priv->stats.rx_errors,
72 priv->stats.tx_errors);
73 seq_printf(m,
74 "rx_dropped: %12lu\t"
75 "tx_dropped: %12lu\n",
76 priv->stats.rx_dropped,
77 priv->stats.tx_dropped);
78 seq_printf(m,
79 "rx_length_errors: %12lu\t"
80 "tx_aborted_errors: %12lu\n",
81 priv->stats.rx_length_errors,
82 priv->stats.tx_aborted_errors);
83 seq_printf(m,
84 "rx_over_errors: %12lu\t"
85 "tx_carrier_errors: %12lu\n",
86 priv->stats.rx_over_errors,
87 priv->stats.tx_carrier_errors);
88 seq_printf(m,
89 "rx_crc_errors: %12lu\t"
90 "tx_fifo_errors: %12lu\n",
91 priv->stats.rx_crc_errors,
92 priv->stats.tx_fifo_errors);
93 seq_printf(m,
94 "rx_frame_errors: %12lu\t"
95 "tx_heartbeat_errors: %12lu\n",
96 priv->stats.rx_frame_errors,
97 priv->stats.tx_heartbeat_errors);
98 seq_printf(m,
99 "rx_fifo_errors: %12lu\t"
100 "tx_window_errors: %12lu\n",
101 priv->stats.rx_fifo_errors,
102 priv->stats.tx_window_errors);
103 seq_printf(m,
104 "rx_missed_errors: %12lu\t"
105 "multicast: %12lu\n",
106 priv->stats.rx_missed_errors,
107 priv->stats.multicast);
108 }
109 }
110
111 return 0;
112}
113
114/**
115 * /proc/octeon_ethernet_stats was openned. Use the single_open iterator
116 *
117 * @inode:
118 * @file:
119 * Returns
120 */
121static int cvm_oct_stats_open(struct inode *inode, struct file *file)
122{
123 return single_open(file, cvm_oct_stats_show, NULL);
124}
125
126static const struct file_operations cvm_oct_stats_operations = {
127 .open = cvm_oct_stats_open,
128 .read = seq_read,
129 .llseek = seq_lseek,
130 .release = single_release,
131};
132
133void cvm_oct_proc_initialize(void)
134{
135 struct proc_dir_entry *entry =
136 create_proc_entry("octeon_ethernet_stats", 0, NULL);
137 if (entry)
138 entry->proc_fops = &cvm_oct_stats_operations;
139}
140
141void cvm_oct_proc_shutdown(void)
142{
143 remove_proc_entry("octeon_ethernet_stats", NULL);
144}
diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h
deleted file mode 100644
index 82c7d9f78bc4..000000000000
--- a/drivers/staging/octeon/ethernet-proc.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
28void cvm_oct_proc_initialize(void);
29void cvm_oct_proc_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 3820f1ec11d1..a0d4d4b98bdc 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,7 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h> 29#include <linux/phy.h>
30#include <net/dst.h> 30#include <net/dst.h>
31 31
32#include <asm/octeon/octeon.h> 32#include <asm/octeon/octeon.h>
@@ -48,14 +48,20 @@ static int number_rgmii_ports;
48static void cvm_oct_rgmii_poll(struct net_device *dev) 48static void cvm_oct_rgmii_poll(struct net_device *dev)
49{ 49{
50 struct octeon_ethernet *priv = netdev_priv(dev); 50 struct octeon_ethernet *priv = netdev_priv(dev);
51 unsigned long flags; 51 unsigned long flags = 0;
52 cvmx_helper_link_info_t link_info; 52 cvmx_helper_link_info_t link_info;
53 int use_global_register_lock = (priv->phydev == NULL);
53 54
54 /* 55 BUG_ON(in_interrupt());
55 * Take the global register lock since we are going to touch 56 if (use_global_register_lock) {
56 * registers that affect more than one port. 57 /*
57 */ 58 * Take the global register lock since we are going to
58 spin_lock_irqsave(&global_register_lock, flags); 59 * touch registers that affect more than one port.
60 */
61 spin_lock_irqsave(&global_register_lock, flags);
62 } else {
63 mutex_lock(&priv->phydev->bus->mdio_lock);
64 }
59 65
60 link_info = cvmx_helper_link_get(priv->port); 66 link_info = cvmx_helper_link_get(priv->port);
61 if (link_info.u64 == priv->link_info) { 67 if (link_info.u64 == priv->link_info) {
@@ -115,7 +121,11 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
115 dev->name); 121 dev->name);
116 } 122 }
117 } 123 }
118 spin_unlock_irqrestore(&global_register_lock, flags); 124
125 if (use_global_register_lock)
126 spin_unlock_irqrestore(&global_register_lock, flags);
127 else
128 mutex_unlock(&priv->phydev->bus->mdio_lock);
119 return; 129 return;
120 } 130 }
121 131
@@ -151,7 +161,12 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
151 link_info = cvmx_helper_link_autoconf(priv->port); 161 link_info = cvmx_helper_link_autoconf(priv->port);
152 priv->link_info = link_info.u64; 162 priv->link_info = link_info.u64;
153 } 163 }
154 spin_unlock_irqrestore(&global_register_lock, flags); 164
165 if (use_global_register_lock)
166 spin_unlock_irqrestore(&global_register_lock, flags);
167 else {
168 mutex_unlock(&priv->phydev->bus->mdio_lock);
169 }
155 170
156 if (priv->phydev == NULL) { 171 if (priv->phydev == NULL) {
157 /* Tell core. */ 172 /* Tell core. */
@@ -213,8 +228,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
213 struct net_device *dev = 228 struct net_device *dev =
214 cvm_oct_device[cvmx_helper_get_ipd_port 229 cvm_oct_device[cvmx_helper_get_ipd_port
215 (interface, index)]; 230 (interface, index)];
216 if (dev) 231 struct octeon_ethernet *priv = netdev_priv(dev);
217 cvm_oct_rgmii_poll(dev); 232
233 if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
234 queue_work(cvm_oct_poll_queue, &priv->port_work);
235
218 gmx_rx_int_reg.u64 = 0; 236 gmx_rx_int_reg.u64 = 0;
219 gmx_rx_int_reg.s.phy_dupx = 1; 237 gmx_rx_int_reg.s.phy_dupx = 1;
220 gmx_rx_int_reg.s.phy_link = 1; 238 gmx_rx_int_reg.s.phy_link = 1;
@@ -252,8 +270,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
252 struct net_device *dev = 270 struct net_device *dev =
253 cvm_oct_device[cvmx_helper_get_ipd_port 271 cvm_oct_device[cvmx_helper_get_ipd_port
254 (interface, index)]; 272 (interface, index)];
255 if (dev) 273 struct octeon_ethernet *priv = netdev_priv(dev);
256 cvm_oct_rgmii_poll(dev); 274
275 if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
276 queue_work(cvm_oct_poll_queue, &priv->port_work);
277
257 gmx_rx_int_reg.u64 = 0; 278 gmx_rx_int_reg.u64 = 0;
258 gmx_rx_int_reg.s.phy_dupx = 1; 279 gmx_rx_int_reg.s.phy_dupx = 1;
259 gmx_rx_int_reg.s.phy_link = 1; 280 gmx_rx_int_reg.s.phy_link = 1;
@@ -302,6 +323,12 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
302 return 0; 323 return 0;
303} 324}
304 325
326static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
327{
328 struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
329 cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
330}
331
305int cvm_oct_rgmii_init(struct net_device *dev) 332int cvm_oct_rgmii_init(struct net_device *dev)
306{ 333{
307 struct octeon_ethernet *priv = netdev_priv(dev); 334 struct octeon_ethernet *priv = netdev_priv(dev);
@@ -309,7 +336,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
309 336
310 cvm_oct_common_init(dev); 337 cvm_oct_common_init(dev);
311 dev->netdev_ops->ndo_stop(dev); 338 dev->netdev_ops->ndo_stop(dev);
312 339 INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
313 /* 340 /*
314 * Due to GMX errata in CN3XXX series chips, it is necessary 341 * Due to GMX errata in CN3XXX series chips, it is necessary
315 * to take the link down immediately when the PHY changes 342 * to take the link down immediately when the PHY changes
@@ -397,4 +424,5 @@ void cvm_oct_rgmii_uninit(struct net_device *dev)
397 number_rgmii_ports--; 424 number_rgmii_ports--;
398 if (number_rgmii_ports == 0) 425 if (number_rgmii_ports == 0)
399 free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); 426 free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
427 cancel_work_sync(&priv->port_work);
400} 428}
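The interrupt-handler change above stops calling cvm_oct_rgmii_poll() directly from hard-irq context; the poll path can now take the MDIO bus mutex, which may sleep, so the handler only queues work and the poll runs later from the shared workqueue. A hedged fragment of the general pattern (sketch_* names are illustrative, not part of the driver):

        #include <linux/interrupt.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *sketch_poll_queue;
        static struct work_struct sketch_port_work;

        static void sketch_poll(struct work_struct *work)
        {
                /* process context: taking a mutex (e.g. the MDIO bus lock) is fine */
        }

        static irqreturn_t sketch_rml_interrupt(int irq, void *dev_id)
        {
                /* hard-irq context: must not sleep, so only defer the real work */
                queue_work(sketch_poll_queue, &sketch_port_work);
                return IRQ_HANDLED;
        }

        static void sketch_init(void)
        {
                sketch_poll_queue = create_singlethread_workqueue("sketch_poll");
                INIT_WORK(&sketch_port_work, sketch_poll);
        }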
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1b237b7e689d..cb38f9eb2cc0 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -27,16 +27,14 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/cache.h> 29#include <linux/cache.h>
30#include <linux/cpumask.h>
30#include <linux/netdevice.h> 31#include <linux/netdevice.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
33#include <linux/ip.h> 34#include <linux/ip.h>
34#include <linux/string.h> 35#include <linux/string.h>
35#include <linux/prefetch.h> 36#include <linux/prefetch.h>
36#include <linux/ethtool.h> 37#include <linux/smp.h>
37#include <linux/mii.h>
38#include <linux/seq_file.h>
39#include <linux/proc_fs.h>
40#include <net/dst.h> 38#include <net/dst.h>
41#ifdef CONFIG_XFRM 39#ifdef CONFIG_XFRM
42#include <linux/xfrm.h> 40#include <linux/xfrm.h>
@@ -48,8 +46,9 @@
48#include <asm/octeon/octeon.h> 46#include <asm/octeon/octeon.h>
49 47
50#include "ethernet-defines.h" 48#include "ethernet-defines.h"
51#include "octeon-ethernet.h"
52#include "ethernet-mem.h" 49#include "ethernet-mem.h"
50#include "ethernet-rx.h"
51#include "octeon-ethernet.h"
53#include "ethernet-util.h" 52#include "ethernet-util.h"
54 53
55#include "cvmx-helper.h" 54#include "cvmx-helper.h"
@@ -61,62 +60,88 @@
61 60
62#include "cvmx-gmxx-defs.h" 61#include "cvmx-gmxx-defs.h"
63 62
64struct cvm_tasklet_wrapper { 63struct cvm_napi_wrapper {
65 struct tasklet_struct t; 64 struct napi_struct napi;
66}; 65} ____cacheline_aligned_in_smp;
67 66
68/* 67static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
69 * Aligning the tasklet_struct on cachline boundries seems to decrease
70 * throughput even though in theory it would reduce contantion on the
71 * cache lines containing the locks.
72 */
73 68
74static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS]; 69struct cvm_oct_core_state {
70 int baseline_cores;
71 /*
72 * The number of additional cores that could be processing
 73 * input packets.
74 */
75 atomic_t available_cores;
76 cpumask_t cpu_state;
77} ____cacheline_aligned_in_smp;
75 78
76/** 79static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
77 * Interrupt handler. The interrupt occurs whenever the POW 80
78 * transitions from 0->1 packets in our group. 81static void cvm_oct_enable_napi(void *_)
79 *
80 * @cpl:
81 * @dev_id:
82 * @regs:
83 * Returns
84 */
85irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
86{ 82{
87 /* Acknowledge the interrupt */ 83 int cpu = smp_processor_id();
88 if (INTERRUPT_LIMIT) 84 napi_schedule(&cvm_oct_napi[cpu].napi);
89 cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group); 85}
90 else 86
91 cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group); 87static void cvm_oct_enable_one_cpu(void)
92 preempt_disable(); 88{
93 tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t); 89 int v;
94 preempt_enable(); 90 int cpu;
95 return IRQ_HANDLED; 91
92 /* Check to see if more CPUs are available for receive processing... */
93 v = atomic_sub_if_positive(1, &core_state.available_cores);
94 if (v < 0)
95 return;
96
97 /* ... if a CPU is available, Turn on NAPI polling for that CPU. */
98 for_each_online_cpu(cpu) {
99 if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
100 v = smp_call_function_single(cpu, cvm_oct_enable_napi,
101 NULL, 0);
102 if (v)
103 panic("Can't enable NAPI.");
104 break;
105 }
106 }
107}
108
109static void cvm_oct_no_more_work(void)
110{
111 int cpu = smp_processor_id();
112
113 /*
114 * CPU zero is special. It always has the irq enabled when
115 * waiting for incoming packets.
116 */
117 if (cpu == 0) {
118 enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
119 return;
120 }
121
122 cpu_clear(cpu, core_state.cpu_state);
123 atomic_add(1, &core_state.available_cores);
96} 124}
97 125
98#ifdef CONFIG_NET_POLL_CONTROLLER
99/** 126/**
100 * This is called when the kernel needs to manually poll the 127 * cvm_oct_do_interrupt - interrupt handler.
101 * device. For Octeon, this is simply calling the interrupt 128 *
102 * handler. We actually poll all the devices, not just the 129 * The interrupt occurs whenever the POW has packets in our group.
103 * one supplied.
104 * 130 *
105 * @dev: Device to poll. Unused
106 */ 131 */
107void cvm_oct_poll_controller(struct net_device *dev) 132static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
108{ 133{
109 preempt_disable(); 134 /* Disable the IRQ and start napi_poll. */
110 tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t); 135 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
111 preempt_enable(); 136 cvm_oct_enable_napi(NULL);
137
138 return IRQ_HANDLED;
112} 139}
113#endif
114 140
115/** 141/**
116 * This is called on receive errors, and determines if the packet 142 * cvm_oct_check_rcv_error - process receive errors
117 * can be dropped early-on in cvm_oct_tasklet_rx().
118 *
119 * @work: Work queue entry pointing to the packet. 143 * @work: Work queue entry pointing to the packet.
144 *
120 * Returns Non-zero if the packet can be dropped, zero otherwise. 145 * Returns Non-zero if the packet can be dropped, zero otherwise.
121 */ 146 */
122static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) 147static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
@@ -199,19 +224,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
199} 224}
200 225
201/** 226/**
202 * Tasklet function that is scheduled on a core when an interrupt occurs. 227 * cvm_oct_napi_poll - the NAPI poll function.
228 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
229 * @budget: Maximum number of packets to receive.
203 * 230 *
204 * @unused: 231 * Returns the number of packets processed.
205 */ 232 */
206void cvm_oct_tasklet_rx(unsigned long unused) 233static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
207{ 234{
208 const int coreid = cvmx_get_core_num(); 235 const int coreid = cvmx_get_core_num();
209 uint64_t old_group_mask; 236 uint64_t old_group_mask;
210 uint64_t old_scratch; 237 uint64_t old_scratch;
211 int rx_count = 0; 238 int rx_count = 0;
212 int number_to_free; 239 int did_work_request = 0;
213 int num_freed; 240 int packet_not_copied;
214 int packet_not_copied;
215 241
216 /* Prefetch cvm_oct_device since we know we need it soon */ 242 /* Prefetch cvm_oct_device since we know we need it soon */
217 prefetch(cvm_oct_device); 243 prefetch(cvm_oct_device);
@@ -227,59 +253,63 @@ void cvm_oct_tasklet_rx(unsigned long unused)
227 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), 253 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
228 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); 254 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
229 255
230 if (USE_ASYNC_IOBDMA) 256 if (USE_ASYNC_IOBDMA) {
231 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); 257 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
258 did_work_request = 1;
259 }
232 260
233 while (1) { 261 while (rx_count < budget) {
234 struct sk_buff *skb = NULL; 262 struct sk_buff *skb = NULL;
263 struct sk_buff **pskb = NULL;
235 int skb_in_hw; 264 int skb_in_hw;
236 cvmx_wqe_t *work; 265 cvmx_wqe_t *work;
237 266
238 if (USE_ASYNC_IOBDMA) { 267 if (USE_ASYNC_IOBDMA && did_work_request)
239 work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); 268 work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
240 } else { 269 else
241 if ((INTERRUPT_LIMIT == 0) 270 work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
242 || likely(rx_count < MAX_RX_PACKETS)) 271
243 work =
244 cvmx_pow_work_request_sync
245 (CVMX_POW_NO_WAIT);
246 else
247 work = NULL;
248 }
249 prefetch(work); 272 prefetch(work);
250 if (work == NULL) 273 did_work_request = 0;
274 if (work == NULL) {
275 union cvmx_pow_wq_int wq_int;
276 wq_int.u64 = 0;
277 wq_int.s.iq_dis = 1 << pow_receive_group;
278 wq_int.s.wq_int = 1 << pow_receive_group;
279 cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
251 break; 280 break;
281 }
282 pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
283 prefetch(pskb);
252 284
253 /* 285 if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
254 * Limit each core to processing MAX_RX_PACKETS 286 cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
255 * packets without a break. This way the RX can't 287 did_work_request = 1;
256 * starve the TX task. 288 }
257 */ 289
258 if (USE_ASYNC_IOBDMA) { 290 if (rx_count == 0) {
259 291 /*
260 if ((INTERRUPT_LIMIT == 0) 292 * First time through, see if there is enough
261 || likely(rx_count < MAX_RX_PACKETS)) 293 * work waiting to merit waking another
262 cvmx_pow_work_request_async_nocheck 294 * CPU.
263 (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); 295 */
264 else { 296 union cvmx_pow_wq_int_cntx counts;
265 cvmx_scratch_write64(CVMX_SCR_SCRATCH, 297 int backlog;
266 0x8000000000000000ull); 298 int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
267 cvmx_pow_tag_sw_null_nocheck(); 299 counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
268 } 300 backlog = counts.s.iq_cnt + counts.s.ds_cnt;
301 if (backlog > budget * cores_in_use && napi != NULL)
302 cvm_oct_enable_one_cpu();
269 } 303 }
270 304
271 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; 305 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
272 if (likely(skb_in_hw)) { 306 if (likely(skb_in_hw)) {
273 skb = 307 skb = *pskb;
274 *(struct sk_buff
275 **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
276 sizeof(void *));
277 prefetch(&skb->head); 308 prefetch(&skb->head);
278 prefetch(&skb->len); 309 prefetch(&skb->len);
279 } 310 }
280 prefetch(cvm_oct_device[work->ipprt]); 311 prefetch(cvm_oct_device[work->ipprt]);
281 312
282 rx_count++;
283 /* Immediately throw away all packets with receive errors */ 313 /* Immediately throw away all packets with receive errors */
284 if (unlikely(work->word2.snoip.rcv_error)) { 314 if (unlikely(work->word2.snoip.rcv_error)) {
285 if (cvm_oct_check_rcv_error(work)) 315 if (cvm_oct_check_rcv_error(work))
@@ -292,39 +322,27 @@ void cvm_oct_tasklet_rx(unsigned long unused)
292 * buffer. 322 * buffer.
293 */ 323 */
294 if (likely(skb_in_hw)) { 324 if (likely(skb_in_hw)) {
295 /* 325 skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
296 * This calculation was changed in case the
297 * skb header is using a different address
298 * aliasing type than the buffer. It doesn't
299 * make any differnece now, but the new one is
300 * more correct.
301 */
302 skb->data =
303 skb->head + work->packet_ptr.s.addr -
304 cvmx_ptr_to_phys(skb->head);
305 prefetch(skb->data); 326 prefetch(skb->data);
306 skb->len = work->len; 327 skb->len = work->len;
307 skb_set_tail_pointer(skb, skb->len); 328 skb_set_tail_pointer(skb, skb->len);
308 packet_not_copied = 1; 329 packet_not_copied = 1;
309 } else { 330 } else {
310
311 /* 331 /*
312 * We have to copy the packet. First allocate 332 * We have to copy the packet. First allocate
313 * an skbuff for it. 333 * an skbuff for it.
314 */ 334 */
315 skb = dev_alloc_skb(work->len); 335 skb = dev_alloc_skb(work->len);
316 if (!skb) { 336 if (!skb) {
317 DEBUGPRINT("Port %d failed to allocate " 337 DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
318 "skbuff, packet dropped\n", 338 work->ipprt);
319 work->ipprt);
320 cvm_oct_free_work(work); 339 cvm_oct_free_work(work);
321 continue; 340 continue;
322 } 341 }
323 342
324 /* 343 /*
325 * Check if we've received a packet that was 344 * Check if we've received a packet that was
326 * entirely stored in the work entry. This is 345 * entirely stored in the work entry.
327 * untested.
328 */ 346 */
329 if (unlikely(work->word2.s.bufs == 0)) { 347 if (unlikely(work->word2.s.bufs == 0)) {
330 uint8_t *ptr = work->packet_data; 348 uint8_t *ptr = work->packet_data;
@@ -343,15 +361,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
343 /* No packet buffers to free */ 361 /* No packet buffers to free */
344 } else { 362 } else {
345 int segments = work->word2.s.bufs; 363 int segments = work->word2.s.bufs;
346 union cvmx_buf_ptr segment_ptr = 364 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
347 work->packet_ptr;
348 int len = work->len; 365 int len = work->len;
349 366
350 while (segments--) { 367 while (segments--) {
351 union cvmx_buf_ptr next_ptr = 368 union cvmx_buf_ptr next_ptr =
352 *(union cvmx_buf_ptr *) 369 *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
353 cvmx_phys_to_ptr(segment_ptr.s. 370
354 addr - 8);
355 /* 371 /*
356 * Octeon Errata PKI-100: The segment size is 372 * Octeon Errata PKI-100: The segment size is
357 * wrong. Until it is fixed, calculate the 373 * wrong. Until it is fixed, calculate the
@@ -361,22 +377,18 @@ void cvm_oct_tasklet_rx(unsigned long unused)
361 * one: int segment_size = 377 * one: int segment_size =
362 * segment_ptr.s.size; 378 * segment_ptr.s.size;
363 */ 379 */
364 int segment_size = 380 int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
365 CVMX_FPA_PACKET_POOL_SIZE - 381 (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
366 (segment_ptr.s.addr - 382 /*
367 (((segment_ptr.s.addr >> 7) - 383 * Don't copy more than what
368 segment_ptr.s.back) << 7)); 384 * is left in the packet.
369 /* Don't copy more than what is left 385 */
370 in the packet */
371 if (segment_size > len) 386 if (segment_size > len)
372 segment_size = len; 387 segment_size = len;
373 /* Copy the data into the packet */ 388 /* Copy the data into the packet */
374 memcpy(skb_put(skb, segment_size), 389 memcpy(skb_put(skb, segment_size),
375 cvmx_phys_to_ptr(segment_ptr.s. 390 cvmx_phys_to_ptr(segment_ptr.s.addr),
376 addr),
377 segment_size); 391 segment_size);
378 /* Reduce the amount of bytes left
379 to copy */
380 len -= segment_size; 392 len -= segment_size;
381 segment_ptr = next_ptr; 393 segment_ptr = next_ptr;
382 } 394 }
@@ -389,16 +401,15 @@ void cvm_oct_tasklet_rx(unsigned long unused)
389 struct net_device *dev = cvm_oct_device[work->ipprt]; 401 struct net_device *dev = cvm_oct_device[work->ipprt];
390 struct octeon_ethernet *priv = netdev_priv(dev); 402 struct octeon_ethernet *priv = netdev_priv(dev);
391 403
392 /* Only accept packets for devices 404 /*
393 that are currently up */ 405 * Only accept packets for devices that are
406 * currently up.
407 */
394 if (likely(dev->flags & IFF_UP)) { 408 if (likely(dev->flags & IFF_UP)) {
395 skb->protocol = eth_type_trans(skb, dev); 409 skb->protocol = eth_type_trans(skb, dev);
396 skb->dev = dev; 410 skb->dev = dev;
397 411
398 if (unlikely 412 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
399 (work->word2.s.not_IP
400 || work->word2.s.IP_exc
401 || work->word2.s.L4_error))
402 skb->ip_summed = CHECKSUM_NONE; 413 skb->ip_summed = CHECKSUM_NONE;
403 else 414 else
404 skb->ip_summed = CHECKSUM_UNNECESSARY; 415 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -414,15 +425,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
414#endif 425#endif
415 } 426 }
416 netif_receive_skb(skb); 427 netif_receive_skb(skb);
428 rx_count++;
417 } else { 429 } else {
430 /* Drop any packet received for a device that isn't up */
418 /* 431 /*
419 * Drop any packet received for a 432 DEBUGPRINT("%s: Device not up, packet dropped\n",
420 * device that isn't up. 433 dev->name);
421 */ 434 */
422 /*
423 DEBUGPRINT("%s: Device not up, packet dropped\n",
424 dev->name);
425 */
426#ifdef CONFIG_64BIT 435#ifdef CONFIG_64BIT
427 atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); 436 atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
428#else 437#else
@@ -435,9 +444,8 @@ void cvm_oct_tasklet_rx(unsigned long unused)
435 * Drop any packet received for a device that 444 * Drop any packet received for a device that
436 * doesn't exist. 445 * doesn't exist.
437 */ 446 */
438 DEBUGPRINT("Port %d not controlled by Linux, packet " 447 DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
439 "dropped\n", 448 work->ipprt);
440 work->ipprt);
441 dev_kfree_skb_irq(skb); 449 dev_kfree_skb_irq(skb);
442 } 450 }
443 /* 451 /*
@@ -459,47 +467,93 @@ void cvm_oct_tasklet_rx(unsigned long unused)
459 cvm_oct_free_work(work); 467 cvm_oct_free_work(work);
460 } 468 }
461 } 469 }
462
463 /* Restore the original POW group mask */ 470 /* Restore the original POW group mask */
464 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); 471 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
465 if (USE_ASYNC_IOBDMA) { 472 if (USE_ASYNC_IOBDMA) {
466 /* Restore the scratch area */ 473 /* Restore the scratch area */
467 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 474 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
468 } 475 }
476 cvm_oct_rx_refill_pool(0);
469 477
470 if (USE_SKBUFFS_IN_HW) { 478 if (rx_count < budget && napi != NULL) {
471 /* Refill the packet buffer pool */ 479 /* No more work */
472 number_to_free = 480 napi_complete(napi);
473 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 481 cvm_oct_no_more_work();
474
475 if (number_to_free > 0) {
476 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
477 -number_to_free);
478 num_freed =
479 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
480 CVMX_FPA_PACKET_POOL_SIZE,
481 number_to_free);
482 if (num_freed != number_to_free) {
483 cvmx_fau_atomic_add32
484 (FAU_NUM_PACKET_BUFFERS_TO_FREE,
485 number_to_free - num_freed);
486 }
487 }
488 } 482 }
483 return rx_count;
489} 484}
490 485
486#ifdef CONFIG_NET_POLL_CONTROLLER
487/**
488 * cvm_oct_poll_controller - poll for receive packets
489 * device.
490 *
491 * @dev: Device to poll. Unused
492 */
493void cvm_oct_poll_controller(struct net_device *dev)
494{
495 cvm_oct_napi_poll(NULL, 16);
496}
497#endif
498
491void cvm_oct_rx_initialize(void) 499void cvm_oct_rx_initialize(void)
492{ 500{
493 int i; 501 int i;
494 /* Initialize all of the tasklets */ 502 struct net_device *dev_for_napi = NULL;
495 for (i = 0; i < NR_CPUS; i++) 503 union cvmx_pow_wq_int_thrx int_thr;
496 tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0); 504 union cvmx_pow_wq_int_pc int_pc;
505
506 for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
507 if (cvm_oct_device[i]) {
508 dev_for_napi = cvm_oct_device[i];
509 break;
510 }
511 }
512
513 if (NULL == dev_for_napi)
514 panic("No net_devices were allocated.");
515
516 if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
517 atomic_set(&core_state.available_cores, max_rx_cpus);
518 else
519 atomic_set(&core_state.available_cores, num_online_cpus());
520 core_state.baseline_cores = atomic_read(&core_state.available_cores);
521
522 core_state.cpu_state = CPU_MASK_NONE;
523 for_each_possible_cpu(i) {
524 netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
525 cvm_oct_napi_poll, rx_napi_weight);
526 napi_enable(&cvm_oct_napi[i].napi);
527 }
 528 /* Register an IRQ handler to receive POW interrupts */
529 i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
530 cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
531
532 if (i)
533 panic("Could not acquire Ethernet IRQ %d\n",
534 OCTEON_IRQ_WORKQ0 + pow_receive_group);
535
536 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
537
538 int_thr.u64 = 0;
539 int_thr.s.tc_en = 1;
540 int_thr.s.tc_thr = 1;
541 /* Enable POW interrupt when our port has at least one packet */
542 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
543
544 int_pc.u64 = 0;
545 int_pc.s.pc_thr = 5;
546 cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
547
548
 549 /* Schedule NAPI now. This will indirectly enable interrupts. */
550 cvm_oct_enable_one_cpu();
497} 551}
498 552
499void cvm_oct_rx_shutdown(void) 553void cvm_oct_rx_shutdown(void)
500{ 554{
501 int i; 555 int i;
502 /* Shutdown all of the tasklets */ 556 /* Shutdown all of the NAPIs */
503 for (i = 0; i < NR_CPUS; i++) 557 for_each_possible_cpu(i)
504 tasklet_kill(&cvm_oct_tasklet[i].t); 558 netif_napi_del(&cvm_oct_napi[i].napi);
505} 559}
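The conversion above replaces the per-CPU tasklets with per-CPU NAPI instances. The core flow is: the POW interrupt disables itself and schedules NAPI; the poll routine consumes at most 'budget' packets; when it drains the backlog early it calls napi_complete() and re-enables the interrupt. A hedged fragment of that flow (sketch_* names and the receive helper are illustrative):

        #include <linux/netdevice.h>
        #include <linux/interrupt.h>

        static struct napi_struct sketch_napi;
        static int sketch_irq;

        static int sketch_receive_one_packet(void);    /* hypothetical helper */

        static irqreturn_t sketch_rx_interrupt(int irq, void *dev_id)
        {
                disable_irq_nosync(irq);        /* stop the interrupt from re-firing */
                napi_schedule(&sketch_napi);    /* poll from softirq context instead */
                return IRQ_HANDLED;
        }

        static int sketch_napi_poll(struct napi_struct *napi, int budget)
        {
                int rx_count = 0;

                while (rx_count < budget && sketch_receive_one_packet())
                        rx_count++;

                if (rx_count < budget) {
                        napi_complete(napi);    /* backlog drained: stop polling */
                        enable_irq(sketch_irq); /* and wait for the next interrupt */
                }
                return rx_count;
        }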
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a9b72b87a7a6..a0743b85d54e 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -24,10 +24,29 @@
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26*********************************************************************/ 26*********************************************************************/
27#include "cvmx-fau.h"
27 28
28irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id);
29void cvm_oct_poll_controller(struct net_device *dev); 29void cvm_oct_poll_controller(struct net_device *dev);
30void cvm_oct_tasklet_rx(unsigned long unused);
31
32void cvm_oct_rx_initialize(void); 30void cvm_oct_rx_initialize(void);
33void cvm_oct_rx_shutdown(void); 31void cvm_oct_rx_shutdown(void);
32
33static inline void cvm_oct_rx_refill_pool(int fill_threshold)
34{
35 int number_to_free;
36 int num_freed;
37 /* Refill the packet buffer pool */
38 number_to_free =
39 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
40
41 if (number_to_free > fill_threshold) {
42 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
43 -number_to_free);
44 num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
45 CVMX_FPA_PACKET_POOL_SIZE,
46 number_to_free);
47 if (num_freed != number_to_free) {
48 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
49 number_to_free - num_freed);
50 }
51 }
52}
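cvm_oct_rx_refill_pool() above uses a fetch-and-add counter to track how many packet buffers the hardware has consumed: the refill path claims the whole count, tries to replenish that many buffers, and credits back whatever it failed to allocate. A self-contained sketch of the bookkeeping, with the FAU register and FPA pool replaced by plain variables (fill_pool() is a hypothetical stand-in):

        #include <stdio.h>

        static int buffers_to_free = 7;     /* stands in for the FAU counter */

        static int fill_pool(int wanted)
        {
                return wanted - 2;          /* pretend two allocations failed */
        }

        int main(void)
        {
                int number_to_free = buffers_to_free;

                if (number_to_free > 0) {
                        int num_freed;

                        buffers_to_free -= number_to_free;       /* claim them all */
                        num_freed = fill_pool(number_to_free);
                        if (num_freed != number_to_free)          /* credit back the shortfall */
                                buffers_to_free += number_to_free - num_freed;
                }
                printf("counter after refill: %d\n", buffers_to_free);
                return 0;
        }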
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 6061d01eca2d..2d8589eb461e 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 00dc0f4bad19..b58b8971f939 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 535294105f65..afc2b734d554 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -31,10 +31,6 @@
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/seq_file.h>
37#include <linux/proc_fs.h>
38#include <net/dst.h> 34#include <net/dst.h>
39#ifdef CONFIG_XFRM 35#ifdef CONFIG_XFRM
40#include <linux/xfrm.h> 36#include <linux/xfrm.h>
@@ -52,11 +48,14 @@
52 48
53#include "cvmx-wqe.h" 49#include "cvmx-wqe.h"
54#include "cvmx-fau.h" 50#include "cvmx-fau.h"
51#include "cvmx-pip.h"
55#include "cvmx-pko.h" 52#include "cvmx-pko.h"
56#include "cvmx-helper.h" 53#include "cvmx-helper.h"
57 54
58#include "cvmx-gmxx-defs.h" 55#include "cvmx-gmxx-defs.h"
59 56
57#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
58
60/* 59/*
61 * You can define GET_SKBUFF_QOS() to override how the skbuff output 60 * You can define GET_SKBUFF_QOS() to override how the skbuff output
62 * function determines which output queue is used. The default 61 * function determines which output queue is used. The default
@@ -68,12 +67,81 @@
68#define GET_SKBUFF_QOS(skb) 0 67#define GET_SKBUFF_QOS(skb) 0
69#endif 68#endif
70 69
70static void cvm_oct_tx_do_cleanup(unsigned long arg);
71static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
72
73/* Maximum number of SKBs to try to free per xmit packet. */
74#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
75
76static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
77{
78 int32_t undo;
79 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
80 if (undo > 0)
81 cvmx_fau_atomic_add32(fau, -undo);
82 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
83 return skb_to_free;
84}
85
86static void cvm_oct_kick_tx_poll_watchdog(void)
87{
88 union cvmx_ciu_timx ciu_timx;
89 ciu_timx.u64 = 0;
90 ciu_timx.s.one_shot = 1;
91 ciu_timx.s.len = cvm_oct_tx_poll_interval;
92 cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
93}
94
95void cvm_oct_free_tx_skbs(struct net_device *dev)
96{
97 int32_t skb_to_free;
98 int qos, queues_per_port;
99 int total_freed = 0;
100 int total_remaining = 0;
101 unsigned long flags;
102 struct octeon_ethernet *priv = netdev_priv(dev);
103
104 queues_per_port = cvmx_pko_get_num_queues(priv->port);
105 /* Drain any pending packets in the free list */
106 for (qos = 0; qos < queues_per_port; qos++) {
107 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
108 continue;
109 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
110 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
111
112
113 total_freed += skb_to_free;
114 if (skb_to_free > 0) {
115 struct sk_buff *to_free_list = NULL;
116 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
117 while (skb_to_free > 0) {
118 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
119 t->next = to_free_list;
120 to_free_list = t;
121 skb_to_free--;
122 }
123 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
124 /* Do the actual freeing outside of the lock. */
125 while (to_free_list) {
126 struct sk_buff *t = to_free_list;
127 to_free_list = to_free_list->next;
128 dev_kfree_skb_any(t);
129 }
130 }
131 total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
132 }
133 if (total_freed >= 0 && netif_queue_stopped(dev))
134 netif_wake_queue(dev);
135 if (total_remaining)
136 cvm_oct_kick_tx_poll_watchdog();
137}
138
71/** 139/**
72 * Packet transmit 140 * cvm_oct_xmit - transmit a packet
73 *
74 * @skb: Packet to send 141 * @skb: Packet to send
75 * @dev: Device info structure 142 * @dev: Device info structure
76 * Returns Always returns zero 143 *
144 * Returns Always returns NETDEV_TX_OK
77 */ 145 */
78int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) 146int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
79{ 147{
@@ -81,13 +149,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
81 union cvmx_buf_ptr hw_buffer; 149 union cvmx_buf_ptr hw_buffer;
82 uint64_t old_scratch; 150 uint64_t old_scratch;
83 uint64_t old_scratch2; 151 uint64_t old_scratch2;
84 int dropped;
85 int qos; 152 int qos;
86 int queue_it_up; 153 int i;
154 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
87 struct octeon_ethernet *priv = netdev_priv(dev); 155 struct octeon_ethernet *priv = netdev_priv(dev);
156 struct sk_buff *to_free_list;
88 int32_t skb_to_free; 157 int32_t skb_to_free;
89 int32_t undo;
90 int32_t buffers_to_free; 158 int32_t buffers_to_free;
159 u32 total_to_clean;
160 unsigned long flags;
91#if REUSE_SKBUFFS_WITHOUT_FREE 161#if REUSE_SKBUFFS_WITHOUT_FREE
92 unsigned char *fpa_head; 162 unsigned char *fpa_head;
93#endif 163#endif
@@ -98,9 +168,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
98 */ 168 */
99 prefetch(priv); 169 prefetch(priv);
100 170
101 /* Start off assuming no drop */
102 dropped = 0;
103
104 /* 171 /*
105 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to 172 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
106 * completely remove "qos" in the event neither interface 173 * completely remove "qos" in the event neither interface
@@ -135,6 +202,28 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
135 } 202 }
136 203
137 /* 204 /*
 205 * We have space for 6 segment pointers. If there will be more
206 * than that, we must linearize.
207 */
208 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
209 if (unlikely(__skb_linearize(skb))) {
210 queue_type = QUEUE_DROP;
211 if (USE_ASYNC_IOBDMA) {
212 /* Get the number of skbuffs in use by the hardware */
213 CVMX_SYNCIOBDMA;
214 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
215 } else {
216 /* Get the number of skbuffs in use by the hardware */
217 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
218 MAX_SKB_TO_FREE);
219 }
220 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
221 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
222 goto skip_xmit;
223 }
224 }
225
226 /*
138 * The CN3XXX series of parts has an errata (GMX-401) which 227 * The CN3XXX series of parts has an errata (GMX-401) which
139 * causes the GMX block to hang if a collision occurs towards 228 * causes the GMX block to hang if a collision occurs towards
140 * the end of a <68 byte packet. As a workaround for this, we 229 * the end of a <68 byte packet. As a workaround for this, we
@@ -162,13 +251,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
162 } 251 }
163 } 252 }
164 253
165 /* Build the PKO buffer pointer */
166 hw_buffer.u64 = 0;
167 hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
168 hw_buffer.s.pool = 0;
169 hw_buffer.s.size =
170 (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
171
172 /* Build the PKO command */ 254 /* Build the PKO command */
173 pko_command.u64 = 0; 255 pko_command.u64 = 0;
174 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 256 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
@@ -178,7 +260,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
178 pko_command.s.subone0 = 1; 260 pko_command.s.subone0 = 1;
179 261
180 pko_command.s.dontfree = 1; 262 pko_command.s.dontfree = 1;
181 pko_command.s.reg0 = priv->fau + qos * 4; 263
264 /* Build the PKO buffer pointer */
265 hw_buffer.u64 = 0;
266 if (skb_shinfo(skb)->nr_frags == 0) {
267 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
268 hw_buffer.s.pool = 0;
269 hw_buffer.s.size = skb->len;
270 } else {
271 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
272 hw_buffer.s.pool = 0;
273 hw_buffer.s.size = skb_headlen(skb);
274 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
276 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
277 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
278 hw_buffer.s.size = fs->size;
279 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
280 }
281 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
282 hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
283 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
284 pko_command.s.gather = 1;
285 goto dont_put_skbuff_in_hw;
286 }
287
182 /* 288 /*
183 * See if we can put this skb in the FPA pool. Any strange 289 * See if we can put this skb in the FPA pool. Any strange
184 * behavior from the Linux networking stack will most likely 290 * behavior from the Linux networking stack will most likely
@@ -190,7 +296,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
190 * shown a 25% increase in performance under some loads. 296 * shown a 25% increase in performance under some loads.
191 */ 297 */
192#if REUSE_SKBUFFS_WITHOUT_FREE 298#if REUSE_SKBUFFS_WITHOUT_FREE
193 fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f); 299 fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
194 if (unlikely(skb->data < fpa_head)) { 300 if (unlikely(skb->data < fpa_head)) {
195 /* 301 /*
196 * printk("TX buffer beginning can't meet FPA 302 * printk("TX buffer beginning can't meet FPA
@@ -248,10 +354,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
248 * We can use this buffer in the FPA. We don't need the FAU 354 * We can use this buffer in the FPA. We don't need the FAU
249 * update anymore 355 * update anymore
250 */ 356 */
251 pko_command.s.reg0 = 0;
252 pko_command.s.dontfree = 0; 357 pko_command.s.dontfree = 0;
253 358
254 hw_buffer.s.back = (skb->data - fpa_head) >> 7; 359 hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
255 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; 360 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
256 361
257 /* 362 /*
@@ -272,16 +377,16 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
272 skb->tc_verd = 0; 377 skb->tc_verd = 0;
273#endif /* CONFIG_NET_CLS_ACT */ 378#endif /* CONFIG_NET_CLS_ACT */
274#endif /* CONFIG_NET_SCHED */ 379#endif /* CONFIG_NET_SCHED */
380#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
275 381
276dont_put_skbuff_in_hw: 382dont_put_skbuff_in_hw:
277#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
278 383
279 /* Check if we can use the hardware checksumming */ 384 /* Check if we can use the hardware checksumming */
280 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && 385 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
281 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && 386 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
282 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) 387 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
283 && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) 388 && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
284 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) { 389 || (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
285 /* Use hardware checksum calc */ 390 /* Use hardware checksum calc */
286 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 391 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
287 } 392 }
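
The hardware checksum test above covers only the simplest case. Pulled out as a standalone predicate with each clause annotated (an illustrative sketch, not code from the patch; the htons(IP_DF) comparison assumes the big-endian Octeon, where it equals the bare 1 << 14 the driver compares against):

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip.h>				/* IP_DF */

/* Sketch: when cvm_oct_xmit can hand TCP/UDP checksumming to the PKO. */
static bool hw_csum_eligible(const struct sk_buff *skb)
{
	const struct iphdr *ip = ip_hdr(skb);

	return skb->protocol == htons(ETH_P_IP) &&	/* IPv4 frame */
	       ip->version == 4 &&
	       ip->ihl == 5 &&				/* no IP options */
	       (ip->frag_off == 0 ||			/* not fragmented, */
		ip->frag_off == htons(IP_DF)) &&	/* or only DF set */
	       (ip->protocol == IPPROTO_TCP ||
		ip->protocol == IPPROTO_UDP);		/* TCP or UDP only */
}

When the predicate holds, ipoffp1 is set to sizeof(struct ethhdr) + 1, the offset of the IP header plus one, which tells the PKO where to start checksumming.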
@@ -299,89 +404,116 @@ dont_put_skbuff_in_hw:
299 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 404 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
300 } 405 }
301 406
302 /* 407 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
303 * We try to claim MAX_SKB_TO_FREE buffers. If there were not
304 * that many available, we have to un-claim (undo) any that
305 * were in excess. If skb_to_free is positive we will free
306 * that many buffers.
307 */
308 undo = skb_to_free > 0 ?
309 MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
310 if (undo > 0)
311 cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
312 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
313 MAX_SKB_TO_FREE : -skb_to_free;
314 408
315 /* 409 /*
316 * If we're sending faster than the receive can free them then 410 * If we're sending faster than the receive can free them then
317 * don't do the HW free. 411 * don't do the HW free.
318 */ 412 */
319 if ((buffers_to_free < -100) && !pko_command.s.dontfree) { 413 if ((buffers_to_free < -100) && !pko_command.s.dontfree)
320 pko_command.s.dontfree = 1; 414 pko_command.s.dontfree = 1;
321 pko_command.s.reg0 = priv->fau + qos * 4; 415
416 if (pko_command.s.dontfree) {
417 queue_type = QUEUE_CORE;
418 pko_command.s.reg0 = priv->fau+qos*4;
419 } else {
420 queue_type = QUEUE_HW;
322 } 421 }
422 if (USE_ASYNC_IOBDMA)
423 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
323 424
324 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, 425 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
325 CVMX_PKO_LOCK_CMD_QUEUE);
326 426
327 /* Drop this packet if we have too many already queued to the HW */ 427 /* Drop this packet if we have too many already queued to the HW */
328 if (unlikely 428 if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
329 (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { 429 if (dev->tx_queue_len != 0) {
330 /* 430 /* Drop the lock when notifying the core. */
331 DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name); 431 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
332 */ 432 netif_stop_queue(dev);
333 dropped = 1; 433 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
434 } else {
435 /* If not using normal queueing. */
436 queue_type = QUEUE_DROP;
437 goto skip_xmit;
438 }
334 } 439 }
440
441 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
442 CVMX_PKO_LOCK_NONE);
443
335 /* Send the packet to the output queue */ 444 /* Send the packet to the output queue */
336 else if (unlikely 445 if (unlikely(cvmx_pko_send_packet_finish(priv->port,
337 (cvmx_pko_send_packet_finish 446 priv->queue + qos,
338 (priv->port, priv->queue + qos, pko_command, hw_buffer, 447 pko_command, hw_buffer,
339 CVMX_PKO_LOCK_CMD_QUEUE))) { 448 CVMX_PKO_LOCK_NONE))) {
340 DEBUGPRINT("%s: Failed to send the packet\n", dev->name); 449 DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
341 dropped = 1; 450 queue_type = QUEUE_DROP;
451 }
452skip_xmit:
453 to_free_list = NULL;
454
455 switch (queue_type) {
456 case QUEUE_DROP:
457 skb->next = to_free_list;
458 to_free_list = skb;
459 priv->stats.tx_dropped++;
460 break;
461 case QUEUE_HW:
462 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
463 break;
464 case QUEUE_CORE:
465 __skb_queue_tail(&priv->tx_free_list[qos], skb);
466 break;
467 default:
468 BUG();
469 }
470
471 while (skb_to_free > 0) {
472 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
473 t->next = to_free_list;
474 to_free_list = t;
475 skb_to_free--;
476 }
477
478 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
479
480 /* Do the actual freeing outside of the lock. */
481 while (to_free_list) {
482 struct sk_buff *t = to_free_list;
483 to_free_list = to_free_list->next;
484 dev_kfree_skb_any(t);
342 } 485 }
343 486
344 if (USE_ASYNC_IOBDMA) { 487 if (USE_ASYNC_IOBDMA) {
488 CVMX_SYNCIOBDMA;
489 total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
345 /* Restore the scratch area */ 490 /* Restore the scratch area */
346 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 491 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
347 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); 492 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
348 }
349
350 queue_it_up = 0;
351 if (unlikely(dropped)) {
352 dev_kfree_skb_any(skb);
353 priv->stats.tx_dropped++;
354 } else { 493 } else {
355 if (USE_SKBUFFS_IN_HW) { 494 total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
356 /* Put this packet on the queue to be freed later */
357 if (pko_command.s.dontfree)
358 queue_it_up = 1;
359 else
360 cvmx_fau_atomic_add32
361 (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
362 } else {
363 /* Put this packet on the queue to be freed later */
364 queue_it_up = 1;
365 }
366 } 495 }
367 496
368 if (queue_it_up) { 497 if (total_to_clean & 0x3ff) {
369 spin_lock(&priv->tx_free_list[qos].lock); 498 /*
370 __skb_queue_tail(&priv->tx_free_list[qos], skb); 499 * Schedule the cleanup tasklet every 1024 packets for
371 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0); 500 * the pathological case of high traffic on one port
372 spin_unlock(&priv->tx_free_list[qos].lock); 501 * delaying clean up of packets on a different port
373 } else { 502 * that is blocked waiting for the cleanup.
374 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); 503 */
504 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
375 } 505 }
376 506
377 return 0; 507 cvm_oct_kick_tx_poll_watchdog();
508
509 return NETDEV_TX_OK;
378} 510}
379 511
380/** 512/**
381 * Packet transmit to the POW 513 * cvm_oct_xmit_pow - transmit a packet to the POW
382 *
383 * @skb: Packet to send 514 * @skb: Packet to send
384 * @dev: Device info structure 515 * @dev: Device info structure
516
385 * Returns Always returns zero 517 * Returns Always returns zero
386 */ 518 */
387int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) 519int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
@@ -459,8 +591,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
459 work->word2.s.dec_ipcomp = 0; /* FIXME */ 591 work->word2.s.dec_ipcomp = 0; /* FIXME */
460#endif 592#endif
461 work->word2.s.tcp_or_udp = 593 work->word2.s.tcp_or_udp =
462 (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) 594 (ip_hdr(skb)->protocol == IPPROTO_TCP)
463 || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP); 595 || (ip_hdr(skb)->protocol == IPPROTO_UDP);
464#if 0 596#if 0
465 /* FIXME */ 597 /* FIXME */
466 work->word2.s.dec_ipsec = 0; 598 work->word2.s.dec_ipsec = 0;
@@ -529,116 +661,63 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
529} 661}
530 662
531/** 663/**
 532 * Transmit a work queue entry out of the ethernet port. Both 664 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
533 * the work queue entry and the packet data can optionally be 665 * @dev: Device being shutdown
534 * freed. The work will be freed on error as well.
535 *
536 * @dev: Device to transmit out.
537 * @work_queue_entry:
538 * Work queue entry to send
539 * @do_free: True if the work queue entry and packet data should be
540 * freed. If false, neither will be freed.
541 * @qos: Index into the queues for this port to transmit on. This
542 * is used to implement QoS if their are multiple queues per
543 * port. This parameter must be between 0 and the number of
544 * queues per port minus 1. Values outside of this range will
545 * be change to zero.
546 * 666 *
547 * Returns Zero on success, negative on failure.
548 */ 667 */
549int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 668void cvm_oct_tx_shutdown_dev(struct net_device *dev)
550 int do_free, int qos)
551{ 669{
552 unsigned long flags;
553 union cvmx_buf_ptr hw_buffer;
554 cvmx_pko_command_word0_t pko_command;
555 int dropped;
556 struct octeon_ethernet *priv = netdev_priv(dev); 670 struct octeon_ethernet *priv = netdev_priv(dev);
557 cvmx_wqe_t *work = work_queue_entry; 671 unsigned long flags;
672 int qos;
558 673
559 if (!(dev->flags & IFF_UP)) { 674 for (qos = 0; qos < 16; qos++) {
560 DEBUGPRINT("%s: Device not up\n", dev->name); 675 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
561 if (do_free) 676 while (skb_queue_len(&priv->tx_free_list[qos]))
562 cvm_oct_free_work(work); 677 dev_kfree_skb_any(__skb_dequeue
563 return -1; 678 (&priv->tx_free_list[qos]));
679 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
564 } 680 }
681}
565 682
566 /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely 683static void cvm_oct_tx_do_cleanup(unsigned long arg)
567 remove "qos" in the event neither interface supports 684{
568 multiple queues per port */ 685 int port;
569 if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
570 (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
571 if (qos <= 0)
572 qos = 0;
573 else if (qos >= cvmx_pko_get_num_queues(priv->port))
574 qos = 0;
575 } else
576 qos = 0;
577
578 /* Start off assuming no drop */
579 dropped = 0;
580
581 local_irq_save(flags);
582 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
583 CVMX_PKO_LOCK_CMD_QUEUE);
584
585 /* Build the PKO buffer pointer */
586 hw_buffer.u64 = 0;
587 hw_buffer.s.addr = work->packet_ptr.s.addr;
588 hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
589 hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
590 hw_buffer.s.back = work->packet_ptr.s.back;
591 686
592 /* Build the PKO command */ 687 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
593 pko_command.u64 = 0; 688 if (cvm_oct_device[port]) {
594 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 689 struct net_device *dev = cvm_oct_device[port];
595 pko_command.s.dontfree = !do_free; 690 cvm_oct_free_tx_skbs(dev);
596 pko_command.s.segs = work->word2.s.bufs; 691 }
597 pko_command.s.total_bytes = work->len; 692 }
693}
598 694
599 /* Check if we can use the hardware checksumming */ 695static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
600 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc)) 696{
601 pko_command.s.ipoffp1 = 0; 697 /* Disable the interrupt. */
602 else 698 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
603 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 699 /* Do the work in the tasklet. */
700 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
701 return IRQ_HANDLED;
702}
604 703
605 /* Send the packet to the output queue */ 704void cvm_oct_tx_initialize(void)
606 if (unlikely 705{
607 (cvmx_pko_send_packet_finish 706 int i;
608 (priv->port, priv->queue + qos, pko_command, hw_buffer,
609 CVMX_PKO_LOCK_CMD_QUEUE))) {
610 DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
611 dropped = -1;
612 }
613 local_irq_restore(flags);
614 707
615 if (unlikely(dropped)) { 708 /* Disable the interrupt. */
616 if (do_free) 709 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
617 cvm_oct_free_work(work); 710 /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
618 priv->stats.tx_dropped++; 711 i = request_irq(OCTEON_IRQ_TIMER1,
619 } else if (do_free) 712 cvm_oct_tx_cleanup_watchdog, 0,
620 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); 713 "Ethernet", cvm_oct_device);
621 714
622 return dropped; 715 if (i)
716 panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
623} 717}
624EXPORT_SYMBOL(cvm_oct_transmit_qos);
625 718
626/** 719void cvm_oct_tx_shutdown(void)
627 * This function frees all skb that are currently queued for TX.
628 *
629 * @dev: Device being shutdown
630 */
631void cvm_oct_tx_shutdown(struct net_device *dev)
632{ 720{
633 struct octeon_ethernet *priv = netdev_priv(dev); 721 /* Free the interrupt handler */
634 unsigned long flags; 722 free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
635 int qos;
636
637 for (qos = 0; qos < 16; qos++) {
638 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
639 while (skb_queue_len(&priv->tx_free_list[qos]))
640 dev_kfree_skb_any(__skb_dequeue
641 (&priv->tx_free_list[qos]));
642 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
643 }
644} 723}
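
The subtlest piece of the reworked TX path is the claim/undo arithmetic in cvm_oct_adjust_skb_to_free(). The per-queue FAU register is decremented by the PKO for every packet it finishes that was queued with reg0 pointing at it, so its value is minus the number of skbs that the hardware is done with but that still sit on tx_free_list. Cleanup claims up to MAX_SKB_TO_FREE of them by atomically adding MAX_SKB_TO_FREE, then hands back whatever it over-claimed. A minimal user-space model of that arithmetic, with a plain int standing in for the FAU register (an illustrative sketch, not driver code; the constant is arbitrary):

#include <stdio.h>

#define MAX_SKB_TO_FREE 64		/* illustrative value */

/* Model of cvmx_fau_fetch_and_add32(): add and return the old value. */
static int fetch_and_add(int *reg, int value)
{
	int old = *reg;
	*reg += value;
	return old;
}

/* Same arithmetic as cvm_oct_adjust_skb_to_free() in the patch above. */
static int adjust_skb_to_free(int skb_to_free, int *reg)
{
	int undo = skb_to_free > 0 ?
		MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		*reg -= undo;		/* hand back the over-claim */
	return -skb_to_free > MAX_SKB_TO_FREE ?
		MAX_SKB_TO_FREE : -skb_to_free;
}

int main(void)
{
	int fau = -3;			/* hardware has finished 3 packets */
	int n = adjust_skb_to_free(fetch_and_add(&fau, MAX_SKB_TO_FREE), &fau);

	/* Prints "free 3 skbs, counter now 0". */
	printf("free %d skbs, counter now %d\n", n, fau);
	return 0;
}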
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index c0bebf750bc0..547680c6c371 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -29,29 +29,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); 29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
31 int do_free, int qos); 31 int do_free, int qos);
32void cvm_oct_tx_shutdown(struct net_device *dev); 32void cvm_oct_tx_initialize(void);
33 33void cvm_oct_tx_shutdown(void);
34/** 34void cvm_oct_tx_shutdown_dev(struct net_device *dev);
35 * Free dead transmit skbs.
36 *
37 * @priv: The driver data
38 * @skb_to_free: The number of SKBs to free (free none if negative).
39 * @qos: The queue to free from.
40 * @take_lock: If true, acquire the skb list lock.
41 */
42static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
43 int skb_to_free,
44 int qos, int take_lock)
45{
46 /* Free skbuffs not in use by the hardware. */
47 if (skb_to_free > 0) {
48 if (take_lock)
49 spin_lock(&priv->tx_free_list[qos].lock);
50 while (skb_to_free > 0) {
51 dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
52 skb_to_free--;
53 }
54 if (take_lock)
55 spin_unlock(&priv->tx_free_list[qos].lock);
56 }
57}
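
With the inline helper gone, TX cleanup becomes a small module-level lifecycle. Roughly how the three new entry points are used, as far as can be read from the ethernet.c changes later in this patch (a sketch of call order, not an additional API definition):

	/* module init, after the ports have been set up */
	cvm_oct_tx_initialize();	/* grab CIU timer 1 as a cleanup watchdog */

	/* per-device teardown, before unregister_netdev(dev) */
	cvm_oct_tx_shutdown_dev(dev);	/* free anything left on tx_free_list */

	/* module exit */
	cvm_oct_tx_shutdown();		/* release the watchdog IRQ */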
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 37b665918000..23467563fe57 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -30,10 +30,9 @@
30 } while (0) 30 } while (0)
31 31
32/** 32/**
33 * Given a packet data address, return a pointer to the 33 * cvm_oct_get_buffer_ptr - convert packet data address to pointer
34 * beginning of the packet buffer.
35 *
36 * @packet_ptr: Packet data hardware address 34 * @packet_ptr: Packet data hardware address
35 *
37 * Returns Packet buffer pointer 36 * Returns Packet buffer pointer
38 */ 37 */
39static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) 38static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
@@ -43,9 +42,7 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
43} 42}
44 43
45/** 44/**
 46 * Given an IPD/PKO port number, return the logical interface it is 45 * INTERFACE - convert IPD port to logical interface
47 * on.
48 *
49 * @ipd_port: Port to check 46 * @ipd_port: Port to check
50 * 47 *
51 * Returns Logical interface 48 * Returns Logical interface
@@ -65,9 +62,7 @@ static inline int INTERFACE(int ipd_port)
65} 62}
66 63
67/** 64/**
68 * Given an IPD/PKO port number, return the port's index on a 65 * INDEX - convert IPD/PKO port number to the port's interface index
69 * logical interface.
70 *
71 * @ipd_port: Port to check 66 * @ipd_port: Port to check
72 * 67 *
73 * Returns Index into interface port list 68 * Returns Index into interface port list
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index ee3dc41b2c53..3fca1cc31ed8 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -26,7 +26,6 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h> 29#include <net/dst.h>
31 30
32#include <asm/octeon/octeon.h> 31#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 4cfd4b136b32..02b63678811a 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -29,7 +29,6 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/delay.h>
33#include <linux/phy.h> 32#include <linux/phy.h>
34 33
35#include <net/dst.h> 34#include <net/dst.h>
@@ -43,8 +42,6 @@
43#include "ethernet-tx.h" 42#include "ethernet-tx.h"
44#include "ethernet-mdio.h" 43#include "ethernet-mdio.h"
45#include "ethernet-util.h" 44#include "ethernet-util.h"
46#include "ethernet-proc.h"
47
48 45
49#include "cvmx-pip.h" 46#include "cvmx-pip.h"
50#include "cvmx-pko.h" 47#include "cvmx-pko.h"
@@ -104,13 +101,15 @@ MODULE_PARM_DESC(pow_send_list, "\n"
104 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" 101 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
105 "\tusing the pow_send_group."); 102 "\tusing the pow_send_group.");
106 103
107static int disable_core_queueing = 1; 104int max_rx_cpus = -1;
108module_param(disable_core_queueing, int, 0444); 105module_param(max_rx_cpus, int, 0444);
109MODULE_PARM_DESC(disable_core_queueing, "\n" 106MODULE_PARM_DESC(max_rx_cpus, "\n"
110 "\tWhen set the networking core's tx_queue_len is set to zero. This\n" 107 "\t\tThe maximum number of CPUs to use for packet reception.\n"
111 "\tallows packets to be sent without lock contention in the packet\n" 108 "\t\tUse -1 to use all available CPUs.");
112 "\tscheduler resulting in some cases in improved throughput.\n");
113 109
110int rx_napi_weight = 32;
111module_param(rx_napi_weight, int, 0444);
112MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
114 113
115/* 114/*
116 * The offset from mac_addr_base that should be used for the next port 115 * The offset from mac_addr_base that should be used for the next port
@@ -122,9 +121,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
122static unsigned int cvm_oct_mac_addr_offset; 121static unsigned int cvm_oct_mac_addr_offset;
123 122
124/** 123/**
125 * Periodic timer to check auto negotiation 124 * cvm_oct_poll_queue - Workqueue for polling operations.
125 */
126struct workqueue_struct *cvm_oct_poll_queue;
127
128/**
129 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
130 *
131 * Set to one right before cvm_oct_poll_queue is destroyed.
126 */ 132 */
127static struct timer_list cvm_oct_poll_timer; 133atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
128 134
129/** 135/**
130 * Array of every ethernet device owned by this driver indexed by 136 * Array of every ethernet device owned by this driver indexed by
@@ -132,65 +138,44 @@ static struct timer_list cvm_oct_poll_timer;
132 */ 138 */
133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; 139struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
134 140
135/** 141u64 cvm_oct_tx_poll_interval;
136 * Periodic timer tick for slow management operations 142
137 * 143static void cvm_oct_rx_refill_worker(struct work_struct *work);
138 * @arg: Device to check 144static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
139 */ 145
140static void cvm_do_timer(unsigned long arg) 146static void cvm_oct_rx_refill_worker(struct work_struct *work)
141{ 147{
142 int32_t skb_to_free, undo; 148 /*
143 int queues_per_port; 149 * FPA 0 may have been drained, try to refill it if we need
144 int qos; 150 * more than num_packet_buffers / 2, otherwise normal receive
145 struct octeon_ethernet *priv; 151 * processing will refill it. If it were drained, no packets
146 static int port; 152 * could be received so cvm_oct_napi_poll would never be
153 * invoked to do the refill.
154 */
155 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
147 156
148 if (port >= CVMX_PIP_NUM_INPUT_PORTS) { 157 if (!atomic_read(&cvm_oct_poll_queue_stopping))
149 /* 158 queue_delayed_work(cvm_oct_poll_queue,
150 * All ports have been polled. Start the next 159 &cvm_oct_rx_refill_work, HZ);
151 * iteration through the ports in one second. 160}
152 */ 161
153 port = 0; 162static void cvm_oct_periodic_worker(struct work_struct *work)
154 mod_timer(&cvm_oct_poll_timer, jiffies + HZ); 163{
155 return; 164 struct octeon_ethernet *priv = container_of(work,
156 } 165 struct octeon_ethernet,
157 if (!cvm_oct_device[port]) 166 port_periodic_work.work);
158 goto out;
159 167
160 priv = netdev_priv(cvm_oct_device[port]);
161 if (priv->poll) 168 if (priv->poll)
162 priv->poll(cvm_oct_device[port]); 169 priv->poll(cvm_oct_device[priv->port]);
163
164 queues_per_port = cvmx_pko_get_num_queues(port);
165 /* Drain any pending packets in the free list */
166 for (qos = 0; qos < queues_per_port; qos++) {
167 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
168 continue;
169 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
170 MAX_SKB_TO_FREE);
171 undo = skb_to_free > 0 ?
172 MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
173 if (undo > 0)
174 cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
175 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
176 MAX_SKB_TO_FREE : -skb_to_free;
177 cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
178 }
179 cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
180 170
181out: 171 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
182 port++; 172
183 /* Poll the next port in a 50th of a second. 173 if (!atomic_read(&cvm_oct_poll_queue_stopping))
184 This spreads the polling of ports out a little bit */ 174 queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
185 mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); 175 }
186}
187 176
188/**
189 * Configure common hardware for all interfaces
190 */
191static __init void cvm_oct_configure_common_hw(void) 177static __init void cvm_oct_configure_common_hw(void)
192{ 178{
193 int r;
194 /* Setup the FPA */ 179 /* Setup the FPA */
195 cvmx_fpa_enable(); 180 cvmx_fpa_enable();
196 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, 181 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
@@ -205,28 +190,13 @@ static __init void cvm_oct_configure_common_hw(void)
205 cvmx_helper_setup_red(num_packet_buffers / 4, 190 cvmx_helper_setup_red(num_packet_buffers / 4,
206 num_packet_buffers / 8); 191 num_packet_buffers / 8);
207 192
208 /* Enable the MII interface */
209 if (!octeon_is_simulation())
210 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
211
212 /* Register an IRQ hander for to receive POW interrupts */
213 r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
214 cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
215 cvm_oct_device);
216
217#if defined(CONFIG_SMP) && 0
218 if (USE_MULTICORE_RECEIVE) {
219 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
220 cpu_online_mask);
221 }
222#endif
223} 193}
224 194
225/** 195/**
 226 * Free a work queue entry received in a intercept callback. 196 * cvm_oct_free_work - Free a work queue entry
197 *
198 * @work_queue_entry: Work queue entry to free
227 * 199 *
228 * @work_queue_entry:
229 * Work queue entry to free
230 * Returns Zero on success, Negative on failure. 200 * Returns Zero on success, Negative on failure.
231 */ 201 */
232int cvm_oct_free_work(void *work_queue_entry) 202int cvm_oct_free_work(void *work_queue_entry)
@@ -253,9 +223,9 @@ int cvm_oct_free_work(void *work_queue_entry)
253EXPORT_SYMBOL(cvm_oct_free_work); 223EXPORT_SYMBOL(cvm_oct_free_work);
254 224
255/** 225/**
256 * Get the low level ethernet statistics 226 * cvm_oct_common_get_stats - get the low level ethernet statistics
257 *
258 * @dev: Device to get the statistics from 227 * @dev: Device to get the statistics from
228 *
259 * Returns Pointer to the statistics 229 * Returns Pointer to the statistics
260 */ 230 */
261static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) 231static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
@@ -299,8 +269,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
299} 269}
300 270
301/** 271/**
302 * Change the link MTU. Unimplemented 272 * cvm_oct_common_change_mtu - change the link MTU
303 *
304 * @dev: Device to change 273 * @dev: Device to change
305 * @new_mtu: The new MTU 274 * @new_mtu: The new MTU
306 * 275 *
@@ -364,8 +333,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
364} 333}
365 334
366/** 335/**
367 * Set the multicast list. Currently unimplemented. 336 * cvm_oct_common_set_multicast_list - set the multicast list
368 *
369 * @dev: Device to work on 337 * @dev: Device to work on
370 */ 338 */
371static void cvm_oct_common_set_multicast_list(struct net_device *dev) 339static void cvm_oct_common_set_multicast_list(struct net_device *dev)
@@ -420,10 +388,10 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
420} 388}
421 389
422/** 390/**
423 * Set the hardware MAC address for a device 391 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
424 * 392 * @dev: The device in question.
425 * @dev: Device to change the MAC address for 393 * @addr: Address structure to change it too.
426 * @addr: Address structure to change it too. MAC address is addr + 2. 394
427 * Returns Zero on success 395 * Returns Zero on success
428 */ 396 */
429static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) 397static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
@@ -470,9 +438,9 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
470} 438}
471 439
472/** 440/**
473 * Per network device initialization 441 * cvm_oct_common_init - per network device initialization
474 *
475 * @dev: Device to initialize 442 * @dev: Device to initialize
443 *
476 * Returns Zero on success 444 * Returns Zero on success
477 */ 445 */
478int cvm_oct_common_init(struct net_device *dev) 446int cvm_oct_common_init(struct net_device *dev)
@@ -510,8 +478,11 @@ int cvm_oct_common_init(struct net_device *dev)
510 && (always_use_pow || strstr(pow_send_list, dev->name))) 478 && (always_use_pow || strstr(pow_send_list, dev->name)))
511 priv->queue = -1; 479 priv->queue = -1;
512 480
513 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) 481 if (priv->queue != -1) {
514 dev->features |= NETIF_F_IP_CSUM; 482 dev->features |= NETIF_F_SG;
483 if (USE_HW_TCPUDP_CHECKSUM)
484 dev->features |= NETIF_F_IP_CSUM;
485 }
515 486
516 /* We do our own locking, Linux doesn't need to */ 487 /* We do our own locking, Linux doesn't need to */
517 dev->features |= NETIF_F_LLTX; 488 dev->features |= NETIF_F_LLTX;
@@ -625,12 +596,6 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
625 596
626extern void octeon_mdiobus_force_mod_depencency(void); 597extern void octeon_mdiobus_force_mod_depencency(void);
627 598
628/**
629 * Module/ driver initialization. Creates the linux network
630 * devices.
631 *
632 * Returns Zero on success
633 */
634static int __init cvm_oct_init_module(void) 599static int __init cvm_oct_init_module(void)
635{ 600{
636 int num_interfaces; 601 int num_interfaces;
@@ -648,8 +613,12 @@ static int __init cvm_oct_init_module(void)
648 else 613 else
649 cvm_oct_mac_addr_offset = 0; 614 cvm_oct_mac_addr_offset = 0;
650 615
651 cvm_oct_proc_initialize(); 616 cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
652 cvm_oct_rx_initialize(); 617 if (cvm_oct_poll_queue == NULL) {
618 pr_err("octeon-ethernet: Cannot create workqueue");
619 return -ENOMEM;
620 }
621
653 cvm_oct_configure_common_hw(); 622 cvm_oct_configure_common_hw();
654 623
655 cvmx_helper_initialize_packet_io_global(); 624 cvmx_helper_initialize_packet_io_global();
@@ -682,6 +651,9 @@ static int __init cvm_oct_init_module(void)
682 */ 651 */
683 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 652 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
684 653
654 /* Initialize the FAU used for counting tx SKBs that need to be freed */
655 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
656
685 if ((pow_send_group != -1)) { 657 if ((pow_send_group != -1)) {
686 struct net_device *dev; 658 struct net_device *dev;
687 pr_info("\tConfiguring device for POW only access\n"); 659 pr_info("\tConfiguring device for POW only access\n");
@@ -689,7 +661,6 @@ static int __init cvm_oct_init_module(void)
689 if (dev) { 661 if (dev) {
690 /* Initialize the device private structure. */ 662 /* Initialize the device private structure. */
691 struct octeon_ethernet *priv = netdev_priv(dev); 663 struct octeon_ethernet *priv = netdev_priv(dev);
692 memset(priv, 0, sizeof(struct octeon_ethernet));
693 664
694 dev->netdev_ops = &cvm_oct_pow_netdev_ops; 665 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
695 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; 666 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
@@ -700,19 +671,16 @@ static int __init cvm_oct_init_module(void)
700 skb_queue_head_init(&priv->tx_free_list[qos]); 671 skb_queue_head_init(&priv->tx_free_list[qos]);
701 672
702 if (register_netdev(dev) < 0) { 673 if (register_netdev(dev) < 0) {
703 pr_err("Failed to register ethernet " 674 pr_err("Failed to register ethernet device for POW\n");
704 "device for POW\n");
705 kfree(dev); 675 kfree(dev);
706 } else { 676 } else {
707 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; 677 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
708 pr_info("%s: POW send group %d, receive " 678 pr_info("%s: POW send group %d, receive group %d\n",
709 "group %d\n", 679 dev->name, pow_send_group,
710 dev->name, pow_send_group, 680 pow_receive_group);
711 pow_receive_group);
712 } 681 }
713 } else { 682 } else {
714 pr_err("Failed to allocate ethernet device " 683 pr_err("Failed to allocate ethernet device for POW\n");
715 "for POW\n");
716 } 684 }
717 } 685 }
718 686
@@ -730,17 +698,15 @@ static int __init cvm_oct_init_module(void)
730 struct net_device *dev = 698 struct net_device *dev =
731 alloc_etherdev(sizeof(struct octeon_ethernet)); 699 alloc_etherdev(sizeof(struct octeon_ethernet));
732 if (!dev) { 700 if (!dev) {
733 pr_err("Failed to allocate ethernet device " 701 pr_err("Failed to allocate ethernet device for port %d\n", port);
734 "for port %d\n", port);
735 continue; 702 continue;
736 } 703 }
737 if (disable_core_queueing)
738 dev->tx_queue_len = 0;
739 704
740 /* Initialize the device private structure. */ 705 /* Initialize the device private structure. */
741 priv = netdev_priv(dev); 706 priv = netdev_priv(dev);
742 memset(priv, 0, sizeof(struct octeon_ethernet));
743 707
708 INIT_DELAYED_WORK(&priv->port_periodic_work,
709 cvm_oct_periodic_worker);
744 priv->imode = imode; 710 priv->imode = imode;
745 priv->port = port; 711 priv->port = port;
746 priv->queue = cvmx_pko_get_base_queue(priv->port); 712 priv->queue = cvmx_pko_get_base_queue(priv->port);
@@ -803,44 +769,25 @@ static int __init cvm_oct_init_module(void)
803 fau -= 769 fau -=
804 cvmx_pko_get_num_queues(priv->port) * 770 cvmx_pko_get_num_queues(priv->port) *
805 sizeof(uint32_t); 771 sizeof(uint32_t);
772 queue_delayed_work(cvm_oct_poll_queue,
773 &priv->port_periodic_work, HZ);
806 } 774 }
807 } 775 }
808 } 776 }
809 777
810 if (INTERRUPT_LIMIT) { 778 cvm_oct_tx_initialize();
811 /* 779 cvm_oct_rx_initialize();
812 * Set the POW timer rate to give an interrupt at most
813 * INTERRUPT_LIMIT times per second.
814 */
815 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
816 octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
817 16 * 256) << 8);
818 780
819 /* 781 /*
 820 * Enable POW timer interrupt. It will count when 782 * 150 us: about 10 1500-byte packets at 1GE.
821 * there are packets available. 783 */
822 */ 784 cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
823 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
824 0x1ful << 24);
825 } else {
826 /* Enable POW interrupt when our port has at least one packet */
827 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
828 }
829 785
830 /* Enable the poll timer for checking RGMII status */ 786 queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
831 init_timer(&cvm_oct_poll_timer);
832 cvm_oct_poll_timer.data = 0;
833 cvm_oct_poll_timer.function = cvm_do_timer;
834 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
835 787
836 return 0; 788 return 0;
837} 789}
838 790
839/**
840 * Module / driver shutdown
841 *
842 * Returns Zero on success
843 */
844static void __exit cvm_oct_cleanup_module(void) 791static void __exit cvm_oct_cleanup_module(void)
845{ 792{
846 int port; 793 int port;
@@ -853,22 +800,31 @@ static void __exit cvm_oct_cleanup_module(void)
853 /* Free the interrupt handler */ 800 /* Free the interrupt handler */
854 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); 801 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
855 802
856 del_timer(&cvm_oct_poll_timer); 803 atomic_inc_return(&cvm_oct_poll_queue_stopping);
804 cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
805
857 cvm_oct_rx_shutdown(); 806 cvm_oct_rx_shutdown();
807 cvm_oct_tx_shutdown();
808
858 cvmx_pko_disable(); 809 cvmx_pko_disable();
859 810
860 /* Free the ethernet devices */ 811 /* Free the ethernet devices */
861 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { 812 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
862 if (cvm_oct_device[port]) { 813 if (cvm_oct_device[port]) {
863 cvm_oct_tx_shutdown(cvm_oct_device[port]); 814 struct net_device *dev = cvm_oct_device[port];
864 unregister_netdev(cvm_oct_device[port]); 815 struct octeon_ethernet *priv = netdev_priv(dev);
865 kfree(cvm_oct_device[port]); 816 cancel_delayed_work_sync(&priv->port_periodic_work);
817
818 cvm_oct_tx_shutdown_dev(dev);
819 unregister_netdev(dev);
820 kfree(dev);
866 cvm_oct_device[port] = NULL; 821 cvm_oct_device[port] = NULL;
867 } 822 }
868 } 823 }
869 824
825 destroy_workqueue(cvm_oct_poll_queue);
826
870 cvmx_pko_shutdown(); 827 cvmx_pko_shutdown();
871 cvm_oct_proc_shutdown();
872 828
873 cvmx_ipd_free_ptr(); 829 cvmx_ipd_free_ptr();
874 830
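
The old cvm_do_timer() round-robin is replaced by delayed work on a private single-threaded workqueue: each worker re-queues itself unless cvm_oct_poll_queue_stopping has been raised, and cleanup raises the flag before cancel_delayed_work_sync() so nothing can re-arm behind it. A minimal self-contained sketch of that pattern with generic names (not the driver's own symbols):

#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *poll_wq;
static atomic_t stopping = ATOMIC_INIT(0);

static void poll_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_worker);

static void poll_worker(struct work_struct *work)
{
	/* ... periodic housekeeping goes here ... */

	/* Re-arm, unless teardown has begun. */
	if (!atomic_read(&stopping))
		queue_delayed_work(poll_wq, &poll_work, HZ);
}

static int __init poll_init(void)
{
	poll_wq = create_singlethread_workqueue("example-poll");
	if (!poll_wq)
		return -ENOMEM;
	queue_delayed_work(poll_wq, &poll_work, HZ);
	return 0;
}

static void __exit poll_exit(void)
{
	atomic_inc(&stopping);			/* stop re-arming first */
	cancel_delayed_work_sync(&poll_work);	/* then wait out the last run */
	destroy_workqueue(poll_wq);
}

module_init(poll_init);
module_exit(poll_exit);
MODULE_LICENSE("GPL");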
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 402a15b9bb0e..d58192563552 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -4,7 +4,7 @@
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
@@ -57,58 +57,12 @@ struct octeon_ethernet {
57 uint64_t link_info; 57 uint64_t link_info;
58 /* Called periodically to check link status */ 58 /* Called periodically to check link status */
59 void (*poll) (struct net_device *dev); 59 void (*poll) (struct net_device *dev);
60 struct delayed_work port_periodic_work;
61 struct work_struct port_work; /* may be unused. */
60}; 62};
61 63
62/**
63 * Free a work queue entry received in a intercept callback.
64 *
65 * @work_queue_entry:
66 * Work queue entry to free
67 * Returns Zero on success, Negative on failure.
68 */
69int cvm_oct_free_work(void *work_queue_entry); 64int cvm_oct_free_work(void *work_queue_entry);
70 65
71/**
72 * Transmit a work queue entry out of the ethernet port. Both
73 * the work queue entry and the packet data can optionally be
74 * freed. The work will be freed on error as well.
75 *
76 * @dev: Device to transmit out.
77 * @work_queue_entry:
78 * Work queue entry to send
79 * @do_free: True if the work queue entry and packet data should be
80 * freed. If false, neither will be freed.
81 * @qos: Index into the queues for this port to transmit on. This
82 * is used to implement QoS if their are multiple queues per
83 * port. This parameter must be between 0 and the number of
84 * queues per port minus 1. Values outside of this range will
85 * be change to zero.
86 *
87 * Returns Zero on success, negative on failure.
88 */
89int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
90 int do_free, int qos);
91
92/**
93 * Transmit a work queue entry out of the ethernet port. Both
94 * the work queue entry and the packet data can optionally be
95 * freed. The work will be freed on error as well. This simply
96 * wraps cvmx_oct_transmit_qos() for backwards compatability.
97 *
98 * @dev: Device to transmit out.
99 * @work_queue_entry:
100 * Work queue entry to send
101 * @do_free: True if the work queue entry and packet data should be
102 * freed. If false, neither will be freed.
103 *
104 * Returns Zero on success, negative on failure.
105 */
106static inline int cvm_oct_transmit(struct net_device *dev,
107 void *work_queue_entry, int do_free)
108{
109 return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0);
110}
111
112extern int cvm_oct_rgmii_init(struct net_device *dev); 66extern int cvm_oct_rgmii_init(struct net_device *dev);
113extern void cvm_oct_rgmii_uninit(struct net_device *dev); 67extern void cvm_oct_rgmii_uninit(struct net_device *dev);
114extern int cvm_oct_rgmii_open(struct net_device *dev); 68extern int cvm_oct_rgmii_open(struct net_device *dev);
@@ -134,5 +88,11 @@ extern int pow_send_group;
134extern int pow_receive_group; 88extern int pow_receive_group;
135extern char pow_send_list[]; 89extern char pow_send_list[];
136extern struct net_device *cvm_oct_device[]; 90extern struct net_device *cvm_oct_device[];
91extern struct workqueue_struct *cvm_oct_poll_queue;
92extern atomic_t cvm_oct_poll_queue_stopping;
93extern u64 cvm_oct_tx_poll_interval;
94
95extern int max_rx_cpus;
96extern int rx_napi_weight;
137 97
138#endif 98#endif
diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c
index 133b86c6a678..2fff0a0052d1 100644
--- a/drivers/staging/sm7xx/smtc2d.c
+++ b/drivers/staging/sm7xx/smtc2d.c
@@ -5,7 +5,7 @@
5 * Author: Boyod boyod.yang@siliconmotion.com.cn 5 * Author: Boyod boyod.yang@siliconmotion.com.cn
6 * 6 *
7 * Copyright (C) 2009 Lemote, Inc. 7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com 8 * Author: Wu Zhangjin, wuzhangjin@gmail.com
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for 11 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h
index 38d0c335322b..02b4fa29136c 100644
--- a/drivers/staging/sm7xx/smtc2d.h
+++ b/drivers/staging/sm7xx/smtc2d.h
@@ -5,7 +5,7 @@
5 * Author: Ge Wang, gewang@siliconmotion.com 5 * Author: Ge Wang, gewang@siliconmotion.com
6 * 6 *
7 * Copyright (C) 2009 Lemote, Inc. 7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com 8 * Author: Wu Zhangjin, wuzhangjin@gmail.com
9 * 9 *
10 * This file is subject to the terms and conditions of the GNU General Public 10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for 11 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 161dbc9c1397..a4f6f49aef48 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -6,7 +6,7 @@
6 * Boyod boyod.yang@siliconmotion.com.cn 6 * Boyod boyod.yang@siliconmotion.com.cn
7 * 7 *
8 * Copyright (C) 2009 Lemote, Inc. 8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com 9 * Author: Wu Zhangjin, wuzhangjin@gmail.com
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for 12 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index 7f2c34138215..7ee565c2c952 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -6,7 +6,7 @@
6 * Boyod boyod.yang@siliconmotion.com.cn 6 * Boyod boyod.yang@siliconmotion.com.cn
7 * 7 *
8 * Copyright (C) 2009 Lemote, Inc. 8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com 9 * Author: Wu Zhangjin, wuzhangjin@gmail.com
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for 12 * License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 2e94b71b20d9..2bb95cd308c1 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -34,6 +34,7 @@
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/io.h> 35#include <linux/io.h>
36#include <linux/uaccess.h> 36#include <linux/uaccess.h>
37#include <linux/clk.h>
37 38
38#include <asm/addrspace.h> 39#include <asm/addrspace.h>
39#include <asm/mach-ar7/ar7.h> 40#include <asm/mach-ar7/ar7.h>
@@ -80,6 +81,8 @@ static struct resource *ar7_regs_wdt;
80/* Pointer to the remapped WDT IO space */ 81/* Pointer to the remapped WDT IO space */
81static struct ar7_wdt *ar7_wdt; 82static struct ar7_wdt *ar7_wdt;
82 83
84static struct clk *vbus_clk;
85
83static void ar7_wdt_kick(u32 value) 86static void ar7_wdt_kick(u32 value)
84{ 87{
85 WRITE_REG(ar7_wdt->kick_lock, 0x5555); 88 WRITE_REG(ar7_wdt->kick_lock, 0x5555);
@@ -138,17 +141,19 @@ static void ar7_wdt_disable(u32 value)
138static void ar7_wdt_update_margin(int new_margin) 141static void ar7_wdt_update_margin(int new_margin)
139{ 142{
140 u32 change; 143 u32 change;
144 u32 vbus_rate;
141 145
142 change = new_margin * (ar7_vbus_freq() / prescale_value); 146 vbus_rate = clk_get_rate(vbus_clk);
147 change = new_margin * (vbus_rate / prescale_value);
143 if (change < 1) 148 if (change < 1)
144 change = 1; 149 change = 1;
145 if (change > 0xffff) 150 if (change > 0xffff)
146 change = 0xffff; 151 change = 0xffff;
147 ar7_wdt_change(change); 152 ar7_wdt_change(change);
148 margin = change * prescale_value / ar7_vbus_freq(); 153 margin = change * prescale_value / vbus_rate;
149 printk(KERN_INFO DRVNAME 154 printk(KERN_INFO DRVNAME
150 ": timer margin %d seconds (prescale %d, change %d, freq %d)\n", 155 ": timer margin %d seconds (prescale %d, change %d, freq %d)\n",
151 margin, prescale_value, change, ar7_vbus_freq()); 156 margin, prescale_value, change, vbus_rate);
152} 157}
153 158
154static void ar7_wdt_enable_wdt(void) 159static void ar7_wdt_enable_wdt(void)
@@ -298,6 +303,13 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
298 goto out_mem_region; 303 goto out_mem_region;
299 } 304 }
300 305
306 vbus_clk = clk_get(NULL, "vbus");
307 if (IS_ERR(vbus_clk)) {
308 printk(KERN_ERR DRVNAME ": could not get vbus clock\n");
309 rc = PTR_ERR(vbus_clk);
310 goto out_mem_region;
311 }
312
301 ar7_wdt_disable_wdt(); 313 ar7_wdt_disable_wdt();
302 ar7_wdt_prescale(prescale_value); 314 ar7_wdt_prescale(prescale_value);
303 ar7_wdt_update_margin(margin); 315 ar7_wdt_update_margin(margin);