author		Manuel Lauss <manuel.lauss@googlemail.com>	2011-12-08 05:42:15 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2011-12-08 05:42:15 -0500
commit		cd671c16f0b18c83d949218867b2d88583080b36 (patch)
tree		68b3bbabb335ca0b76c343bb7a2af54a73dfdb04 /drivers/net/irda
parent		4d2216afeeaa1571f7608107f41cdb2ac6fe30b1 (diff)
net/irda: convert au1k_ir to platform driver.

Moderate driver cleanup: convert to platform driver, get rid of
board-specific code.

Driver loads and runs on a DB1100 board. But since I have no other
IrDA hardware to exchange data with I can't say whether it really
sends and receives.

Signed-off-by: Manuel Lauss <manuel.lauss@googlemail.com>
Cc: Samuel Ortiz <samuel@sortiz.org>
Cc: netdev@vger.kernel.org
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2877/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
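For context: with the board-specific code removed, the driver now expects the
board/platform code to register an "au1000-irda" platform device carrying the
TX interrupt (IRQ resource 0), the RX interrupt (IRQ resource 1), the register
window (MEM resource 0), and optionally a struct au1k_irda_platform_data whose
set_phy_mode() hook replaces the old direct BCSR pokes (see au1k_irda_probe()
in the diff below). The following is only a minimal sketch of such a
registration, not part of this patch: the IRQ macros appear in the old code
below, AU1000_IRDA_PHYS_ADDR and the 4 KiB window size are assumptions from
the SoC headers, and board_set_irda_phy_mode() is a hypothetical board hook.

/*
 * Sketch (not part of this commit): board-side registration for the
 * converted driver.  Resource order matches au1k_irda_probe():
 * IRQ 0 = TX, IRQ 1 = RX, MEM 0 = register window.
 * board_set_irda_phy_mode() is a hypothetical board hook (e.g. BCSR
 * writes on a DB1x00) called with AU1000_IRDA_PHY_MODE_{OFF,SIR,FIR}.
 */
static struct resource au1000_irda_res[] = {
	{
		.start	= AU1000_IRDA_TX_INT,
		.end	= AU1000_IRDA_TX_INT,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= AU1000_IRDA_RX_INT,
		.end	= AU1000_IRDA_RX_INT,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= AU1000_IRDA_PHYS_ADDR,		/* assumed base */
		.end	= AU1000_IRDA_PHYS_ADDR + 0xfff,	/* assumed size */
		.flags	= IORESOURCE_MEM,
	},
};

static struct au1k_irda_platform_data au1000_irda_platdata = {
	.set_phy_mode	= board_set_irda_phy_mode,	/* hypothetical */
};

static struct platform_device au1000_irda_dev = {
	.name		= "au1000-irda",	/* must match the driver name */
	.id		= -1,
	.dev		= {
		.platform_data = &au1000_irda_platdata,
	},
	.resource	= au1000_irda_res,
	.num_resources	= ARRAY_SIZE(au1000_irda_res),
};

/* board init code would then call platform_device_register(&au1000_irda_dev) */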
Diffstat (limited to 'drivers/net/irda')
-rw-r--r--  drivers/net/irda/Kconfig        |    6
-rw-r--r--  drivers/net/irda/au1000_ircc.h  |  125
-rw-r--r--  drivers/net/irda/au1k_ir.c      | 1226
3 files changed, 686 insertions, 671 deletions
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d423d18b4ad6..e535137eb2d0 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -313,8 +313,12 @@ config TOSHIBA_FIR
 	  donauboe.
 
 config AU1000_FIR
-	tristate "Alchemy Au1000 SIR/FIR"
+	tristate "Alchemy IrDA SIR/FIR"
 	depends on IRDA && MIPS_ALCHEMY
+	help
+	  Say Y/M here to build support for the IrDA peripheral on the
+	  Alchemy Au1000 and Au1100 SoCs.
+	  Say M to build a module; it will be called au1k_ir.ko
 
 config SMC_IRCC_FIR
 	tristate "SMSC IrCC (EXPERIMENTAL)"
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
deleted file mode 100644
index c072c09a8d91..000000000000
--- a/drivers/net/irda/au1000_ircc.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Au1000 IrDA driver.
- *
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- *		ppopov@mvista.com or source@mvista.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef AU1000_IRCC_H
-#define AU1000_IRCC_H
-
-#include <linux/time.h>
-
-#include <linux/spinlock.h>
-#include <linux/pm.h>
-#include <asm/io.h>
-
-#define NUM_IR_IFF	1
-#define NUM_IR_DESC	64
-#define RING_SIZE_4	0x0
-#define RING_SIZE_16	0x3
-#define RING_SIZE_64	0xF
-#define MAX_NUM_IR_DESC	64
-#define MAX_BUF_SIZE	2048
-
-#define BPS_115200	0
-#define BPS_57600	1
-#define BPS_38400	2
-#define BPS_19200	5
-#define BPS_9600	11
-#define BPS_2400	47
-
-/* Ring descriptor flags */
-#define AU_OWN		(1<<7)	/* tx,rx */
-
-#define IR_DIS_CRC	(1<<6)	/* tx */
-#define IR_BAD_CRC	(1<<5)	/* tx */
-#define IR_NEED_PULSE	(1<<4)	/* tx */
-#define IR_FORCE_UNDER	(1<<3)	/* tx */
-#define IR_DISABLE_TX	(1<<2)	/* tx */
-#define IR_HW_UNDER	(1<<0)	/* tx */
-#define IR_TX_ERROR	(IR_DIS_CRC|IR_BAD_CRC|IR_HW_UNDER)
-
-#define IR_PHY_ERROR	(1<<6)	/* rx */
-#define IR_CRC_ERROR	(1<<5)	/* rx */
-#define IR_MAX_LEN	(1<<4)	/* rx */
-#define IR_FIFO_OVER	(1<<3)	/* rx */
-#define IR_SIR_ERROR	(1<<2)	/* rx */
-#define IR_RX_ERROR	(IR_PHY_ERROR|IR_CRC_ERROR| \
-			IR_MAX_LEN|IR_FIFO_OVER|IR_SIR_ERROR)
-
-typedef struct db_dest {
-	struct db_dest *pnext;
-	volatile u32 *vaddr;
-	dma_addr_t dma_addr;
-} db_dest_t;
-
-
-typedef struct ring_desc {
-	u8 count_0;	/* 7:0  */
-	u8 count_1;	/* 12:8 */
-	u8 reserved;
-	u8 flags;
-	u8 addr_0;	/* 7:0  */
-	u8 addr_1;	/* 15:8 */
-	u8 addr_2;	/* 23:16 */
-	u8 addr_3;	/* 31:24 */
-} ring_dest_t;
-
-
-/* Private data for each instance */
-struct au1k_private {
-
-	db_dest_t *pDBfree;
-	db_dest_t db[2*NUM_IR_DESC];
-	volatile ring_dest_t *rx_ring[NUM_IR_DESC];
-	volatile ring_dest_t *tx_ring[NUM_IR_DESC];
-	db_dest_t *rx_db_inuse[NUM_IR_DESC];
-	db_dest_t *tx_db_inuse[NUM_IR_DESC];
-	u32 rx_head;
-	u32 tx_head;
-	u32 tx_tail;
-	u32 tx_full;
-
-	iobuff_t rx_buff;
-
-	struct net_device *netdev;
-
-	struct timeval stamp;
-	struct timeval now;
-	struct qos_info qos;
-	struct irlap_cb *irlap;
-
-	u8 open;
-	u32 speed;
-	u32 newspeed;
-
-	u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
-	struct timer_list timer;
-
-	spinlock_t lock; /* For serializing operations */
-};
-#endif /* AU1000_IRCC_H */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 670bb0591217..fc503aa5288e 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -18,101 +18,220 @@
  * with this program; if not, write to the Free Software Foundation, Inc.,
  * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  */
-#include <linux/module.h>
-#include <linux/types.h>
+
 #include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
-#include <linux/pm.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/au1000.h>
-#if defined(CONFIG_MIPS_DB1000)
-#include <asm/mach-db1x00/bcsr.h>
-#else
-#error au1k_ir: unsupported board
-#endif
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/types.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/irmod.h>
 #include <net/irda/wrapper.h>
 #include <net/irda/irda_device.h>
-#include "au1000_ircc.h"
+#include <asm/mach-au1x00/au1000.h>
+
+/* registers */
+#define IR_RING_PTR_STATUS	0x00
+#define IR_RING_BASE_ADDR_H	0x04
+#define IR_RING_BASE_ADDR_L	0x08
+#define IR_RING_SIZE		0x0C
+#define IR_RING_PROMPT		0x10
+#define IR_RING_ADDR_CMPR	0x14
+#define IR_INT_CLEAR		0x18
+#define IR_CONFIG_1		0x20
+#define IR_SIR_FLAGS		0x24
+#define IR_STATUS		0x28
+#define IR_READ_PHY_CONFIG	0x2C
+#define IR_WRITE_PHY_CONFIG	0x30
+#define IR_MAX_PKT_LEN		0x34
+#define IR_RX_BYTE_CNT		0x38
+#define IR_CONFIG_2		0x3C
+#define IR_ENABLE		0x40
+
+/* Config1 */
+#define IR_RX_INVERT_LED	(1 << 0)
+#define IR_TX_INVERT_LED	(1 << 1)
+#define IR_ST			(1 << 2)
+#define IR_SF			(1 << 3)
+#define IR_SIR			(1 << 4)
+#define IR_MIR			(1 << 5)
+#define IR_FIR			(1 << 6)
+#define IR_16CRC		(1 << 7)
+#define IR_TD			(1 << 8)
+#define IR_RX_ALL		(1 << 9)
+#define IR_DMA_ENABLE		(1 << 10)
+#define IR_RX_ENABLE		(1 << 11)
+#define IR_TX_ENABLE		(1 << 12)
+#define IR_LOOPBACK		(1 << 14)
+#define IR_SIR_MODE		(IR_SIR | IR_DMA_ENABLE | \
+				 IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
+				 IR_16CRC)
+
+/* ir_status */
+#define IR_RX_STATUS		(1 << 9)
+#define IR_TX_STATUS		(1 << 10)
+#define IR_PHYEN		(1 << 15)
+
+/* ir_write_phy_config */
+#define IR_BR(x)		(((x) & 0x3f) << 10)	/* baud rate */
+#define IR_PW(x)		(((x) & 0x1f) << 5)	/* pulse width */
+#define IR_P(x)			((x) & 0x1f)		/* preamble bits */
+
+/* Config2 */
+#define IR_MODE_INV		(1 << 0)
+#define IR_ONE_PIN		(1 << 1)
+#define IR_PHYCLK_40MHZ		(0 << 2)
+#define IR_PHYCLK_48MHZ		(1 << 2)
+#define IR_PHYCLK_56MHZ		(2 << 2)
+#define IR_PHYCLK_64MHZ		(3 << 2)
+#define IR_DP			(1 << 4)
+#define IR_DA			(1 << 5)
+#define IR_FLT_HIGH		(0 << 6)
+#define IR_FLT_MEDHI		(1 << 6)
+#define IR_FLT_MEDLO		(2 << 6)
+#define IR_FLT_LO		(3 << 6)
+#define IR_IEN			(1 << 8)
+
+/* ir_enable */
+#define IR_HC			(1 << 3)	/* divide SBUS clock by 2 */
+#define IR_CE			(1 << 2)	/* clock enable */
+#define IR_C			(1 << 1)	/* coherency bit */
+#define IR_BE			(1 << 0)	/* set in big endian mode */
+
+#define NUM_IR_DESC	64
+#define RING_SIZE_4	0x0
+#define RING_SIZE_16	0x3
+#define RING_SIZE_64	0xF
+#define MAX_NUM_IR_DESC	64
+#define MAX_BUF_SIZE	2048
+
+/* Ring descriptor flags */
+#define AU_OWN		(1 << 7)	/* tx,rx */
+#define IR_DIS_CRC	(1 << 6)	/* tx */
+#define IR_BAD_CRC	(1 << 5)	/* tx */
+#define IR_NEED_PULSE	(1 << 4)	/* tx */
+#define IR_FORCE_UNDER	(1 << 3)	/* tx */
+#define IR_DISABLE_TX	(1 << 2)	/* tx */
+#define IR_HW_UNDER	(1 << 0)	/* tx */
+#define IR_TX_ERROR	(IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
+
+#define IR_PHY_ERROR	(1 << 6)	/* rx */
+#define IR_CRC_ERROR	(1 << 5)	/* rx */
+#define IR_MAX_LEN	(1 << 4)	/* rx */
+#define IR_FIFO_OVER	(1 << 3)	/* rx */
+#define IR_SIR_ERROR	(1 << 2)	/* rx */
+#define IR_RX_ERROR	(IR_PHY_ERROR | IR_CRC_ERROR | \
+			 IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
+
+struct db_dest {
+	struct db_dest *pnext;
+	volatile u32 *vaddr;
+	dma_addr_t dma_addr;
+};
 
-static int au1k_irda_net_init(struct net_device *);
-static int au1k_irda_start(struct net_device *);
-static int au1k_irda_stop(struct net_device *dev);
-static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
-static int au1k_irda_rx(struct net_device *);
-static void au1k_irda_interrupt(int, void *);
-static void au1k_tx_timeout(struct net_device *);
-static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
-static int au1k_irda_set_speed(struct net_device *dev, int speed);
+struct ring_dest {
+	u8 count_0;	/* 7:0  */
+	u8 count_1;	/* 12:8 */
+	u8 reserved;
+	u8 flags;
+	u8 addr_0;	/* 7:0  */
+	u8 addr_1;	/* 15:8 */
+	u8 addr_2;	/* 23:16 */
+	u8 addr_3;	/* 31:24 */
+};
 
-static void *dma_alloc(size_t, dma_addr_t *);
-static void dma_free(void *, size_t);
+/* Private data for each instance */
+struct au1k_private {
+	void __iomem *iobase;
+	int irq_rx, irq_tx;
+
+	struct db_dest *pDBfree;
+	struct db_dest db[2 * NUM_IR_DESC];
+	volatile struct ring_dest *rx_ring[NUM_IR_DESC];
+	volatile struct ring_dest *tx_ring[NUM_IR_DESC];
+	struct db_dest *rx_db_inuse[NUM_IR_DESC];
+	struct db_dest *tx_db_inuse[NUM_IR_DESC];
+	u32 rx_head;
+	u32 tx_head;
+	u32 tx_tail;
+	u32 tx_full;
+
+	iobuff_t rx_buff;
+
+	struct net_device *netdev;
+	struct timeval stamp;
+	struct timeval now;
+	struct qos_info qos;
+	struct irlap_cb *irlap;
+
+	u8 open;
+	u32 speed;
+	u32 newspeed;
+
+	struct timer_list timer;
+
+	struct resource *ioarea;
+	struct au1k_irda_platform_data *platdata;
+};
 
 static int qos_mtt_bits = 0x07;  /* 1 ms or more */
-static struct net_device *ir_devs[NUM_IR_IFF];
-static char version[] __devinitdata =
-	"au1k_ircc:1.2 ppopov@mvista.com\n";
 
 #define RUN_AT(x) (jiffies + (x))
 
-static DEFINE_SPINLOCK(ir_lock);
+static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
+{
+	if (p->platdata && p->platdata->set_phy_mode)
+		p->platdata->set_phy_mode(mode);
+}
 
-/*
- * IrDA peripheral bug. You have to read the register
- * twice to get the right value.
- */
-u32 read_ir_reg(u32 addr)
-{
-	readl(addr);
-	return readl(addr);
+static inline unsigned long irda_read(struct au1k_private *p,
+				      unsigned long ofs)
+{
+	/*
+	 * IrDA peripheral bug. You have to read the register
+	 * twice to get the right value.
+	 */
+	(void)__raw_readl(p->iobase + ofs);
+	return __raw_readl(p->iobase + ofs);
 }
 
+static inline void irda_write(struct au1k_private *p, unsigned long ofs,
+			      unsigned long val)
+{
+	__raw_writel(val, p->iobase + ofs);
+	wmb();
+}
 
 /*
  * Buffer allocation/deallocation routines. The buffer descriptor returned
  * has the virtual and dma address of a buffer suitable for
  * both, receive and transmit operations.
  */
-static db_dest_t *GetFreeDB(struct au1k_private *aup)
+static struct db_dest *GetFreeDB(struct au1k_private *aup)
 {
-	db_dest_t *pDB;
-	pDB = aup->pDBfree;
-
-	if (pDB) {
-		aup->pDBfree = pDB->pnext;
-	}
-	return pDB;
-}
+	struct db_dest *db;
+	db = aup->pDBfree;
 
-static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
-{
-	db_dest_t *pDBfree = aup->pDBfree;
-	if (pDBfree)
-		pDBfree->pnext = pDB;
-	aup->pDBfree = pDB;
+	if (db)
+		aup->pDBfree = db->pnext;
+	return db;
 }
 
-
 /*
   DMA memory allocation, derived from pci_alloc_consistent.
   However, the Au1000 data cache is coherent (when programmed
   so), therefore we return KSEG0 address, not KSEG1.
 */
-static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
+static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
 {
 	void *ret;
 	int gfp = GFP_ATOMIC | GFP_DMA;
 
-	ret = (void *) __get_free_pages(gfp, get_order(size));
+	ret = (void *)__get_free_pages(gfp, get_order(size));
 
 	if (ret != NULL) {
 		memset(ret, 0, size);
@@ -122,7 +241,6 @@ static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
 	return ret;
 }
 
-
 static void dma_free(void *vaddr, size_t size)
 {
 	vaddr = (void *)KSEG0ADDR(vaddr);
@@ -130,206 +248,306 @@ static void dma_free(void *vaddr, size_t size)
 }
 
 
-static void
-setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
+static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
 {
 	int i;
-	for (i=0; i<NUM_IR_DESC; i++) {
-		aup->rx_ring[i] = (volatile ring_dest_t *)
-			(rx_base + sizeof(ring_dest_t)*i);
+	for (i = 0; i < NUM_IR_DESC; i++) {
+		aup->rx_ring[i] = (volatile struct ring_dest *)
+			(rx_base + sizeof(struct ring_dest) * i);
 	}
-	for (i=0; i<NUM_IR_DESC; i++) {
-		aup->tx_ring[i] = (volatile ring_dest_t *)
-			(tx_base + sizeof(ring_dest_t)*i);
+	for (i = 0; i < NUM_IR_DESC; i++) {
+		aup->tx_ring[i] = (volatile struct ring_dest *)
+			(tx_base + sizeof(struct ring_dest) * i);
 	}
 }
 
-static int au1k_irda_init(void)
-{
-	static unsigned version_printed = 0;
-	struct au1k_private *aup;
-	struct net_device *dev;
-	int err;
-
-	if (version_printed++ == 0) printk(version);
-
-	dev = alloc_irdadev(sizeof(struct au1k_private));
-	if (!dev)
-		return -ENOMEM;
-
-	dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
-	err = au1k_irda_net_init(dev);
-	if (err)
-		goto out;
-	err = register_netdev(dev);
-	if (err)
-		goto out1;
-	ir_devs[0] = dev;
-	printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
-	return 0;
-
-out1:
-	aup = netdev_priv(dev);
-	dma_free((void *)aup->db[0].vaddr,
-		MAX_BUF_SIZE * 2*NUM_IR_DESC);
-	dma_free((void *)aup->rx_ring[0],
-		2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
-	kfree(aup->rx_buff.head);
-out:
-	free_netdev(dev);
-	return err;
-}
-
 static int au1k_irda_init_iobuf(iobuff_t *io, int size)
 {
 	io->head = kmalloc(size, GFP_KERNEL);
 	if (io->head != NULL) {
 		io->truesize = size;
 		io->in_frame = FALSE;
 		io->state = OUTSIDE_FRAME;
 		io->data = io->head;
 	}
 	return io->head ? 0 : -ENOMEM;
 }
 
-static const struct net_device_ops au1k_irda_netdev_ops = {
-	.ndo_open		= au1k_irda_start,
-	.ndo_stop		= au1k_irda_stop,
-	.ndo_start_xmit		= au1k_irda_hard_xmit,
-	.ndo_tx_timeout		= au1k_tx_timeout,
-	.ndo_do_ioctl		= au1k_irda_ioctl,
-};
-
-static int au1k_irda_net_init(struct net_device *dev)
+/*
+ * Set the IrDA communications speed.
+ */
+static int au1k_irda_set_speed(struct net_device *dev, int speed)
 {
 	struct au1k_private *aup = netdev_priv(dev);
-	int i, retval = 0, err;
-	db_dest_t *pDB, *pDBfree;
-	dma_addr_t temp;
+	volatile struct ring_dest *ptxd;
+	unsigned long control;
+	int ret = 0, timeout = 10, i;
 
-	err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
-	if (err)
-		goto out1;
+	if (speed == aup->speed)
+		return ret;
 
-	dev->netdev_ops = &au1k_irda_netdev_ops;
+	/* disable PHY first */
+	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
 
-	irda_init_max_qos_capabilies(&aup->qos);
+	/* disable RX/TX */
+	irda_write(aup, IR_CONFIG_1,
+	    irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
+	msleep(20);
+	while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
+		msleep(20);
+		if (!timeout--) {
+			printk(KERN_ERR "%s: rx/tx disable timeout\n",
+			       dev->name);
+			break;
+		}
+	}
 
-	/* The only value we must override it the baudrate */
-	aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
-		IR_115200|IR_576000 |(IR_4000000 << 8);
-
-	aup->qos.min_turn_time.bits = qos_mtt_bits;
-	irda_qos_bits_to_value(&aup->qos);
+	/* disable DMA */
+	irda_write(aup, IR_CONFIG_1,
+		   irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
+	msleep(20);
 
-	retval = -ENOMEM;
+	/* After we disable tx/rx. the index pointers go back to zero. */
+	aup->tx_head = aup->tx_tail = aup->rx_head = 0;
+	for (i = 0; i < NUM_IR_DESC; i++) {
+		ptxd = aup->tx_ring[i];
+		ptxd->flags = 0;
+		ptxd->count_0 = 0;
+		ptxd->count_1 = 0;
+	}
 
-	/* Tx ring follows rx ring + 512 bytes */
-	/* we need a 1k aligned buffer */
-	aup->rx_ring[0] = (ring_dest_t *)
-		dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
-	if (!aup->rx_ring[0])
-		goto out2;
+	for (i = 0; i < NUM_IR_DESC; i++) {
+		ptxd = aup->rx_ring[i];
+		ptxd->count_0 = 0;
+		ptxd->count_1 = 0;
+		ptxd->flags = AU_OWN;
+	}
 
-	/* allocate the data buffers */
-	aup->db[0].vaddr =
-		(void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
-	if (!aup->db[0].vaddr)
-		goto out3;
-
-	setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+	if (speed == 4000000)
+		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
+	else
+		au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
+
+	switch (speed) {
+	case 9600:
+		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
+		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+		break;
+	case 19200:
+		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
+		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+		break;
+	case 38400:
+		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
+		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+		break;
+	case 57600:
+		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
+		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+		break;
+	case 115200:
+		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
+		irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+		break;
+	case 4000000:
+		irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
+		irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
+			   IR_RX_ENABLE);
+		break;
+	default:
+		printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+		ret = -EINVAL;
+		break;
+	}
 
-	pDBfree = NULL;
-	pDB = aup->db;
-	for (i=0; i<(2*NUM_IR_DESC); i++) {
-		pDB->pnext = pDBfree;
-		pDBfree = pDB;
-		pDB->vaddr =
-			(u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
-		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
-		pDB++;
+	aup->speed = speed;
+	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
+
+	control = irda_read(aup, IR_STATUS);
+	irda_write(aup, IR_RING_PROMPT, 0);
+
+	if (control & (1 << 14)) {
+		printk(KERN_ERR "%s: configuration error\n", dev->name);
+	} else {
+		if (control & (1 << 11))
+			printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+		if (control & (1 << 12))
+			printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+		if (control & (1 << 13))
+			printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+		if (control & (1 << 10))
+			printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+		if (control & (1 << 9))
+			printk(KERN_DEBUG "%s RX enabled\n", dev->name);
 	}
-	aup->pDBfree = pDBfree;
 
-	/* attach a data buffer to each descriptor */
-	for (i=0; i<NUM_IR_DESC; i++) {
-		pDB = GetFreeDB(aup);
-		if (!pDB) goto out;
-		aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
-		aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
-		aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
-		aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
-		aup->rx_db_inuse[i] = pDB;
+	return ret;
+}
+
+static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
+{
+	struct net_device_stats *ps = &dev->stats;
+
+	ps->rx_packets++;
+
+	if (status & IR_RX_ERROR) {
+		ps->rx_errors++;
+		if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
+			ps->rx_missed_errors++;
+		if (status & IR_MAX_LEN)
+			ps->rx_length_errors++;
+		if (status & IR_CRC_ERROR)
+			ps->rx_crc_errors++;
+	} else
+		ps->rx_bytes += count;
+}
+
+static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+	struct net_device_stats *ps = &dev->stats;
+
+	ps->tx_packets++;
+	ps->tx_bytes += pkt_len;
+
+	if (status & IR_TX_ERROR) {
+		ps->tx_errors++;
+		ps->tx_aborted_errors++;
 	}
-	for (i=0; i<NUM_IR_DESC; i++) {
-		pDB = GetFreeDB(aup);
-		if (!pDB) goto out;
-		aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
-		aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
-		aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
-		aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
-		aup->tx_ring[i]->count_0 = 0;
-		aup->tx_ring[i]->count_1 = 0;
-		aup->tx_ring[i]->flags = 0;
-		aup->tx_db_inuse[i] = pDB;
+}
+
+static void au1k_tx_ack(struct net_device *dev)
+{
+	struct au1k_private *aup = netdev_priv(dev);
+	volatile struct ring_dest *ptxd;
+
+	ptxd = aup->tx_ring[aup->tx_tail];
+	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
+		update_tx_stats(dev, ptxd->flags,
+				(ptxd->count_1 << 8) | ptxd->count_0);
+		ptxd->count_0 = 0;
+		ptxd->count_1 = 0;
+		wmb();
+		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
+		ptxd = aup->tx_ring[aup->tx_tail];
+
+		if (aup->tx_full) {
+			aup->tx_full = 0;
+			netif_wake_queue(dev);
+		}
 	}
 
-#if defined(CONFIG_MIPS_DB1000)
-	/* power on */
-	bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
-		 BCSR_RESETS_IRDA_MODE_FULL);
-#endif
+	if (aup->tx_tail == aup->tx_head) {
+		if (aup->newspeed) {
+			au1k_irda_set_speed(dev, aup->newspeed);
+			aup->newspeed = 0;
+		} else {
+			irda_write(aup, IR_CONFIG_1,
+			    irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
+			irda_write(aup, IR_CONFIG_1,
+			    irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
+			irda_write(aup, IR_RING_PROMPT, 0);
+		}
+	}
+}
 
-	return 0;
+static int au1k_irda_rx(struct net_device *dev)
+{
+	struct au1k_private *aup = netdev_priv(dev);
+	volatile struct ring_dest *prxd;
+	struct sk_buff *skb;
+	struct db_dest *pDB;
+	u32 flags, count;
 
-out3:
-	dma_free((void *)aup->rx_ring[0],
-		2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
-out2:
-	kfree(aup->rx_buff.head);
-out1:
-	printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
-	return retval;
+	prxd = aup->rx_ring[aup->rx_head];
+	flags = prxd->flags;
+
+	while (!(flags & AU_OWN)) {
+		pDB = aup->rx_db_inuse[aup->rx_head];
+		count = (prxd->count_1 << 8) | prxd->count_0;
+		if (!(flags & IR_RX_ERROR)) {
+			/* good frame */
+			update_rx_stats(dev, flags, count);
+			skb = alloc_skb(count + 1, GFP_ATOMIC);
+			if (skb == NULL) {
+				dev->stats.rx_dropped++;
+				continue;
+			}
+			skb_reserve(skb, 1);
+			if (aup->speed == 4000000)
+				skb_put(skb, count);
+			else
+				skb_put(skb, count - 2);
+			skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
+						count - 2);
+			skb->dev = dev;
+			skb_reset_mac_header(skb);
+			skb->protocol = htons(ETH_P_IRDA);
+			netif_rx(skb);
+			prxd->count_0 = 0;
+			prxd->count_1 = 0;
+		}
+		prxd->flags |= AU_OWN;
+		aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
+		irda_write(aup, IR_RING_PROMPT, 0);
+
+		/* next descriptor */
+		prxd = aup->rx_ring[aup->rx_head];
+		flags = prxd->flags;
+
+	}
+	return 0;
 }
 
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct au1k_private *aup = netdev_priv(dev);
+
+	irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
+
+	au1k_irda_rx(dev);
+	au1k_tx_ack(dev);
+
+	return IRQ_HANDLED;
+}
 
 static int au1k_init(struct net_device *dev)
 {
 	struct au1k_private *aup = netdev_priv(dev);
+	u32 enable, ring_address;
 	int i;
-	u32 control;
-	u32 ring_address;
 
-	/* bring the device out of reset */
-	control = 0xe; /* coherent, clock enable, one half system clock */
-
+	enable = IR_HC | IR_CE | IR_C;
 #ifndef CONFIG_CPU_LITTLE_ENDIAN
-	control |= 1;
+	enable |= IR_BE;
 #endif
 	aup->tx_head = 0;
 	aup->tx_tail = 0;
 	aup->rx_head = 0;
 
-	for (i=0; i<NUM_IR_DESC; i++) {
+	for (i = 0; i < NUM_IR_DESC; i++)
 		aup->rx_ring[i]->flags = AU_OWN;
-	}
 
-	writel(control, IR_INTERFACE_CONFIG);
-	au_sync_delay(10);
+	irda_write(aup, IR_ENABLE, enable);
+	msleep(20);
 
-	writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
-	au_sync_delay(1);
+	/* disable PHY */
+	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+	irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+	msleep(20);
 
-	writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);
+	irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
 
 	ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
-	writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
-	writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);
+	irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
+	irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
 
-	writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);
+	irda_write(aup, IR_RING_SIZE,
+		   (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
 
-	writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
-	writel(0, IR_RING_ADDR_CMPR);
+	irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
+	irda_write(aup, IR_RING_ADDR_CMPR, 0);
 
 	au1k_irda_set_speed(dev, 9600);
 	return 0;
@@ -337,25 +555,28 @@ static int au1k_init(struct net_device *dev)
 
 static int au1k_irda_start(struct net_device *dev)
 {
-	int retval;
-	char hwname[32];
 	struct au1k_private *aup = netdev_priv(dev);
+	char hwname[32];
+	int retval;
 
-	if ((retval = au1k_init(dev))) {
+	retval = au1k_init(dev);
+	if (retval) {
 		printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
 		return retval;
 	}
 
-	if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt,
-				0, dev->name, dev))) {
-		printk(KERN_ERR "%s: unable to get IRQ %d\n",
+	retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
+			     dev->name, dev);
+	if (retval) {
+		printk(KERN_ERR "%s: unable to get IRQ %d\n",
 				dev->name, dev->irq);
 		return retval;
 	}
-	if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt,
-			0, dev->name, dev))) {
-		free_irq(AU1000_IRDA_TX_INT, dev);
-		printk(KERN_ERR "%s: unable to get IRQ %d\n",
+	retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
+			     dev->name, dev);
+	if (retval) {
+		free_irq(aup->irq_tx, dev);
+		printk(KERN_ERR "%s: unable to get IRQ %d\n",
 				dev->name, dev->irq);
 		return retval;
 	}
@@ -365,9 +586,13 @@ static int au1k_irda_start(struct net_device *dev)
 	aup->irlap = irlap_open(dev, &aup->qos, hwname);
 	netif_start_queue(dev);
 
-	writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
+	/* int enable */
+	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
+
+	/* power up */
+	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
 
-	aup->timer.expires = RUN_AT((3*HZ));
+	aup->timer.expires = RUN_AT((3 * HZ));
 	aup->timer.data = (unsigned long)dev;
 	return 0;
 }
@@ -376,11 +601,12 @@ static int au1k_irda_stop(struct net_device *dev)
 {
 	struct au1k_private *aup = netdev_priv(dev);
 
+	au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+
 	/* disable interrupts */
-	writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
-	writel(0, IR_CONFIG_1);
-	writel(0, IR_INTERFACE_CONFIG); /* disable clock */
-	au_sync();
+	irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
+	irda_write(aup, IR_CONFIG_1, 0);
+	irda_write(aup, IR_ENABLE, 0); /* disable clock */
 
 	if (aup->irlap) {
 		irlap_close(aup->irlap);
@@ -391,83 +617,12 @@ static int au1k_irda_stop(struct net_device *dev)
 	del_timer(&aup->timer);
 
 	/* disable the interrupt */
-	free_irq(AU1000_IRDA_TX_INT, dev);
-	free_irq(AU1000_IRDA_RX_INT, dev);
-	return 0;
-}
-
-static void __exit au1k_irda_exit(void)
-{
-	struct net_device *dev = ir_devs[0];
-	struct au1k_private *aup = netdev_priv(dev);
+	free_irq(aup->irq_tx, dev);
+	free_irq(aup->irq_rx, dev);
 
-	unregister_netdev(dev);
-
-	dma_free((void *)aup->db[0].vaddr,
-		MAX_BUF_SIZE * 2*NUM_IR_DESC);
-	dma_free((void *)aup->rx_ring[0],
-		2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
-	kfree(aup->rx_buff.head);
-	free_netdev(dev);
-}
-
-
-static inline void
-update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
-{
-	struct au1k_private *aup = netdev_priv(dev);
-	struct net_device_stats *ps = &aup->stats;
-
-	ps->tx_packets++;
-	ps->tx_bytes += pkt_len;
-
-	if (status & IR_TX_ERROR) {
-		ps->tx_errors++;
-		ps->tx_aborted_errors++;
-	}
-}
-
-
-static void au1k_tx_ack(struct net_device *dev)
-{
-	struct au1k_private *aup = netdev_priv(dev);
-	volatile ring_dest_t *ptxd;
-
-	ptxd = aup->tx_ring[aup->tx_tail];
-	while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
-		update_tx_stats(dev, ptxd->flags,
-				ptxd->count_1<<8 | ptxd->count_0);
-		ptxd->count_0 = 0;
-		ptxd->count_1 = 0;
-		au_sync();
-
-		aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
-		ptxd = aup->tx_ring[aup->tx_tail];
-
-		if (aup->tx_full) {
-			aup->tx_full = 0;
-			netif_wake_queue(dev);
-		}
-	}
-
-	if (aup->tx_tail == aup->tx_head) {
-		if (aup->newspeed) {
-			au1k_irda_set_speed(dev, aup->newspeed);
-			aup->newspeed = 0;
-		}
-		else {
-			writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
-				IR_CONFIG_1);
-			au_sync();
-			writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
-				IR_CONFIG_1);
-			writel(0, IR_RING_PROMPT);
-			au_sync();
-		}
-	}
+	return 0;
 }
 
-
 /*
  * Au1000 transmit routine.
  */
@@ -475,15 +630,12 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct au1k_private *aup = netdev_priv(dev);
 	int speed = irda_get_next_speed(skb);
-	volatile ring_dest_t *ptxd;
-	u32 len;
-
-	u32 flags;
-	db_dest_t *pDB;
+	volatile struct ring_dest *ptxd;
+	struct db_dest *pDB;
+	u32 len, flags;
 
-	if (speed != aup->speed && speed != -1) {
+	if (speed != aup->speed && speed != -1)
 		aup->newspeed = speed;
-	}
 
 	if ((skb->len == 0) && (aup->newspeed)) {
 		if (aup->tx_tail == aup->tx_head) {
@@ -501,138 +653,47 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
 		netif_stop_queue(dev);
 		aup->tx_full = 1;
-		return NETDEV_TX_BUSY;
-	}
-	else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
+		return 1;
+	} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
 		printk(KERN_DEBUG "%s: tx_full\n", dev->name);
 		netif_stop_queue(dev);
 		aup->tx_full = 1;
-		return NETDEV_TX_BUSY;
+		return 1;
 	}
 
 	pDB = aup->tx_db_inuse[aup->tx_head];
 
 #if 0
-	if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
-		printk("tx warning: rx byte cnt %x\n",
-			read_ir_reg(IR_RX_BYTE_CNT));
+	if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
+		printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+		       irda_read(aup, IR_RX_BYTE_CNT));
 	}
 #endif
 
 	if (aup->speed == 4000000) {
 		/* FIR */
-		skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
+		skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
 		ptxd->count_0 = skb->len & 0xff;
 		ptxd->count_1 = (skb->len >> 8) & 0xff;
-
-	}
-	else {
+	} else {
 		/* SIR */
 		len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
 		ptxd->count_0 = len & 0xff;
 		ptxd->count_1 = (len >> 8) & 0xff;
 		ptxd->flags |= IR_DIS_CRC;
-		au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
 	}
 	ptxd->flags |= AU_OWN;
-	au_sync();
+	wmb();
 
-	writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
-	writel(0, IR_RING_PROMPT);
-	au_sync();
+	irda_write(aup, IR_CONFIG_1,
+		   irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
+	irda_write(aup, IR_RING_PROMPT, 0);
 
 	dev_kfree_skb(skb);
 	aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
 	return NETDEV_TX_OK;
 }
 
-
-static inline void
-update_rx_stats(struct net_device *dev, u32 status, u32 count)
-{
-	struct au1k_private *aup = netdev_priv(dev);
-	struct net_device_stats *ps = &aup->stats;
-
-	ps->rx_packets++;
-
-	if (status & IR_RX_ERROR) {
-		ps->rx_errors++;
-		if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
-			ps->rx_missed_errors++;
-		if (status & IR_MAX_LEN)
-			ps->rx_length_errors++;
-		if (status & IR_CRC_ERROR)
-			ps->rx_crc_errors++;
-	}
-	else
-		ps->rx_bytes += count;
-}
-
-/*
- * Au1000 receive routine.
- */
-static int au1k_irda_rx(struct net_device *dev)
-{
-	struct au1k_private *aup = netdev_priv(dev);
-	struct sk_buff *skb;
-	volatile ring_dest_t *prxd;
-	u32 flags, count;
-	db_dest_t *pDB;
-
-	prxd = aup->rx_ring[aup->rx_head];
-	flags = prxd->flags;
-
-	while (!(flags & AU_OWN)) {
-		pDB = aup->rx_db_inuse[aup->rx_head];
-		count = prxd->count_1<<8 | prxd->count_0;
-		if (!(flags & IR_RX_ERROR)) {
-			/* good frame */
-			update_rx_stats(dev, flags, count);
-			skb=alloc_skb(count+1,GFP_ATOMIC);
-			if (skb == NULL) {
-				aup->netdev->stats.rx_dropped++;
-				continue;
-			}
-			skb_reserve(skb, 1);
-			if (aup->speed == 4000000)
-				skb_put(skb, count);
-			else
-				skb_put(skb, count-2);
-			skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
-			skb->dev = dev;
-			skb_reset_mac_header(skb);
-			skb->protocol = htons(ETH_P_IRDA);
-			netif_rx(skb);
-			prxd->count_0 = 0;
-			prxd->count_1 = 0;
-		}
-		prxd->flags |= AU_OWN;
-		aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
-		writel(0, IR_RING_PROMPT);
-		au_sync();
-
-		/* next descriptor */
-		prxd = aup->rx_ring[aup->rx_head];
-		flags = prxd->flags;
-
-	}
-	return 0;
-}
-
-
-static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-
-	writel(0, IR_INT_CLEAR); /* ack irda interrupts */
-
-	au1k_irda_rx(dev);
-	au1k_tx_ack(dev);
-
-	return IRQ_HANDLED;
-}
-
-
 /*
  * The Tx ring has been full longer than the watchdog timeout
  * value. The transmitter must be hung?
@@ -650,142 +711,7 @@ static void au1k_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-
-/*
- * Set the IrDA communications speed.
- */
-static int
-au1k_irda_set_speed(struct net_device *dev, int speed)
-{
-	unsigned long flags;
-	struct au1k_private *aup = netdev_priv(dev);
-	u32 control;
-	int ret = 0, timeout = 10, i;
-	volatile ring_dest_t *ptxd;
-#if defined(CONFIG_MIPS_DB1000)
-	unsigned long irda_resets;
-#endif
-
-	if (speed == aup->speed)
-		return ret;
-
-	spin_lock_irqsave(&ir_lock, flags);
-
-	/* disable PHY first */
-	writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
-
-	/* disable RX/TX */
-	writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
-			IR_CONFIG_1);
-	au_sync_delay(1);
-	while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
-		mdelay(1);
-		if (!timeout--) {
-			printk(KERN_ERR "%s: rx/tx disable timeout\n",
-					dev->name);
-			break;
-		}
-	}
-
-	/* disable DMA */
-	writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
-	au_sync_delay(1);
-
-	/*
-	 * After we disable tx/rx. the index pointers
-	 * go back to zero.
-	 */
-	aup->tx_head = aup->tx_tail = aup->rx_head = 0;
-	for (i=0; i<NUM_IR_DESC; i++) {
-		ptxd = aup->tx_ring[i];
-		ptxd->flags = 0;
-		ptxd->count_0 = 0;
-		ptxd->count_1 = 0;
-	}
-
-	for (i=0; i<NUM_IR_DESC; i++) {
-		ptxd = aup->rx_ring[i];
-		ptxd->count_0 = 0;
-		ptxd->count_1 = 0;
-		ptxd->flags = AU_OWN;
-	}
-
-	if (speed == 4000000) {
-#if defined(CONFIG_MIPS_DB1000)
-		bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
-#else /* Pb1000 and Pb1100 */
-		writel(1<<13, CPLD_AUX1);
-#endif
-	}
-	else {
-#if defined(CONFIG_MIPS_DB1000)
-		bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
-#else /* Pb1000 and Pb1100 */
-		writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
-#endif
-	}
-
-	switch (speed) {
-	case 9600:
-		writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
-		writel(IR_SIR_MODE, IR_CONFIG_1);
-		break;
-	case 19200:
-		writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
-		writel(IR_SIR_MODE, IR_CONFIG_1);
-		break;
-	case 38400:
-		writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
-		writel(IR_SIR_MODE, IR_CONFIG_1);
-		break;
-	case 57600:
-		writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
-		writel(IR_SIR_MODE, IR_CONFIG_1);
-		break;
-	case 115200:
-		writel(12<<5, IR_WRITE_PHY_CONFIG);
-		writel(IR_SIR_MODE, IR_CONFIG_1);
-		break;
-	case 4000000:
-		writel(0xF, IR_WRITE_PHY_CONFIG);
-		writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
-		break;
-	default:
-		printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
-		ret = -EINVAL;
-		break;
-	}
-
-	aup->speed = speed;
-	writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
-	au_sync();
-
-	control = read_ir_reg(IR_ENABLE);
-	writel(0, IR_RING_PROMPT);
-	au_sync();
-
-	if (control & (1<<14)) {
-		printk(KERN_ERR "%s: configuration error\n", dev->name);
-	}
-	else {
-		if (control & (1<<11))
-			printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
-		if (control & (1<<12))
-			printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
-		if (control & (1<<13))
-			printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
-		if (control & (1<<10))
-			printk(KERN_DEBUG "%s TX enabled\n", dev->name);
-		if (control & (1<<9))
-			printk(KERN_DEBUG "%s RX enabled\n", dev->name);
-	}
-
-	spin_unlock_irqrestore(&ir_lock, flags);
-	return ret;
-}
-
-static int
-au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
 {
 	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
 	struct au1k_private *aup = netdev_priv(dev);
@@ -826,8 +752,218 @@ au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
 	return ret;
 }
 
+static const struct net_device_ops au1k_irda_netdev_ops = {
+	.ndo_open		= au1k_irda_start,
+	.ndo_stop		= au1k_irda_stop,
+	.ndo_start_xmit		= au1k_irda_hard_xmit,
+	.ndo_tx_timeout		= au1k_tx_timeout,
+	.ndo_do_ioctl		= au1k_irda_ioctl,
+};
+
+static int __devinit au1k_irda_net_init(struct net_device *dev)
+{
+	struct au1k_private *aup = netdev_priv(dev);
+	struct db_dest *pDB, *pDBfree;
+	int i, err, retval = 0;
+	dma_addr_t temp;
+
+	err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
+	if (err)
+		goto out1;
+
+	dev->netdev_ops = &au1k_irda_netdev_ops;
+
+	irda_init_max_qos_capabilies(&aup->qos);
+
+	/* The only value we must override it the baudrate */
+	aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
+		IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
+
+	aup->qos.min_turn_time.bits = qos_mtt_bits;
+	irda_qos_bits_to_value(&aup->qos);
+
+	retval = -ENOMEM;
+
+	/* Tx ring follows rx ring + 512 bytes */
+	/* we need a 1k aligned buffer */
+	aup->rx_ring[0] = (struct ring_dest *)
+		dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
+			  &temp);
+	if (!aup->rx_ring[0])
+		goto out2;
+
+	/* allocate the data buffers */
+	aup->db[0].vaddr =
+		(void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+	if (!aup->db[0].vaddr)
+		goto out3;
+
+	setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+
+	pDBfree = NULL;
+	pDB = aup->db;
+	for (i = 0; i < (2 * NUM_IR_DESC); i++) {
+		pDB->pnext = pDBfree;
+		pDBfree = pDB;
+		pDB->vaddr =
+			(u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
+		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+		pDB++;
+	}
+	aup->pDBfree = pDBfree;
+
+	/* attach a data buffer to each descriptor */
+	for (i = 0; i < NUM_IR_DESC; i++) {
+		pDB = GetFreeDB(aup);
+		if (!pDB)
+			goto out3;
+		aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+		aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+		aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+		aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+		aup->rx_db_inuse[i] = pDB;
+	}
+	for (i = 0; i < NUM_IR_DESC; i++) {
+		pDB = GetFreeDB(aup);
+		if (!pDB)
+			goto out3;
+		aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+		aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+		aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+		aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+		aup->tx_ring[i]->count_0 = 0;
+		aup->tx_ring[i]->count_1 = 0;
+		aup->tx_ring[i]->flags = 0;
+		aup->tx_db_inuse[i] = pDB;
+	}
+
+	return 0;
+
+out3:
+	dma_free((void *)aup->rx_ring[0],
+		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+out2:
+	kfree(aup->rx_buff.head);
+out1:
+	printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+	return retval;
+}
+
+static int __devinit au1k_irda_probe(struct platform_device *pdev)
+{
+	struct au1k_private *aup;
+	struct net_device *dev;
+	struct resource *r;
+	int err;
+
+	dev = alloc_irdadev(sizeof(struct au1k_private));
+	if (!dev)
+		return -ENOMEM;
+
+	aup = netdev_priv(dev);
+
+	aup->platdata = pdev->dev.platform_data;
+
+	err = -EINVAL;
+	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!r)
+		goto out;
+
+	aup->irq_tx = r->start;
+
+	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!r)
+		goto out;
+
+	aup->irq_rx = r->start;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		goto out;
+
+	err = -EBUSY;
+	aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+					 pdev->name);
+	if (!aup->ioarea)
+		goto out;
+
+	aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
+	if (!aup->iobase)
+		goto out2;
+
+	dev->irq = aup->irq_rx;
+
+	err = au1k_irda_net_init(dev);
+	if (err)
+		goto out3;
+	err = register_netdev(dev);
+	if (err)
+		goto out4;
+
+	platform_set_drvdata(pdev, dev);
+
+	printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+	return 0;
+
+out4:
+	dma_free((void *)aup->db[0].vaddr,
+		 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+	dma_free((void *)aup->rx_ring[0],
+		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+	kfree(aup->rx_buff.head);
+out3:
+	iounmap(aup->iobase);
+out2:
+	release_resource(aup->ioarea);
+	kfree(aup->ioarea);
+out:
+	free_netdev(dev);
+	return err;
+}
+
+static int __devexit au1k_irda_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct au1k_private *aup = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	dma_free((void *)aup->db[0].vaddr,
+		 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+	dma_free((void *)aup->rx_ring[0],
+		 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+	kfree(aup->rx_buff.head);
+
+	iounmap(aup->iobase);
+	release_resource(aup->ioarea);
+	kfree(aup->ioarea);
+
+	free_netdev(dev);
+
+	return 0;
+}
+
+static struct platform_driver au1k_irda_driver = {
+	.driver	= {
+		.name	= "au1000-irda",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= au1k_irda_probe,
+	.remove		= __devexit_p(au1k_irda_remove),
+};
+
+static int __init au1k_irda_load(void)
+{
+	return platform_driver_register(&au1k_irda_driver);
+}
+
+static void __exit au1k_irda_unload(void)
+{
+	return platform_driver_unregister(&au1k_irda_driver);
+}
+
 MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
 MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
 
-module_init(au1k_irda_init);
-module_exit(au1k_irda_exit);
+module_init(au1k_irda_load);
+module_exit(au1k_irda_unload);