Diffstat (limited to 'drivers/net/irda')
-rw-r--r--  drivers/net/irda/Kconfig            6
-rw-r--r--  drivers/net/irda/au1000_ircc.h    125
-rw-r--r--  drivers/net/irda/au1k_ir.c       1229
3 files changed, 686 insertions, 674 deletions
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d423d18b4ad..e535137eb2d 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -313,8 +313,12 @@ config TOSHIBA_FIR
313 donauboe. 313 donauboe.
314 314
315config AU1000_FIR 315config AU1000_FIR
316 tristate "Alchemy Au1000 SIR/FIR" 316 tristate "Alchemy IrDA SIR/FIR"
317 depends on IRDA && MIPS_ALCHEMY 317 depends on IRDA && MIPS_ALCHEMY
318 help
 319	  Say Y/M here to build support for the IrDA peripheral on the
320 Alchemy Au1000 and Au1100 SoCs.
321 Say M to build a module; it will be called au1k_ir.ko
318 322
319config SMC_IRCC_FIR 323config SMC_IRCC_FIR
320 tristate "SMSC IrCC (EXPERIMENTAL)" 324 tristate "SMSC IrCC (EXPERIMENTAL)"
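
With the renamed Kconfig entry above, enabling the driver from a board
defconfig is a one-line change per symbol. A minimal sketch, assuming only
the symbol names visible in this hunk (IRDA is required by the depends
line):

	CONFIG_IRDA=m
	CONFIG_AU1000_FIR=m
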
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
deleted file mode 100644
index c072c09a8d9..00000000000
--- a/drivers/net/irda/au1000_ircc.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 *
3 * BRIEF MODULE DESCRIPTION
4 * Au1000 IrDA driver.
5 *
6 * Copyright 2001 MontaVista Software Inc.
7 * Author: MontaVista Software, Inc.
8 * ppopov@mvista.com or source@mvista.com
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
17 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
21 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
22 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 675 Mass Ave, Cambridge, MA 02139, USA.
29 */
30
31#ifndef AU1000_IRCC_H
32#define AU1000_IRCC_H
33
34#include <linux/time.h>
35
36#include <linux/spinlock.h>
37#include <linux/pm.h>
38#include <asm/io.h>
39
40#define NUM_IR_IFF 1
41#define NUM_IR_DESC 64
42#define RING_SIZE_4 0x0
43#define RING_SIZE_16 0x3
44#define RING_SIZE_64 0xF
45#define MAX_NUM_IR_DESC 64
46#define MAX_BUF_SIZE 2048
47
48#define BPS_115200 0
49#define BPS_57600 1
50#define BPS_38400 2
51#define BPS_19200 5
52#define BPS_9600 11
53#define BPS_2400 47
54
55/* Ring descriptor flags */
56#define AU_OWN (1<<7) /* tx,rx */
57
58#define IR_DIS_CRC (1<<6) /* tx */
59#define IR_BAD_CRC (1<<5) /* tx */
60#define IR_NEED_PULSE (1<<4) /* tx */
61#define IR_FORCE_UNDER (1<<3) /* tx */
62#define IR_DISABLE_TX (1<<2) /* tx */
63#define IR_HW_UNDER (1<<0) /* tx */
64#define IR_TX_ERROR (IR_DIS_CRC|IR_BAD_CRC|IR_HW_UNDER)
65
66#define IR_PHY_ERROR (1<<6) /* rx */
67#define IR_CRC_ERROR (1<<5) /* rx */
68#define IR_MAX_LEN (1<<4) /* rx */
69#define IR_FIFO_OVER (1<<3) /* rx */
70#define IR_SIR_ERROR (1<<2) /* rx */
71#define IR_RX_ERROR (IR_PHY_ERROR|IR_CRC_ERROR| \
72 IR_MAX_LEN|IR_FIFO_OVER|IR_SIR_ERROR)
73
74typedef struct db_dest {
75 struct db_dest *pnext;
76 volatile u32 *vaddr;
77 dma_addr_t dma_addr;
78} db_dest_t;
79
80
81typedef struct ring_desc {
82 u8 count_0; /* 7:0 */
83 u8 count_1; /* 12:8 */
84 u8 reserved;
85 u8 flags;
86 u8 addr_0; /* 7:0 */
87 u8 addr_1; /* 15:8 */
88 u8 addr_2; /* 23:16 */
89 u8 addr_3; /* 31:24 */
90} ring_dest_t;
91
92
93/* Private data for each instance */
94struct au1k_private {
95
96 db_dest_t *pDBfree;
97 db_dest_t db[2*NUM_IR_DESC];
98 volatile ring_dest_t *rx_ring[NUM_IR_DESC];
99 volatile ring_dest_t *tx_ring[NUM_IR_DESC];
100 db_dest_t *rx_db_inuse[NUM_IR_DESC];
101 db_dest_t *tx_db_inuse[NUM_IR_DESC];
102 u32 rx_head;
103 u32 tx_head;
104 u32 tx_tail;
105 u32 tx_full;
106
107 iobuff_t rx_buff;
108
109 struct net_device *netdev;
110
111 struct timeval stamp;
112 struct timeval now;
113 struct qos_info qos;
114 struct irlap_cb *irlap;
115
116 u8 open;
117 u32 speed;
118 u32 newspeed;
119
120 u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
121 struct timer_list timer;
122
123 spinlock_t lock; /* For serializing operations */
124};
125#endif /* AU1000_IRCC_H */
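
The ring descriptor layout deleted with this header survives unchanged in
au1k_ir.c below: a 13-bit frame count and a 32-bit buffer address, each
split into individual bytes. As a minimal sketch of that packing, here is a
hypothetical helper (fill_ring_desc is not part of the driver; it assumes
the struct ring_dest and AU_OWN definitions re-created in the next file)
mirroring what au1k_irda_net_init() and au1k_irda_hard_xmit() do by hand:

	static void fill_ring_desc(volatile struct ring_dest *rd,
				   dma_addr_t buf, u32 len)
	{
		rd->count_0 = len & 0xff;		/* count bits 7:0 */
		rd->count_1 = (len >> 8) & 0xff;	/* count bits 12:8 */
		rd->addr_0  = buf & 0xff;		/* address bits 7:0 */
		rd->addr_1  = (buf >> 8) & 0xff;	/* address bits 15:8 */
		rd->addr_2  = (buf >> 16) & 0xff;	/* address bits 23:16 */
		rd->addr_3  = (buf >> 24) & 0xff;	/* address bits 31:24 */
		rd->flags   = AU_OWN;	/* hand the descriptor to hardware */
	}
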
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index a3d696a9456..fc503aa5288 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -18,104 +18,220 @@
18 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 */ 20 */
21#include <linux/module.h> 21
22#include <linux/types.h>
23#include <linux/init.h> 22#include <linux/init.h>
24#include <linux/errno.h> 23#include <linux/module.h>
25#include <linux/netdevice.h> 24#include <linux/netdevice.h>
26#include <linux/slab.h>
27#include <linux/rtnetlink.h>
28#include <linux/interrupt.h> 25#include <linux/interrupt.h>
29#include <linux/pm.h> 26#include <linux/platform_device.h>
30#include <linux/bitops.h> 27#include <linux/slab.h>
31 28#include <linux/time.h>
32#include <asm/irq.h> 29#include <linux/types.h>
33#include <asm/io.h>
34#include <asm/au1000.h>
35#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
36#include <asm/pb1000.h>
37#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
38#include <asm/db1x00.h>
39#include <asm/mach-db1x00/bcsr.h>
40#else
41#error au1k_ir: unsupported board
42#endif
43 30
44#include <net/irda/irda.h> 31#include <net/irda/irda.h>
45#include <net/irda/irmod.h> 32#include <net/irda/irmod.h>
46#include <net/irda/wrapper.h> 33#include <net/irda/wrapper.h>
47#include <net/irda/irda_device.h> 34#include <net/irda/irda_device.h>
48#include "au1000_ircc.h" 35#include <asm/mach-au1x00/au1000.h>
36
37/* registers */
38#define IR_RING_PTR_STATUS 0x00
39#define IR_RING_BASE_ADDR_H 0x04
40#define IR_RING_BASE_ADDR_L 0x08
41#define IR_RING_SIZE 0x0C
42#define IR_RING_PROMPT 0x10
43#define IR_RING_ADDR_CMPR 0x14
44#define IR_INT_CLEAR 0x18
45#define IR_CONFIG_1 0x20
46#define IR_SIR_FLAGS 0x24
47#define IR_STATUS 0x28
48#define IR_READ_PHY_CONFIG 0x2C
49#define IR_WRITE_PHY_CONFIG 0x30
50#define IR_MAX_PKT_LEN 0x34
51#define IR_RX_BYTE_CNT 0x38
52#define IR_CONFIG_2 0x3C
53#define IR_ENABLE 0x40
54
55/* Config1 */
56#define IR_RX_INVERT_LED (1 << 0)
57#define IR_TX_INVERT_LED (1 << 1)
58#define IR_ST (1 << 2)
59#define IR_SF (1 << 3)
60#define IR_SIR (1 << 4)
61#define IR_MIR (1 << 5)
62#define IR_FIR (1 << 6)
63#define IR_16CRC (1 << 7)
64#define IR_TD (1 << 8)
65#define IR_RX_ALL (1 << 9)
66#define IR_DMA_ENABLE (1 << 10)
67#define IR_RX_ENABLE (1 << 11)
68#define IR_TX_ENABLE (1 << 12)
69#define IR_LOOPBACK (1 << 14)
70#define IR_SIR_MODE (IR_SIR | IR_DMA_ENABLE | \
71 IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
72 IR_16CRC)
73
74/* ir_status */
75#define IR_RX_STATUS (1 << 9)
76#define IR_TX_STATUS (1 << 10)
77#define IR_PHYEN (1 << 15)
78
79/* ir_write_phy_config */
80#define IR_BR(x) (((x) & 0x3f) << 10) /* baud rate */
81#define IR_PW(x) (((x) & 0x1f) << 5) /* pulse width */
82#define IR_P(x) ((x) & 0x1f) /* preamble bits */
83
84/* Config2 */
85#define IR_MODE_INV (1 << 0)
86#define IR_ONE_PIN (1 << 1)
87#define IR_PHYCLK_40MHZ (0 << 2)
88#define IR_PHYCLK_48MHZ (1 << 2)
89#define IR_PHYCLK_56MHZ (2 << 2)
90#define IR_PHYCLK_64MHZ (3 << 2)
91#define IR_DP (1 << 4)
92#define IR_DA (1 << 5)
93#define IR_FLT_HIGH (0 << 6)
94#define IR_FLT_MEDHI (1 << 6)
95#define IR_FLT_MEDLO (2 << 6)
96#define IR_FLT_LO (3 << 6)
97#define IR_IEN (1 << 8)
98
99/* ir_enable */
100#define IR_HC (1 << 3) /* divide SBUS clock by 2 */
101#define IR_CE (1 << 2) /* clock enable */
102#define IR_C (1 << 1) /* coherency bit */
103#define IR_BE (1 << 0) /* set in big endian mode */
104
105#define NUM_IR_DESC 64
106#define RING_SIZE_4 0x0
107#define RING_SIZE_16 0x3
108#define RING_SIZE_64 0xF
109#define MAX_NUM_IR_DESC 64
110#define MAX_BUF_SIZE 2048
111
112/* Ring descriptor flags */
113#define AU_OWN (1 << 7) /* tx,rx */
114#define IR_DIS_CRC (1 << 6) /* tx */
115#define IR_BAD_CRC (1 << 5) /* tx */
116#define IR_NEED_PULSE (1 << 4) /* tx */
117#define IR_FORCE_UNDER (1 << 3) /* tx */
118#define IR_DISABLE_TX (1 << 2) /* tx */
119#define IR_HW_UNDER (1 << 0) /* tx */
120#define IR_TX_ERROR (IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
121
122#define IR_PHY_ERROR (1 << 6) /* rx */
123#define IR_CRC_ERROR (1 << 5) /* rx */
124#define IR_MAX_LEN (1 << 4) /* rx */
125#define IR_FIFO_OVER (1 << 3) /* rx */
126#define IR_SIR_ERROR (1 << 2) /* rx */
127#define IR_RX_ERROR (IR_PHY_ERROR | IR_CRC_ERROR | \
128 IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
129
130struct db_dest {
131 struct db_dest *pnext;
132 volatile u32 *vaddr;
133 dma_addr_t dma_addr;
134};
49 135
50static int au1k_irda_net_init(struct net_device *); 136struct ring_dest {
51static int au1k_irda_start(struct net_device *); 137 u8 count_0; /* 7:0 */
52static int au1k_irda_stop(struct net_device *dev); 138 u8 count_1; /* 12:8 */
53static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *); 139 u8 reserved;
54static int au1k_irda_rx(struct net_device *); 140 u8 flags;
55static void au1k_irda_interrupt(int, void *); 141 u8 addr_0; /* 7:0 */
56static void au1k_tx_timeout(struct net_device *); 142 u8 addr_1; /* 15:8 */
57static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int); 143 u8 addr_2; /* 23:16 */
58static int au1k_irda_set_speed(struct net_device *dev, int speed); 144 u8 addr_3; /* 31:24 */
145};
59 146
60static void *dma_alloc(size_t, dma_addr_t *); 147/* Private data for each instance */
61static void dma_free(void *, size_t); 148struct au1k_private {
149 void __iomem *iobase;
150 int irq_rx, irq_tx;
151
152 struct db_dest *pDBfree;
153 struct db_dest db[2 * NUM_IR_DESC];
154 volatile struct ring_dest *rx_ring[NUM_IR_DESC];
155 volatile struct ring_dest *tx_ring[NUM_IR_DESC];
156 struct db_dest *rx_db_inuse[NUM_IR_DESC];
157 struct db_dest *tx_db_inuse[NUM_IR_DESC];
158 u32 rx_head;
159 u32 tx_head;
160 u32 tx_tail;
161 u32 tx_full;
162
163 iobuff_t rx_buff;
164
165 struct net_device *netdev;
166 struct timeval stamp;
167 struct timeval now;
168 struct qos_info qos;
169 struct irlap_cb *irlap;
170
171 u8 open;
172 u32 speed;
173 u32 newspeed;
174
175 struct timer_list timer;
176
177 struct resource *ioarea;
178 struct au1k_irda_platform_data *platdata;
179};
62 180
63static int qos_mtt_bits = 0x07; /* 1 ms or more */ 181static int qos_mtt_bits = 0x07; /* 1 ms or more */
64static struct net_device *ir_devs[NUM_IR_IFF];
65static char version[] __devinitdata =
66 "au1k_ircc:1.2 ppopov@mvista.com\n";
67 182
68#define RUN_AT(x) (jiffies + (x)) 183#define RUN_AT(x) (jiffies + (x))
69 184
70static DEFINE_SPINLOCK(ir_lock); 185static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
186{
187 if (p->platdata && p->platdata->set_phy_mode)
188 p->platdata->set_phy_mode(mode);
189}
71 190
72/* 191static inline unsigned long irda_read(struct au1k_private *p,
73 * IrDA peripheral bug. You have to read the register 192 unsigned long ofs)
74 * twice to get the right value. 193{
75 */ 194 /*
76u32 read_ir_reg(u32 addr) 195 * IrDA peripheral bug. You have to read the register
77{ 196 * twice to get the right value.
78 readl(addr); 197 */
79 return readl(addr); 198 (void)__raw_readl(p->iobase + ofs);
199 return __raw_readl(p->iobase + ofs);
80} 200}
81 201
202static inline void irda_write(struct au1k_private *p, unsigned long ofs,
203 unsigned long val)
204{
205 __raw_writel(val, p->iobase + ofs);
206 wmb();
207}
82 208
83/* 209/*
84 * Buffer allocation/deallocation routines. The buffer descriptor returned 210 * Buffer allocation/deallocation routines. The buffer descriptor returned
85 * has the virtual and dma address of a buffer suitable for 211 * has the virtual and dma address of a buffer suitable for
 86 * both receive and transmit operations. 212 * both receive and transmit operations.
87 */ 213 */
88static db_dest_t *GetFreeDB(struct au1k_private *aup) 214static struct db_dest *GetFreeDB(struct au1k_private *aup)
89{ 215{
90 db_dest_t *pDB; 216 struct db_dest *db;
91 pDB = aup->pDBfree; 217 db = aup->pDBfree;
92
93 if (pDB) {
94 aup->pDBfree = pDB->pnext;
95 }
96 return pDB;
97}
98 218
99static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB) 219 if (db)
100{ 220 aup->pDBfree = db->pnext;
101 db_dest_t *pDBfree = aup->pDBfree; 221 return db;
102 if (pDBfree)
103 pDBfree->pnext = pDB;
104 aup->pDBfree = pDB;
105} 222}
106 223
107
108/* 224/*
109 DMA memory allocation, derived from pci_alloc_consistent. 225 DMA memory allocation, derived from pci_alloc_consistent.
110 However, the Au1000 data cache is coherent (when programmed 226 However, the Au1000 data cache is coherent (when programmed
111 so), therefore we return KSEG0 address, not KSEG1. 227 so), therefore we return KSEG0 address, not KSEG1.
112*/ 228*/
113static void *dma_alloc(size_t size, dma_addr_t * dma_handle) 229static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
114{ 230{
115 void *ret; 231 void *ret;
116 int gfp = GFP_ATOMIC | GFP_DMA; 232 int gfp = GFP_ATOMIC | GFP_DMA;
117 233
118 ret = (void *) __get_free_pages(gfp, get_order(size)); 234 ret = (void *)__get_free_pages(gfp, get_order(size));
119 235
120 if (ret != NULL) { 236 if (ret != NULL) {
121 memset(ret, 0, size); 237 memset(ret, 0, size);
@@ -125,7 +241,6 @@ static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
125 return ret; 241 return ret;
126} 242}
127 243
128
129static void dma_free(void *vaddr, size_t size) 244static void dma_free(void *vaddr, size_t size)
130{ 245{
131 vaddr = (void *)KSEG0ADDR(vaddr); 246 vaddr = (void *)KSEG0ADDR(vaddr);
@@ -133,206 +248,306 @@ static void dma_free(void *vaddr, size_t size)
133} 248}
134 249
135 250
136static void 251static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
137setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
138{ 252{
139 int i; 253 int i;
140 for (i=0; i<NUM_IR_DESC; i++) { 254 for (i = 0; i < NUM_IR_DESC; i++) {
141 aup->rx_ring[i] = (volatile ring_dest_t *) 255 aup->rx_ring[i] = (volatile struct ring_dest *)
142 (rx_base + sizeof(ring_dest_t)*i); 256 (rx_base + sizeof(struct ring_dest) * i);
143 } 257 }
144 for (i=0; i<NUM_IR_DESC; i++) { 258 for (i = 0; i < NUM_IR_DESC; i++) {
145 aup->tx_ring[i] = (volatile ring_dest_t *) 259 aup->tx_ring[i] = (volatile struct ring_dest *)
146 (tx_base + sizeof(ring_dest_t)*i); 260 (tx_base + sizeof(struct ring_dest) * i);
147 } 261 }
148} 262}
149 263
150static int au1k_irda_init(void)
151{
152 static unsigned version_printed = 0;
153 struct au1k_private *aup;
154 struct net_device *dev;
155 int err;
156
157 if (version_printed++ == 0) printk(version);
158
159 dev = alloc_irdadev(sizeof(struct au1k_private));
160 if (!dev)
161 return -ENOMEM;
162
163 dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
164 err = au1k_irda_net_init(dev);
165 if (err)
166 goto out;
167 err = register_netdev(dev);
168 if (err)
169 goto out1;
170 ir_devs[0] = dev;
171 printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
172 return 0;
173
174out1:
175 aup = netdev_priv(dev);
176 dma_free((void *)aup->db[0].vaddr,
177 MAX_BUF_SIZE * 2*NUM_IR_DESC);
178 dma_free((void *)aup->rx_ring[0],
179 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
180 kfree(aup->rx_buff.head);
181out:
182 free_netdev(dev);
183 return err;
184}
185
186static int au1k_irda_init_iobuf(iobuff_t *io, int size) 264static int au1k_irda_init_iobuf(iobuff_t *io, int size)
187{ 265{
188 io->head = kmalloc(size, GFP_KERNEL); 266 io->head = kmalloc(size, GFP_KERNEL);
189 if (io->head != NULL) { 267 if (io->head != NULL) {
190 io->truesize = size; 268 io->truesize = size;
191 io->in_frame = FALSE; 269 io->in_frame = FALSE;
192 io->state = OUTSIDE_FRAME; 270 io->state = OUTSIDE_FRAME;
193 io->data = io->head; 271 io->data = io->head;
194 } 272 }
195 return io->head ? 0 : -ENOMEM; 273 return io->head ? 0 : -ENOMEM;
196} 274}
197 275
198static const struct net_device_ops au1k_irda_netdev_ops = { 276/*
199 .ndo_open = au1k_irda_start, 277 * Set the IrDA communications speed.
200 .ndo_stop = au1k_irda_stop, 278 */
201 .ndo_start_xmit = au1k_irda_hard_xmit, 279static int au1k_irda_set_speed(struct net_device *dev, int speed)
202 .ndo_tx_timeout = au1k_tx_timeout,
203 .ndo_do_ioctl = au1k_irda_ioctl,
204};
205
206static int au1k_irda_net_init(struct net_device *dev)
207{ 280{
208 struct au1k_private *aup = netdev_priv(dev); 281 struct au1k_private *aup = netdev_priv(dev);
209 int i, retval = 0, err; 282 volatile struct ring_dest *ptxd;
210 db_dest_t *pDB, *pDBfree; 283 unsigned long control;
211 dma_addr_t temp; 284 int ret = 0, timeout = 10, i;
212 285
213 err = au1k_irda_init_iobuf(&aup->rx_buff, 14384); 286 if (speed == aup->speed)
214 if (err) 287 return ret;
215 goto out1;
216 288
217 dev->netdev_ops = &au1k_irda_netdev_ops; 289 /* disable PHY first */
290 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
291 irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
218 292
219 irda_init_max_qos_capabilies(&aup->qos); 293 /* disable RX/TX */
294 irda_write(aup, IR_CONFIG_1,
295 irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
296 msleep(20);
297 while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
298 msleep(20);
299 if (!timeout--) {
300 printk(KERN_ERR "%s: rx/tx disable timeout\n",
301 dev->name);
302 break;
303 }
304 }
220 305
 221 /* The only value we must override is the baudrate */ 306 /* disable DMA */
222 aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| 307 irda_write(aup, IR_CONFIG_1,
223 IR_115200|IR_576000 |(IR_4000000 << 8); 308 irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
224 309 msleep(20);
225 aup->qos.min_turn_time.bits = qos_mtt_bits;
226 irda_qos_bits_to_value(&aup->qos);
227 310
228 retval = -ENOMEM; 311 /* After we disable tx/rx. the index pointers go back to zero. */
312 aup->tx_head = aup->tx_tail = aup->rx_head = 0;
313 for (i = 0; i < NUM_IR_DESC; i++) {
314 ptxd = aup->tx_ring[i];
315 ptxd->flags = 0;
316 ptxd->count_0 = 0;
317 ptxd->count_1 = 0;
318 }
229 319
230 /* Tx ring follows rx ring + 512 bytes */ 320 for (i = 0; i < NUM_IR_DESC; i++) {
231 /* we need a 1k aligned buffer */ 321 ptxd = aup->rx_ring[i];
232 aup->rx_ring[0] = (ring_dest_t *) 322 ptxd->count_0 = 0;
233 dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp); 323 ptxd->count_1 = 0;
234 if (!aup->rx_ring[0]) 324 ptxd->flags = AU_OWN;
235 goto out2; 325 }
236 326
237 /* allocate the data buffers */ 327 if (speed == 4000000)
238 aup->db[0].vaddr = 328 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
239 (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp); 329 else
240 if (!aup->db[0].vaddr) 330 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
241 goto out3;
242 331
243 setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512); 332 switch (speed) {
333 case 9600:
334 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
335 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
336 break;
337 case 19200:
338 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
339 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
340 break;
341 case 38400:
342 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
343 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
344 break;
345 case 57600:
346 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
347 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
348 break;
349 case 115200:
350 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
351 irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
352 break;
353 case 4000000:
354 irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
355 irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
356 IR_RX_ENABLE);
357 break;
358 default:
359 printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
360 ret = -EINVAL;
361 break;
362 }
244 363
245 pDBfree = NULL; 364 aup->speed = speed;
246 pDB = aup->db; 365 irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
247 for (i=0; i<(2*NUM_IR_DESC); i++) { 366
248 pDB->pnext = pDBfree; 367 control = irda_read(aup, IR_STATUS);
249 pDBfree = pDB; 368 irda_write(aup, IR_RING_PROMPT, 0);
250 pDB->vaddr = 369
251 (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i); 370 if (control & (1 << 14)) {
252 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); 371 printk(KERN_ERR "%s: configuration error\n", dev->name);
253 pDB++; 372 } else {
373 if (control & (1 << 11))
374 printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
375 if (control & (1 << 12))
376 printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
377 if (control & (1 << 13))
378 printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
379 if (control & (1 << 10))
380 printk(KERN_DEBUG "%s TX enabled\n", dev->name);
381 if (control & (1 << 9))
382 printk(KERN_DEBUG "%s RX enabled\n", dev->name);
254 } 383 }
255 aup->pDBfree = pDBfree;
256 384
257 /* attach a data buffer to each descriptor */ 385 return ret;
258 for (i=0; i<NUM_IR_DESC; i++) { 386}
259 pDB = GetFreeDB(aup); 387
260 if (!pDB) goto out; 388static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
261 aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); 389{
262 aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff); 390 struct net_device_stats *ps = &dev->stats;
263 aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff); 391
264 aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff); 392 ps->rx_packets++;
265 aup->rx_db_inuse[i] = pDB; 393
394 if (status & IR_RX_ERROR) {
395 ps->rx_errors++;
396 if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
397 ps->rx_missed_errors++;
398 if (status & IR_MAX_LEN)
399 ps->rx_length_errors++;
400 if (status & IR_CRC_ERROR)
401 ps->rx_crc_errors++;
402 } else
403 ps->rx_bytes += count;
404}
405
406static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
407{
408 struct net_device_stats *ps = &dev->stats;
409
410 ps->tx_packets++;
411 ps->tx_bytes += pkt_len;
412
413 if (status & IR_TX_ERROR) {
414 ps->tx_errors++;
415 ps->tx_aborted_errors++;
266 } 416 }
267 for (i=0; i<NUM_IR_DESC; i++) { 417}
268 pDB = GetFreeDB(aup); 418
269 if (!pDB) goto out; 419static void au1k_tx_ack(struct net_device *dev)
270 aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); 420{
271 aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff); 421 struct au1k_private *aup = netdev_priv(dev);
272 aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff); 422 volatile struct ring_dest *ptxd;
273 aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff); 423
274 aup->tx_ring[i]->count_0 = 0; 424 ptxd = aup->tx_ring[aup->tx_tail];
275 aup->tx_ring[i]->count_1 = 0; 425 while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
276 aup->tx_ring[i]->flags = 0; 426 update_tx_stats(dev, ptxd->flags,
277 aup->tx_db_inuse[i] = pDB; 427 (ptxd->count_1 << 8) | ptxd->count_0);
428 ptxd->count_0 = 0;
429 ptxd->count_1 = 0;
430 wmb();
431 aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
432 ptxd = aup->tx_ring[aup->tx_tail];
433
434 if (aup->tx_full) {
435 aup->tx_full = 0;
436 netif_wake_queue(dev);
437 }
278 } 438 }
279 439
280#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) 440 if (aup->tx_tail == aup->tx_head) {
281 /* power on */ 441 if (aup->newspeed) {
282 bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, 442 au1k_irda_set_speed(dev, aup->newspeed);
283 BCSR_RESETS_IRDA_MODE_FULL); 443 aup->newspeed = 0;
284#endif 444 } else {
445 irda_write(aup, IR_CONFIG_1,
446 irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
447 irda_write(aup, IR_CONFIG_1,
448 irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
449 irda_write(aup, IR_RING_PROMPT, 0);
450 }
451 }
452}
285 453
286 return 0; 454static int au1k_irda_rx(struct net_device *dev)
455{
456 struct au1k_private *aup = netdev_priv(dev);
457 volatile struct ring_dest *prxd;
458 struct sk_buff *skb;
459 struct db_dest *pDB;
460 u32 flags, count;
287 461
288out3: 462 prxd = aup->rx_ring[aup->rx_head];
289 dma_free((void *)aup->rx_ring[0], 463 flags = prxd->flags;
290 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t))); 464
291out2: 465 while (!(flags & AU_OWN)) {
292 kfree(aup->rx_buff.head); 466 pDB = aup->rx_db_inuse[aup->rx_head];
293out1: 467 count = (prxd->count_1 << 8) | prxd->count_0;
294 printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval); 468 if (!(flags & IR_RX_ERROR)) {
295 return retval; 469 /* good frame */
470 update_rx_stats(dev, flags, count);
471 skb = alloc_skb(count + 1, GFP_ATOMIC);
472 if (skb == NULL) {
473 dev->stats.rx_dropped++;
474 continue;
475 }
476 skb_reserve(skb, 1);
477 if (aup->speed == 4000000)
478 skb_put(skb, count);
479 else
480 skb_put(skb, count - 2);
481 skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
482 count - 2);
483 skb->dev = dev;
484 skb_reset_mac_header(skb);
485 skb->protocol = htons(ETH_P_IRDA);
486 netif_rx(skb);
487 prxd->count_0 = 0;
488 prxd->count_1 = 0;
489 }
490 prxd->flags |= AU_OWN;
491 aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
492 irda_write(aup, IR_RING_PROMPT, 0);
493
494 /* next descriptor */
495 prxd = aup->rx_ring[aup->rx_head];
496 flags = prxd->flags;
497
498 }
499 return 0;
296} 500}
297 501
502static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
503{
504 struct net_device *dev = dev_id;
505 struct au1k_private *aup = netdev_priv(dev);
506
507 irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
508
509 au1k_irda_rx(dev);
510 au1k_tx_ack(dev);
511
512 return IRQ_HANDLED;
513}
298 514
299static int au1k_init(struct net_device *dev) 515static int au1k_init(struct net_device *dev)
300{ 516{
301 struct au1k_private *aup = netdev_priv(dev); 517 struct au1k_private *aup = netdev_priv(dev);
518 u32 enable, ring_address;
302 int i; 519 int i;
303 u32 control;
304 u32 ring_address;
305 520
306 /* bring the device out of reset */ 521 enable = IR_HC | IR_CE | IR_C;
307 control = 0xe; /* coherent, clock enable, one half system clock */
308
309#ifndef CONFIG_CPU_LITTLE_ENDIAN 522#ifndef CONFIG_CPU_LITTLE_ENDIAN
310 control |= 1; 523 enable |= IR_BE;
311#endif 524#endif
312 aup->tx_head = 0; 525 aup->tx_head = 0;
313 aup->tx_tail = 0; 526 aup->tx_tail = 0;
314 aup->rx_head = 0; 527 aup->rx_head = 0;
315 528
316 for (i=0; i<NUM_IR_DESC; i++) { 529 for (i = 0; i < NUM_IR_DESC; i++)
317 aup->rx_ring[i]->flags = AU_OWN; 530 aup->rx_ring[i]->flags = AU_OWN;
318 }
319 531
320 writel(control, IR_INTERFACE_CONFIG); 532 irda_write(aup, IR_ENABLE, enable);
321 au_sync_delay(10); 533 msleep(20);
322 534
323 writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */ 535 /* disable PHY */
324 au_sync_delay(1); 536 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
537 irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
538 msleep(20);
325 539
326 writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN); 540 irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
327 541
328 ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]); 542 ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
329 writel(ring_address >> 26, IR_RING_BASE_ADDR_H); 543 irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
330 writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L); 544 irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
331 545
332 writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE); 546 irda_write(aup, IR_RING_SIZE,
547 (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
333 548
334 writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */ 549 irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
335 writel(0, IR_RING_ADDR_CMPR); 550 irda_write(aup, IR_RING_ADDR_CMPR, 0);
336 551
337 au1k_irda_set_speed(dev, 9600); 552 au1k_irda_set_speed(dev, 9600);
338 return 0; 553 return 0;
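
The IR_BR() constants in the au1k_irda_set_speed() switch above follow a
pattern the patch never states: the SIR baud-rate field looks like a divisor
from a 115200 bps base, BR = 115200 / speed - 1 (11 for 9600, 5 for 19200,
2 for 38400, 1 for 57600, 0 for 115200; BPS_2400 = 47 from the deleted
header fits the same formula). Treating that strictly as an inference, the
SIR cases could collapse into a sketch like:

	/* sir_phy_config() is hypothetical; it assumes the inferred
	 * 115200-base divisor and the 12-cycle pulse width used above. */
	static int sir_phy_config(int speed)
	{
		if (speed < 2400 || speed > 115200 || (115200 % speed))
			return -EINVAL;	/* not a supported SIR rate */
		return IR_BR(115200 / speed - 1) | IR_PW(12);
	}
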
@@ -340,25 +555,28 @@ static int au1k_init(struct net_device *dev)
340 555
341static int au1k_irda_start(struct net_device *dev) 556static int au1k_irda_start(struct net_device *dev)
342{ 557{
343 int retval;
344 char hwname[32];
345 struct au1k_private *aup = netdev_priv(dev); 558 struct au1k_private *aup = netdev_priv(dev);
559 char hwname[32];
560 int retval;
346 561
347 if ((retval = au1k_init(dev))) { 562 retval = au1k_init(dev);
563 if (retval) {
348 printk(KERN_ERR "%s: error in au1k_init\n", dev->name); 564 printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
349 return retval; 565 return retval;
350 } 566 }
351 567
352 if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt, 568 retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
353 0, dev->name, dev))) { 569 dev->name, dev);
354 printk(KERN_ERR "%s: unable to get IRQ %d\n", 570 if (retval) {
571 printk(KERN_ERR "%s: unable to get IRQ %d\n",
355 dev->name, dev->irq); 572 dev->name, dev->irq);
356 return retval; 573 return retval;
357 } 574 }
358 if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt, 575 retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
359 0, dev->name, dev))) { 576 dev->name, dev);
360 free_irq(AU1000_IRDA_TX_INT, dev); 577 if (retval) {
361 printk(KERN_ERR "%s: unable to get IRQ %d\n", 578 free_irq(aup->irq_tx, dev);
579 printk(KERN_ERR "%s: unable to get IRQ %d\n",
362 dev->name, dev->irq); 580 dev->name, dev->irq);
363 return retval; 581 return retval;
364 } 582 }
@@ -368,9 +586,13 @@ static int au1k_irda_start(struct net_device *dev)
368 aup->irlap = irlap_open(dev, &aup->qos, hwname); 586 aup->irlap = irlap_open(dev, &aup->qos, hwname);
369 netif_start_queue(dev); 587 netif_start_queue(dev);
370 588
371 writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */ 589 /* int enable */
590 irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
591
592 /* power up */
593 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
372 594
373 aup->timer.expires = RUN_AT((3*HZ)); 595 aup->timer.expires = RUN_AT((3 * HZ));
374 aup->timer.data = (unsigned long)dev; 596 aup->timer.data = (unsigned long)dev;
375 return 0; 597 return 0;
376} 598}
@@ -379,11 +601,12 @@ static int au1k_irda_stop(struct net_device *dev)
379{ 601{
380 struct au1k_private *aup = netdev_priv(dev); 602 struct au1k_private *aup = netdev_priv(dev);
381 603
604 au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
605
382 /* disable interrupts */ 606 /* disable interrupts */
383 writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2); 607 irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
384 writel(0, IR_CONFIG_1); 608 irda_write(aup, IR_CONFIG_1, 0);
385 writel(0, IR_INTERFACE_CONFIG); /* disable clock */ 609 irda_write(aup, IR_ENABLE, 0); /* disable clock */
386 au_sync();
387 610
388 if (aup->irlap) { 611 if (aup->irlap) {
389 irlap_close(aup->irlap); 612 irlap_close(aup->irlap);
@@ -394,83 +617,12 @@ static int au1k_irda_stop(struct net_device *dev)
394 del_timer(&aup->timer); 617 del_timer(&aup->timer);
395 618
396 /* disable the interrupt */ 619 /* disable the interrupt */
397 free_irq(AU1000_IRDA_TX_INT, dev); 620 free_irq(aup->irq_tx, dev);
398 free_irq(AU1000_IRDA_RX_INT, dev); 621 free_irq(aup->irq_rx, dev);
399 return 0;
400}
401
402static void __exit au1k_irda_exit(void)
403{
404 struct net_device *dev = ir_devs[0];
405 struct au1k_private *aup = netdev_priv(dev);
406 622
407 unregister_netdev(dev); 623 return 0;
408
409 dma_free((void *)aup->db[0].vaddr,
410 MAX_BUF_SIZE * 2*NUM_IR_DESC);
411 dma_free((void *)aup->rx_ring[0],
412 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
413 kfree(aup->rx_buff.head);
414 free_netdev(dev);
415}
416
417
418static inline void
419update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
420{
421 struct au1k_private *aup = netdev_priv(dev);
422 struct net_device_stats *ps = &aup->stats;
423
424 ps->tx_packets++;
425 ps->tx_bytes += pkt_len;
426
427 if (status & IR_TX_ERROR) {
428 ps->tx_errors++;
429 ps->tx_aborted_errors++;
430 }
431}
432
433
434static void au1k_tx_ack(struct net_device *dev)
435{
436 struct au1k_private *aup = netdev_priv(dev);
437 volatile ring_dest_t *ptxd;
438
439 ptxd = aup->tx_ring[aup->tx_tail];
440 while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
441 update_tx_stats(dev, ptxd->flags,
442 ptxd->count_1<<8 | ptxd->count_0);
443 ptxd->count_0 = 0;
444 ptxd->count_1 = 0;
445 au_sync();
446
447 aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
448 ptxd = aup->tx_ring[aup->tx_tail];
449
450 if (aup->tx_full) {
451 aup->tx_full = 0;
452 netif_wake_queue(dev);
453 }
454 }
455
456 if (aup->tx_tail == aup->tx_head) {
457 if (aup->newspeed) {
458 au1k_irda_set_speed(dev, aup->newspeed);
459 aup->newspeed = 0;
460 }
461 else {
462 writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
463 IR_CONFIG_1);
464 au_sync();
465 writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
466 IR_CONFIG_1);
467 writel(0, IR_RING_PROMPT);
468 au_sync();
469 }
470 }
471} 624}
472 625
473
474/* 626/*
475 * Au1000 transmit routine. 627 * Au1000 transmit routine.
476 */ 628 */
@@ -478,15 +630,12 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
478{ 630{
479 struct au1k_private *aup = netdev_priv(dev); 631 struct au1k_private *aup = netdev_priv(dev);
480 int speed = irda_get_next_speed(skb); 632 int speed = irda_get_next_speed(skb);
481 volatile ring_dest_t *ptxd; 633 volatile struct ring_dest *ptxd;
482 u32 len; 634 struct db_dest *pDB;
483 635 u32 len, flags;
484 u32 flags;
485 db_dest_t *pDB;
486 636
487 if (speed != aup->speed && speed != -1) { 637 if (speed != aup->speed && speed != -1)
488 aup->newspeed = speed; 638 aup->newspeed = speed;
489 }
490 639
491 if ((skb->len == 0) && (aup->newspeed)) { 640 if ((skb->len == 0) && (aup->newspeed)) {
492 if (aup->tx_tail == aup->tx_head) { 641 if (aup->tx_tail == aup->tx_head) {
@@ -504,138 +653,47 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
504 printk(KERN_DEBUG "%s: tx_full\n", dev->name); 653 printk(KERN_DEBUG "%s: tx_full\n", dev->name);
505 netif_stop_queue(dev); 654 netif_stop_queue(dev);
506 aup->tx_full = 1; 655 aup->tx_full = 1;
507 return NETDEV_TX_BUSY; 656 return 1;
508 } 657 } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
509 else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
510 printk(KERN_DEBUG "%s: tx_full\n", dev->name); 658 printk(KERN_DEBUG "%s: tx_full\n", dev->name);
511 netif_stop_queue(dev); 659 netif_stop_queue(dev);
512 aup->tx_full = 1; 660 aup->tx_full = 1;
513 return NETDEV_TX_BUSY; 661 return 1;
514 } 662 }
515 663
516 pDB = aup->tx_db_inuse[aup->tx_head]; 664 pDB = aup->tx_db_inuse[aup->tx_head];
517 665
518#if 0 666#if 0
519 if (read_ir_reg(IR_RX_BYTE_CNT) != 0) { 667 if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
520 printk("tx warning: rx byte cnt %x\n", 668 printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
521 read_ir_reg(IR_RX_BYTE_CNT)); 669 irda_read(aup, IR_RX_BYTE_CNT));
522 } 670 }
523#endif 671#endif
524 672
525 if (aup->speed == 4000000) { 673 if (aup->speed == 4000000) {
526 /* FIR */ 674 /* FIR */
527 skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); 675 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
528 ptxd->count_0 = skb->len & 0xff; 676 ptxd->count_0 = skb->len & 0xff;
529 ptxd->count_1 = (skb->len >> 8) & 0xff; 677 ptxd->count_1 = (skb->len >> 8) & 0xff;
530 678 } else {
531 }
532 else {
533 /* SIR */ 679 /* SIR */
534 len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE); 680 len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
535 ptxd->count_0 = len & 0xff; 681 ptxd->count_0 = len & 0xff;
536 ptxd->count_1 = (len >> 8) & 0xff; 682 ptxd->count_1 = (len >> 8) & 0xff;
537 ptxd->flags |= IR_DIS_CRC; 683 ptxd->flags |= IR_DIS_CRC;
538 au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
539 } 684 }
540 ptxd->flags |= AU_OWN; 685 ptxd->flags |= AU_OWN;
541 au_sync(); 686 wmb();
542 687
543 writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1); 688 irda_write(aup, IR_CONFIG_1,
544 writel(0, IR_RING_PROMPT); 689 irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
545 au_sync(); 690 irda_write(aup, IR_RING_PROMPT, 0);
546 691
547 dev_kfree_skb(skb); 692 dev_kfree_skb(skb);
548 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1); 693 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
549 return NETDEV_TX_OK; 694 return NETDEV_TX_OK;
550} 695}
551 696
552
553static inline void
554update_rx_stats(struct net_device *dev, u32 status, u32 count)
555{
556 struct au1k_private *aup = netdev_priv(dev);
557 struct net_device_stats *ps = &aup->stats;
558
559 ps->rx_packets++;
560
561 if (status & IR_RX_ERROR) {
562 ps->rx_errors++;
563 if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
564 ps->rx_missed_errors++;
565 if (status & IR_MAX_LEN)
566 ps->rx_length_errors++;
567 if (status & IR_CRC_ERROR)
568 ps->rx_crc_errors++;
569 }
570 else
571 ps->rx_bytes += count;
572}
573
574/*
575 * Au1000 receive routine.
576 */
577static int au1k_irda_rx(struct net_device *dev)
578{
579 struct au1k_private *aup = netdev_priv(dev);
580 struct sk_buff *skb;
581 volatile ring_dest_t *prxd;
582 u32 flags, count;
583 db_dest_t *pDB;
584
585 prxd = aup->rx_ring[aup->rx_head];
586 flags = prxd->flags;
587
588 while (!(flags & AU_OWN)) {
589 pDB = aup->rx_db_inuse[aup->rx_head];
590 count = prxd->count_1<<8 | prxd->count_0;
591 if (!(flags & IR_RX_ERROR)) {
592 /* good frame */
593 update_rx_stats(dev, flags, count);
594 skb=alloc_skb(count+1,GFP_ATOMIC);
595 if (skb == NULL) {
596 aup->netdev->stats.rx_dropped++;
597 continue;
598 }
599 skb_reserve(skb, 1);
600 if (aup->speed == 4000000)
601 skb_put(skb, count);
602 else
603 skb_put(skb, count-2);
604 skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
605 skb->dev = dev;
606 skb_reset_mac_header(skb);
607 skb->protocol = htons(ETH_P_IRDA);
608 netif_rx(skb);
609 prxd->count_0 = 0;
610 prxd->count_1 = 0;
611 }
612 prxd->flags |= AU_OWN;
613 aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
614 writel(0, IR_RING_PROMPT);
615 au_sync();
616
617 /* next descriptor */
618 prxd = aup->rx_ring[aup->rx_head];
619 flags = prxd->flags;
620
621 }
622 return 0;
623}
624
625
626static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
627{
628 struct net_device *dev = dev_id;
629
630 writel(0, IR_INT_CLEAR); /* ack irda interrupts */
631
632 au1k_irda_rx(dev);
633 au1k_tx_ack(dev);
634
635 return IRQ_HANDLED;
636}
637
638
639/* 697/*
640 * The Tx ring has been full longer than the watchdog timeout 698 * The Tx ring has been full longer than the watchdog timeout
641 * value. The transmitter must be hung? 699 * value. The transmitter must be hung?
@@ -653,142 +711,7 @@ static void au1k_tx_timeout(struct net_device *dev)
653 netif_wake_queue(dev); 711 netif_wake_queue(dev);
654} 712}
655 713
656 714static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
657/*
658 * Set the IrDA communications speed.
659 */
660static int
661au1k_irda_set_speed(struct net_device *dev, int speed)
662{
663 unsigned long flags;
664 struct au1k_private *aup = netdev_priv(dev);
665 u32 control;
666 int ret = 0, timeout = 10, i;
667 volatile ring_dest_t *ptxd;
668#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
669 unsigned long irda_resets;
670#endif
671
672 if (speed == aup->speed)
673 return ret;
674
675 spin_lock_irqsave(&ir_lock, flags);
676
677 /* disable PHY first */
678 writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
679
680 /* disable RX/TX */
681 writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
682 IR_CONFIG_1);
683 au_sync_delay(1);
684 while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
685 mdelay(1);
686 if (!timeout--) {
687 printk(KERN_ERR "%s: rx/tx disable timeout\n",
688 dev->name);
689 break;
690 }
691 }
692
693 /* disable DMA */
694 writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
695 au_sync_delay(1);
696
697 /*
698 * After we disable tx/rx. the index pointers
699 * go back to zero.
700 */
701 aup->tx_head = aup->tx_tail = aup->rx_head = 0;
702 for (i=0; i<NUM_IR_DESC; i++) {
703 ptxd = aup->tx_ring[i];
704 ptxd->flags = 0;
705 ptxd->count_0 = 0;
706 ptxd->count_1 = 0;
707 }
708
709 for (i=0; i<NUM_IR_DESC; i++) {
710 ptxd = aup->rx_ring[i];
711 ptxd->count_0 = 0;
712 ptxd->count_1 = 0;
713 ptxd->flags = AU_OWN;
714 }
715
716 if (speed == 4000000) {
717#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
718 bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
719#else /* Pb1000 and Pb1100 */
720 writel(1<<13, CPLD_AUX1);
721#endif
722 }
723 else {
724#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
725 bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
726#else /* Pb1000 and Pb1100 */
727 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
728#endif
729 }
730
731 switch (speed) {
732 case 9600:
733 writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
734 writel(IR_SIR_MODE, IR_CONFIG_1);
735 break;
736 case 19200:
737 writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
738 writel(IR_SIR_MODE, IR_CONFIG_1);
739 break;
740 case 38400:
741 writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
742 writel(IR_SIR_MODE, IR_CONFIG_1);
743 break;
744 case 57600:
745 writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
746 writel(IR_SIR_MODE, IR_CONFIG_1);
747 break;
748 case 115200:
749 writel(12<<5, IR_WRITE_PHY_CONFIG);
750 writel(IR_SIR_MODE, IR_CONFIG_1);
751 break;
752 case 4000000:
753 writel(0xF, IR_WRITE_PHY_CONFIG);
754 writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
755 break;
756 default:
757 printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
758 ret = -EINVAL;
759 break;
760 }
761
762 aup->speed = speed;
763 writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
764 au_sync();
765
766 control = read_ir_reg(IR_ENABLE);
767 writel(0, IR_RING_PROMPT);
768 au_sync();
769
770 if (control & (1<<14)) {
771 printk(KERN_ERR "%s: configuration error\n", dev->name);
772 }
773 else {
774 if (control & (1<<11))
775 printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
776 if (control & (1<<12))
777 printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
778 if (control & (1<<13))
779 printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
780 if (control & (1<<10))
781 printk(KERN_DEBUG "%s TX enabled\n", dev->name);
782 if (control & (1<<9))
783 printk(KERN_DEBUG "%s RX enabled\n", dev->name);
784 }
785
786 spin_unlock_irqrestore(&ir_lock, flags);
787 return ret;
788}
789
790static int
791au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
792{ 715{
793 struct if_irda_req *rq = (struct if_irda_req *)ifreq; 716 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
794 struct au1k_private *aup = netdev_priv(dev); 717 struct au1k_private *aup = netdev_priv(dev);
@@ -829,8 +752,218 @@ au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
829 return ret; 752 return ret;
830} 753}
831 754
755static const struct net_device_ops au1k_irda_netdev_ops = {
756 .ndo_open = au1k_irda_start,
757 .ndo_stop = au1k_irda_stop,
758 .ndo_start_xmit = au1k_irda_hard_xmit,
759 .ndo_tx_timeout = au1k_tx_timeout,
760 .ndo_do_ioctl = au1k_irda_ioctl,
761};
762
763static int __devinit au1k_irda_net_init(struct net_device *dev)
764{
765 struct au1k_private *aup = netdev_priv(dev);
766 struct db_dest *pDB, *pDBfree;
767 int i, err, retval = 0;
768 dma_addr_t temp;
769
770 err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
771 if (err)
772 goto out1;
773
774 dev->netdev_ops = &au1k_irda_netdev_ops;
775
776 irda_init_max_qos_capabilies(&aup->qos);
777
 778 /* The only value we must override is the baudrate */
779 aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
780 IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
781
782 aup->qos.min_turn_time.bits = qos_mtt_bits;
783 irda_qos_bits_to_value(&aup->qos);
784
785 retval = -ENOMEM;
786
787 /* Tx ring follows rx ring + 512 bytes */
788 /* we need a 1k aligned buffer */
789 aup->rx_ring[0] = (struct ring_dest *)
790 dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
791 &temp);
792 if (!aup->rx_ring[0])
793 goto out2;
794
795 /* allocate the data buffers */
796 aup->db[0].vaddr =
797 (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
798 if (!aup->db[0].vaddr)
799 goto out3;
800
801 setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
802
803 pDBfree = NULL;
804 pDB = aup->db;
805 for (i = 0; i < (2 * NUM_IR_DESC); i++) {
806 pDB->pnext = pDBfree;
807 pDBfree = pDB;
808 pDB->vaddr =
809 (u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
810 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
811 pDB++;
812 }
813 aup->pDBfree = pDBfree;
814
815 /* attach a data buffer to each descriptor */
816 for (i = 0; i < NUM_IR_DESC; i++) {
817 pDB = GetFreeDB(aup);
818 if (!pDB)
819 goto out3;
820 aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
821 aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
822 aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
823 aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
824 aup->rx_db_inuse[i] = pDB;
825 }
826 for (i = 0; i < NUM_IR_DESC; i++) {
827 pDB = GetFreeDB(aup);
828 if (!pDB)
829 goto out3;
830 aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
831 aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
832 aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
833 aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
834 aup->tx_ring[i]->count_0 = 0;
835 aup->tx_ring[i]->count_1 = 0;
836 aup->tx_ring[i]->flags = 0;
837 aup->tx_db_inuse[i] = pDB;
838 }
839
840 return 0;
841
842out3:
843 dma_free((void *)aup->rx_ring[0],
844 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
845out2:
846 kfree(aup->rx_buff.head);
847out1:
848 printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
849 return retval;
850}
851
852static int __devinit au1k_irda_probe(struct platform_device *pdev)
853{
854 struct au1k_private *aup;
855 struct net_device *dev;
856 struct resource *r;
857 int err;
858
859 dev = alloc_irdadev(sizeof(struct au1k_private));
860 if (!dev)
861 return -ENOMEM;
862
863 aup = netdev_priv(dev);
864
865 aup->platdata = pdev->dev.platform_data;
866
867 err = -EINVAL;
868 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
869 if (!r)
870 goto out;
871
872 aup->irq_tx = r->start;
873
874 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
875 if (!r)
876 goto out;
877
878 aup->irq_rx = r->start;
879
880 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
881 if (!r)
882 goto out;
883
884 err = -EBUSY;
885 aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
886 pdev->name);
887 if (!aup->ioarea)
888 goto out;
889
890 aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
891 if (!aup->iobase)
892 goto out2;
893
894 dev->irq = aup->irq_rx;
895
896 err = au1k_irda_net_init(dev);
897 if (err)
898 goto out3;
899 err = register_netdev(dev);
900 if (err)
901 goto out4;
902
903 platform_set_drvdata(pdev, dev);
904
905 printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
906 return 0;
907
908out4:
909 dma_free((void *)aup->db[0].vaddr,
910 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
911 dma_free((void *)aup->rx_ring[0],
912 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
913 kfree(aup->rx_buff.head);
914out3:
915 iounmap(aup->iobase);
916out2:
917 release_resource(aup->ioarea);
918 kfree(aup->ioarea);
919out:
920 free_netdev(dev);
921 return err;
922}
923
924static int __devexit au1k_irda_remove(struct platform_device *pdev)
925{
926 struct net_device *dev = platform_get_drvdata(pdev);
927 struct au1k_private *aup = netdev_priv(dev);
928
929 unregister_netdev(dev);
930
931 dma_free((void *)aup->db[0].vaddr,
932 MAX_BUF_SIZE * 2 * NUM_IR_DESC);
933 dma_free((void *)aup->rx_ring[0],
934 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
935 kfree(aup->rx_buff.head);
936
937 iounmap(aup->iobase);
938 release_resource(aup->ioarea);
939 kfree(aup->ioarea);
940
941 free_netdev(dev);
942
943 return 0;
944}
945
946static struct platform_driver au1k_irda_driver = {
947 .driver = {
948 .name = "au1000-irda",
949 .owner = THIS_MODULE,
950 },
951 .probe = au1k_irda_probe,
952 .remove = __devexit_p(au1k_irda_remove),
953};
954
955static int __init au1k_irda_load(void)
956{
957 return platform_driver_register(&au1k_irda_driver);
958}
959
960static void __exit au1k_irda_unload(void)
961{
962 return platform_driver_unregister(&au1k_irda_driver);
963}
964
832MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>"); 965MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
833MODULE_DESCRIPTION("Au1000 IrDA Device Driver"); 966MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
834 967
835module_init(au1k_irda_init); 968module_init(au1k_irda_load);
836module_exit(au1k_irda_exit); 969module_exit(au1k_irda_unload);
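
For the converted driver to bind, a board file must now register a platform
device named "au1000-irda" with resources in the order au1k_irda_probe()
expects: IRQ 0 = TX, IRQ 1 = RX, MEM 0 = register block, plus optional
platform data carrying the set_phy_mode() hook called with the
AU1000_IRDA_PHY_MODE_* values above. A minimal sketch under those
assumptions (only the set_phy_mode member of au1k_irda_platform_data is
known from this patch; the register base shown is illustrative):

	static struct resource au1k_irda_res[] = {
		{ .start = AU1000_IRDA_TX_INT, .end = AU1000_IRDA_TX_INT,
		  .flags = IORESOURCE_IRQ },		/* 0: TX interrupt */
		{ .start = AU1000_IRDA_RX_INT, .end = AU1000_IRDA_RX_INT,
		  .flags = IORESOURCE_IRQ },		/* 1: RX interrupt */
		{ .start = 0x10300000, .end = 0x10300fff, /* illustrative base */
		  .flags = IORESOURCE_MEM },
	};

	static void board_irda_set_phy_mode(int mode)
	{
		/* board-specific: route AU1000_IRDA_PHY_MODE_{OFF,SIR,FIR}
		 * to the transceiver, e.g. via BCSR on DB1x00 boards */
	}

	static struct au1k_irda_platform_data board_irda_pdata = {
		.set_phy_mode	= board_irda_set_phy_mode,
	};

	static struct platform_device board_irda_dev = {
		.name			= "au1000-irda", /* must match driver */
		.id			= -1,
		.dev.platform_data	= &board_irda_pdata,
		.resource		= au1k_irda_res,
		.num_resources		= ARRAY_SIZE(au1k_irda_res),
	};

	/* from board init code: platform_device_register(&board_irda_dev); */
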