Diffstat (limited to 'fs/fuse')
-rw-r--r--	fs/fuse/control.c	138
-rw-r--r--	fs/fuse/dev.c	10
-rw-r--r--	fs/fuse/fuse_i.h	18
-rw-r--r--	fs/fuse/inode.c	80
4 files changed, 231 insertions(+), 15 deletions(-)
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 99c99dfb0373..3773fd63d2f9 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -61,6 +61,121 @@ static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf,
 	return simple_read_from_buffer(buf, len, ppos, tmp, size);
 }
 
+static ssize_t fuse_conn_limit_read(struct file *file, char __user *buf,
+				    size_t len, loff_t *ppos, unsigned val)
+{
+	char tmp[32];
+	size_t size = sprintf(tmp, "%u\n", val);
+
+	return simple_read_from_buffer(buf, len, ppos, tmp, size);
+}
+
+static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf,
+				     size_t count, loff_t *ppos, unsigned *val,
+				     unsigned global_limit)
+{
+	unsigned long t;
+	char tmp[32];
+	unsigned limit = (1 << 16) - 1;
+	int err;
+
+	if (*ppos || count >= sizeof(tmp) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(tmp, buf, count))
+		return -EINVAL;
+
+	tmp[count] = '\0';
+
+	err = strict_strtoul(tmp, 0, &t);
+	if (err)
+		return err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		limit = min(limit, global_limit);
+
+	if (t > limit)
+		return -EINVAL;
+
+	*val = t;
+
+	return count;
+}
+
+static ssize_t fuse_conn_max_background_read(struct file *file,
+					     char __user *buf, size_t len,
+					     loff_t *ppos)
+{
+	struct fuse_conn *fc;
+	unsigned val;
+
+	fc = fuse_ctl_file_conn_get(file);
+	if (!fc)
+		return 0;
+
+	val = fc->max_background;
+	fuse_conn_put(fc);
+
+	return fuse_conn_limit_read(file, buf, len, ppos, val);
+}
+
+static ssize_t fuse_conn_max_background_write(struct file *file,
+					      const char __user *buf,
+					      size_t count, loff_t *ppos)
+{
+	unsigned val;
+	ssize_t ret;
+
+	ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
+				    max_user_bgreq);
+	if (ret > 0) {
+		struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
+		if (fc) {
+			fc->max_background = val;
+			fuse_conn_put(fc);
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t fuse_conn_congestion_threshold_read(struct file *file,
+						   char __user *buf, size_t len,
+						   loff_t *ppos)
+{
+	struct fuse_conn *fc;
+	unsigned val;
+
+	fc = fuse_ctl_file_conn_get(file);
+	if (!fc)
+		return 0;
+
+	val = fc->congestion_threshold;
+	fuse_conn_put(fc);
+
+	return fuse_conn_limit_read(file, buf, len, ppos, val);
+}
+
+static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
+						    const char __user *buf,
+						    size_t count, loff_t *ppos)
+{
+	unsigned val;
+	ssize_t ret;
+
+	ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
+				    max_user_congthresh);
+	if (ret > 0) {
+		struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
+		if (fc) {
+			fc->congestion_threshold = val;
+			fuse_conn_put(fc);
+		}
+	}
+
+	return ret;
+}
+
 static const struct file_operations fuse_ctl_abort_ops = {
 	.open = nonseekable_open,
 	.write = fuse_conn_abort_write,
@@ -71,6 +186,18 @@ static const struct file_operations fuse_ctl_waiting_ops = {
 	.read = fuse_conn_waiting_read,
 };
 
+static const struct file_operations fuse_conn_max_background_ops = {
+	.open = nonseekable_open,
+	.read = fuse_conn_max_background_read,
+	.write = fuse_conn_max_background_write,
+};
+
+static const struct file_operations fuse_conn_congestion_threshold_ops = {
+	.open = nonseekable_open,
+	.read = fuse_conn_congestion_threshold_read,
+	.write = fuse_conn_congestion_threshold_write,
+};
+
 static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
 					  struct fuse_conn *fc,
 					  const char *name,
@@ -127,9 +254,14 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
 		goto err;
 
 	if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1,
 				 NULL, &fuse_ctl_waiting_ops) ||
 	    !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
-				 NULL, &fuse_ctl_abort_ops))
+				 NULL, &fuse_ctl_abort_ops) ||
+	    !fuse_ctl_add_dentry(parent, fc, "max_background", S_IFREG | 0600,
+				 1, NULL, &fuse_conn_max_background_ops) ||
+	    !fuse_ctl_add_dentry(parent, fc, "congestion_threshold",
+				 S_IFREG | 0600, 1, NULL,
+				 &fuse_conn_congestion_threshold_ops))
 		goto err;
 
 	return 0;
@@ -156,7 +288,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
 		d_drop(dentry);
 		dput(dentry);
 	}
-	fuse_control_sb->s_root->d_inode->i_nlink--;
+	drop_nlink(fuse_control_sb->s_root->d_inode);
 }
 
 static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
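
With the two control files registered above, each connection's limits become tunable at runtime through the fuse control filesystem. The sketch below is a minimal userspace illustration, not part of the patch: it assumes the control filesystem is mounted at its conventional location, /sys/fs/fuse/connections, and both the helper name set_conn_limit and the connection ID 20 are made up for the example.

/* Hypothetical helper: write a new limit into one of the per-connection
 * control files added by this patch.  Assumes the fuse control
 * filesystem is mounted at /sys/fs/fuse/connections and that "conn"
 * is the connection's ID directory. */
#include <stdio.h>
#include <stdlib.h>

static int set_conn_limit(unsigned conn, const char *name, unsigned val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/fs/fuse/connections/%u/%s", conn, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* The kernel side parses this with strict_strtoul() and rejects
	 * values above the global limit for unprivileged callers. */
	fprintf(f, "%u\n", val);
	return fclose(f);
}

int main(void)
{
	/* Example: raise max_background to 64 on connection 20. */
	return set_conn_limit(20, "max_background", 64) ? EXIT_FAILURE : 0;
}

Writes by a process without CAP_SYS_ADMIN are capped at the max_user_bgreq / max_user_congthresh module parameters, mirroring fuse_conn_limit_write() above.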
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6484eb75acd6..51d9e33d634f 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -250,7 +250,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 
 static void flush_bg_queue(struct fuse_conn *fc)
 {
-	while (fc->active_background < FUSE_MAX_BACKGROUND &&
+	while (fc->active_background < fc->max_background &&
 	       !list_empty(&fc->bg_queue)) {
 		struct fuse_req *req;
 
@@ -280,11 +280,11 @@ __releases(&fc->lock)
 	list_del(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
 	if (req->background) {
-		if (fc->num_background == FUSE_MAX_BACKGROUND) {
+		if (fc->num_background == fc->max_background) {
 			fc->blocked = 0;
 			wake_up_all(&fc->blocked_waitq);
 		}
-		if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
+		if (fc->num_background == fc->congestion_threshold &&
 		    fc->connected && fc->bdi_initialized) {
 			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
 			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
@@ -410,9 +410,9 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
 {
 	req->background = 1;
 	fc->num_background++;
-	if (fc->num_background == FUSE_MAX_BACKGROUND)
+	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
-	if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
+	if (fc->num_background == fc->congestion_threshold &&
 	    fc->bdi_initialized) {
 		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
 		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
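
The dev.c hunks swap the compile-time constants for the per-connection fields at each decision point. As a reading aid, here is a minimal userspace model of that accounting (every name is illustrative, none of this is kernel API): new background requests are blocked once num_background reaches max_background, and the backing device is marked congested when the count crosses congestion_threshold.

/* Minimal userspace model (not kernel code) of the accounting the
 * patched dev.c performs.  All names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct conn {
	unsigned max_background;
	unsigned congestion_threshold;
	unsigned num_background;
	bool blocked;
	bool congested;
};

static void bg_request_start(struct conn *c)
{
	c->num_background++;
	if (c->num_background == c->max_background)
		c->blocked = true;	/* mirrors fc->blocked = 1 */
	if (c->num_background == c->congestion_threshold)
		c->congested = true;	/* mirrors set_bdi_congested() */
}

static void bg_request_end(struct conn *c)
{
	/* Like request_end(), test before decrementing. */
	if (c->num_background == c->max_background)
		c->blocked = false;	/* mirrors the wake_up_all() path */
	if (c->num_background == c->congestion_threshold)
		c->congested = false;	/* mirrors clear_bdi_congested() */
	c->num_background--;
}

int main(void)
{
	struct conn c = { .max_background = 12, .congestion_threshold = 9 };
	for (int i = 0; i < 12; i++)
		bg_request_start(&c);
	printf("blocked=%d congested=%d\n", c.blocked, c.congested); /* 1 1 */
	bg_request_end(&c);
	printf("blocked=%d congested=%d\n", c.blocked, c.congested); /* 0 1 */
	return 0;
}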
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 52b641fc0faf..fc9c79feb5f7 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -25,12 +25,6 @@
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
 
-/** Maximum number of outstanding background requests */
-#define FUSE_MAX_BACKGROUND 12
-
-/** Congestion starts at 75% of maximum */
-#define FUSE_CONGESTION_THRESHOLD (FUSE_MAX_BACKGROUND * 75 / 100)
-
 /** Bias for fi->writectr, meaning new writepages must not be sent */
 #define FUSE_NOWRITE INT_MIN
 
@@ -38,7 +32,7 @@
 #define FUSE_NAME_MAX 1024
 
 /** Number of dentries for each connection in the control filesystem */
-#define FUSE_CTL_NUM_DENTRIES 3
+#define FUSE_CTL_NUM_DENTRIES 5
 
 /** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
    module will check permissions based on the file mode.  Otherwise no
@@ -55,6 +49,10 @@ extern struct list_head fuse_conn_list;
 /** Global mutex protecting fuse_conn_list and the control filesystem */
 extern struct mutex fuse_mutex;
 
+/** Module parameters */
+extern unsigned max_user_bgreq;
+extern unsigned max_user_congthresh;
+
 /** FUSE inode */
 struct fuse_inode {
 	/** Inode data */
@@ -349,6 +347,12 @@
 	/** rbtree of fuse_files waiting for poll events indexed by ph */
 	struct rb_root polled_files;
 
+	/** Maximum number of outstanding background requests */
+	unsigned max_background;
+
+	/** Number of background requests at which congestion starts */
+	unsigned congestion_threshold;
+
 	/** Number of requests currently in the background */
 	unsigned num_background;
 
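
The constants removed here reappear as per-connection defaults in inode.c, preserving the old 75% relationship between the two limits, and FUSE_CTL_NUM_DENTRIES grows from 3 to 5 to cover the two extra control files per connection. A trivial standalone check of the rounded-down integer arithmetic used by the new default:

/* Quick check, outside the kernel, of the default-threshold math. */
#include <assert.h>

int main(void)
{
	unsigned max_background = 12;	/* FUSE_DEFAULT_MAX_BACKGROUND */
	unsigned congestion_threshold = max_background * 3 / 4;

	assert(congestion_threshold == 9);	/* 75% of 12, rounded down */
	return 0;
}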
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e5dbecd87b0f..6da947daabda 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -14,6 +14,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/parser.h>
 #include <linux/statfs.h>
 #include <linux/random.h>
@@ -28,10 +29,34 @@ static struct kmem_cache *fuse_inode_cachep;
 struct list_head fuse_conn_list;
 DEFINE_MUTEX(fuse_mutex);
 
+static int set_global_limit(const char *val, struct kernel_param *kp);
+
+unsigned max_user_bgreq;
+module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
+		  &max_user_bgreq, 0644);
+__MODULE_PARM_TYPE(max_user_bgreq, "uint");
+MODULE_PARM_DESC(max_user_bgreq,
+ "Global limit for the maximum number of backgrounded requests an "
+ "unprivileged user can set");
+
+unsigned max_user_congthresh;
+module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
+		  &max_user_congthresh, 0644);
+__MODULE_PARM_TYPE(max_user_congthresh, "uint");
+MODULE_PARM_DESC(max_user_congthresh,
+ "Global limit for the maximum congestion threshold an "
+ "unprivileged user can set");
+
 #define FUSE_SUPER_MAGIC 0x65735546
 
 #define FUSE_DEFAULT_BLKSIZE 512
 
+/** Maximum number of outstanding background requests */
+#define FUSE_DEFAULT_MAX_BACKGROUND 12
+
+/** Congestion starts at 75% of maximum */
+#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
+
 struct fuse_mount_data {
 	int fd;
 	unsigned rootmode;
@@ -517,6 +542,8 @@ void fuse_conn_init(struct fuse_conn *fc)
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
 	atomic_set(&fc->num_waiting, 0);
+	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
+	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
 	fc->khctr = 0;
 	fc->polled_files = RB_ROOT;
 	fc->reqctr = 0;
@@ -727,6 +754,54 @@ static const struct super_operations fuse_super_operations = {
 	.show_options = fuse_show_options,
 };
 
+static void sanitize_global_limit(unsigned *limit)
+{
+	if (*limit == 0)
+		*limit = ((num_physpages << PAGE_SHIFT) >> 13) /
+			 sizeof(struct fuse_req);
+
+	if (*limit >= 1 << 16)
+		*limit = (1 << 16) - 1;
+}
+
+static int set_global_limit(const char *val, struct kernel_param *kp)
+{
+	int rv;
+
+	rv = param_set_uint(val, kp);
+	if (rv)
+		return rv;
+
+	sanitize_global_limit((unsigned *)kp->arg);
+
+	return 0;
+}
+
+static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
+{
+	int cap_sys_admin = capable(CAP_SYS_ADMIN);
+
+	if (arg->minor < 13)
+		return;
+
+	sanitize_global_limit(&max_user_bgreq);
+	sanitize_global_limit(&max_user_congthresh);
+
+	if (arg->max_background) {
+		fc->max_background = arg->max_background;
+
+		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
+			fc->max_background = max_user_bgreq;
+	}
+	if (arg->congestion_threshold) {
+		fc->congestion_threshold = arg->congestion_threshold;
+
+		if (!cap_sys_admin &&
+		    fc->congestion_threshold > max_user_congthresh)
+			fc->congestion_threshold = max_user_congthresh;
+	}
+}
+
 static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 {
 	struct fuse_init_out *arg = &req->misc.init_out;
@@ -736,6 +811,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 	else {
 		unsigned long ra_pages;
 
+		process_init_limits(fc, arg);
+
 		if (arg->minor >= 6) {
 			ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
 			if (arg->flags & FUSE_ASYNC_READ)
@@ -1150,6 +1227,9 @@ static int __init fuse_init(void)
 	if (res)
 		goto err_sysfs_cleanup;
 
+	sanitize_global_limit(&max_user_bgreq);
+	sanitize_global_limit(&max_user_congthresh);
+
 	return 0;
 
 err_sysfs_cleanup:
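
Beyond the control filesystem, the two global limits are exposed as module parameters (visible under /sys/module/fuse/parameters/ with mode 0644, or settable at module load time). When left at 0 they are derived lazily by sanitize_global_limit(): one 8192th of physical memory divided by the size of a request, clamped below 2^16. The standalone sketch below reproduces that arithmetic; the 1 GiB memory size and the 400-byte request size are assumptions for illustration (the kernel uses num_physpages and sizeof(struct fuse_req)).

/* Back-of-the-envelope check of sanitize_global_limit()'s default. */
#include <stdio.h>

int main(void)
{
	unsigned long phys_bytes = 1UL << 30;	/* assume 1 GiB of RAM */
	unsigned long req_size = 400;		/* assumed request size */
	unsigned long limit = (phys_bytes >> 13) / req_size;

	if (limit >= 1 << 16)			/* clamped like the kernel */
		limit = (1 << 16) - 1;
	printf("default global limit: %lu\n", limit);	/* prints 327 */
	return 0;
}

So on a 1 GiB machine the derived default comes out near 327, far above the historical hard-coded limit of 12.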