Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/aoe/aoe.h      |  14
-rw-r--r--  drivers/block/aoe/aoeblk.c   |  22
-rw-r--r--  drivers/block/aoe/aoechr.c   |  37
-rw-r--r--  drivers/block/aoe/aoecmd.c   | 187
-rw-r--r--  drivers/block/aoe/aoedev.c   |  69
-rw-r--r--  drivers/block/aoe/aoemain.c  |   4
-rw-r--r--  drivers/block/aoe/aoenet.c   |  22
7 files changed, 235 insertions(+), 120 deletions(-)
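
Among other changes, the aoechr.c hunks below add a fourth character-device minor, "revalidate", whose write handler parses a device specification of the form e<major>.<minor>, sets DEVFL_PAUSE on the matching aoedev, and sends a fresh AoE config query. As a rough userspace sketch of how that interface could be driven (the /dev/etherd/revalidate node name and the "e1.2" shelf.slot value are illustrative assumptions, not taken from this page):

/* Hypothetical userspace example: ask the aoe driver to revalidate
 * device e1.2 by writing its address to the new revalidate char device.
 * The /dev/etherd/revalidate path and the "e1.2" value are assumptions
 * for illustration; the string format matches the sscanf(buf, "e%d.%d", ...)
 * in the revalidate() handler added in the aoechr.c hunk below.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *spec = "e1.2";
	int fd;

	fd = open("/dev/etherd/revalidate", O_WRONLY);
	if (fd < 0) {
		perror("open /dev/etherd/revalidate");
		return 1;
	}
	if (write(fd, spec, strlen(spec)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}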
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 881c48d941b7..6eebcb7be97e 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
 /* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "14"
+#define VERSION "22"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
@@ -75,8 +75,9 @@ enum {
 	DEVFL_TKILL = (1<<1),	/* flag for timer to know when to kill self */
 	DEVFL_EXT = (1<<2),	/* device accepts lba48 commands */
 	DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */
-	DEVFL_WC_UPDATE = (1<<4), /* this device needs to update write cache status */
-	DEVFL_WORKON = (1<<4),
+	DEVFL_GDALLOC = (1<<4),	/* need to alloc gendisk */
+	DEVFL_PAUSE = (1<<5),
+	DEVFL_NEWSIZE = (1<<6),	/* need to update dev size in block layer */
 
 	BUFFL_FAIL = 1,
 };
@@ -152,16 +153,17 @@ void aoechr_exit(void);
 void aoechr_error(char *);
 
 void aoecmd_work(struct aoedev *d);
-void aoecmd_cfg(ushort, unsigned char);
+void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
+void aoecmd_sleepwork(void *vp);
 
 int aoedev_init(void);
 void aoedev_exit(void);
 struct aoedev *aoedev_by_aoeaddr(int maj, int min);
+struct aoedev *aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt);
 void aoedev_downdev(struct aoedev *d);
-struct aoedev *aoedev_set(ulong, unsigned char *, struct net_device *, ulong);
-int aoedev_busy(void);
+int aoedev_isbusy(struct aoedev *d);
 
 int aoenet_init(void);
 void aoenet_exit(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index c05ee8bffd97..32fea55fac48 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -22,7 +22,9 @@ static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
 	return snprintf(page, PAGE_SIZE,
 			"%s%s\n",
 			(d->flags & DEVFL_UP) ? "up" : "down",
-			(d->flags & DEVFL_CLOSEWAIT) ? ",closewait" : "");
+			(d->flags & DEVFL_PAUSE) ? ",paused" :
+			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
+	/* I'd rather see nopen exported so we can ditch closewait */
 }
 static ssize_t aoedisk_show_mac(struct gendisk * disk, char *page)
 {
@@ -107,8 +109,7 @@ aoeblk_release(struct inode *inode, struct file *filp)
 
 	spin_lock_irqsave(&d->lock, flags);
 
-	if (--d->nopen == 0 && (d->flags & DEVFL_CLOSEWAIT)) {
-		d->flags &= ~DEVFL_CLOSEWAIT;
+	if (--d->nopen == 0) {
 		spin_unlock_irqrestore(&d->lock, flags);
 		aoecmd_cfg(d->aoemajor, d->aoeminor);
 		return 0;
@@ -158,14 +159,14 @@ aoeblk_make_request(request_queue_t *q, struct bio *bio)
 	}
 
 	list_add_tail(&buf->bufs, &d->bufq);
-	aoecmd_work(d);
 
+	aoecmd_work(d);
 	sl = d->sendq_hd;
 	d->sendq_hd = d->sendq_tl = NULL;
 
 	spin_unlock_irqrestore(&d->lock, flags);
-
 	aoenet_xmit(sl);
+
 	return 0;
 }
 
@@ -205,7 +206,7 @@ aoeblk_gdalloc(void *vp)
 		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate disk "
 			"structure for %ld.%ld\n", d->aoemajor, d->aoeminor);
 		spin_lock_irqsave(&d->lock, flags);
-		d->flags &= ~DEVFL_WORKON;
+		d->flags &= ~DEVFL_GDALLOC;
 		spin_unlock_irqrestore(&d->lock, flags);
 		return;
 	}
@@ -218,7 +219,7 @@ aoeblk_gdalloc(void *vp)
 			"for %ld.%ld\n", d->aoemajor, d->aoeminor);
 		put_disk(gd);
 		spin_lock_irqsave(&d->lock, flags);
-		d->flags &= ~DEVFL_WORKON;
+		d->flags &= ~DEVFL_GDALLOC;
 		spin_unlock_irqrestore(&d->lock, flags);
 		return;
 	}
@@ -235,18 +236,13 @@ aoeblk_gdalloc(void *vp)
 
 	gd->queue = &d->blkq;
 	d->gd = gd;
-	d->flags &= ~DEVFL_WORKON;
+	d->flags &= ~DEVFL_GDALLOC;
 	d->flags |= DEVFL_UP;
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
 	add_disk(gd);
 	aoedisk_add_sysfs(d);
-
-	printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
-		"sectors\n", (unsigned long long)mac_addr(d->addr),
-		d->aoemajor, d->aoeminor,
-		d->fw_ver, (long long)d->ssize);
 }
 
 void
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 41ae0ede619a..5327f553b4f5 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -13,6 +13,7 @@ enum {
 	MINOR_ERR = 2,
 	MINOR_DISCOVER,
 	MINOR_INTERFACES,
+	MINOR_REVALIDATE,
 	MSGSZ = 2048,
 	NARGS = 10,
 	NMSG = 100,		/* message backlog to retain */
@@ -41,6 +42,7 @@ static struct aoe_chardev chardevs[] = {
 	{ MINOR_ERR, "err" },
 	{ MINOR_DISCOVER, "discover" },
 	{ MINOR_INTERFACES, "interfaces" },
+	{ MINOR_REVALIDATE, "revalidate" },
 };
 
 static int
@@ -62,6 +64,39 @@ interfaces(const char __user *str, size_t size)
 	return 0;
 }
 
+static int
+revalidate(const char __user *str, size_t size)
+{
+	int major, minor, n;
+	ulong flags;
+	struct aoedev *d;
+	char buf[16];
+
+	if (size >= sizeof buf)
+		return -EINVAL;
+	buf[sizeof buf - 1] = '\0';
+	if (copy_from_user(buf, str, size))
+		return -EFAULT;
+
+	/* should be e%d.%d format */
+	n = sscanf(buf, "e%d.%d", &major, &minor);
+	if (n != 2) {
+		printk(KERN_ERR "aoe: %s: invalid device specification\n",
+			__FUNCTION__);
+		return -EINVAL;
+	}
+	d = aoedev_by_aoeaddr(major, minor);
+	if (!d)
+		return -EINVAL;
+
+	spin_lock_irqsave(&d->lock, flags);
+	d->flags |= DEVFL_PAUSE;
+	spin_unlock_irqrestore(&d->lock, flags);
+	aoecmd_cfg(major, minor);
+
+	return 0;
+}
+
 void
 aoechr_error(char *msg)
 {
@@ -114,6 +149,8 @@ aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp
 	case MINOR_INTERFACES:
 		ret = interfaces(buf, cnt);
 		break;
+	case MINOR_REVALIDATE:
+		ret = revalidate(buf, cnt);
 	}
 	if (ret == 0)
 		ret = cnt;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 326ca3876b68..39da28d344fe 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/genhd.h>
 #include <asm/unaligned.h>
 #include "aoe.h"
 
@@ -28,6 +29,7 @@ new_skb(struct net_device *if_dev, ulong len)
 	skb->protocol = __constant_htons(ETH_P_AOE);
 	skb->priority = 0;
 	skb_put(skb, len);
+	memset(skb->head, 0, len);
 	skb->next = skb->prev = NULL;
 
 	/* tell the network layer not to perform IP checksums
@@ -188,12 +190,67 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f)
 	}
 }
 
+/* some callers cannot sleep, and they can call this function,
+ * transmitting the packets later, when interrupts are on
+ */
+static struct sk_buff *
+aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
+{
+	struct aoe_hdr *h;
+	struct aoe_cfghdr *ch;
+	struct sk_buff *skb, *sl, *sl_tail;
+	struct net_device *ifp;
+
+	sl = sl_tail = NULL;
+
+	read_lock(&dev_base_lock);
+	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
+		dev_hold(ifp);
+		if (!is_aoe_netif(ifp))
+			continue;
+
+		skb = new_skb(ifp, sizeof *h + sizeof *ch);
+		if (skb == NULL) {
+			printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
+			continue;
+		}
+		if (sl_tail == NULL)
+			sl_tail = skb;
+		h = (struct aoe_hdr *) skb->mac.raw;
+		memset(h, 0, sizeof *h + sizeof *ch);
+
+		memset(h->dst, 0xff, sizeof h->dst);
+		memcpy(h->src, ifp->dev_addr, sizeof h->src);
+		h->type = __constant_cpu_to_be16(ETH_P_AOE);
+		h->verfl = AOE_HVER;
+		h->major = cpu_to_be16(aoemajor);
+		h->minor = aoeminor;
+		h->cmd = AOECMD_CFG;
+
+		skb->next = sl;
+		sl = skb;
+	}
+	read_unlock(&dev_base_lock);
+
+	if (tail != NULL)
+		*tail = sl_tail;
+	return sl;
+}
+
 /* enters with d->lock held */
 void
 aoecmd_work(struct aoedev *d)
 {
 	struct frame *f;
 	struct buf *buf;
+
+	if (d->flags & DEVFL_PAUSE) {
+		if (!aoedev_isbusy(d))
+			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
+						d->aoeminor, &d->sendq_tl);
+		return;
+	}
+
 loop:
 	f = getframe(d, FREETAG);
 	if (f == NULL)
@@ -229,6 +286,8 @@ rexmit(struct aoedev *d, struct frame *f)
 	h = (struct aoe_hdr *) f->data;
 	f->tag = n;
 	h->tag = cpu_to_be32(n);
+	memcpy(h->dst, d->addr, sizeof h->dst);
+	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
 
 	skb = skb_prepare(d, f);
 	if (skb) {
@@ -272,7 +331,7 @@ rexmit_timer(ulong vp)
 	spin_lock_irqsave(&d->lock, flags);
 
 	if (d->flags & DEVFL_TKILL) {
-tdie:		spin_unlock_irqrestore(&d->lock, flags);
+		spin_unlock_irqrestore(&d->lock, flags);
 		return;
 	}
 	f = d->frames;
@@ -283,7 +342,7 @@ tdie: spin_unlock_irqrestore(&d->lock, flags);
 			n /= HZ;
 			if (n > MAXWAIT) { /* waited too long.  device failure. */
 				aoedev_downdev(d);
-				goto tdie;
+				break;
 			}
 			rexmit(d, f);
 		}
@@ -305,6 +364,37 @@ tdie: spin_unlock_irqrestore(&d->lock, flags);
 	aoenet_xmit(sl);
 }
 
+/* this function performs work that has been deferred until sleeping is OK
+ */
+void
+aoecmd_sleepwork(void *vp)
+{
+	struct aoedev *d = (struct aoedev *) vp;
+
+	if (d->flags & DEVFL_GDALLOC)
+		aoeblk_gdalloc(d);
+
+	if (d->flags & DEVFL_NEWSIZE) {
+		struct block_device *bd;
+		unsigned long flags;
+		u64 ssize;
+
+		ssize = d->gd->capacity;
+		bd = bdget_disk(d->gd, 0);
+
+		if (bd) {
+			mutex_lock(&bd->bd_inode->i_mutex);
+			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
+			mutex_unlock(&bd->bd_inode->i_mutex);
+			bdput(bd);
+		}
+		spin_lock_irqsave(&d->lock, flags);
+		d->flags |= DEVFL_UP;
+		d->flags &= ~DEVFL_NEWSIZE;
+		spin_unlock_irqrestore(&d->lock, flags);
+	}
+}
+
 static void
 ataid_complete(struct aoedev *d, unsigned char *id)
 {
@@ -339,21 +429,29 @@ ataid_complete(struct aoedev *d, unsigned char *id)
 		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
 		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
 	}
+
+	if (d->ssize != ssize)
+		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
+			"sectors\n", (unsigned long long)mac_addr(d->addr),
+			d->aoemajor, d->aoeminor,
+			d->fw_ver, (long long)ssize);
 	d->ssize = ssize;
 	d->geo.start = 0;
 	if (d->gd != NULL) {
 		d->gd->capacity = ssize;
-		d->flags |= DEVFL_UP;
-		return;
-	}
-	if (d->flags & DEVFL_WORKON) {
-		printk(KERN_INFO "aoe: ataid_complete: can't schedule work, it's already on! "
-			"(This really shouldn't happen).\n");
-		return;
+		d->flags |= DEVFL_NEWSIZE;
+	} else {
+		if (d->flags & DEVFL_GDALLOC) {
+			printk(KERN_INFO "aoe: %s: %s e%lu.%lu, %s\n",
+				__FUNCTION__,
+				"can't schedule work for",
+				d->aoemajor, d->aoeminor,
+				"it's already on! (This really shouldn't happen).\n");
+			return;
+		}
+		d->flags |= DEVFL_GDALLOC;
 	}
-	INIT_WORK(&d->work, aoeblk_gdalloc, d);
 	schedule_work(&d->work);
-	d->flags |= DEVFL_WORKON;
 }
 
 static void
@@ -419,6 +517,8 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 	ahout = (struct aoe_atahdr *) (f->data + sizeof(struct aoe_hdr));
 	buf = f->buf;
 
+	if (ahout->cmdstat == WIN_IDENTIFY)
+		d->flags &= ~DEVFL_PAUSE;
 	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
 		printk(KERN_CRIT "aoe: aoecmd_ata_rsp: ata error cmd=%2.2Xh "
 			"stat=%2.2Xh from e%ld.%ld\n",
@@ -451,7 +551,6 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 				return;
 			}
 			ataid_complete(d, (char *) (ahin+1));
-			/* d->flags |= DEVFL_WC_UPDATE; */
 			break;
 		default:
 			printk(KERN_INFO "aoe: aoecmd_ata_rsp: unrecognized "
@@ -484,51 +583,19 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 	f->tag = FREETAG;
 
 	aoecmd_work(d);
-
 	sl = d->sendq_hd;
 	d->sendq_hd = d->sendq_tl = NULL;
 
 	spin_unlock_irqrestore(&d->lock, flags);
-
 	aoenet_xmit(sl);
 }
 
 void
 aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
 {
-	struct aoe_hdr *h;
-	struct aoe_cfghdr *ch;
-	struct sk_buff *skb, *sl;
-	struct net_device *ifp;
-
-	sl = NULL;
-
-	read_lock(&dev_base_lock);
-	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
-		dev_hold(ifp);
-		if (!is_aoe_netif(ifp))
-			continue;
-
-		skb = new_skb(ifp, sizeof *h + sizeof *ch);
-		if (skb == NULL) {
-			printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
-			continue;
-		}
-		h = (struct aoe_hdr *) skb->mac.raw;
-		memset(h, 0, sizeof *h + sizeof *ch);
-
-		memset(h->dst, 0xff, sizeof h->dst);
-		memcpy(h->src, ifp->dev_addr, sizeof h->src);
-		h->type = __constant_cpu_to_be16(ETH_P_AOE);
-		h->verfl = AOE_HVER;
-		h->major = cpu_to_be16(aoemajor);
-		h->minor = aoeminor;
-		h->cmd = AOECMD_CFG;
+	struct sk_buff *sl;
 
-		skb->next = sl;
-		sl = skb;
-	}
-	read_unlock(&dev_base_lock);
+	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
 
 	aoenet_xmit(sl);
 }
@@ -561,9 +628,6 @@ aoecmd_ata_id(struct aoedev *d)
 	f->waited = 0;
 	f->writedatalen = 0;
 
-	/* this message initializes the device, so we reset the rttavg */
-	d->rttavg = MAXTIMER;
-
 	/* set up ata header */
 	ah->scnt = 1;
 	ah->cmdstat = WIN_IDENTIFY;
@@ -571,12 +635,8 @@ aoecmd_ata_id(struct aoedev *d)
 
 	skb = skb_prepare(d, f);
 
-	/* we now want to start the rexmit tracking */
-	d->flags &= ~DEVFL_TKILL;
-	d->timer.data = (ulong) d;
+	d->rttavg = MAXTIMER;
 	d->timer.function = rexmit_timer;
-	d->timer.expires = jiffies + TIMERTICK;
-	add_timer(&d->timer);
 
 	return skb;
 }
@@ -590,7 +650,7 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 	ulong flags, sysminor, aoemajor;
 	u16 bufcnt;
 	struct sk_buff *sl;
-	enum { MAXFRAMES = 8 };
+	enum { MAXFRAMES = 16 };
 
 	h = (struct aoe_hdr *) skb->mac.raw;
 	ch = (struct aoe_cfghdr *) (h+1);
@@ -618,23 +678,28 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 	if (bufcnt > MAXFRAMES)	/* keep it reasonable */
 		bufcnt = MAXFRAMES;
 
-	d = aoedev_set(sysminor, h->src, skb->dev, bufcnt);
+	d = aoedev_by_sysminor_m(sysminor, bufcnt);
 	if (d == NULL) {
-		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device set failure\n");
+		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device sysminor_m failure\n");
 		return;
 	}
 
 	spin_lock_irqsave(&d->lock, flags);
 
-	if (d->flags & (DEVFL_UP | DEVFL_CLOSEWAIT)) {
+	/* permit device to migrate mac and network interface */
+	d->ifp = skb->dev;
+	memcpy(d->addr, h->src, sizeof d->addr);
+
+	/* don't change users' perspective */
+	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
 		spin_unlock_irqrestore(&d->lock, flags);
 		return;
 	}
-
+	d->flags |= DEVFL_PAUSE;	/* force pause */
 	d->fw_ver = be16_to_cpu(ch->fwver);
 
-	/* we get here only if the device is new */
-	sl = aoecmd_ata_id(d);
+	/* check for already outstanding ataid */
+	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index ded33ba31acc..ed4258a62df5 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -12,6 +12,24 @@
 static struct aoedev *devlist;
 static spinlock_t devlist_lock;
 
+int
+aoedev_isbusy(struct aoedev *d)
+{
+	struct frame *f, *e;
+
+	f = d->frames;
+	e = f + d->nframes;
+	do {
+		if (f->tag != FREETAG) {
+			printk(KERN_DEBUG "aoe: %ld.%ld isbusy\n",
+				d->aoemajor, d->aoeminor);
+			return 1;
+		}
+	} while (++f < e);
+
+	return 0;
+}
+
 struct aoedev *
 aoedev_by_aoeaddr(int maj, int min)
 {
@@ -28,6 +46,18 @@ aoedev_by_aoeaddr(int maj, int min)
 	return d;
 }
 
+static void
+dummy_timer(ulong vp)
+{
+	struct aoedev *d;
+
+	d = (struct aoedev *)vp;
+	if (d->flags & DEVFL_TKILL)
+		return;
+	d->timer.expires = jiffies + HZ;
+	add_timer(&d->timer);
+}
+
 /* called with devlist lock held */
 static struct aoedev *
 aoedev_newdev(ulong nframes)
@@ -44,6 +74,8 @@ aoedev_newdev(ulong nframes)
 		return NULL;
 	}
 
+	INIT_WORK(&d->work, aoecmd_sleepwork, d);
+
 	d->nframes = nframes;
 	d->frames = f;
 	e = f + nframes;
@@ -52,6 +84,10 @@ aoedev_newdev(ulong nframes)
 
 	spin_lock_init(&d->lock);
 	init_timer(&d->timer);
+	d->timer.data = (ulong) d;
+	d->timer.function = dummy_timer;
+	d->timer.expires = jiffies + HZ;
+	add_timer(&d->timer);
 	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
 	INIT_LIST_HEAD(&d->bufq);
 	d->next = devlist;
@@ -67,9 +103,6 @@ aoedev_downdev(struct aoedev *d)
 	struct buf *buf;
 	struct bio *bio;
 
-	d->flags |= DEVFL_TKILL;
-	del_timer(&d->timer);
-
 	f = d->frames;
 	e = f + d->nframes;
 	for (; f<e; f->tag = FREETAG, f->buf = NULL, f++) {
@@ -92,16 +125,15 @@ aoedev_downdev(struct aoedev *d)
 		bio_endio(bio, bio->bi_size, -EIO);
 	}
 
-	if (d->nopen)
-		d->flags |= DEVFL_CLOSEWAIT;
 	if (d->gd)
 		d->gd->capacity = 0;
 
-	d->flags &= ~DEVFL_UP;
+	d->flags &= ~(DEVFL_UP | DEVFL_PAUSE);
 }
 
+/* find it or malloc it */
 struct aoedev *
-aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bufcnt)
+aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt)
 {
 	struct aoedev *d;
 	ulong flags;
@@ -112,25 +144,19 @@ aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bu
 		if (d->sysminor == sysminor)
 			break;
 
-	if (d == NULL && (d = aoedev_newdev(bufcnt)) == NULL) {
-		spin_unlock_irqrestore(&devlist_lock, flags);
-		printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
-		return NULL;
-	} /* if newdev, (d->flags & DEVFL_UP) == 0 for below */
-
-	spin_unlock_irqrestore(&devlist_lock, flags);
-	spin_lock_irqsave(&d->lock, flags);
-
-	d->ifp = ifp;
-	memcpy(d->addr, addr, sizeof d->addr);
-	if ((d->flags & DEVFL_UP) == 0) {
-		aoedev_downdev(d);	/* flushes outstanding frames */
+	if (d == NULL) {
+		d = aoedev_newdev(bufcnt);
+		if (d == NULL) {
+			spin_unlock_irqrestore(&devlist_lock, flags);
+			printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
+			return NULL;
+		}
 		d->sysminor = sysminor;
 		d->aoemajor = AOEMAJOR(sysminor);
 		d->aoeminor = AOEMINOR(sysminor);
 	}
 
-	spin_unlock_irqrestore(&d->lock, flags);
+	spin_unlock_irqrestore(&devlist_lock, flags);
 	return d;
 }
 
@@ -161,6 +187,7 @@ aoedev_exit(void)
 
 		spin_lock_irqsave(&d->lock, flags);
 		aoedev_downdev(d);
+		d->flags |= DEVFL_TKILL;
 		spin_unlock_irqrestore(&d->lock, flags);
 
 		del_timer_sync(&d->timer);
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 387588a3f4ba..de08491ebe66 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -11,7 +11,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>");
-MODULE_DESCRIPTION("AoE block/char driver for 2.6.[0-9]+");
+MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels");
 MODULE_VERSION(VERSION);
 
 enum { TINIT, TRUN, TKILL };
@@ -89,7 +89,7 @@ aoe_init(void)
 	}
 
 	printk(KERN_INFO
-	       "aoe: aoe_init: AoE v2.6-%s initialised.\n",
+	       "aoe: aoe_init: AoE v%s initialised.\n",
 	       VERSION);
 	discover_timer(TINIT);
 	return 0;
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 4be976940f69..fdff774b8ab9 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -92,18 +92,6 @@ mac_addr(char addr[6])
 	return __be64_to_cpu(n);
 }
 
-static struct sk_buff *
-skb_check(struct sk_buff *skb)
-{
-	if (skb_is_nonlinear(skb))
-	if ((skb = skb_share_check(skb, GFP_ATOMIC)))
-	if (skb_linearize(skb, GFP_ATOMIC) < 0) {
-		dev_kfree_skb(skb);
-		return NULL;
-	}
-	return skb;
-}
-
 void
 aoenet_xmit(struct sk_buff *sl)
 {
@@ -125,14 +113,14 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
 	struct aoe_hdr *h;
 	u32 n;
 
-	skb = skb_check(skb);
-	if (!skb)
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (skb == NULL)
 		return 0;
-
+	if (skb_is_nonlinear(skb))
+	if (skb_linearize(skb, GFP_ATOMIC) < 0)
+		goto exit;
 	if (!is_aoe_netif(ifp))
 		goto exit;
-
-	//skb->len += ETH_HLEN;	/* (1) */
 	skb_push(skb, ETH_HLEN);	/* (1) */
 
 	h = (struct aoe_hdr *) skb->mac.raw;