author    Greg Kroah-Hartman <gregkh@suse.de>    2006-01-06 15:59:59 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>    2006-01-06 15:59:59 -0500
commit    ccf18968b1bbc2fb117190a1984ac2a826dac228 (patch)
tree      7bc8fbf5722aecf1e84fa50c31c657864cba1daa /drivers
parent    e91c021c487110386a07facd0396e6c3b7cf9c1f (diff)
parent    d99cf9d679a520d67f81d805b7cb91c68e1847f0 (diff)
Merge ../torvalds-2.6/
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/memory.c | 7
-rw-r--r--  drivers/block/DAC960.c | 2
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/cciss.c | 2
-rw-r--r--  drivers/block/cpqarray.c | 2
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/loop.c | 23
-rw-r--r--  drivers/block/nbd.c | 124
-rw-r--r--  drivers/block/paride/Kconfig | 5
-rw-r--r--  drivers/block/rd.c | 4
-rw-r--r--  drivers/block/sx8.c | 2
-rw-r--r--  drivers/block/ub.c | 2
-rw-r--r--  drivers/block/viodasd.c | 2
-rw-r--r--  drivers/cdrom/cdu31a.c | 2
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/hangcheck-timer.c | 2
-rw-r--r--  drivers/char/hw_random.c | 70
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 4
-rw-r--r--  drivers/char/watchdog/Kconfig | 2
-rw-r--r--  drivers/ide/ide-cd.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 137
-rw-r--r--  drivers/ide/ide-io.c | 11
-rw-r--r--  drivers/ieee1394/Kconfig | 23
-rw-r--r--  drivers/ieee1394/Makefile | 2
-rw-r--r--  drivers/ieee1394/csr1212.c | 21
-rw-r--r--  drivers/ieee1394/csr1212.h | 2
-rw-r--r--  drivers/ieee1394/dma.c | 73
-rw-r--r--  drivers/ieee1394/dv1394.c | 13
-rw-r--r--  drivers/ieee1394/eth1394.c | 20
-rw-r--r--  drivers/ieee1394/highlevel.c | 18
-rw-r--r--  drivers/ieee1394/hosts.c | 30
-rw-r--r--  drivers/ieee1394/hosts.h | 162
-rw-r--r--  drivers/ieee1394/ieee1394-ioctl.h | 8
-rw-r--r--  drivers/ieee1394/ieee1394.h | 19
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 827
-rw-r--r--  drivers/ieee1394/ieee1394_core.h | 100
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.c | 389
-rw-r--r--  drivers/ieee1394/iso.c | 102
-rw-r--r--  drivers/ieee1394/nodemgr.c | 50
-rw-r--r--  drivers/ieee1394/nodemgr.h | 18
-rw-r--r--  drivers/ieee1394/ohci1394.c | 43
-rw-r--r--  drivers/ieee1394/ohci1394.h | 4
-rw-r--r--  drivers/ieee1394/pcilynx.c | 2
-rw-r--r--  drivers/ieee1394/raw1394.c | 79
-rw-r--r--  drivers/ieee1394/sbp2.c | 1040
-rw-r--r--  drivers/ieee1394/sbp2.h | 70
-rw-r--r--  drivers/ieee1394/video1394.c | 106
-rw-r--r--  drivers/input/evdev.c | 2
-rw-r--r--  drivers/macintosh/therm_adt746x.c | 39
-rw-r--r--  drivers/macintosh/therm_pm72.c | 7
-rw-r--r--  drivers/macintosh/windfarm_lm75_sensor.c | 7
-rw-r--r--  drivers/md/bitmap.c | 114
-rw-r--r--  drivers/md/dm-crypt.c | 5
-rw-r--r--  drivers/md/dm-io.h | 3
-rw-r--r--  drivers/md/dm-ioctl.c | 21
-rw-r--r--  drivers/md/dm-log.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 13
-rw-r--r--  drivers/md/dm-snap.c | 25
-rw-r--r--  drivers/md/dm.c | 95
-rw-r--r--  drivers/md/dm.h | 5
-rw-r--r--  drivers/md/faulty.c | 9
-rw-r--r--  drivers/md/kcopyd.c | 3
-rw-r--r--  drivers/md/linear.c | 14
-rw-r--r--  drivers/md/md.c | 893
-rw-r--r--  drivers/md/multipath.c | 22
-rw-r--r--  drivers/md/raid0.c | 26
-rw-r--r--  drivers/md/raid1.c | 726
-rw-r--r--  drivers/md/raid10.c | 544
-rw-r--r--  drivers/md/raid5.c | 174
-rw-r--r--  drivers/md/raid6main.c | 348
-rw-r--r--  drivers/media/video/cpia_pp.c | 30
-rw-r--r--  drivers/message/i2o/Kconfig | 12
-rw-r--r--  drivers/message/i2o/bus-osm.c | 23
-rw-r--r--  drivers/message/i2o/config-osm.c | 2
-rw-r--r--  drivers/message/i2o/core.h | 20
-rw-r--r--  drivers/message/i2o/device.c | 339
-rw-r--r--  drivers/message/i2o/driver.c | 12
-rw-r--r--  drivers/message/i2o/exec-osm.c | 114
-rw-r--r--  drivers/message/i2o/i2o_block.c | 190
-rw-r--r--  drivers/message/i2o/i2o_config.c | 196
-rw-r--r--  drivers/message/i2o/i2o_lan.h | 38
-rw-r--r--  drivers/message/i2o/i2o_proc.c | 2
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 89
-rw-r--r--  drivers/message/i2o/iop.c | 356
-rw-r--r--  drivers/message/i2o/pci.c | 7
-rw-r--r--  drivers/mmc/mmc_block.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/plip.c | 2
-rw-r--r--  drivers/parport/Kconfig | 2
-rw-r--r--  drivers/parport/daisy.c | 51
-rw-r--r--  drivers/parport/ieee1284_ops.c | 62
-rw-r--r--  drivers/parport/parport_pc.c | 30
-rw-r--r--  drivers/parport/probe.c | 199
-rw-r--r--  drivers/parport/share.c | 1
-rw-r--r--  drivers/pnp/pnpbios/bioscalls.c | 45
-rw-r--r--  drivers/s390/Makefile | 2
-rw-r--r--  drivers/s390/block/Kconfig | 8
-rw-r--r--  drivers/s390/block/dasd.c | 34
-rw-r--r--  drivers/s390/block/dasd_diag.c | 11
-rw-r--r--  drivers/s390/block/dasd_diag.h | 31
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 9
-rw-r--r--  drivers/s390/block/dasd_fba.c | 6
-rw-r--r--  drivers/s390/block/dasd_int.h | 3
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 5
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 4
-rw-r--r--  drivers/s390/char/sclp_cpi.c | 2
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 4
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 2
-rw-r--r--  drivers/s390/cio/blacklist.c | 234
-rw-r--r--  drivers/s390/cio/blacklist.h | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 6
-rw-r--r--  drivers/s390/cio/chsc.c | 473
-rw-r--r--  drivers/s390/cio/chsc.h | 13
-rw-r--r--  drivers/s390/cio/cio.c | 168
-rw-r--r--  drivers/s390/cio/cio.h | 11
-rw-r--r--  drivers/s390/cio/cmf.c | 8
-rw-r--r--  drivers/s390/cio/css.c | 297
-rw-r--r--  drivers/s390/cio/css.h | 43
-rw-r--r--  drivers/s390/cio/device.c | 47
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 29
-rw-r--r--  drivers/s390/cio/device_id.c | 26
-rw-r--r--  drivers/s390/cio/device_ops.c | 4
-rw-r--r--  drivers/s390/cio/device_pgid.c | 56
-rw-r--r--  drivers/s390/cio/device_status.c | 14
-rw-r--r--  drivers/s390/cio/ioasm.h | 86
-rw-r--r--  drivers/s390/cio/qdio.c | 713
-rw-r--r--  drivers/s390/cio/qdio.h | 144
-rw-r--r--  drivers/s390/cio/schid.h | 26
-rw-r--r--  drivers/s390/crypto/z90common.h | 9
-rw-r--r--  drivers/s390/crypto/z90crypt.h | 13
-rw-r--r--  drivers/s390/crypto/z90hardware.c | 309
-rw-r--r--  drivers/s390/crypto/z90main.c | 111
-rw-r--r--  drivers/s390/net/Kconfig | 2
-rw-r--r--  drivers/s390/net/claw.c | 6
-rw-r--r--  drivers/s390/net/cu3088.c | 3
-rw-r--r--  drivers/s390/net/iucv.c | 10
-rw-r--r--  drivers/s390/net/qeth_main.c | 21
-rw-r--r--  drivers/s390/s390_rdev.c | 53
-rw-r--r--  drivers/s390/s390mach.c | 66
-rw-r--r--  drivers/s390/sysinfo.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/ahci.c | 1
-rw-r--r--  drivers/scsi/ata_piix.c | 5
-rw-r--r--  drivers/scsi/hosts.c | 9
-rw-r--r--  drivers/scsi/ide-scsi.c | 4
-rw-r--r--  drivers/scsi/libata-core.c | 145
-rw-r--r--  drivers/scsi/libata-scsi.c | 48
-rw-r--r--  drivers/scsi/libata.h | 4
-rw-r--r--  drivers/scsi/sata_mv.c | 1
-rw-r--r--  drivers/scsi/sata_nv.c | 1
-rw-r--r--  drivers/scsi/sata_promise.c | 1
-rw-r--r--  drivers/scsi/sata_sil.c | 1
-rw-r--r--  drivers/scsi/sata_sil24.c | 1
-rw-r--r--  drivers/scsi/sata_sis.c | 1
-rw-r--r--  drivers/scsi/sata_svw.c | 1
-rw-r--r--  drivers/scsi/sata_sx4.c | 1
-rw-r--r--  drivers/scsi/sata_uli.c | 1
-rw-r--r--  drivers/scsi/sata_via.c | 1
-rw-r--r--  drivers/scsi/sata_vsc.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 50
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 31
-rw-r--r--  drivers/scsi/sd.c | 85
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 28
166 files changed, 7169 insertions, 4952 deletions
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7e1d077874df..58801d718cc2 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -49,12 +49,12 @@ static struct kset_uevent_ops memory_uevent_ops = {
 
 static struct notifier_block *memory_chain;
 
-static int register_memory_notifier(struct notifier_block *nb)
+int register_memory_notifier(struct notifier_block *nb)
 {
         return notifier_chain_register(&memory_chain, nb);
 }
 
-static void unregister_memory_notifier(struct notifier_block *nb)
+void unregister_memory_notifier(struct notifier_block *nb)
 {
         notifier_chain_unregister(&memory_chain, nb);
 }
@@ -62,8 +62,7 @@ static void unregister_memory_notifier(struct notifier_block *nb)
 /*
  * register_memory - Setup a sysfs device for a memory block
  */
-static int
-register_memory(struct memory_block *memory, struct mem_section *section,
+int register_memory(struct memory_block *memory, struct mem_section *section,
                 struct node *root)
 {
         int error;
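
Note on the hunk above: dropping `static` turns register_memory_notifier()/unregister_memory_notifier() into kernel-wide entry points. A minimal sketch of a caller, assuming the declarations are exported through a header (my_memory_event and my_memory_nb are hypothetical names, not part of this patch):

        #include <linux/notifier.h>

        /* hypothetical: react to memory hotplug events delivered via memory_chain */
        static int my_memory_event(struct notifier_block *nb,
                                   unsigned long action, void *data)
        {
                return NOTIFY_OK;
        }

        static struct notifier_block my_memory_nb = {
                .notifier_call = my_memory_event,
        };

        /* in subsystem init: register_memory_notifier(&my_memory_nb); */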
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 70eaa5c7ac08..21097a39a057 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
 
   if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
 
-        end_that_request_last(Request);
+        end_that_request_last(Request, UpToDate);
 
         if (Command->Completion) {
                 complete(Command->Completion);
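
This is the first of many hunks in this merge adapting callers to the block-layer change where end_that_request_last() takes the completion status as a second argument instead of deducing it from the request. A minimal sketch of the updated completion pattern (my_end_request is a hypothetical helper, not from the patch):

        static void my_end_request(struct request *req, int uptodate)
        {
                /* finish the transferred sectors first ... */
                if (!end_that_request_first(req, uptodate, req->hard_nr_sectors))
                        /* ... then complete the request, passing the status through */
                        end_that_request_last(req, uptodate);
        }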
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c4b9d2adfc08..139cbba76180 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -117,7 +117,7 @@ config BLK_DEV_XD
 
 config PARIDE
         tristate "Parallel port IDE device support"
-        depends on PARPORT
+        depends on PARPORT_PC
         ---help---
           There are many external CD-ROM and disk devices that connect through
           your computer's parallel port. Most of them are actually IDE devices
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c3441b3f086e..d2815b7a9150 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
         printk("Done with %p\n", cmd->rq);
 #endif /* CCISS_DEBUG */
 
-        end_that_request_last(cmd->rq);
+        end_that_request_last(cmd->rq, status ? 1 : -EIO);
         cmd_free(h,cmd,1);
 }
 
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index cf1822a6361c..9bddb6874873 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
         complete_buffers(cmd->rq->bio, ok);
 
         DBGPX(printk("Done with %p\n", cmd->rq););
-        end_that_request_last(cmd->rq);
+        end_that_request_last(cmd->rq, ok ? 1 : -EIO);
 }
 
 /*
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f7e765a1d313..a5b857c5c4b8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate)
         add_disk_randomness(req->rq_disk);
         floppy_off((long)req->rq_disk->private_data);
         blkdev_dequeue_request(req);
-        end_that_request_last(req);
+        end_that_request_last(req, uptodate);
 
         /* We're done with the request */
         current_req = NULL;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 96c664af8d06..a452b13620a2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -213,7 +213,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
         struct address_space_operations *aops = mapping->a_ops;
         pgoff_t index;
         unsigned offset, bv_offs;
-        int len, ret = 0;
+        int len, ret;
 
         down(&mapping->host->i_sem);
         index = pos >> PAGE_CACHE_SHIFT;
@@ -232,9 +232,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
                 page = grab_cache_page(mapping, index);
                 if (unlikely(!page))
                         goto fail;
-                if (unlikely(aops->prepare_write(file, page, offset,
-                                offset + size)))
+                ret = aops->prepare_write(file, page, offset,
+                                          offset + size);
+                if (unlikely(ret)) {
+                        if (ret == AOP_TRUNCATED_PAGE) {
+                                page_cache_release(page);
+                                continue;
+                        }
                         goto unlock;
+                }
                 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
                                 bvec->bv_page, bv_offs, size, IV);
                 if (unlikely(transfer_result)) {
@@ -251,9 +257,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
                         kunmap_atomic(kaddr, KM_USER0);
                 }
                 flush_dcache_page(page);
-                if (unlikely(aops->commit_write(file, page, offset,
-                                offset + size)))
+                ret = aops->commit_write(file, page, offset,
+                                         offset + size);
+                if (unlikely(ret)) {
+                        if (ret == AOP_TRUNCATED_PAGE) {
+                                page_cache_release(page);
+                                continue;
+                        }
                         goto unlock;
+                }
                 if (unlikely(transfer_result))
                         goto unlock;
                 bv_offs += size;
@@ -264,6 +276,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
                 unlock_page(page);
                 page_cache_release(page);
         }
+        ret = 0;
 out:
         up(&mapping->host->i_sem);
         return ret;
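
The loop.c hunks above follow the new address_space_operations convention: prepare_write()/commit_write() may return AOP_TRUNCATED_PAGE after dropping the page lock, and the caller must release its page reference and retry. A condensed sketch of that contract, under the same assumptions as the patch:

        ret = aops->prepare_write(file, page, offset, offset + size);
        if (unlikely(ret)) {
                if (ret == AOP_TRUNCATED_PAGE) {
                        page_cache_release(page);  /* page was truncated under us */
                        continue;                  /* re-grab the page and retry */
                }
                goto unlock;                       /* any other error is fatal */
        }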
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9e268ddedfbd..33d6f237b2ed 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,11 +54,15 @@
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/ioctl.h>
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
 #include <net/sock.h>
 
 #include <linux/devfs_fs_kernel.h>
 
 #include <asm/uaccess.h>
+#include <asm/system.h>
 #include <asm/types.h>
 
 #include <linux/nbd.h>
@@ -136,7 +140,7 @@ static void nbd_end_request(struct request *req)
 
         spin_lock_irqsave(q->queue_lock, flags);
         if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-                end_that_request_last(req);
+                end_that_request_last(req, uptodate);
         }
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -230,14 +234,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
         request.len = htonl(size);
         memcpy(request.handle, &req, sizeof(req));
 
-        down(&lo->tx_lock);
-
-        if (!sock || !lo->sock) {
-                printk(KERN_ERR "%s: Attempted send on closed socket\n",
-                       lo->disk->disk_name);
-                goto error_out;
-        }
-
         dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
                         lo->disk->disk_name, req,
                         nbdcmd_to_ascii(nbd_cmd(req)),
@@ -276,11 +272,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
                         }
                 }
         }
-        up(&lo->tx_lock);
         return 0;
 
 error_out:
-        up(&lo->tx_lock);
         return 1;
 }
 
@@ -289,9 +283,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
         struct request *req;
         struct list_head *tmp;
         struct request *xreq;
+        int err;
 
         memcpy(&xreq, handle, sizeof(xreq));
 
+        err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
+        if (unlikely(err))
+                goto out;
+
         spin_lock(&lo->queue_lock);
         list_for_each(tmp, &lo->queue_head) {
                 req = list_entry(tmp, struct request, queuelist);
@@ -302,7 +301,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
                         return req;
                 }
         spin_unlock(&lo->queue_lock);
-        return NULL;
+
+        err = -ENOENT;
+
+out:
+        return ERR_PTR(err);
 }
 
 static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
@@ -331,7 +334,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
                 goto harderror;
         }
         req = nbd_find_request(lo, reply.handle);
-        if (req == NULL) {
+        if (unlikely(IS_ERR(req))) {
+                result = PTR_ERR(req);
+                if (result != -ENOENT)
+                        goto harderror;
+
                 printk(KERN_ERR "%s: Unexpected reply (%p)\n",
                                 lo->disk->disk_name, reply.handle);
                 result = -EBADR;
@@ -395,19 +402,24 @@ static void nbd_clear_que(struct nbd_device *lo)
 
         BUG_ON(lo->magic != LO_MAGIC);
 
-        do {
-                req = NULL;
-                spin_lock(&lo->queue_lock);
-                if (!list_empty(&lo->queue_head)) {
-                        req = list_entry(lo->queue_head.next, struct request, queuelist);
-                        list_del_init(&req->queuelist);
-                }
-                spin_unlock(&lo->queue_lock);
-                if (req) {
-                        req->errors++;
-                        nbd_end_request(req);
-                }
-        } while (req);
+        /*
+         * Because we have set lo->sock to NULL under the tx_lock, all
+         * modifications to the list must have completed by now. For
+         * the same reason, the active_req must be NULL.
+         *
+         * As a consequence, we don't need to take the spin lock while
+         * purging the list here.
+         */
+        BUG_ON(lo->sock);
+        BUG_ON(lo->active_req);
+
+        while (!list_empty(&lo->queue_head)) {
+                req = list_entry(lo->queue_head.next, struct request,
+                                 queuelist);
+                list_del_init(&req->queuelist);
+                req->errors++;
+                nbd_end_request(req);
+        }
 }
 
 /*
@@ -435,11 +447,6 @@ static void do_nbd_request(request_queue_t * q)
 
                 BUG_ON(lo->magic != LO_MAGIC);
 
-                if (!lo->file) {
-                        printk(KERN_ERR "%s: Request when not-ready\n",
-                                        lo->disk->disk_name);
-                        goto error_out;
-                }
                 nbd_cmd(req) = NBD_CMD_READ;
                 if (rq_data_dir(req) == WRITE) {
                         nbd_cmd(req) = NBD_CMD_WRITE;
@@ -453,32 +460,34 @@ static void do_nbd_request(request_queue_t * q)
                 req->errors = 0;
                 spin_unlock_irq(q->queue_lock);
 
-                spin_lock(&lo->queue_lock);
-
-                if (!lo->file) {
-                        spin_unlock(&lo->queue_lock);
-                        printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n",
-                                        lo->disk->disk_name);
+                down(&lo->tx_lock);
+                if (unlikely(!lo->sock)) {
+                        up(&lo->tx_lock);
+                        printk(KERN_ERR "%s: Attempted send on closed socket\n",
+                               lo->disk->disk_name);
                         req->errors++;
                         nbd_end_request(req);
                         spin_lock_irq(q->queue_lock);
                         continue;
                 }
 
-                list_add(&req->queuelist, &lo->queue_head);
-                spin_unlock(&lo->queue_lock);
+                lo->active_req = req;
 
                 if (nbd_send_req(lo, req) != 0) {
                         printk(KERN_ERR "%s: Request send failed\n",
                                         lo->disk->disk_name);
-                        if (nbd_find_request(lo, (char *)&req) != NULL) {
-                                /* we still own req */
-                                req->errors++;
-                                nbd_end_request(req);
-                        } else /* we're racing with nbd_clear_que */
-                                printk(KERN_DEBUG "nbd: can't find req\n");
+                        req->errors++;
+                        nbd_end_request(req);
+                } else {
+                        spin_lock(&lo->queue_lock);
+                        list_add(&req->queuelist, &lo->queue_head);
+                        spin_unlock(&lo->queue_lock);
                 }
 
+                lo->active_req = NULL;
+                up(&lo->tx_lock);
+                wake_up_all(&lo->active_wq);
+
                 spin_lock_irq(q->queue_lock);
                 continue;
 
@@ -529,17 +538,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
                 down(&lo->tx_lock);
                 lo->sock = NULL;
                 up(&lo->tx_lock);
-                spin_lock(&lo->queue_lock);
                 file = lo->file;
                 lo->file = NULL;
-                spin_unlock(&lo->queue_lock);
                 nbd_clear_que(lo);
-                spin_lock(&lo->queue_lock);
-                if (!list_empty(&lo->queue_head)) {
-                        printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
-                        error = -EBUSY;
-                }
-                spin_unlock(&lo->queue_lock);
+                BUG_ON(!list_empty(&lo->queue_head));
                 if (file)
                         fput(file);
                 return error;
@@ -598,24 +600,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
                         lo->sock = NULL;
                 }
                 up(&lo->tx_lock);
-                spin_lock(&lo->queue_lock);
                 file = lo->file;
                 lo->file = NULL;
-                spin_unlock(&lo->queue_lock);
                 nbd_clear_que(lo);
                 printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
                 if (file)
                         fput(file);
                 return lo->harderror;
         case NBD_CLEAR_QUE:
-                down(&lo->tx_lock);
-                if (lo->sock) {
-                        up(&lo->tx_lock);
-                        return 0; /* probably should be error, but that would
-                                   * break "nbd-client -d", so just return 0 */
-                }
-                up(&lo->tx_lock);
-                nbd_clear_que(lo);
+                /*
+                 * This is for compatibility only. The queue is always cleared
+                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
+                 */
+                BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
                 return 0;
         case NBD_PRINT_DEBUG:
                 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
@@ -688,6 +685,7 @@ static int __init nbd_init(void)
                 spin_lock_init(&nbd_dev[i].queue_lock);
                 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                 init_MUTEX(&nbd_dev[i].tx_lock);
+                init_waitqueue_head(&nbd_dev[i].active_wq);
                 nbd_dev[i].blksize = 1024;
                 nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
                 disk->major = NBD_MAJOR;
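
Taken together, the nbd.c hunks replace the old queue_lock checks with a sender/receiver handshake built on lo->active_req and lo->active_wq. A condensed sketch of the protocol introduced above (names as in the diff):

        /* sender (do_nbd_request), holding lo->tx_lock: */
        lo->active_req = req;       /* publish the request being transmitted */
        /* ... nbd_send_req(lo, req) ... */
        lo->active_req = NULL;
        up(&lo->tx_lock);
        wake_up_all(&lo->active_wq);

        /* receiver (nbd_find_request): wait until that request is off the wire */
        wait_event_interruptible(lo->active_wq, lo->active_req != xreq);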
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 17ff40561257..c0d2854dd097 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -4,11 +4,12 @@
 # PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
 # PARIDE must also be a module.  The bogus CONFIG_PARIDE_PARPORT option
 # controls the choices given to the user ...
+# PARIDE only supports PC style parports. Tough for USB or other parports...
 config PARIDE_PARPORT
         tristate
         depends on PARIDE!=n
-        default m if PARPORT=m
-        default y if PARPORT!=m
+        default m if PARPORT_PC=m
+        default y if PARPORT_PC!=m
 
 comment "Parallel IDE high-level drivers"
         depends on PARIDE
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 68c60a5bcdab..ffd6abd6d5a0 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -154,7 +154,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
 
 /*
  * ->writepage to the the blockdev's mapping has to redirty the page so that the
- * VM doesn't go and steal it.  We return WRITEPAGE_ACTIVATE so that the VM
+ * VM doesn't go and steal it.  We return AOP_WRITEPAGE_ACTIVATE so that the VM
  * won't try to (pointlessly) write the page again for a while.
  *
  * Really, these pages should not be on the LRU at all.
@@ -165,7 +165,7 @@ static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
         make_page_uptodate(page);
         SetPageDirty(page);
         if (wbc->for_reclaim)
-                return WRITEPAGE_ACTIVATE;
+                return AOP_WRITEPAGE_ACTIVATE;
         unlock_page(page);
         return 0;
 }
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 1ded3b433459..9251f4131b53 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
         rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
         assert(rc == 0);
 
-        end_that_request_last(req);
+        end_that_request_last(req, uptodate);
 
         rc = carm_put_request(host, crq);
         assert(rc == 0);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 10740a065088..a05fe5843e6c 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 static void ub_end_rq(struct request *rq, int uptodate)
 {
         end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-        end_that_request_last(rq);
+        end_that_request_last(rq, uptodate);
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 2d518aa2720a..063f0304a163 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate,
         if (end_that_request_first(req, uptodate, num_sectors))
                 return;
         add_disk_randomness(req->rq_disk);
-        end_that_request_last(req);
+        end_that_request_last(req, uptodate);
 }
 
 /*
diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index ac96de15d833..378e88d20757 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q)
                 if (!end_that_request_first(req, 1, nblock)) {
                         spin_lock_irq(q->queue_lock);
                         blkdev_dequeue_request(req);
-                        end_that_request_last(req);
+                        end_that_request_last(req, 1);
                         spin_unlock_irq(q->queue_lock);
                 }
                 continue;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 84e68cdd451b..5ebd06b1b4ca 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -985,7 +985,7 @@ config HPET_MMAP
 
 config HANGCHECK_TIMER
         tristate "Hangcheck timer"
-        depends on X86 || IA64 || PPC64 || ARCH_S390
+        depends on X86 || IA64 || PPC64 || S390
         help
           The hangcheck-timer module detects when the system has gone
           out to lunch past a certain margin. It can reboot the system
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 66e53dd450ff..40a67c86420c 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -120,7 +120,7 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
 #if defined(CONFIG_X86)
 # define HAVE_MONOTONIC
 # define TIMER_FREQ 1000000000ULL
-#elif defined(CONFIG_ARCH_S390)
+#elif defined(CONFIG_S390)
 /* FA240000 is 1 Second in the IBM time universe (Page 4-38 Principles of Op for zSeries */
 # define TIMER_FREQ 0xFA240000ULL
 #elif defined(CONFIG_IA64)
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
index 6f673d2de0b1..49769f59ea1b 100644
--- a/drivers/char/hw_random.c
+++ b/drivers/char/hw_random.c
@@ -1,4 +1,9 @@
 /*
+        Added support for the AMD Geode LX RNG
+        (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
+
+        derived from
+
         Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
         (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
 
@@ -95,6 +100,11 @@ static unsigned int via_data_present (void);
 static u32 via_data_read (void);
 #endif
 
+static int __init geode_init(struct pci_dev *dev);
+static void geode_cleanup(void);
+static unsigned int geode_data_present (void);
+static u32 geode_data_read (void);
+
 struct rng_operations {
         int (*init) (struct pci_dev *dev);
         void (*cleanup) (void);
@@ -122,6 +132,7 @@ enum {
         rng_hw_intel,
         rng_hw_amd,
         rng_hw_via,
+        rng_hw_geode,
 };
 
 static struct rng_operations rng_vendor_ops[] = {
@@ -139,6 +150,9 @@ static struct rng_operations rng_vendor_ops[] = {
         /* rng_hw_via */
         { via_init, via_cleanup, via_data_present, via_data_read, 1 },
 #endif
+
+        /* rng_hw_geode */
+        { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 }
 };
 
 /*
@@ -159,6 +173,9 @@ static struct pci_device_id rng_pci_tbl[] = {
         { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
         { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
 
+        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
+          PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode },
+
         { 0, },  /* terminate list */
 };
 MODULE_DEVICE_TABLE (pci, rng_pci_tbl);
@@ -460,6 +477,57 @@ static void via_cleanup(void)
 }
 #endif
 
+/***********************************************************************
+ *
+ * AMD Geode RNG operations
+ *
+ */
+
+static void __iomem *geode_rng_base = NULL;
+
+#define GEODE_RNG_DATA_REG   0x50
+#define GEODE_RNG_STATUS_REG 0x54
+
+static u32 geode_data_read(void)
+{
+        u32 val;
+
+        assert(geode_rng_base != NULL);
+        val = readl(geode_rng_base + GEODE_RNG_DATA_REG);
+        return val;
+}
+
+static unsigned int geode_data_present(void)
+{
+        u32 val;
+
+        assert(geode_rng_base != NULL);
+        val = readl(geode_rng_base + GEODE_RNG_STATUS_REG);
+        return val;
+}
+
+static void geode_cleanup(void)
+{
+        iounmap(geode_rng_base);
+        geode_rng_base = NULL;
+}
+
+static int geode_init(struct pci_dev *dev)
+{
+        unsigned long rng_base = pci_resource_start(dev, 0);
+
+        if (rng_base == 0)
+                return 1;
+
+        geode_rng_base = ioremap(rng_base, 0x58);
+
+        if (geode_rng_base == NULL) {
+                printk(KERN_ERR PFX "Cannot ioremap RNG memory\n");
+                return -EBUSY;
+        }
+
+        return 0;
+}
 
 /***********************************************************************
  *
@@ -574,7 +642,7 @@ static int __init rng_init (void)
 
         DPRINTK ("ENTER\n");
 
-        /* Probe for Intel, AMD RNGs */
+        /* Probe for Intel, AMD, Geode RNGs */
         for_each_pci_dev(pdev) {
                 ent = pci_match_id(rng_pci_tbl, pdev);
                 if (ent) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 1f56b4cf0f58..561430ed94af 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -787,7 +787,6 @@ int ipmi_destroy_user(ipmi_user_t user)
         int i;
         unsigned long flags;
         struct cmd_rcvr *rcvr;
-        struct list_head *entry1, *entry2;
         struct cmd_rcvr *rcvrs = NULL;
 
         user->valid = 1;
@@ -812,8 +811,7 @@ int ipmi_destroy_user(ipmi_user_t user)
          * synchronize_rcu()) then free everything in that list.
          */
         down(&intf->cmd_rcvrs_lock);
-        list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) {
-                rcvr = list_entry(entry1, struct cmd_rcvr, link);
+        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
                 if (rcvr->user == user) {
                         list_del_rcu(&rcvr->link);
                         rcvr->next = rcvrs;
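
For reference, list_for_each_entry_rcu() hands back the typed container directly, which is why the two temporary list_head pointers could be deleted in the first hunk. A minimal sketch of the idiom:

        struct cmd_rcvr *rcvr;

        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
                /* rcvr already points at the struct cmd_rcvr; no list_entry() needed */
        }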
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 344001b45af9..a6544790af60 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -438,7 +438,7 @@ config INDYDOG
 
 config ZVM_WATCHDOG
         tristate "z/VM Watchdog Timer"
-        depends on WATCHDOG && ARCH_S390
+        depends on WATCHDOG && S390
         help
           IBM s/390 and zSeries machines running under z/VM 5.1 or later
           provide a virtual watchdog timer to their guest that cause a
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 70aeb3a60120..d31117eb95aa 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
          */
         spin_lock_irqsave(&ide_lock, flags);
         end_that_request_chunk(failed, 0, failed->data_len);
-        end_that_request_last(failed);
+        end_that_request_last(failed, 0);
         spin_unlock_irqrestore(&ide_lock, flags);
         }
 
@@ -1735,7 +1735,7 @@ end_request:
 
         spin_lock_irqsave(&ide_lock, flags);
         blkdev_dequeue_request(rq);
-        end_that_request_last(rq);
+        end_that_request_last(rq, 1);
         HWGROUP(drive)->rq = NULL;
         spin_unlock_irqrestore(&ide_lock, flags);
         return ide_stopped;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4e5767968d7f..4b441720b6ba 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
 
 #endif  /* CONFIG_PROC_FS */
 
-static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq)
+static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 {
         ide_drive_t *drive = q->queuedata;
-        struct request *rq = flush_rq->end_io_data;
-        int good_sectors = rq->hard_nr_sectors;
-        int bad_sectors;
-        sector_t sector;
-
-        if (flush_rq->errors & ABRT_ERR) {
-                printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
-                blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
-                blk_queue_issue_flush_fn(drive->queue, NULL);
-                good_sectors = 0;
-        } else if (flush_rq->errors) {
-                good_sectors = 0;
-                if (blk_barrier_preflush(rq)) {
-                        sector = ide_get_error_location(drive,flush_rq->buffer);
-                        if ((sector >= rq->hard_sector) &&
-                            (sector < rq->hard_sector + rq->hard_nr_sectors))
-                                good_sectors = sector - rq->hard_sector;
-                }
-        }
-
-        if (flush_rq->errors)
-                printk(KERN_ERR "%s: failed barrier write: "
-                                "sector=%Lx(good=%d/bad=%d)\n",
-                                drive->name, (unsigned long long)rq->sector,
-                                good_sectors,
-                                (int) (rq->hard_nr_sectors-good_sectors));
-
-        bad_sectors = rq->hard_nr_sectors - good_sectors;
-
-        if (good_sectors)
-                __ide_end_request(drive, rq, 1, good_sectors);
-        if (bad_sectors)
-                __ide_end_request(drive, rq, 0, bad_sectors);
-}
-
-static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
-{
-        ide_drive_t *drive = q->queuedata;
-
-        if (!drive->wcache)
-                return 0;
 
         memset(rq->cmd, 0, sizeof(rq->cmd));
 
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
         rq->cmd[0] = WIN_FLUSH_CACHE;
 
 
-        rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER;
+        rq->flags |= REQ_DRIVE_TASK;
         rq->buffer = rq->cmd;
-        return 1;
 }
 
 static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
         return 0;
 }
 
+static void update_ordered(ide_drive_t *drive)
+{
+        struct hd_driveid *id = drive->id;
+        unsigned ordered = QUEUE_ORDERED_NONE;
+        prepare_flush_fn *prep_fn = NULL;
+        issue_flush_fn *issue_fn = NULL;
+
+        if (drive->wcache) {
+                unsigned long long capacity;
+                int barrier;
+                /*
+                 * We must avoid issuing commands a drive does not
+                 * understand or we may crash it. We check flush cache
+                 * is supported. We also check we have the LBA48 flush
+                 * cache if the drive capacity is too large. By this
+                 * time we have trimmed the drive capacity if LBA48 is
+                 * not available so we don't need to recheck that.
+                 */
+                capacity = idedisk_capacity(drive);
+                barrier = ide_id_has_flush_cache(id) &&
+                        (drive->addressing == 0 || capacity <= (1ULL << 28) ||
+                         ide_id_has_flush_cache_ext(id));
+
+                printk(KERN_INFO "%s: cache flushes %ssupported\n",
+                       drive->name, barrier ? "" : "not ");
+
+                if (barrier) {
+                        ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+                        prep_fn = idedisk_prepare_flush;
+                        issue_fn = idedisk_issue_flush;
+                }
+        } else
+                ordered = QUEUE_ORDERED_DRAIN;
+
+        blk_queue_ordered(drive->queue, ordered, prep_fn);
+        blk_queue_issue_flush_fn(drive->queue, issue_fn);
+}
+
 static int write_cache(ide_drive_t *drive, int arg)
 {
         ide_task_t args;
-        int err;
-
-        if (!ide_id_has_flush_cache(drive->id))
-                return 1;
+        int err = 1;
 
-        memset(&args, 0, sizeof(ide_task_t));
-        args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
+        if (ide_id_has_flush_cache(drive->id)) {
+                memset(&args, 0, sizeof(ide_task_t));
+                args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
                         SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
                 args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
                 args.command_type = IDE_DRIVE_TASK_NO_DATA;
                 args.handler = &task_no_data_intr;
+                err = ide_raw_taskfile(drive, &args, NULL);
+                if (err == 0)
+                        drive->wcache = arg;
+        }
 
-        err = ide_raw_taskfile(drive, &args, NULL);
-        if (err)
-                return err;
+        update_ordered(drive);
 
-        drive->wcache = arg;
-        return 0;
+        return err;
 }
 
 static int do_idedisk_flushcache (ide_drive_t *drive)
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
 {
         struct hd_driveid *id = drive->id;
         unsigned long long capacity;
-        int barrier;
 
         idedisk_add_settings(drive);
 
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
                 drive->wcache = 1;
 
         write_cache(drive, 1);
-
-        /*
-         * We must avoid issuing commands a drive does not understand
-         * or we may crash it. We check flush cache is supported. We also
-         * check we have the LBA48 flush cache if the drive capacity is
-         * too large. By this time we have trimmed the drive capacity if
-         * LBA48 is not available so we don't need to recheck that.
-         */
-        barrier = 0;
-        if (ide_id_has_flush_cache(id))
-                barrier = 1;
-        if (drive->addressing == 1) {
-                /* Can't issue the correct flush ? */
-                if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
-                        barrier = 0;
-        }
-
-        printk(KERN_INFO "%s: cache flushes %ssupported\n",
-                drive->name, barrier ? "" : "not ");
-        if (barrier) {
-                blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
-                drive->queue->prepare_flush_fn = idedisk_prepare_flush;
-                drive->queue->end_flush_fn = idedisk_end_flush;
-                blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
-        }
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
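
The new update_ordered() reflects the reworked barrier interface visible throughout this merge: blk_queue_ordered() now takes the ordered mode together with the prepare_flush callback, replacing the prepare_flush_fn/end_flush_fn queue fields poked directly in the removed hunk. A minimal sketch of the two resulting configurations (illustrative only):

        if (barrier)    /* drive can flush its write cache */
                blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, idedisk_prepare_flush);
        else            /* order writes by draining the queue only */
                blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);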
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ecfafcdafea4..b5dc6df8e67d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 
         blkdev_dequeue_request(rq);
         HWGROUP(drive)->rq = NULL;
-        end_that_request_last(rq);
+        end_that_request_last(rq, uptodate);
         ret = 0;
         }
         return ret;
@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
         if (!nr_sectors)
                 nr_sectors = rq->hard_cur_sectors;
 
-        if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors))
-                ret = rq->nr_sectors != 0;
-        else
-                ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+        ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
 
         spin_unlock_irqrestore(&ide_lock, flags);
         return ret;
@@ -247,7 +244,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
         }
         blkdev_dequeue_request(rq);
         HWGROUP(drive)->rq = NULL;
-        end_that_request_last(rq);
+        end_that_request_last(rq, 1);
         spin_unlock_irqrestore(&ide_lock, flags);
 }
 
@@ -379,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
         blkdev_dequeue_request(rq);
         HWGROUP(drive)->rq = NULL;
         rq->errors = err;
-        end_that_request_last(rq);
+        end_that_request_last(rq, !rq->errors);
         spin_unlock_irqrestore(&ide_lock, flags);
 }
 
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 25103a0ef9b3..39142e2f804b 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -169,27 +169,4 @@ config IEEE1394_RAWIO
           To compile this driver as a module, say M here: the
           module will be called raw1394.
 
-config IEEE1394_CMP
-        tristate "IEC61883-1 Plug support"
-        depends on IEEE1394
-        help
-          This option enables the Connection Management Procedures
-          (IEC61883-1) driver, which implements input and output plugs.
-
-          To compile this driver as a module, say M here: the
-          module will be called cmp.
-
-config IEEE1394_AMDTP
-        tristate "IEC61883-6 (Audio transmission) support"
-        depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
-        help
-          This option enables the Audio & Music Data Transmission Protocol
-          (IEC61883-6) driver, which implements audio transmission over
-          IEEE1394.
-
-          The userspace interface is documented in amdtp.h.
-
-          To compile this driver as a module, say M here: the
-          module will be called amdtp.
-
 endmenu
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
index e8b4d48d376e..6f53611fe255 100644
--- a/drivers/ieee1394/Makefile
+++ b/drivers/ieee1394/Makefile
@@ -14,8 +14,6 @@ obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
 obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
 obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
 obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
-obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
-obj-$(CONFIG_IEEE1394_CMP) += cmp.o
 
 quiet_cmd_oui2c = OUI2C $@
       cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index 61ddd5d37eff..15773544234b 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -1261,7 +1261,7 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
                 return CSR1212_EINVAL;
 #endif
 
-        cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+        cr = CSR1212_MALLOC(sizeof(*cr));
         if (!cr)
                 return CSR1212_ENOMEM;
 
@@ -1393,8 +1393,7 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
         case CSR1212_KV_TYPE_LEAF:
                 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
                         kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
-                        if (!kv->value.leaf.data)
-                        {
+                        if (!kv->value.leaf.data) {
                                 ret = CSR1212_ENOMEM;
                                 goto fail;
                         }
@@ -1462,7 +1461,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
                 cache->next = NULL;
                 csr->cache_tail = cache;
                 cache->filled_head =
-                        CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+                        CSR1212_MALLOC(sizeof(*cache->filled_head));
                 if (!cache->filled_head) {
                         return CSR1212_ENOMEM;
                 }
@@ -1484,7 +1483,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1484 /* Now seach read portions of the cache to see if it is there. */ 1483 /* Now seach read portions of the cache to see if it is there. */
1485 for (cr = cache->filled_head; cr; cr = cr->next) { 1484 for (cr = cache->filled_head; cr; cr = cr->next) {
1486 if (cache_index < cr->offset_start) { 1485 if (cache_index < cr->offset_start) {
1487 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region)); 1486 newcr = CSR1212_MALLOC(sizeof(*newcr));
1488 if (!newcr) 1487 if (!newcr)
1489 return CSR1212_ENOMEM; 1488 return CSR1212_ENOMEM;
1490 1489
@@ -1508,7 +1507,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
 
         if (!cr) {
                 cr = cache->filled_tail;
-                newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+                newcr = CSR1212_MALLOC(sizeof(*newcr));
                 if (!newcr)
                         return CSR1212_ENOMEM;
 
@@ -1611,15 +1610,17 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
         csr->root_kv->valid = 0;
         csr->root_kv->next = csr->root_kv;
         csr->root_kv->prev = csr->root_kv;
-        csr1212_get_keyval(csr, csr->root_kv);
+        ret = _csr1212_read_keyval(csr, csr->root_kv);
+        if (ret != CSR1212_SUCCESS)
+                return ret;
 
         /* Scan through the Root directory finding all extended ROM regions
          * and make cache regions for them */
         for (dentry = csr->root_kv->value.directory.dentries_head;
              dentry; dentry = dentry->next) {
-                if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
-                        csr1212_get_keyval(csr, dentry->kv);
-
+                if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
+                    !dentry->kv->valid) {
+                        ret = _csr1212_read_keyval(csr, dentry->kv);
                         if (ret != CSR1212_SUCCESS)
                                 return ret;
                 }
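
The allocation hunks above switch to the kernel's sizeof(*ptr) idiom, which keeps the allocation size correct even if the pointer's type is later changed. An illustrative before/after:

        cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));  /* old: type spelled out */
        cr = CSR1212_MALLOC(sizeof(*cr));                          /* new: follows the pointer */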
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index 28c5f4b726e2..cecd5871f2de 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -646,7 +646,7 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o
 {
         struct csr1212_csr_rom_cache *cache;
 
-        cache = CSR1212_MALLOC(sizeof(struct csr1212_csr_rom_cache) + size);
+        cache = CSR1212_MALLOC(sizeof(*cache) + size);
         if (!cache)
                 return NULL;
 
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index b79ddb43e746..9fb2769d9abc 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -23,7 +23,8 @@ void dma_prog_region_init(struct dma_prog_region *prog)
         prog->bus_addr = 0;
 }
 
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
+int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
+                          struct pci_dev *dev)
 {
         /* round up to page size */
         n_bytes = PAGE_ALIGN(n_bytes);
@@ -32,7 +33,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
 
         prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
         if (!prog->kvirt) {
-                printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
+                printk(KERN_ERR
+                       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
                 dma_prog_region_free(prog);
                 return -ENOMEM;
         }
@@ -45,7 +47,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
 void dma_prog_region_free(struct dma_prog_region *prog)
 {
         if (prog->kvirt) {
-                pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
+                pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
+                                    prog->kvirt, prog->bus_addr);
         }
 
         prog->kvirt = NULL;
@@ -65,7 +68,8 @@ void dma_region_init(struct dma_region *dma)
         dma->sglist = NULL;
 }
 
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
+                     struct pci_dev *dev, int direction)
 {
         unsigned int i;
 
@@ -95,14 +99,16 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
 
         /* fill scatter/gather list with pages */
         for (i = 0; i < dma->n_pages; i++) {
-                unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
+                unsigned long va =
+                    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
 
                 dma->sglist[i].page = vmalloc_to_page((void *)va);
                 dma->sglist[i].length = PAGE_SIZE;
         }
 
         /* map sglist to the IOMMU */
-        dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
+        dma->n_dma_pages =
+            pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
 
         if (dma->n_dma_pages == 0) {
                 printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
@@ -114,7 +120,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
 
         return 0;
 
-err:
+      err:
         dma_region_free(dma);
         return -ENOMEM;
 }
@@ -122,7 +128,8 @@ err:
 void dma_region_free(struct dma_region *dma)
 {
         if (dma->n_dma_pages) {
-                pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
+                pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
+                             dma->direction);
                 dma->n_dma_pages = 0;
                 dma->dev = NULL;
         }
@@ -137,7 +144,8 @@ void dma_region_free(struct dma_region *dma)
 
 /* find the scatterlist index and remaining offset corresponding to a
    given offset from the beginning of the buffer */
-static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
+static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
+                                  unsigned long *rem)
 {
         int i;
         unsigned long off = offset;
@@ -156,15 +164,18 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
         return i;
 }
 
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
+dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
+                                    unsigned long offset)
 {
         unsigned long rem = 0;
 
-        struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
+        struct scatterlist *sg =
+            &dma->sglist[dma_region_find(dma, offset, &rem)];
         return sg_dma_address(sg) + rem;
 }
 
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
+                             unsigned long len)
 {
         int first, last;
         unsigned long rem;
@@ -175,10 +186,12 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsig
175 first = dma_region_find(dma, offset, &rem); 186 first = dma_region_find(dma, offset, &rem);
176 last = dma_region_find(dma, offset + len - 1, &rem); 187 last = dma_region_find(dma, offset + len - 1, &rem);
177 188
178 pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction); 189 pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
190 dma->direction);
179} 191}
180 192
181void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len) 193void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
194 unsigned long len)
182{ 195{
183 int first, last; 196 int first, last;
184 unsigned long rem; 197 unsigned long rem;
@@ -189,44 +202,47 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, un
189 first = dma_region_find(dma, offset, &rem); 202 first = dma_region_find(dma, offset, &rem);
190 last = dma_region_find(dma, offset + len - 1, &rem); 203 last = dma_region_find(dma, offset + len - 1, &rem);
191 204
192 pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction); 205 pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
206 last - first + 1, dma->direction);
193} 207}
194 208
195#ifdef CONFIG_MMU 209#ifdef CONFIG_MMU
196 210
197/* nopage() handler for mmap access */ 211/* nopage() handler for mmap access */
198 212
199static struct page* 213static struct page *dma_region_pagefault(struct vm_area_struct *area,
200dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type) 214 unsigned long address, int *type)
201{ 215{
202 unsigned long offset; 216 unsigned long offset;
203 unsigned long kernel_virt_addr; 217 unsigned long kernel_virt_addr;
204 struct page *ret = NOPAGE_SIGBUS; 218 struct page *ret = NOPAGE_SIGBUS;
205 219
206 struct dma_region *dma = (struct dma_region*) area->vm_private_data; 220 struct dma_region *dma = (struct dma_region *)area->vm_private_data;
207 221
208 if (!dma->kvirt) 222 if (!dma->kvirt)
209 goto out; 223 goto out;
210 224
211 if ( (address < (unsigned long) area->vm_start) || 225 if ((address < (unsigned long)area->vm_start) ||
212 (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) ) 226 (address >
227 (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
213 goto out; 228 goto out;
214 229
215 if (type) 230 if (type)
216 *type = VM_FAULT_MINOR; 231 *type = VM_FAULT_MINOR;
217 offset = address - area->vm_start; 232 offset = address - area->vm_start;
218 kernel_virt_addr = (unsigned long) dma->kvirt + offset; 233 kernel_virt_addr = (unsigned long)dma->kvirt + offset;
219 ret = vmalloc_to_page((void*) kernel_virt_addr); 234 ret = vmalloc_to_page((void *)kernel_virt_addr);
220 get_page(ret); 235 get_page(ret);
221out: 236 out:
222 return ret; 237 return ret;
223} 238}
224 239
225static struct vm_operations_struct dma_region_vm_ops = { 240static struct vm_operations_struct dma_region_vm_ops = {
226 .nopage = dma_region_pagefault, 241 .nopage = dma_region_pagefault,
227}; 242};
228 243
229int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma) 244int dma_region_mmap(struct dma_region *dma, struct file *file,
245 struct vm_area_struct *vma)
230{ 246{
231 unsigned long size; 247 unsigned long size;
232 248
@@ -250,11 +266,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st
250 return 0; 266 return 0;
251} 267}
252 268
253#else /* CONFIG_MMU */ 269#else /* CONFIG_MMU */
254 270
255int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma) 271int dma_region_mmap(struct dma_region *dma, struct file *file,
272 struct vm_area_struct *vma)
256{ 273{
257 return -EINVAL; 274 return -EINVAL;
258} 275}
259 276
260#endif /* CONFIG_MMU */ 277#endif /* CONFIG_MMU */
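
All of the reflowed dma.c code above hangs off dma_region_find(): it walks the scatterlist, subtracting each entry's length from the requested byte offset until the offset falls inside one entry, and dma_region_offset_to_bus() then adds the leftover to that entry's sg_dma_address(). A minimal userspace sketch of that walk, where sg_entry is a stand-in for the kernel's struct scatterlist and the loop body is a plausible reconstruction of the part this hunk elides:

#include <stdio.h>

/* Stand-in for the kernel's scatterlist entry; for the offset walk
 * only the per-entry length matters (an assumption of this sketch). */
struct sg_entry { unsigned long length; };

/* Same idea as dma_region_find(): return the index of the entry that
 * contains 'offset', and the leftover offset within that entry via
 * *rem. */
static int region_find(const struct sg_entry *sg, int n,
                       unsigned long offset, unsigned long *rem)
{
        int i;
        unsigned long off = offset;

        for (i = 0; i < n - 1; i++) {
                if (off < sg[i].length)
                        break;
                off -= sg[i].length;
        }
        *rem = off;
        return i;
}

int main(void)
{
        struct sg_entry sg[3] = { {4096}, {4096}, {4096} };
        unsigned long rem;
        int idx = region_find(sg, 3, 5000, &rem);

        /* prints: offset 5000 -> entry 1, remainder 904 */
        printf("offset 5000 -> entry %d, remainder %lu\n", idx, rem);
        return 0;
}

The bus address handed back to callers is then sg_dma_address(sg) + rem for the entry found, exactly as dma_region_offset_to_bus() does above.
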
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cbbbe14b8849..196db7439272 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -123,15 +123,6 @@
123 123
124#include "ohci1394.h" 124#include "ohci1394.h"
125 125
126#ifndef virt_to_page
127#define virt_to_page(x) MAP_NR(x)
128#endif
129
130#ifndef vmalloc_32
131#define vmalloc_32(x) vmalloc(x)
132#endif
133
134
135/* DEBUG LEVELS: 126/* DEBUG LEVELS:
136 0 - no debugging messages 127 0 - no debugging messages
137 1 - some debugging messages, but none during DMA frame transmission 128 1 - some debugging messages, but none during DMA frame transmission
@@ -2218,14 +2209,12 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
2218 unsigned long flags; 2209 unsigned long flags;
2219 int i; 2210 int i;
2220 2211
2221 video = kmalloc(sizeof(struct video_card), GFP_KERNEL); 2212 video = kzalloc(sizeof(*video), GFP_KERNEL);
2222 if (!video) { 2213 if (!video) {
2223 printk(KERN_ERR "dv1394: cannot allocate video_card\n"); 2214 printk(KERN_ERR "dv1394: cannot allocate video_card\n");
2224 goto err; 2215 goto err;
2225 } 2216 }
2226 2217
2227 memset(video, 0, sizeof(struct video_card));
2228
2229 video->ohci = ohci; 2218 video->ohci = ohci;
2230 /* lower 2 bits of id indicate which of four "plugs" 2219 /* lower 2 bits of id indicate which of four "plugs"
2231 per host */ 2220 per host */
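
The dv1394 hunk above is one instance of a conversion repeated across this merge: a kmalloc() followed by memset(..., 0, ...) collapses into a single kzalloc(), and the size argument switches from the spelled-out type to sizeof(*ptr). The shape of the change, shown as a before/after fragment rather than a standalone program (identifiers as in the driver):

/* before: two calls, and the type name spelled out twice */
video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
if (!video)
        goto err;
memset(video, 0, sizeof(struct video_card));

/* after: one call that returns zeroed memory, and sizeof(*video)
 * stays correct even if video's type is ever changed */
video = kzalloc(sizeof(*video), GFP_KERNEL);
if (!video)
        goto err;

Besides saving a call, kzalloc() removes the window in which the structure exists but still holds stale memory.
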
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index c9e92d85c893..30fa0d43a43a 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -88,9 +88,6 @@
88 printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args) 88 printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__) 89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
90 90
91static char version[] __devinitdata =
92 "$Rev: 1312 $ Ben Collins <bcollins@debian.org>";
93
94struct fragment_info { 91struct fragment_info {
95 struct list_head list; 92 struct list_head list;
96 int offset; 93 int offset;
@@ -355,12 +352,12 @@ static int eth1394_probe(struct device *dev)
355 if (!hi) 352 if (!hi)
356 return -ENOENT; 353 return -ENOENT;
357 354
358 new_node = kmalloc(sizeof(struct eth1394_node_ref), 355 new_node = kmalloc(sizeof(*new_node),
359 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); 356 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
360 if (!new_node) 357 if (!new_node)
361 return -ENOMEM; 358 return -ENOMEM;
362 359
363 node_info = kmalloc(sizeof(struct eth1394_node_info), 360 node_info = kmalloc(sizeof(*node_info),
364 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); 361 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
365 if (!node_info) { 362 if (!node_info) {
366 kfree(new_node); 363 kfree(new_node);
@@ -436,12 +433,12 @@ static int eth1394_update(struct unit_directory *ud)
436 node = eth1394_find_node(&priv->ip_node_list, ud); 433 node = eth1394_find_node(&priv->ip_node_list, ud);
437 434
438 if (!node) { 435 if (!node) {
439 node = kmalloc(sizeof(struct eth1394_node_ref), 436 node = kmalloc(sizeof(*node),
440 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); 437 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
441 if (!node) 438 if (!node)
442 return -ENOMEM; 439 return -ENOMEM;
443 440
444 node_info = kmalloc(sizeof(struct eth1394_node_info), 441 node_info = kmalloc(sizeof(*node_info),
445 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); 442 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
446 if (!node_info) { 443 if (!node_info) {
447 kfree(node); 444 kfree(node);
@@ -566,7 +563,6 @@ static void ether1394_add_host (struct hpsb_host *host)
566 struct eth1394_host_info *hi = NULL; 563 struct eth1394_host_info *hi = NULL;
567 struct net_device *dev = NULL; 564 struct net_device *dev = NULL;
568 struct eth1394_priv *priv; 565 struct eth1394_priv *priv;
569 static int version_printed = 0;
570 u64 fifo_addr; 566 u64 fifo_addr;
571 567
572 if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394)) 568 if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
@@ -581,9 +577,6 @@ static void ether1394_add_host (struct hpsb_host *host)
581 if (fifo_addr == ~0ULL) 577 if (fifo_addr == ~0ULL)
582 goto out; 578 goto out;
583 579
584 if (version_printed++ == 0)
585 ETH1394_PRINT_G (KERN_INFO, "%s\n", version);
586
587 /* We should really have our own alloc_hpsbdev() function in 580 /* We should really have our own alloc_hpsbdev() function in
588 * net_init.c instead of calling the one for ethernet then hijacking 581 * net_init.c instead of calling the one for ethernet then hijacking
589 * it for ourselves. That way we'd be a real networking device. */ 582 * it for ourselves. That way we'd be a real networking device. */
@@ -1021,7 +1014,7 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len)
1021 } 1014 }
1022 } 1015 }
1023 1016
1024 new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC); 1017 new = kmalloc(sizeof(*new), GFP_ATOMIC);
1025 if (!new) 1018 if (!new)
1026 return -ENOMEM; 1019 return -ENOMEM;
1027 1020
@@ -1040,7 +1033,7 @@ static inline int new_partial_datagram(struct net_device *dev,
1040{ 1033{
1041 struct partial_datagram *new; 1034 struct partial_datagram *new;
1042 1035
1043 new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC); 1036 new = kmalloc(sizeof(*new), GFP_ATOMIC);
1044 if (!new) 1037 if (!new)
1045 return -ENOMEM; 1038 return -ENOMEM;
1046 1039
@@ -1768,7 +1761,6 @@ fail:
1768static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1761static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1769{ 1762{
1770 strcpy (info->driver, driver_name); 1763 strcpy (info->driver, driver_name);
1771 strcpy (info->version, "$Rev: 1312 $");
1772 /* FIXME XXX provide sane businfo */ 1764 /* FIXME XXX provide sane businfo */
1773 strcpy (info->bus_info, "ieee1394"); 1765 strcpy (info->bus_info, "ieee1394");
1774} 1766}
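
The eth1394 allocations above adopt the same sizeof(*ptr) style, and choose their GFP flags with in_interrupt() ? GFP_ATOMIC : GFP_KERNEL so the allocation never sleeps when the caller is in interrupt context. Why sizeof(*ptr) is the safer spelling, as a small standalone C program (node_ref is a made-up type; the point is the size expression):

#include <stdio.h>
#include <stdlib.h>

struct node_ref { int offset; int len; };

int main(void)
{
        /* sizeof(*new) tracks the pointee's type automatically: if
         * the declaration of 'new' changes, the allocation follows. */
        struct node_ref *new = malloc(sizeof(*new));

        if (!new)
                return 1;
        printf("allocated %zu bytes\n", sizeof(*new));
        free(new);
        return 0;
}
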
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 997e1bf6297f..734b121a0554 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -101,12 +101,10 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
101 return NULL; 101 return NULL;
102 } 102 }
103 103
104 hi = kmalloc(sizeof(*hi) + data_size, GFP_ATOMIC); 104 hi = kzalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
105 if (!hi) 105 if (!hi)
106 return NULL; 106 return NULL;
107 107
108 memset(hi, 0, sizeof(*hi) + data_size);
109
110 if (data_size) { 108 if (data_size) {
111 data = hi->data = hi + 1; 109 data = hi->data = hi + 1;
112 hi->size = data_size; 110 hi->size = data_size;
@@ -326,11 +324,9 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
326 return retval; 324 return retval;
327 } 325 }
328 326
329 as = (struct hpsb_address_serve *) 327 as = kmalloc(sizeof(*as), GFP_KERNEL);
330 kmalloc(sizeof(struct hpsb_address_serve), GFP_KERNEL); 328 if (!as)
331 if (as == NULL) {
332 return retval; 329 return retval;
333 }
334 330
335 INIT_LIST_HEAD(&as->host_list); 331 INIT_LIST_HEAD(&as->host_list);
336 INIT_LIST_HEAD(&as->hl_list); 332 INIT_LIST_HEAD(&as->hl_list);
@@ -383,11 +379,9 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
383 return 0; 379 return 0;
384 } 380 }
385 381
386 as = (struct hpsb_address_serve *) 382 as = kmalloc(sizeof(*as), GFP_ATOMIC);
387 kmalloc(sizeof(struct hpsb_address_serve), GFP_ATOMIC); 383 if (!as)
388 if (as == NULL) { 384 return 0;
389 return 0;
390 }
391 385
392 INIT_LIST_HEAD(&as->host_list); 386 INIT_LIST_HEAD(&as->host_list);
393 INIT_LIST_HEAD(&as->hl_list); 387 INIT_LIST_HEAD(&as->hl_list);
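
hpsb_create_hostinfo() above allocates its bookkeeping struct and the per-host data area with one kzalloc(sizeof(*hi) + data_size, ...) and points hi->data at the first byte past the struct (hi + 1). A runnable userspace miniature of that single-allocation layout, with a hypothetical hostinfo struct and calloc() standing in for kzalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature, not the kernel's struct: header and
 * per-host data share one allocation. */
struct hostinfo {
        size_t size;
        void *data;
        /* data area follows the struct itself */
};

static struct hostinfo *create_hostinfo(size_t data_size)
{
        /* calloc() plays the role of kzalloc() here */
        struct hostinfo *hi = calloc(1, sizeof(*hi) + data_size);

        if (!hi)
                return NULL;
        if (data_size) {
                hi->data = hi + 1;      /* first byte past the header */
                hi->size = data_size;
        }
        return hi;
}

int main(void)
{
        struct hostinfo *hi = create_hostinfo(64);

        if (!hi)
                return 1;
        memset(hi->data, 0xab, hi->size);       /* private area usable */
        printf("header at %p, data at %p\n", (void *)hi, hi->data);
        free(hi);                               /* one free covers both */
        return 0;
}

One allocation means one kfree() on teardown and no separate failure path for the data area.
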
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index aeeaeb670d03..ba09741fc826 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -61,12 +61,12 @@ static void delayed_reset_bus(void * __reset_info)
61 61
62static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p) 62static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
63{ 63{
64 return 0; 64 return 0;
65} 65}
66 66
67static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg) 67static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
68{ 68{
69 return -1; 69 return -1;
70} 70}
71 71
72static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg) 72static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
@@ -75,9 +75,9 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned
75} 75}
76 76
77static struct hpsb_host_driver dummy_driver = { 77static struct hpsb_host_driver dummy_driver = {
78 .transmit_packet = dummy_transmit_packet, 78 .transmit_packet = dummy_transmit_packet,
79 .devctl = dummy_devctl, 79 .devctl = dummy_devctl,
80 .isoctl = dummy_isoctl 80 .isoctl = dummy_isoctl
81}; 81};
82 82
83static int alloc_hostnum_cb(struct hpsb_host *host, void *__data) 83static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
@@ -110,13 +110,13 @@ static DECLARE_MUTEX(host_num_alloc);
110struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, 110struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
111 struct device *dev) 111 struct device *dev)
112{ 112{
113 struct hpsb_host *h; 113 struct hpsb_host *h;
114 int i; 114 int i;
115 int hostnum = 0; 115 int hostnum = 0;
116 116
117 h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL); 117 h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
118 if (!h) return NULL; 118 if (!h)
119 memset(h, 0, sizeof(struct hpsb_host) + extra); 119 return NULL;
120 120
121 h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h); 121 h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
122 if (!h->csr.rom) { 122 if (!h->csr.rom) {
@@ -125,7 +125,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
125 } 125 }
126 126
127 h->hostdata = h + 1; 127 h->hostdata = h + 1;
128 h->driver = drv; 128 h->driver = drv;
129 129
130 skb_queue_head_init(&h->pending_packet_queue); 130 skb_queue_head_init(&h->pending_packet_queue);
131 INIT_LIST_HEAD(&h->addr_space); 131 INIT_LIST_HEAD(&h->addr_space);
@@ -145,8 +145,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
145 h->timeout.function = abort_timedouts; 145 h->timeout.function = abort_timedouts;
146 h->timeout_interval = HZ / 20; // 50ms by default 146 h->timeout_interval = HZ / 20; // 50ms by default
147 147
148 h->topology_map = h->csr.topology_map + 3; 148 h->topology_map = h->csr.topology_map + 3;
149 h->speed_map = (u8 *)(h->csr.speed_map + 2); 149 h->speed_map = (u8 *)(h->csr.speed_map + 2);
150 150
151 down(&host_num_alloc); 151 down(&host_num_alloc);
152 152
@@ -186,14 +186,14 @@ int hpsb_add_host(struct hpsb_host *host)
186 186
187void hpsb_remove_host(struct hpsb_host *host) 187void hpsb_remove_host(struct hpsb_host *host)
188{ 188{
189 host->is_shutdown = 1; 189 host->is_shutdown = 1;
190 190
191 cancel_delayed_work(&host->delayed_reset); 191 cancel_delayed_work(&host->delayed_reset);
192 flush_scheduled_work(); 192 flush_scheduled_work();
193 193
194 host->driver = &dummy_driver; 194 host->driver = &dummy_driver;
195 195
196 highlevel_remove_host(host); 196 highlevel_remove_host(host);
197 197
198 hpsb_remove_extra_config_roms(host); 198 hpsb_remove_extra_config_roms(host);
199 199
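
Note how hpsb_remove_host() above points host->driver at dummy_driver before tearing the host down: any straggling caller reaches a harmless stub instead of a driver that has already detached. The pattern in miniature (hypothetical ops table; in hosts.c the stubs return 0 for transmit and -1 for devctl):

#include <stdio.h>

struct host_ops {
        int (*transmit_packet)(int pkt);
};

static int real_transmit(int pkt)
{
        printf("sent packet %d\n", pkt);
        return 0;
}

/* stub: accept the call, do nothing */
static int dummy_transmit(int pkt)
{
        (void)pkt;
        return 0;
}

static const struct host_ops real_driver  = { real_transmit };
static const struct host_ops dummy_driver = { dummy_transmit };

int main(void)
{
        const struct host_ops *driver = &real_driver;

        driver->transmit_packet(1);     /* normal operation */
        driver = &dummy_driver;         /* host being removed */
        driver->transmit_packet(2);     /* quietly dropped, no crash */
        return 0;
}
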
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index ae9b02cc013f..07d188ca8495 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -17,47 +17,47 @@ struct hpsb_packet;
17struct hpsb_iso; 17struct hpsb_iso;
18 18
19struct hpsb_host { 19struct hpsb_host {
20 struct list_head host_list; 20 struct list_head host_list;
21 21
22 void *hostdata; 22 void *hostdata;
23 23
24 atomic_t generation; 24 atomic_t generation;
25 25
26 struct sk_buff_head pending_packet_queue; 26 struct sk_buff_head pending_packet_queue;
27 27
28 struct timer_list timeout; 28 struct timer_list timeout;
29 unsigned long timeout_interval; 29 unsigned long timeout_interval;
30 30
31 unsigned char iso_listen_count[64]; 31 unsigned char iso_listen_count[64];
32 32
33 int node_count; /* number of identified nodes on this bus */ 33 int node_count; /* number of identified nodes on this bus */
34 int selfid_count; /* total number of SelfIDs received */ 34 int selfid_count; /* total number of SelfIDs received */
35 int nodes_active; /* number of nodes that are actually active */ 35 int nodes_active; /* number of nodes that are actually active */
36 36
37 nodeid_t node_id; /* node ID of this host */ 37 nodeid_t node_id; /* node ID of this host */
38 nodeid_t irm_id; /* ID of this bus' isochronous resource manager */ 38 nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
39 nodeid_t busmgr_id; /* ID of this bus' bus manager */ 39 nodeid_t busmgr_id; /* ID of this bus' bus manager */
40 40
41 /* this node's state */ 41 /* this node's state */
42 unsigned in_bus_reset:1; 42 unsigned in_bus_reset:1;
43 unsigned is_shutdown:1; 43 unsigned is_shutdown:1;
44 unsigned resume_packet_sent:1; 44 unsigned resume_packet_sent:1;
45 45
46 /* this node's duties on the bus */ 46 /* this node's duties on the bus */
47 unsigned is_root:1; 47 unsigned is_root:1;
48 unsigned is_cycmst:1; 48 unsigned is_cycmst:1;
49 unsigned is_irm:1; 49 unsigned is_irm:1;
50 unsigned is_busmgr:1; 50 unsigned is_busmgr:1;
51 51
52 int reset_retries; 52 int reset_retries;
53 quadlet_t *topology_map; 53 quadlet_t *topology_map;
54 u8 *speed_map; 54 u8 *speed_map;
55 struct csr_control csr; 55 struct csr_control csr;
56 56
57 /* Per node tlabel pool allocation */ 57 /* Per node tlabel pool allocation */
58 struct hpsb_tlabel_pool tpool[64]; 58 struct hpsb_tlabel_pool tpool[64];
59 59
60 struct hpsb_host_driver *driver; 60 struct hpsb_host_driver *driver;
61 61
62 struct pci_dev *pdev; 62 struct pci_dev *pdev;
63 63
@@ -77,34 +77,34 @@ struct hpsb_host {
77 77
78 78
79enum devctl_cmd { 79enum devctl_cmd {
80 /* Host is requested to reset its bus and cancel all outstanding async 80 /* Host is requested to reset its bus and cancel all outstanding async
81 * requests. If arg == 1, it shall also attempt to become root on the 81 * requests. If arg == 1, it shall also attempt to become root on the
82 * bus. Return void. */ 82 * bus. Return void. */
83 RESET_BUS, 83 RESET_BUS,
84 84
85 /* Arg is void, return value is the hardware cycle counter value. */ 85 /* Arg is void, return value is the hardware cycle counter value. */
86 GET_CYCLE_COUNTER, 86 GET_CYCLE_COUNTER,
87 87
88 /* Set the hardware cycle counter to the value in arg, return void. 88 /* Set the hardware cycle counter to the value in arg, return void.
89 * FIXME - setting is probably not required. */ 89 * FIXME - setting is probably not required. */
90 SET_CYCLE_COUNTER, 90 SET_CYCLE_COUNTER,
91 91
92 /* Configure hardware for new bus ID in arg, return void. */ 92 /* Configure hardware for new bus ID in arg, return void. */
93 SET_BUS_ID, 93 SET_BUS_ID,
94 94
95 /* If arg true, start sending cycle start packets, stop if arg == 0. 95 /* If arg true, start sending cycle start packets, stop if arg == 0.
96 * Return void. */ 96 * Return void. */
97 ACT_CYCLE_MASTER, 97 ACT_CYCLE_MASTER,
98 98
99 /* Cancel all outstanding async requests without resetting the bus. 99 /* Cancel all outstanding async requests without resetting the bus.
100 * Return void. */ 100 * Return void. */
101 CANCEL_REQUESTS, 101 CANCEL_REQUESTS,
102 102
103 /* Start or stop receiving isochronous channel in arg. Return void. 103 /* Start or stop receiving isochronous channel in arg. Return void.
104 * This acts as an optimization hint; hosts are not required to 104 * This acts as an optimization hint; hosts are not required to
105 * filter out unrequested channels. */ 105 * filter out unrequested channels. */
106 ISO_LISTEN_CHANNEL, 106 ISO_LISTEN_CHANNEL,
107 ISO_UNLISTEN_CHANNEL 107 ISO_UNLISTEN_CHANNEL
108}; 108};
109 109
110enum isoctl_cmd { 110enum isoctl_cmd {
@@ -135,13 +135,13 @@ enum isoctl_cmd {
135}; 135};
136 136
137enum reset_types { 137enum reset_types {
138 /* 166 microsecond reset -- only type of reset available on 138 /* 166 microsecond reset -- only type of reset available on
139 non-1394a capable controllers */ 139 non-1394a capable controllers */
140 LONG_RESET, 140 LONG_RESET,
141 141
142 /* Short (arbitrated) reset -- only available on 1394a capable 142 /* Short (arbitrated) reset -- only available on 1394a capable
143 controllers */ 143 controllers */
144 SHORT_RESET, 144 SHORT_RESET,
145 145
146 /* Variants that set force_root before issuing the bus reset */ 146 /* Variants that set force_root before issuing the bus reset */
147 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT, 147 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
@@ -159,22 +159,22 @@ struct hpsb_host_driver {
159 * reads to the ConfigROM on its own. */ 159 * reads to the ConfigROM on its own. */
160 void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom); 160 void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom);
161 161
162 /* This function shall implement packet transmission based on 162 /* This function shall implement packet transmission based on
163 * packet->type. It shall CRC both parts of the packet (unless 163 * packet->type. It shall CRC both parts of the packet (unless
164 * packet->type == raw) and do byte-swapping as necessary or instruct 164 * packet->type == raw) and do byte-swapping as necessary or instruct
165 * the hardware to do so. It can return immediately after the packet 165 * the hardware to do so. It can return immediately after the packet
166 * was queued for sending. After sending, hpsb_packet_sent() has to be 166 * was queued for sending. After sending, hpsb_packet_sent() has to be
167 * called. Return 0 on success, negative errno on failure. 167 * called. Return 0 on success, negative errno on failure.
168 * NOTE: The function must be callable in interrupt context. 168 * NOTE: The function must be callable in interrupt context.
169 */ 169 */
170 int (*transmit_packet) (struct hpsb_host *host, 170 int (*transmit_packet) (struct hpsb_host *host,
171 struct hpsb_packet *packet); 171 struct hpsb_packet *packet);
172 172
173 /* This function requests miscellaneous services from the driver, see 173 /* This function requests miscellaneous services from the driver, see
174 * above for command codes and expected actions. Return -1 for unknown 174 * above for command codes and expected actions. Return -1 for unknown
175 * command, though that should never happen. 175 * command, though that should never happen.
176 */ 176 */
177 int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg); 177 int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
178 178
179 /* ISO transmission/reception functions. Return 0 on success, -1 179 /* ISO transmission/reception functions. Return 0 on success, -1
180 * (or -EXXX errno code) on failure. If the low-level driver does not 180 * (or -EXXX errno code) on failure. If the low-level driver does not
@@ -182,15 +182,15 @@ struct hpsb_host_driver {
182 */ 182 */
183 int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg); 183 int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
184 184
185 /* This function is mainly to redirect local CSR reads/locks to the iso 185 /* This function is mainly to redirect local CSR reads/locks to the iso
186 * management registers (bus manager id, bandwidth available, channels 186 * management registers (bus manager id, bandwidth available, channels
187 * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus 187 * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
188 * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids 188 * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
189 * as OHCI uses). data and compare are the new data and expected data 189 * as OHCI uses). data and compare are the new data and expected data
190 * respectively, return value is the old value. 190 * respectively, return value is the old value.
191 */ 191 */
192 quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg, 192 quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
193 quadlet_t data, quadlet_t compare); 193 quadlet_t data, quadlet_t compare);
194}; 194};
195 195
196 196
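
The hpsb_host state and duty flags in the hunk above (in_bus_reset, is_root, is_irm, ...) are one-bit C bitfields, so a handful of booleans pack into a single word. A standalone check of the idiom (hypothetical struct, not the kernel's):

#include <stdio.h>

struct node_state {
        unsigned in_bus_reset:1;
        unsigned is_root:1;
        unsigned is_irm:1;
};

int main(void)
{
        struct node_state s = { 0 };

        s.is_root = 1;          /* each flag costs one bit */
        printf("root=%u irm=%u reset=%u, struct size=%zu\n",
               s.is_root, s.is_irm, s.in_bus_reset, sizeof(s));
        return 0;
}
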
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
index f92b566363d5..156703986348 100644
--- a/drivers/ieee1394/ieee1394-ioctl.h
+++ b/drivers/ieee1394/ieee1394-ioctl.h
@@ -7,14 +7,6 @@
7#include <linux/ioctl.h> 7#include <linux/ioctl.h>
8#include <linux/types.h> 8#include <linux/types.h>
9 9
10
11/* AMDTP Gets 6 */
12#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl)
13#define AMDTP_IOC_PLUG _IOW('#', 0x01, struct amdtp_ioctl)
14#define AMDTP_IOC_PING _IOW('#', 0x02, struct amdtp_ioctl)
15#define AMDTP_IOC_ZAP _IO ('#', 0x03)
16
17
18/* DV1394 Gets 10 */ 10/* DV1394 Gets 10 */
19 11
20/* Get the driver ready to transmit video. pass a struct dv1394_init* as 12/* Get the driver ready to transmit video. pass a struct dv1394_init* as
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
index b634a9bb365c..936d776de00a 100644
--- a/drivers/ieee1394/ieee1394.h
+++ b/drivers/ieee1394/ieee1394.h
@@ -62,6 +62,7 @@
62extern const char *hpsb_speedto_str[]; 62extern const char *hpsb_speedto_str[];
63 63
64 64
65/* 1394a cable PHY packets */
65#define SELFID_PWRCL_NO_POWER 0x0 66#define SELFID_PWRCL_NO_POWER 0x0
66#define SELFID_PWRCL_PROVIDE_15W 0x1 67#define SELFID_PWRCL_PROVIDE_15W 0x1
67#define SELFID_PWRCL_PROVIDE_30W 0x2 68#define SELFID_PWRCL_PROVIDE_30W 0x2
@@ -76,8 +77,24 @@ extern const char *hpsb_speedto_str[];
76#define SELFID_PORT_NCONN 0x1 77#define SELFID_PORT_NCONN 0x1
77#define SELFID_PORT_NONE 0x0 78#define SELFID_PORT_NONE 0x0
78 79
80#define PHYPACKET_LINKON 0x40000000
81#define PHYPACKET_PHYCONFIG_R 0x00800000
82#define PHYPACKET_PHYCONFIG_T 0x00400000
83#define EXTPHYPACKET_TYPE_PING 0x00000000
84#define EXTPHYPACKET_TYPE_REMOTEACCESS_BASE 0x00040000
85#define EXTPHYPACKET_TYPE_REMOTEACCESS_PAGED 0x00140000
86#define EXTPHYPACKET_TYPE_REMOTEREPLY_BASE 0x000C0000
87#define EXTPHYPACKET_TYPE_REMOTEREPLY_PAGED 0x001C0000
88#define EXTPHYPACKET_TYPE_REMOTECOMMAND 0x00200000
89#define EXTPHYPACKET_TYPE_REMOTECONFIRMATION 0x00280000
90#define EXTPHYPACKET_TYPE_RESUME 0x003C0000
79 91
80/* 1394a PHY bitmasks */ 92#define EXTPHYPACKET_TYPEMASK 0xC0FC0000
93
94#define PHYPACKET_PORT_SHIFT 24
95#define PHYPACKET_GAPCOUNT_SHIFT 16
96
97/* 1394a PHY register map bitmasks */
81#define PHY_00_PHYSICAL_ID 0xFC 98#define PHY_00_PHYSICAL_ID 0xFC
82#define PHY_00_R 0x02 /* Root */ 99#define PHY_00_R 0x02 /* Root */
83#define PHY_00_PS 0x01 /* Power Status*/ 100#define PHY_00_PS 0x01 /* Power Status*/
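
The new PHYPACKET_*/EXTPHYPACKET_* constants above make PHY packets decodable by name instead of by magic number; for the extended types, the type bits are isolated with EXTPHYPACKET_TYPEMASK. A standalone check, with the constant values copied from the header and an invented packet word:

#include <stdio.h>
#include <stdint.h>

#define EXTPHYPACKET_TYPE_PING   0x00000000
#define EXTPHYPACKET_TYPE_RESUME 0x003C0000
#define EXTPHYPACKET_TYPEMASK    0xC0FC0000

int main(void)
{
        uint32_t pkt = 0x003C5678;      /* hypothetical resume packet */
        uint32_t type = pkt & EXTPHYPACKET_TYPEMASK;

        if (type == EXTPHYPACKET_TYPE_RESUME)
                printf("resume packet\n");
        else if (type == EXTPHYPACKET_TYPE_PING)
                printf("ping packet\n");
        else
                printf("other extended type 0x%08x\n", (unsigned)type);
        return 0;
}
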
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 32a1e016c85e..25ef5a86f5f0 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -179,34 +179,34 @@ void hpsb_free_packet(struct hpsb_packet *packet)
179 179
180int hpsb_reset_bus(struct hpsb_host *host, int type) 180int hpsb_reset_bus(struct hpsb_host *host, int type)
181{ 181{
182 if (!host->in_bus_reset) { 182 if (!host->in_bus_reset) {
183 host->driver->devctl(host, RESET_BUS, type); 183 host->driver->devctl(host, RESET_BUS, type);
184 return 0; 184 return 0;
185 } else { 185 } else {
186 return 1; 186 return 1;
187 } 187 }
188} 188}
189 189
190 190
191int hpsb_bus_reset(struct hpsb_host *host) 191int hpsb_bus_reset(struct hpsb_host *host)
192{ 192{
193 if (host->in_bus_reset) { 193 if (host->in_bus_reset) {
194 HPSB_NOTICE("%s called while bus reset already in progress", 194 HPSB_NOTICE("%s called while bus reset already in progress",
195 __FUNCTION__); 195 __FUNCTION__);
196 return 1; 196 return 1;
197 } 197 }
198 198
199 abort_requests(host); 199 abort_requests(host);
200 host->in_bus_reset = 1; 200 host->in_bus_reset = 1;
201 host->irm_id = -1; 201 host->irm_id = -1;
202 host->is_irm = 0; 202 host->is_irm = 0;
203 host->busmgr_id = -1; 203 host->busmgr_id = -1;
204 host->is_busmgr = 0; 204 host->is_busmgr = 0;
205 host->is_cycmst = 0; 205 host->is_cycmst = 0;
206 host->node_count = 0; 206 host->node_count = 0;
207 host->selfid_count = 0; 207 host->selfid_count = 0;
208 208
209 return 0; 209 return 0;
210} 210}
211 211
212 212
@@ -216,150 +216,156 @@ int hpsb_bus_reset(struct hpsb_host *host)
216 */ 216 */
217static int check_selfids(struct hpsb_host *host) 217static int check_selfids(struct hpsb_host *host)
218{ 218{
219 int nodeid = -1; 219 int nodeid = -1;
220 int rest_of_selfids = host->selfid_count; 220 int rest_of_selfids = host->selfid_count;
221 struct selfid *sid = (struct selfid *)host->topology_map; 221 struct selfid *sid = (struct selfid *)host->topology_map;
222 struct ext_selfid *esid; 222 struct ext_selfid *esid;
223 int esid_seq = 23; 223 int esid_seq = 23;
224 224
225 host->nodes_active = 0; 225 host->nodes_active = 0;
226 226
227 while (rest_of_selfids--) { 227 while (rest_of_selfids--) {
228 if (!sid->extended) { 228 if (!sid->extended) {
229 nodeid++; 229 nodeid++;
230 esid_seq = 0; 230 esid_seq = 0;
231 231
232 if (sid->phy_id != nodeid) { 232 if (sid->phy_id != nodeid) {
233 HPSB_INFO("SelfIDs failed monotony check with " 233 HPSB_INFO("SelfIDs failed monotony check with "
234 "%d", sid->phy_id); 234 "%d", sid->phy_id);
235 return 0; 235 return 0;
236 } 236 }
237 237
238 if (sid->link_active) { 238 if (sid->link_active) {
239 host->nodes_active++; 239 host->nodes_active++;
240 if (sid->contender) 240 if (sid->contender)
241 host->irm_id = LOCAL_BUS | sid->phy_id; 241 host->irm_id = LOCAL_BUS | sid->phy_id;
242 } 242 }
243 } else { 243 } else {
244 esid = (struct ext_selfid *)sid; 244 esid = (struct ext_selfid *)sid;
245 245
246 if ((esid->phy_id != nodeid) 246 if ((esid->phy_id != nodeid)
247 || (esid->seq_nr != esid_seq)) { 247 || (esid->seq_nr != esid_seq)) {
248 HPSB_INFO("SelfIDs failed monotony check with " 248 HPSB_INFO("SelfIDs failed monotony check with "
249 "%d/%d", esid->phy_id, esid->seq_nr); 249 "%d/%d", esid->phy_id, esid->seq_nr);
250 return 0; 250 return 0;
251 } 251 }
252 esid_seq++; 252 esid_seq++;
253 } 253 }
254 sid++; 254 sid++;
255 } 255 }
256 256
257 esid = (struct ext_selfid *)(sid - 1); 257 esid = (struct ext_selfid *)(sid - 1);
258 while (esid->extended) { 258 while (esid->extended) {
259 if ((esid->porta == 0x2) || (esid->portb == 0x2) 259 if ((esid->porta == SELFID_PORT_PARENT) ||
260 || (esid->portc == 0x2) || (esid->portd == 0x2) 260 (esid->portb == SELFID_PORT_PARENT) ||
261 || (esid->porte == 0x2) || (esid->portf == 0x2) 261 (esid->portc == SELFID_PORT_PARENT) ||
262 || (esid->portg == 0x2) || (esid->porth == 0x2)) { 262 (esid->portd == SELFID_PORT_PARENT) ||
263 (esid->porte == SELFID_PORT_PARENT) ||
264 (esid->portf == SELFID_PORT_PARENT) ||
265 (esid->portg == SELFID_PORT_PARENT) ||
266 (esid->porth == SELFID_PORT_PARENT)) {
263 HPSB_INFO("SelfIDs failed root check on " 267 HPSB_INFO("SelfIDs failed root check on "
264 "extended SelfID"); 268 "extended SelfID");
265 return 0; 269 return 0;
266 } 270 }
267 esid--; 271 esid--;
268 } 272 }
269 273
270 sid = (struct selfid *)esid; 274 sid = (struct selfid *)esid;
271 if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) { 275 if ((sid->port0 == SELFID_PORT_PARENT) ||
276 (sid->port1 == SELFID_PORT_PARENT) ||
277 (sid->port2 == SELFID_PORT_PARENT)) {
272 HPSB_INFO("SelfIDs failed root check"); 278 HPSB_INFO("SelfIDs failed root check");
273 return 0; 279 return 0;
274 } 280 }
275 281
276 host->node_count = nodeid + 1; 282 host->node_count = nodeid + 1;
277 return 1; 283 return 1;
278} 284}
279 285
280static void build_speed_map(struct hpsb_host *host, int nodecount) 286static void build_speed_map(struct hpsb_host *host, int nodecount)
281{ 287{
282 u8 speedcap[nodecount]; 288 u8 speedcap[nodecount];
283 u8 cldcnt[nodecount]; 289 u8 cldcnt[nodecount];
284 u8 *map = host->speed_map; 290 u8 *map = host->speed_map;
285 struct selfid *sid; 291 struct selfid *sid;
286 struct ext_selfid *esid; 292 struct ext_selfid *esid;
287 int i, j, n; 293 int i, j, n;
288 294
289 for (i = 0; i < (nodecount * 64); i += 64) { 295 for (i = 0; i < (nodecount * 64); i += 64) {
290 for (j = 0; j < nodecount; j++) { 296 for (j = 0; j < nodecount; j++) {
291 map[i+j] = IEEE1394_SPEED_MAX; 297 map[i+j] = IEEE1394_SPEED_MAX;
292 } 298 }
293 } 299 }
294 300
295 for (i = 0; i < nodecount; i++) { 301 for (i = 0; i < nodecount; i++) {
296 cldcnt[i] = 0; 302 cldcnt[i] = 0;
297 } 303 }
298 304
299 /* find direct children count and speed */ 305 /* find direct children count and speed */
300 for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1], 306 for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
301 n = nodecount - 1; 307 n = nodecount - 1;
302 (void *)sid >= (void *)host->topology_map; sid--) { 308 (void *)sid >= (void *)host->topology_map; sid--) {
303 if (sid->extended) { 309 if (sid->extended) {
304 esid = (struct ext_selfid *)sid; 310 esid = (struct ext_selfid *)sid;
305 311
306 if (esid->porta == 0x3) cldcnt[n]++; 312 if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
307 if (esid->portb == 0x3) cldcnt[n]++; 313 if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
308 if (esid->portc == 0x3) cldcnt[n]++; 314 if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
309 if (esid->portd == 0x3) cldcnt[n]++; 315 if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
310 if (esid->porte == 0x3) cldcnt[n]++; 316 if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
311 if (esid->portf == 0x3) cldcnt[n]++; 317 if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
312 if (esid->portg == 0x3) cldcnt[n]++; 318 if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
313 if (esid->porth == 0x3) cldcnt[n]++; 319 if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
314 } else { 320 } else {
315 if (sid->port0 == 0x3) cldcnt[n]++; 321 if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
316 if (sid->port1 == 0x3) cldcnt[n]++; 322 if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
317 if (sid->port2 == 0x3) cldcnt[n]++; 323 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
318 324
319 speedcap[n] = sid->speed; 325 speedcap[n] = sid->speed;
320 n--; 326 n--;
321 } 327 }
322 } 328 }
323 329
324 /* set self mapping */ 330 /* set self mapping */
325 for (i = 0; i < nodecount; i++) { 331 for (i = 0; i < nodecount; i++) {
326 map[64*i + i] = speedcap[i]; 332 map[64*i + i] = speedcap[i];
327 } 333 }
328 334
329 /* fix up direct children count to total children count; 335 /* fix up direct children count to total children count;
330 * also fix up speedcaps for sibling and parent communication */ 336 * also fix up speedcaps for sibling and parent communication */
331 for (i = 1; i < nodecount; i++) { 337 for (i = 1; i < nodecount; i++) {
332 for (j = cldcnt[i], n = i - 1; j > 0; j--) { 338 for (j = cldcnt[i], n = i - 1; j > 0; j--) {
333 cldcnt[i] += cldcnt[n]; 339 cldcnt[i] += cldcnt[n];
334 speedcap[n] = min(speedcap[n], speedcap[i]); 340 speedcap[n] = min(speedcap[n], speedcap[i]);
335 n -= cldcnt[n] + 1; 341 n -= cldcnt[n] + 1;
336 } 342 }
337 } 343 }
338 344
339 for (n = 0; n < nodecount; n++) { 345 for (n = 0; n < nodecount; n++) {
340 for (i = n - cldcnt[n]; i <= n; i++) { 346 for (i = n - cldcnt[n]; i <= n; i++) {
341 for (j = 0; j < (n - cldcnt[n]); j++) { 347 for (j = 0; j < (n - cldcnt[n]); j++) {
342 map[j*64 + i] = map[i*64 + j] = 348 map[j*64 + i] = map[i*64 + j] =
343 min(map[i*64 + j], speedcap[n]); 349 min(map[i*64 + j], speedcap[n]);
344 } 350 }
345 for (j = n + 1; j < nodecount; j++) { 351 for (j = n + 1; j < nodecount; j++) {
346 map[j*64 + i] = map[i*64 + j] = 352 map[j*64 + i] = map[i*64 + j] =
347 min(map[i*64 + j], speedcap[n]); 353 min(map[i*64 + j], speedcap[n]);
348 } 354 }
349 } 355 }
350 } 356 }
351} 357}
352 358
353 359
354void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid) 360void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
355{ 361{
356 if (host->in_bus_reset) { 362 if (host->in_bus_reset) {
357 HPSB_VERBOSE("Including SelfID 0x%x", sid); 363 HPSB_VERBOSE("Including SelfID 0x%x", sid);
358 host->topology_map[host->selfid_count++] = sid; 364 host->topology_map[host->selfid_count++] = sid;
359 } else { 365 } else {
360 HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d", 366 HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
361 sid, NODEID_TO_BUS(host->node_id)); 367 sid, NODEID_TO_BUS(host->node_id));
362 } 368 }
363} 369}
364 370
365void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot) 371void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
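
build_speed_map() above, now spelling the port codes SELFID_PORT_CHILD rather than bare 0x3, ends by clamping every node pair to the minimum speed cap along their path; the result lives in a flat 64-wide table that is kept symmetric. The indexing and symmetric-min update in isolation, with placeholder speed codes:

#include <stdio.h>

#define MAX_NODES 64

static unsigned char map[MAX_NODES * MAX_NODES];

/* Clamp the pair (i, j) to a speed cap, keeping the table symmetric,
 * just like the min() updates in build_speed_map() above. */
static void limit_pair(int i, int j, unsigned char cap)
{
        unsigned char cur = map[i * MAX_NODES + j];
        unsigned char v = cap < cur ? cap : cur;

        map[i * MAX_NODES + j] = v;
        map[j * MAX_NODES + i] = v;
}

int main(void)
{
        int k;

        /* start every pair at 3, a placeholder for IEEE1394_SPEED_MAX */
        for (k = 0; k < MAX_NODES * MAX_NODES; k++)
                map[k] = 3;

        limit_pair(0, 2, 1);    /* a slow node caps the 0<->2 path */
        printf("speed(0,2)=%u speed(2,0)=%u speed(0,1)=%u\n",
               map[0 * MAX_NODES + 2], map[2 * MAX_NODES + 0],
               map[0 * MAX_NODES + 1]);
        return 0;
}
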
@@ -367,50 +373,50 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
367 if (!host->in_bus_reset) 373 if (!host->in_bus_reset)
368 HPSB_NOTICE("SelfID completion called outside of bus reset!"); 374 HPSB_NOTICE("SelfID completion called outside of bus reset!");
369 375
370 host->node_id = LOCAL_BUS | phyid; 376 host->node_id = LOCAL_BUS | phyid;
371 host->is_root = isroot; 377 host->is_root = isroot;
372 378
373 if (!check_selfids(host)) { 379 if (!check_selfids(host)) {
374 if (host->reset_retries++ < 20) { 380 if (host->reset_retries++ < 20) {
375 /* selfid stage did not complete without error */ 381 /* selfid stage did not complete without error */
376 HPSB_NOTICE("Error in SelfID stage, resetting"); 382 HPSB_NOTICE("Error in SelfID stage, resetting");
377 host->in_bus_reset = 0; 383 host->in_bus_reset = 0;
378 /* this should work from ohci1394 now... */ 384 /* this should work from ohci1394 now... */
379 hpsb_reset_bus(host, LONG_RESET); 385 hpsb_reset_bus(host, LONG_RESET);
380 return; 386 return;
381 } else { 387 } else {
382 HPSB_NOTICE("Stopping out-of-control reset loop"); 388 HPSB_NOTICE("Stopping out-of-control reset loop");
383 HPSB_NOTICE("Warning - topology map and speed map will not be valid"); 389 HPSB_NOTICE("Warning - topology map and speed map will not be valid");
384 host->reset_retries = 0; 390 host->reset_retries = 0;
385 } 391 }
386 } else { 392 } else {
387 host->reset_retries = 0; 393 host->reset_retries = 0;
388 build_speed_map(host, host->node_count); 394 build_speed_map(host, host->node_count);
389 } 395 }
390 396
391 HPSB_VERBOSE("selfid_complete called with successful SelfID stage " 397 HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
392 "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id); 398 "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
393 399
394 /* irm_id is kept up to date by check_selfids() */ 400 /* irm_id is kept up to date by check_selfids() */
395 if (host->irm_id == host->node_id) { 401 if (host->irm_id == host->node_id) {
396 host->is_irm = 1; 402 host->is_irm = 1;
397 } else { 403 } else {
398 host->is_busmgr = 0; 404 host->is_busmgr = 0;
399 host->is_irm = 0; 405 host->is_irm = 0;
400 } 406 }
401 407
402 if (isroot) { 408 if (isroot) {
403 host->driver->devctl(host, ACT_CYCLE_MASTER, 1); 409 host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
404 host->is_cycmst = 1; 410 host->is_cycmst = 1;
405 } 411 }
406 atomic_inc(&host->generation); 412 atomic_inc(&host->generation);
407 host->in_bus_reset = 0; 413 host->in_bus_reset = 0;
408 highlevel_host_reset(host); 414 highlevel_host_reset(host);
409} 415}
410 416
411 417
412void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, 418void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
413 int ackcode) 419 int ackcode)
414{ 420{
415 unsigned long flags; 421 unsigned long flags;
416 422
@@ -457,6 +463,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
457int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt) 463int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
458{ 464{
459 struct hpsb_packet *packet; 465 struct hpsb_packet *packet;
466 quadlet_t d = 0;
460 int retval = 0; 467 int retval = 0;
461 468
462 if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 || 469 if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
@@ -466,26 +473,16 @@ int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
466 return -EINVAL; 473 return -EINVAL;
467 } 474 }
468 475
469 packet = hpsb_alloc_packet(0);
470 if (!packet)
471 return -ENOMEM;
472
473 packet->host = host;
474 packet->header_size = 8;
475 packet->data_size = 0;
476 packet->expect_response = 0;
477 packet->no_waiter = 0;
478 packet->type = hpsb_raw;
479 packet->header[0] = 0;
480 if (rootid != -1) 476 if (rootid != -1)
481 packet->header[0] |= rootid << 24 | 1 << 23; 477 d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
482 if (gapcnt != -1) 478 if (gapcnt != -1)
483 packet->header[0] |= gapcnt << 16 | 1 << 22; 479 d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
484 480
485 packet->header[1] = ~packet->header[0]; 481 packet = hpsb_make_phypacket(host, d);
482 if (!packet)
483 return -ENOMEM;
486 484
487 packet->generation = get_hpsb_generation(host); 485 packet->generation = get_hpsb_generation(host);
488
489 retval = hpsb_send_packet_and_wait(packet); 486 retval = hpsb_send_packet_and_wait(packet);
490 hpsb_free_packet(packet); 487 hpsb_free_packet(packet);
491 488
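
The rewritten hpsb_send_phy_config() above treats rootid == -1 and gapcnt == -1 as "leave that setting alone": the R or T flag bit is only set when a real value is supplied, and the actual packet construction is delegated to hpsb_make_phypacket(). The quadlet-building step as a standalone program (constants copied from ieee1394.h earlier in this patch):

#include <stdio.h>
#include <stdint.h>

#define PHYPACKET_PHYCONFIG_R    0x00800000
#define PHYPACKET_PHYCONFIG_T    0x00400000
#define PHYPACKET_PORT_SHIFT     24
#define PHYPACKET_GAPCOUNT_SHIFT 16

/* Build the config quadlet the way the rewritten function does:
 * -1 means "leave that setting alone", so its flag bit stays clear. */
static uint32_t phy_config(int rootid, int gapcnt)
{
        uint32_t d = 0;

        if (rootid != -1)
                d |= PHYPACKET_PHYCONFIG_R |
                     (uint32_t)rootid << PHYPACKET_PORT_SHIFT;
        if (gapcnt != -1)
                d |= PHYPACKET_PHYCONFIG_T |
                     (uint32_t)gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
        return d;
}

int main(void)
{
        printf("set root 3 only:    0x%08x\n", (unsigned)phy_config(3, -1));
        printf("set gap count only: 0x%08x\n", (unsigned)phy_config(-1, 0x2a));
        return 0;
}

The removed open-coded version also stored the bitwise inverse of this quadlet in header[1]; presumably hpsb_make_phypacket() takes care of that now.
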
@@ -510,13 +507,13 @@ int hpsb_send_packet(struct hpsb_packet *packet)
510{ 507{
511 struct hpsb_host *host = packet->host; 508 struct hpsb_host *host = packet->host;
512 509
513 if (host->is_shutdown) 510 if (host->is_shutdown)
514 return -EINVAL; 511 return -EINVAL;
515 if (host->in_bus_reset || 512 if (host->in_bus_reset ||
516 (packet->generation != get_hpsb_generation(host))) 513 (packet->generation != get_hpsb_generation(host)))
517 return -EAGAIN; 514 return -EAGAIN;
518 515
519 packet->state = hpsb_queued; 516 packet->state = hpsb_queued;
520 517
521 /* This just seems silly to me */ 518 /* This just seems silly to me */
522 WARN_ON(packet->no_waiter && packet->expect_response); 519 WARN_ON(packet->no_waiter && packet->expect_response);
@@ -530,42 +527,42 @@ int hpsb_send_packet(struct hpsb_packet *packet)
530 skb_queue_tail(&host->pending_packet_queue, packet->skb); 527 skb_queue_tail(&host->pending_packet_queue, packet->skb);
531 } 528 }
532 529
533 if (packet->node_id == host->node_id) { 530 if (packet->node_id == host->node_id) {
534 /* it is a local request, so handle it locally */ 531 /* it is a local request, so handle it locally */
535 532
536 quadlet_t *data; 533 quadlet_t *data;
537 size_t size = packet->data_size + packet->header_size; 534 size_t size = packet->data_size + packet->header_size;
538 535
539 data = kmalloc(size, GFP_ATOMIC); 536 data = kmalloc(size, GFP_ATOMIC);
540 if (!data) { 537 if (!data) {
541 HPSB_ERR("unable to allocate memory for concatenating header and data"); 538 HPSB_ERR("unable to allocate memory for concatenating header and data");
542 return -ENOMEM; 539 return -ENOMEM;
543 } 540 }
544 541
545 memcpy(data, packet->header, packet->header_size); 542 memcpy(data, packet->header, packet->header_size);
546 543
547 if (packet->data_size) 544 if (packet->data_size)
548 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size); 545 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
549 546
550 dump_packet("send packet local", packet->header, packet->header_size, -1); 547 dump_packet("send packet local", packet->header, packet->header_size, -1);
551 548
552 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); 549 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
553 hpsb_packet_received(host, data, size, 0); 550 hpsb_packet_received(host, data, size, 0);
554 551
555 kfree(data); 552 kfree(data);
556 553
557 return 0; 554 return 0;
558 } 555 }
559 556
560 if (packet->type == hpsb_async && packet->node_id != ALL_NODES) { 557 if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
561 packet->speed_code = 558 packet->speed_code =
562 host->speed_map[NODEID_TO_NODE(host->node_id) * 64 559 host->speed_map[NODEID_TO_NODE(host->node_id) * 64
563 + NODEID_TO_NODE(packet->node_id)]; 560 + NODEID_TO_NODE(packet->node_id)];
564 } 561 }
565 562
566 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code); 563 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
567 564
568 return host->driver->transmit_packet(host, packet); 565 return host->driver->transmit_packet(host, packet);
569} 566}
570 567
571/* We could just use complete() directly as the packet complete 568/* We could just use complete() directly as the packet complete
@@ -593,81 +590,81 @@ int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
593 590
594static void send_packet_nocare(struct hpsb_packet *packet) 591static void send_packet_nocare(struct hpsb_packet *packet)
595{ 592{
596 if (hpsb_send_packet(packet) < 0) { 593 if (hpsb_send_packet(packet) < 0) {
597 hpsb_free_packet(packet); 594 hpsb_free_packet(packet);
598 } 595 }
599} 596}
600 597
601 598
602static void handle_packet_response(struct hpsb_host *host, int tcode, 599static void handle_packet_response(struct hpsb_host *host, int tcode,
603 quadlet_t *data, size_t size) 600 quadlet_t *data, size_t size)
604{ 601{
605 struct hpsb_packet *packet = NULL; 602 struct hpsb_packet *packet = NULL;
606 struct sk_buff *skb; 603 struct sk_buff *skb;
607 int tcode_match = 0; 604 int tcode_match = 0;
608 int tlabel; 605 int tlabel;
609 unsigned long flags; 606 unsigned long flags;
610 607
611 tlabel = (data[0] >> 10) & 0x3f; 608 tlabel = (data[0] >> 10) & 0x3f;
612 609
613 spin_lock_irqsave(&host->pending_packet_queue.lock, flags); 610 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
614 611
615 skb_queue_walk(&host->pending_packet_queue, skb) { 612 skb_queue_walk(&host->pending_packet_queue, skb) {
616 packet = (struct hpsb_packet *)skb->data; 613 packet = (struct hpsb_packet *)skb->data;
617 if ((packet->tlabel == tlabel) 614 if ((packet->tlabel == tlabel)
618 && (packet->node_id == (data[1] >> 16))){ 615 && (packet->node_id == (data[1] >> 16))){
619 break; 616 break;
620 } 617 }
621 618
622 packet = NULL; 619 packet = NULL;
623 } 620 }
624 621
625 if (packet == NULL) { 622 if (packet == NULL) {
626 HPSB_DEBUG("unsolicited response packet received - no tlabel match"); 623 HPSB_DEBUG("unsolicited response packet received - no tlabel match");
627 dump_packet("contents", data, 16, -1); 624 dump_packet("contents", data, 16, -1);
628 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 625 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
629 return; 626 return;
630 } 627 }
631 628
632 switch (packet->tcode) { 629 switch (packet->tcode) {
633 case TCODE_WRITEQ: 630 case TCODE_WRITEQ:
634 case TCODE_WRITEB: 631 case TCODE_WRITEB:
635 if (tcode != TCODE_WRITE_RESPONSE) 632 if (tcode != TCODE_WRITE_RESPONSE)
636 break; 633 break;
637 tcode_match = 1; 634 tcode_match = 1;
638 memcpy(packet->header, data, 12); 635 memcpy(packet->header, data, 12);
639 break; 636 break;
640 case TCODE_READQ: 637 case TCODE_READQ:
641 if (tcode != TCODE_READQ_RESPONSE) 638 if (tcode != TCODE_READQ_RESPONSE)
642 break; 639 break;
643 tcode_match = 1; 640 tcode_match = 1;
644 memcpy(packet->header, data, 16); 641 memcpy(packet->header, data, 16);
645 break; 642 break;
646 case TCODE_READB: 643 case TCODE_READB:
647 if (tcode != TCODE_READB_RESPONSE) 644 if (tcode != TCODE_READB_RESPONSE)
648 break; 645 break;
649 tcode_match = 1; 646 tcode_match = 1;
650 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16); 647 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
651 memcpy(packet->header, data, 16); 648 memcpy(packet->header, data, 16);
652 memcpy(packet->data, data + 4, size - 16); 649 memcpy(packet->data, data + 4, size - 16);
653 break; 650 break;
654 case TCODE_LOCK_REQUEST: 651 case TCODE_LOCK_REQUEST:
655 if (tcode != TCODE_LOCK_RESPONSE) 652 if (tcode != TCODE_LOCK_RESPONSE)
656 break; 653 break;
657 tcode_match = 1; 654 tcode_match = 1;
658 size = min((size - 16), (size_t)8); 655 size = min((size - 16), (size_t)8);
659 BUG_ON(packet->skb->len - sizeof(*packet) < size); 656 BUG_ON(packet->skb->len - sizeof(*packet) < size);
660 memcpy(packet->header, data, 16); 657 memcpy(packet->header, data, 16);
661 memcpy(packet->data, data + 4, size); 658 memcpy(packet->data, data + 4, size);
662 break; 659 break;
663 } 660 }
664 661
665 if (!tcode_match) { 662 if (!tcode_match) {
666 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 663 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
667 HPSB_INFO("unsolicited response packet received - tcode mismatch"); 664 HPSB_INFO("unsolicited response packet received - tcode mismatch");
668 dump_packet("contents", data, 16, -1); 665 dump_packet("contents", data, 16, -1);
669 return; 666 return;
670 } 667 }
671 668
672 __skb_unlink(skb, &host->pending_packet_queue); 669 __skb_unlink(skb, &host->pending_packet_queue);
673 670
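
handle_packet_response() above pairs an incoming response with its pending request by transaction label and source node, both decoded from the first two header quadlets: (data[0] >> 10) & 0x3f and data[1] >> 16. The extraction on its own, with invented header words:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t data0 = 0x0000a400;    /* invented header quadlet 0 */
        uint32_t data1 = 0xffc15000;    /* invented header quadlet 1 */

        unsigned tlabel  = (data0 >> 10) & 0x3f;
        unsigned node_id = data1 >> 16;

        printf("tlabel %u from node 0x%04x\n", tlabel, node_id);
        return 0;
}

Only after both fields match a queued packet does the switch above also verify that the response tcode agrees with the request tcode.
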
@@ -686,27 +683,27 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
686static struct hpsb_packet *create_reply_packet(struct hpsb_host *host, 683static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
687 quadlet_t *data, size_t dsize) 684 quadlet_t *data, size_t dsize)
688{ 685{
689 struct hpsb_packet *p; 686 struct hpsb_packet *p;
690 687
691 p = hpsb_alloc_packet(dsize); 688 p = hpsb_alloc_packet(dsize);
692 if (unlikely(p == NULL)) { 689 if (unlikely(p == NULL)) {
693 /* FIXME - send data_error response */ 690 /* FIXME - send data_error response */
694 return NULL; 691 return NULL;
695 } 692 }
696 693
697 p->type = hpsb_async; 694 p->type = hpsb_async;
698 p->state = hpsb_unused; 695 p->state = hpsb_unused;
699 p->host = host; 696 p->host = host;
700 p->node_id = data[1] >> 16; 697 p->node_id = data[1] >> 16;
701 p->tlabel = (data[0] >> 10) & 0x3f; 698 p->tlabel = (data[0] >> 10) & 0x3f;
702 p->no_waiter = 1; 699 p->no_waiter = 1;
703 700
704 p->generation = get_hpsb_generation(host); 701 p->generation = get_hpsb_generation(host);
705 702
706 if (dsize % 4) 703 if (dsize % 4)
707 p->data[dsize / 4] = 0; 704 p->data[dsize / 4] = 0;
708 705
709 return p; 706 return p;
710} 707}
711 708
712#define PREP_ASYNC_HEAD_RCODE(tc) \ 709#define PREP_ASYNC_HEAD_RCODE(tc) \
@@ -717,7 +714,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
717 packet->header[2] = 0 714 packet->header[2] = 0
718 715
719static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode, 716static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
720 quadlet_t data) 717 quadlet_t data)
721{ 718{
722 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE); 719 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
723 packet->header[3] = data; 720 packet->header[3] = data;
@@ -726,7 +723,7 @@ static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
726} 723}
727 724
728static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode, 725static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
729 int length) 726 int length)
730{ 727{
731 if (rcode != RCODE_COMPLETE) 728 if (rcode != RCODE_COMPLETE)
732 length = 0; 729 length = 0;
@@ -746,7 +743,7 @@ static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
746} 743}
747 744
748static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode, 745static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
749 int length) 746 int length)
750{ 747{
751 if (rcode != RCODE_COMPLETE) 748 if (rcode != RCODE_COMPLETE)
752 length = 0; 749 length = 0;
@@ -758,184 +755,184 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc
758} 755}
759 756
760#define PREP_REPLY_PACKET(length) \ 757#define PREP_REPLY_PACKET(length) \
761 packet = create_reply_packet(host, data, length); \ 758 packet = create_reply_packet(host, data, length); \
762 if (packet == NULL) break 759 if (packet == NULL) break
763 760
 static void handle_incoming_packet(struct hpsb_host *host, int tcode,
                                    quadlet_t *data, size_t size, int write_acked)
 {
         struct hpsb_packet *packet;
         int length, rcode, extcode;
         quadlet_t buffer;
         nodeid_t source = data[1] >> 16;
         nodeid_t dest = data[0] >> 16;
         u16 flags = (u16) data[0];
         u64 addr;

         /* big FIXME - no error checking is done for an out of bounds length */

         switch (tcode) {
         case TCODE_WRITEQ:
                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                 rcode = highlevel_write(host, source, dest, data+3,
                                         addr, 4, flags);

                 if (!write_acked
                     && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
                     && (rcode >= 0)) {
                         /* not a broadcast write, reply */
                         PREP_REPLY_PACKET(0);
                         fill_async_write_resp(packet, rcode);
                         send_packet_nocare(packet);
                 }
                 break;

         case TCODE_WRITEB:
                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                 rcode = highlevel_write(host, source, dest, data+4,
                                         addr, data[3]>>16, flags);

                 if (!write_acked
                     && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
                     && (rcode >= 0)) {
                         /* not a broadcast write, reply */
                         PREP_REPLY_PACKET(0);
                         fill_async_write_resp(packet, rcode);
                         send_packet_nocare(packet);
                 }
                 break;

         case TCODE_READQ:
                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                 rcode = highlevel_read(host, source, &buffer, addr, 4, flags);

                 if (rcode >= 0) {
                         PREP_REPLY_PACKET(0);
                         fill_async_readquad_resp(packet, rcode, buffer);
                         send_packet_nocare(packet);
                 }
                 break;

         case TCODE_READB:
                 length = data[3] >> 16;
                 PREP_REPLY_PACKET(length);

                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                 rcode = highlevel_read(host, source, packet->data, addr,
                                        length, flags);

                 if (rcode >= 0) {
                         fill_async_readblock_resp(packet, rcode, length);
                         send_packet_nocare(packet);
                 } else {
                         hpsb_free_packet(packet);
                 }
                 break;

         case TCODE_LOCK_REQUEST:
                 length = data[3] >> 16;
                 extcode = data[3] & 0xffff;
                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];

                 PREP_REPLY_PACKET(8);

                 if ((extcode == 0) || (extcode >= 7)) {
                         /* let switch default handle error */
                         length = 0;
                 }

                 switch (length) {
                 case 4:
                         rcode = highlevel_lock(host, source, packet->data, addr,
                                                data[4], 0, extcode,flags);
                         fill_async_lock_resp(packet, rcode, extcode, 4);
                         break;
                 case 8:
                         if ((extcode != EXTCODE_FETCH_ADD)
                             && (extcode != EXTCODE_LITTLE_ADD)) {
                                 rcode = highlevel_lock(host, source,
                                                        packet->data, addr,
                                                        data[5], data[4],
                                                        extcode, flags);
                                 fill_async_lock_resp(packet, rcode, extcode, 4);
                         } else {
                                 rcode = highlevel_lock64(host, source,
                                              (octlet_t *)packet->data, addr,
                                              *(octlet_t *)(data + 4), 0ULL,
                                              extcode, flags);
                                 fill_async_lock_resp(packet, rcode, extcode, 8);
                         }
                         break;
                 case 16:
                         rcode = highlevel_lock64(host, source,
                                      (octlet_t *)packet->data, addr,
                                      *(octlet_t *)(data + 6),
                                      *(octlet_t *)(data + 4),
                                      extcode, flags);
                         fill_async_lock_resp(packet, rcode, extcode, 8);
                         break;
                 default:
                         rcode = RCODE_TYPE_ERROR;
                         fill_async_lock_resp(packet, rcode,
                                              extcode, 0);
                 }

                 if (rcode >= 0) {
                         send_packet_nocare(packet);
                 } else {
                         hpsb_free_packet(packet);
                 }
                 break;
         }

 }
 #undef PREP_REPLY_PACKET


 void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
                           int write_acked)
 {
         int tcode;

         if (host->in_bus_reset) {
                 HPSB_INFO("received packet during reset; ignoring");
                 return;
         }

         dump_packet("received packet", data, size, -1);

         tcode = (data[0] >> 4) & 0xf;

         switch (tcode) {
         case TCODE_WRITE_RESPONSE:
         case TCODE_READQ_RESPONSE:
         case TCODE_READB_RESPONSE:
         case TCODE_LOCK_RESPONSE:
                 handle_packet_response(host, tcode, data, size);
                 break;

         case TCODE_WRITEQ:
         case TCODE_WRITEB:
         case TCODE_READQ:
         case TCODE_READB:
         case TCODE_LOCK_REQUEST:
                 handle_incoming_packet(host, tcode, data, size, write_acked);
                 break;


         case TCODE_ISO_DATA:
                 highlevel_iso_receive(host, data, size);
                 break;

         case TCODE_CYCLE_START:
                 /* simply ignore this packet if it is passed on */
                 break;

         default:
                 HPSB_NOTICE("received packet with bogus transaction code %d",
                             tcode);
                 break;
         }
 }


@@ -1030,10 +1027,10 @@ static int hpsbpkt_thread(void *__hi)

         daemonize("khpsbpkt");

+        current->flags |= PF_NOFREEZE;
+
         while (1) {
                 if (down_interruptible(&khpsbpkt_sig)) {
-                        if (try_to_freeze())
-                                continue;
                         printk("khpsbpkt: received unexpected signal?!\n" );
                         break;
                 }
@@ -1129,7 +1126,7 @@ static int __init ieee1394_init(void)
            nodemgr implements functionality required of ieee1394a-2000
            IRMs */
         hpsb_disable_irm = 1;

         return 0;
 }

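The khpsbpkt hunk above trades freezer participation for an opt-out: instead of swallowing the freezer's fake signal with try_to_freeze() and retrying the semaphore, the thread sets PF_NOFREEZE once, so any interrupted down_interruptible() really is an unexpected signal. A minimal sketch of the two idioms, assuming a daemonize()-era kernel thread (sem stands in for khpsbpkt_sig):

    /* old idiom: wake-ups caused by the freezer must be filtered out */
    while (1) {
            if (down_interruptible(&sem)) {
                    if (try_to_freeze())
                            continue;       /* woken only to enter the fridge */
                    break;                  /* a real, unexpected signal */
            }
            /* ... process queued packets ... */
    }

    /* new idiom: opt out of the freezer once, then every interruption
     * is treated as fatal for the loop */
    current->flags |= PF_NOFREEZE;
    while (1) {
            if (down_interruptible(&sem))
                    break;
            /* ... process queued packets ... */
    }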
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 0b31429d0a68..b35466023f00 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -10,8 +10,8 @@


 struct hpsb_packet {
         /* This struct is basically read-only for hosts with the exception of
          * the data buffer contents and xnext - see below. */

         /* This can be used for host driver internal linking.
          *
@@ -21,47 +21,47 @@ struct hpsb_packet {
          * driver_list when free'ing it. */
         struct list_head driver_list;

         nodeid_t node_id;

         /* Async and Iso types should be clear, raw means send-as-is, do not
          * CRC! Byte swapping shall still be done in this case. */
         enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;

         /* Okay, this is core internal and a no care for hosts.
          * queued   = queued for sending
          * pending  = sent, waiting for response
          * complete = processing completed, successful or not
          */
         enum {
                 hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
         } __attribute__((packed)) state;

         /* These are core internal. */
         signed char tlabel;
         signed char ack_code;
         unsigned char tcode;

         unsigned expect_response:1;
         unsigned no_waiter:1;

         /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
         unsigned speed_code:2;

         /*
          * *header and *data are guaranteed to be 32-bit DMAable and may be
          * overwritten to allow in-place byte swapping. Neither of these is
          * CRCed (the sizes also don't include CRC), but contain space for at
          * least one additional quadlet to allow in-place CRCing. The memory is
          * also guaranteed to be DMA mappable.
          */
         quadlet_t *header;
         quadlet_t *data;
         size_t header_size;
         size_t data_size;


         struct hpsb_host *host;
         unsigned int generation;

         atomic_t refcnt;

@@ -73,10 +73,10 @@ struct hpsb_packet {
         /* XXX This is just a hack at the moment */
         struct sk_buff *skb;

         /* Store jiffies for implementing bus timeouts. */
         unsigned long sendtime;

         quadlet_t embedded_header[5];
 };

 /* Set a task for when a packet completes */
@@ -102,7 +102,7 @@ void hpsb_free_packet(struct hpsb_packet *packet);
  */
 static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
 {
         return atomic_read(&host->generation);
 }

 /*
@@ -157,7 +157,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
  * from within a transmit packet routine.
  */
 void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                       int ackcode);

 /*
  * Hand over received packet to the core. The contents of data are expected to
@@ -171,7 +171,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
  * packet type.
  */
 void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
                           int write_acked);


 /*
@@ -197,20 +197,20 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
  * Block 15 (240-255) reserved for drivers under development, etc.
  */

 #define IEEE1394_MAJOR                    171

 #define IEEE1394_MINOR_BLOCK_RAW1394        0
 #define IEEE1394_MINOR_BLOCK_VIDEO1394      1
 #define IEEE1394_MINOR_BLOCK_DV1394         2
 #define IEEE1394_MINOR_BLOCK_AMDTP          3
 #define IEEE1394_MINOR_BLOCK_EXPERIMENTAL  15

 #define IEEE1394_CORE_DEV         MKDEV(IEEE1394_MAJOR, 0)
 #define IEEE1394_RAW1394_DEV      MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
 #define IEEE1394_VIDEO1394_DEV    MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
 #define IEEE1394_DV1394_DEV       MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
 #define IEEE1394_AMDTP_DEV        MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
 #define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)

 /* return the index (within a minor number block) of a file */
 static inline unsigned char ieee1394_file_to_instance(struct file *file)
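The minor-number layout above gives each sub-driver a block of 16 minors under major 171, so instance n of block b lives at minor b * 16 + n. A hedged sketch of the mapping and its inverse; instance_of() is illustrative and not the actual ieee1394_file_to_instance() body:

    #include <linux/kdev_t.h>
    #include <linux/fs.h>

    /* forward mapping: instance n within the raw1394 block */
    static inline dev_t raw1394_dev(unsigned int n)
    {
            return MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16 + n);
    }

    /* inverse: recover the index within a 16-minor block (assumption:
     * the low four bits of the minor select the instance) */
    static inline unsigned char instance_of(struct file *file)
    {
            return iminor(file->f_dentry->d_inode) % 16;
    }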
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 0aa876360f9b..3fe2f6c4a253 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -22,7 +22,7 @@
 #include "ieee1394_core.h"
 #include "highlevel.h"
 #include "nodemgr.h"
-
+#include "ieee1394_transactions.h"

 #define PREP_ASYNC_HEAD_ADDRESS(tc) \
         packet->tcode = tc; \
@@ -31,80 +31,82 @@
         packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
         packet->header[2] = addr & 0xffffffff

-
 static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
 {
         PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
         packet->header_size = 12;
         packet->data_size = 0;
         packet->expect_response = 1;
 }

-static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
+static void fill_async_readblock(struct hpsb_packet *packet, u64 addr,
+                                 int length)
 {
         PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
         packet->header[3] = length << 16;
         packet->header_size = 16;
         packet->data_size = 0;
         packet->expect_response = 1;
 }

-static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
+static void fill_async_writequad(struct hpsb_packet *packet, u64 addr,
+                                 quadlet_t data)
 {
         PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
         packet->header[3] = data;
         packet->header_size = 16;
         packet->data_size = 0;
         packet->expect_response = 1;
 }

-static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
+static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr,
+                                  int length)
 {
         PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
         packet->header[3] = length << 16;
         packet->header_size = 16;
         packet->expect_response = 1;
         packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
 }

 static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
                             int length)
 {
         PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
         packet->header[3] = (length << 16) | extcode;
         packet->header_size = 16;
         packet->data_size = length;
         packet->expect_response = 1;
 }

 static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
                             int tag, int sync)
 {
         packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
                 | (TCODE_ISO_DATA << 4) | sync;

         packet->header_size = 4;
         packet->data_size = length;
         packet->type = hpsb_iso;
         packet->tcode = TCODE_ISO_DATA;
 }

 static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
 {
         packet->header[0] = data;
         packet->header[1] = ~data;
         packet->header_size = 8;
         packet->data_size = 0;
         packet->expect_response = 0;
         packet->type = hpsb_raw;        /* No CRC added */
         packet->speed_code = IEEE1394_SPEED_100;        /* Force speed to be 100Mbps */
 }

 static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
                                      int channel, int tag, int sync)
 {
         packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
                 | (TCODE_STREAM_DATA << 4) | sync;

         packet->header_size = 4;
         packet->data_size = length;
@@ -171,99 +173,96 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
  */
 void hpsb_free_tlabel(struct hpsb_packet *packet)
 {
         unsigned long flags;
         struct hpsb_tlabel_pool *tp;

         tp = &packet->host->tpool[packet->node_id & NODE_MASK];

         BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);

         spin_lock_irqsave(&tp->lock, flags);
         BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
         spin_unlock_irqrestore(&tp->lock, flags);

         up(&tp->count);
 }

-
-
 int hpsb_packet_success(struct hpsb_packet *packet)
 {
         switch (packet->ack_code) {
         case ACK_PENDING:
                 switch ((packet->header[1] >> 12) & 0xf) {
                 case RCODE_COMPLETE:
                         return 0;
                 case RCODE_CONFLICT_ERROR:
                         return -EAGAIN;
                 case RCODE_DATA_ERROR:
                         return -EREMOTEIO;
                 case RCODE_TYPE_ERROR:
                         return -EACCES;
                 case RCODE_ADDRESS_ERROR:
                         return -EINVAL;
                 default:
                         HPSB_ERR("received reserved rcode %d from node %d",
                                  (packet->header[1] >> 12) & 0xf,
                                  packet->node_id);
                         return -EAGAIN;
                 }
                 HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__);

         case ACK_BUSY_X:
         case ACK_BUSY_A:
         case ACK_BUSY_B:
                 return -EBUSY;

         case ACK_TYPE_ERROR:
                 return -EACCES;

         case ACK_COMPLETE:
                 if (packet->tcode == TCODE_WRITEQ
                     || packet->tcode == TCODE_WRITEB) {
                         return 0;
                 } else {
                         HPSB_ERR("impossible ack_complete from node %d "
                                  "(tcode %d)", packet->node_id, packet->tcode);
                         return -EAGAIN;
                 }

-
         case ACK_DATA_ERROR:
                 if (packet->tcode == TCODE_WRITEB
                     || packet->tcode == TCODE_LOCK_REQUEST) {
                         return -EAGAIN;
                 } else {
                         HPSB_ERR("impossible ack_data_error from node %d "
                                  "(tcode %d)", packet->node_id, packet->tcode);
                         return -EAGAIN;
                 }

         case ACK_ADDRESS_ERROR:
                 return -EINVAL;

         case ACK_TARDY:
         case ACK_CONFLICT_ERROR:
         case ACKX_NONE:
         case ACKX_SEND_ERROR:
         case ACKX_ABORTED:
         case ACKX_TIMEOUT:
                 /* error while sending */
                 return -EAGAIN;

         default:
                 HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
                          packet->ack_code, packet->node_id, packet->tcode);
                 return -EAGAIN;
         }

         HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
 }

 struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
                                          u64 addr, size_t length)
 {
         struct hpsb_packet *packet;

         if (length == 0)
                 return NULL;
@@ -288,8 +287,9 @@ struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
         return packet;
 }

-struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
-                                           u64 addr, quadlet_t *buffer, size_t length)
+struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, nodeid_t node,
+                                          u64 addr, quadlet_t * buffer,
+                                          size_t length)
 {
         struct hpsb_packet *packet;

@@ -300,7 +300,7 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
         if (!packet)
                 return NULL;

-        if (length % 4) { /* zero padding bytes */
+        if (length % 4) {       /* zero padding bytes */
                 packet->data[length >> 2] = 0;
         }
         packet->host = host;
@@ -322,8 +322,9 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
         return packet;
 }

-struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, int length,
-                                           int channel, int tag, int sync)
+struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
+                                           int length, int channel, int tag,
+                                           int sync)
 {
         struct hpsb_packet *packet;

@@ -334,7 +335,7 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
         if (!packet)
                 return NULL;

-        if (length % 4) { /* zero padding bytes */
+        if (length % 4) {       /* zero padding bytes */
                 packet->data[length >> 2] = 0;
         }
         packet->host = host;
@@ -352,14 +353,15 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
 }

 struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
-                                         u64 addr, int extcode, quadlet_t *data,
-                                         quadlet_t arg)
+                                         u64 addr, int extcode,
+                                         quadlet_t * data, quadlet_t arg)
 {
         struct hpsb_packet *p;
         u32 length;

         p = hpsb_alloc_packet(8);
-        if (!p) return NULL;
+        if (!p)
+                return NULL;

         p->host = host;
         p->node_id = node;
@@ -388,15 +390,16 @@ struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
         return p;
 }

-struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
-                                           u64 addr, int extcode, octlet_t *data,
-                                           octlet_t arg)
+struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
+                                           nodeid_t node, u64 addr, int extcode,
+                                           octlet_t * data, octlet_t arg)
 {
         struct hpsb_packet *p;
         u32 length;

         p = hpsb_alloc_packet(16);
-        if (!p) return NULL;
+        if (!p)
+                return NULL;

         p->host = host;
         p->node_id = node;
@@ -429,18 +432,18 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node
         return p;
 }

-struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
-                                        quadlet_t data)
+struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data)
 {
         struct hpsb_packet *p;

         p = hpsb_alloc_packet(0);
-        if (!p) return NULL;
+        if (!p)
+                return NULL;

         p->host = host;
         fill_phy_packet(p, data);

         return p;
 }

 struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
@@ -450,7 +453,8 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
         struct hpsb_packet *p;

         p = hpsb_alloc_packet(length);
-        if (!p) return NULL;
+        if (!p)
+                return NULL;

         p->host = host;
         fill_iso_packet(p, length, channel, tag, sync);
@@ -466,47 +470,46 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
  */

 int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
-              u64 addr, quadlet_t *buffer, size_t length)
+              u64 addr, quadlet_t * buffer, size_t length)
 {
         struct hpsb_packet *packet;
         int retval = 0;

         if (length == 0)
                 return -EINVAL;

         BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet

         packet = hpsb_make_readpacket(host, node, addr, length);

         if (!packet) {
                 return -ENOMEM;
         }

         packet->generation = generation;
         retval = hpsb_send_packet_and_wait(packet);
         if (retval < 0)
                 goto hpsb_read_fail;

         retval = hpsb_packet_success(packet);

         if (retval == 0) {
                 if (length == 4) {
                         *buffer = packet->header[3];
                 } else {
                         memcpy(buffer, packet->data, length);
                 }
         }

-hpsb_read_fail:
+      hpsb_read_fail:
         hpsb_free_tlabel(packet);
         hpsb_free_packet(packet);

         return retval;
 }

-
 int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
-               u64 addr, quadlet_t *buffer, size_t length)
+               u64 addr, quadlet_t * buffer, size_t length)
 {
         struct hpsb_packet *packet;
         int retval;
@@ -514,62 +517,61 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
         if (length == 0)
                 return -EINVAL;

         BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet

-        packet = hpsb_make_writepacket (host, node, addr, buffer, length);
+        packet = hpsb_make_writepacket(host, node, addr, buffer, length);

         if (!packet)
                 return -ENOMEM;

         packet->generation = generation;
         retval = hpsb_send_packet_and_wait(packet);
         if (retval < 0)
                 goto hpsb_write_fail;

         retval = hpsb_packet_success(packet);

-hpsb_write_fail:
+      hpsb_write_fail:
         hpsb_free_tlabel(packet);
         hpsb_free_packet(packet);

         return retval;
 }

 #if 0

 int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
-              u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
+              u64 addr, int extcode, quadlet_t * data, quadlet_t arg)
 {
         struct hpsb_packet *packet;
         int retval = 0;

         BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet

         packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
         if (!packet)
                 return -ENOMEM;

         packet->generation = generation;
         retval = hpsb_send_packet_and_wait(packet);
         if (retval < 0)
                 goto hpsb_lock_fail;

         retval = hpsb_packet_success(packet);

         if (retval == 0) {
                 *data = packet->data[0];
         }

-hpsb_lock_fail:
+      hpsb_lock_fail:
         hpsb_free_tlabel(packet);
         hpsb_free_packet(packet);

         return retval;
 }

-
 int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
-                   quadlet_t *buffer, size_t length, u32 specifier_id,
+                   quadlet_t * buffer, size_t length, u32 specifier_id,
                    unsigned int version)
 {
         struct hpsb_packet *packet;
@@ -586,7 +588,8 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
                 return -ENOMEM;

         packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
-        packet->data[1] = cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
+        packet->data[1] =
+            cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));

         memcpy(&(packet->data[2]), buffer, length - 8);

@@ -601,4 +604,4 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
         return retval;
 }

-#endif /* 0 */
+#endif                          /* 0 */
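hpsb_read() and hpsb_write() above follow one pattern: build a packet, send it and wait, translate the ack/rcode through hpsb_packet_success(), then release the tlabel and the packet. Because busy acks, conflict rcodes and send errors all come back as -EAGAIN, callers can retry transparently; a hedged sketch (the helper name and retry count are illustrative):

    static int read_quadlet_retry(struct hpsb_host *host, nodeid_t node,
                                  unsigned int generation, u64 addr,
                                  quadlet_t *quad)
    {
            int i, ret = -EAGAIN;

            /* -EAGAIN covers ack_busy_*, conflict rcodes and send errors */
            for (i = 0; i < 3 && ret == -EAGAIN; i++)
                    ret = hpsb_read(host, node, generation, addr, quad, 4);
            return ret;
    }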
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index 615541b8b90f..f26680ebef7c 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -36,20 +36,22 @@ void hpsb_iso_shutdown(struct hpsb_iso *iso)
         kfree(iso);
 }

-static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
+static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
+                                             enum hpsb_iso_type type,
                                              unsigned int data_buf_size,
                                              unsigned int buf_packets,
-                                             int channel,
-                                             int dma_mode,
+                                             int channel, int dma_mode,
                                              int irq_interval,
-                                             void (*callback)(struct hpsb_iso*))
+                                             void (*callback) (struct hpsb_iso
+                                                               *))
 {
         struct hpsb_iso *iso;
         int dma_direction;

         /* make sure driver supports the ISO API */
         if (!host->driver->isoctl) {
-                printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n",
+                printk(KERN_INFO
+                       "ieee1394: host driver '%s' does not support the rawiso API\n",
                        host->driver->name);
                 return NULL;
         }
@@ -59,12 +61,13 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
         if (buf_packets < 2)
                 buf_packets = 2;

-        if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
-                dma_mode=HPSB_ISO_DMA_DEFAULT;
+        if ((dma_mode < HPSB_ISO_DMA_DEFAULT)
+            || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
+                dma_mode = HPSB_ISO_DMA_DEFAULT;

         if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
                 irq_interval = buf_packets / 4;
-        if (irq_interval == 0)     /* really interrupt for each packet*/
+        if (irq_interval == 0)  /* really interrupt for each packet */
                 irq_interval = 1;

         if (channel < -1 || channel >= 64)
@@ -76,7 +79,10 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i

         /* allocate and write the struct hpsb_iso */

-        iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL);
+        iso =
+            kmalloc(sizeof(*iso) +
+                    buf_packets * sizeof(struct hpsb_iso_packet_info),
+                    GFP_KERNEL);
         if (!iso)
                 return NULL;

@@ -111,17 +117,18 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
         iso->prebuffer = 0;

         /* allocate the packet buffer */
-        if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
+        if (dma_region_alloc
+            (&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
                 goto err;

         return iso;

-err:
+      err:
         hpsb_iso_shutdown(iso);
         return NULL;
 }

-int hpsb_iso_n_ready(struct hpsb_iso* iso)
+int hpsb_iso_n_ready(struct hpsb_iso *iso)
 {
         unsigned long flags;
         int val;
@@ -133,18 +140,19 @@ int hpsb_iso_n_ready(struct hpsb_iso* iso)
         return val;
 }

-
-struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
+struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
                                     unsigned int data_buf_size,
                                     unsigned int buf_packets,
                                     int channel,
                                     int speed,
                                     int irq_interval,
-                                    void (*callback)(struct hpsb_iso*))
+                                    void (*callback) (struct hpsb_iso *))
 {
         struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
                                                     data_buf_size, buf_packets,
-                                                    channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback);
+                                                    channel,
+                                                    HPSB_ISO_DMA_DEFAULT,
+                                                    irq_interval, callback);
         if (!iso)
                 return NULL;

@@ -157,22 +165,23 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
         iso->flags |= HPSB_ISO_DRIVER_INIT;
         return iso;

-err:
+      err:
         hpsb_iso_shutdown(iso);
         return NULL;
 }

-struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
+struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
                                     unsigned int data_buf_size,
                                     unsigned int buf_packets,
                                     int channel,
                                     int dma_mode,
                                     int irq_interval,
-                                    void (*callback)(struct hpsb_iso*))
+                                    void (*callback) (struct hpsb_iso *))
 {
         struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
                                                     data_buf_size, buf_packets,
-                                                    channel, dma_mode, irq_interval, callback);
+                                                    channel, dma_mode,
+                                                    irq_interval, callback);
         if (!iso)
                 return NULL;

@@ -183,7 +192,7 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
         iso->flags |= HPSB_ISO_DRIVER_INIT;
         return iso;

-err:
+      err:
         hpsb_iso_shutdown(iso);
         return NULL;
 }
@@ -197,16 +206,17 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)

 int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
 {
         if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
                 return -EINVAL;
         return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
 }

 int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
 {
         if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
                 return -EINVAL;
-        return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask);
+        return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK,
+                                         (unsigned long)&mask);
 }

 int hpsb_iso_recv_flush(struct hpsb_iso *iso)
@@ -283,7 +293,9 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)

         isoctl_args[2] = sync;

-        retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]);
+        retval =
+            iso->host->driver->isoctl(iso, RECV_START,
+                                      (unsigned long)&isoctl_args[0]);
         if (retval)
                 return retval;

@@ -296,7 +308,8 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)

 static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
                                      unsigned int offset, unsigned short len,
-                                     unsigned int *out_offset, unsigned short *out_len)
+                                     unsigned int *out_offset,
+                                     unsigned short *out_len)
 {
         if (offset >= iso->buf_size)
                 return -EFAULT;
@@ -316,8 +329,8 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
         return 0;
 }

-
-int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy)
+int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
+                               u8 tag, u8 sy)
 {
         struct hpsb_iso_packet_info *info;
         unsigned long flags;
@@ -334,7 +347,8 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
         info = &iso->infos[iso->first_packet];

         /* check for bogus offset/length */
-        if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len))
+        if (hpsb_iso_check_offset_len
+            (iso, offset, len, &info->offset, &info->len))
                 return -EFAULT;

         info->tag = tag;
@@ -342,13 +356,13 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag

         spin_lock_irqsave(&iso->lock, flags);

-        rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info);
+        rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info);
         if (rv)
                 goto out;

         /* increment cursors */
-        iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
-        iso->xmit_cycle = (iso->xmit_cycle+1) % 8000;
+        iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
+        iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000;
         iso->n_ready_packets--;

         if (iso->prebuffer != 0) {
@@ -359,7 +373,7 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
                 }
         }

-out:
+      out:
         spin_unlock_irqrestore(&iso->lock, flags);
         return rv;
 }
@@ -369,7 +383,9 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
         if (iso->type != HPSB_ISO_XMIT)
                 return -EINVAL;

-        return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets);
+        return wait_event_interruptible(iso->waitq,
+                                        hpsb_iso_n_ready(iso) ==
+                                        iso->buf_packets);
 }

 void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
@@ -396,7 +412,8 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
 }

 void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
-                              u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy)
+                              u16 total_len, u16 cycle, u8 channel, u8 tag,
+                              u8 sy)
 {
         unsigned long flags;
         spin_lock_irqsave(&iso->lock, flags);
@@ -416,7 +433,7 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
                 info->tag = tag;
                 info->sy = sy;

-                iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets;
+                iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
                 iso->n_ready_packets++;
         }

@@ -435,20 +452,21 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
         spin_lock_irqsave(&iso->lock, flags);
         for (i = 0; i < n_packets; i++) {
                 rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
-                                               (unsigned long) &iso->infos[iso->first_packet]);
+                                               (unsigned long)&iso->infos[iso->
+                                                                          first_packet]);
                 if (rv)
                         break;

-                iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
+                iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
                 iso->n_ready_packets--;

                 /* release memory from packets discarded when queue was full */
                 if (iso->n_ready_packets == 0) {        /* Release only after all prior packets handled */
                         if (iso->bytes_discarded != 0) {
                                 struct hpsb_iso_packet_info inf;
                                 inf.total_len = iso->bytes_discarded;
                                 iso->host->driver->isoctl(iso, RECV_RELEASE,
                                                           (unsigned long)&inf);
                                 iso->bytes_discarded = 0;
                         }
                 }
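The rawiso API reshaped above is used in two phases: initialise a context, then queue packets against offsets into the pre-allocated DMA buffer. A hedged transmit-side sketch, with illustrative sizes, channel and callback, and with the start/stop calls of the full API omitted:

    struct hpsb_iso *iso;

    iso = hpsb_iso_xmit_init(host, 65536, 64, channel,
                             IEEE1394_SPEED_400, -1 /* default irq rate */,
                             xmit_callback);
    if (!iso)
            return -ENOMEM;

    /* the payload was written into the buffer at offset 0 beforehand */
    if (hpsb_iso_xmit_queue_packet(iso, 0, 512, 0 /* tag */, 0 /* sy */))
            goto out;

    /* block until every queued packet has gone out on the wire */
    hpsb_iso_xmit_sync(iso);
out:
    hpsb_iso_shutdown(iso);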
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index f2453668acf5..082c7fd239f5 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -743,21 +743,20 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
                                   unsigned int generation)
 {
         struct hpsb_host *host = hi->host;
         struct node_entry *ne;
-
-        ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL);
-        if (!ne) return NULL;

-        memset(ne, 0, sizeof(struct node_entry));
+        ne = kzalloc(sizeof(*ne), GFP_KERNEL);
+        if (!ne)
+                return NULL;

         ne->tpool = &host->tpool[nodeid & NODE_MASK];

         ne->host = host;
         ne->nodeid = nodeid;
         ne->generation = generation;
         ne->needs_probe = 1;

         ne->guid = guid;
         ne->guid_vendor_id = (guid >> 40) & 0xffffff;
         ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id);
         ne->csr = csr;
@@ -787,7 +786,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
                (host->node_id == nodeid) ? "Host" : "Node",
                NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);

         return ne;
 }


@@ -872,12 +871,10 @@ static struct unit_directory *nodemgr_process_unit_directory
         struct csr1212_keyval *kv;
         u8 last_key_id = 0;

-        ud = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
+        ud = kzalloc(sizeof(*ud), GFP_KERNEL);
         if (!ud)
                 goto unit_directory_error;

-        memset (ud, 0, sizeof(struct unit_directory));
-
         ud->ne = ne;
         ud->ignore_driver = ignore_drivers;
         ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE;
@@ -937,10 +934,10 @@ static struct unit_directory *nodemgr_process_unit_directory
                         /* Logical Unit Number */
                         if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
                                 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
-                                        ud_child = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
+                                        ud_child = kmalloc(sizeof(*ud_child), GFP_KERNEL);
                                         if (!ud_child)
                                                 goto unit_directory_error;
-                                        memcpy(ud_child, ud, sizeof(struct unit_directory));
+                                        memcpy(ud_child, ud, sizeof(*ud_child));
                                         nodemgr_register_device(ne, ud_child, &ne->device);
                                         ud_child = NULL;

@@ -1200,7 +1197,7 @@ static void nodemgr_node_scan_one(struct host_info *hi,
         struct csr1212_csr *csr;
         struct nodemgr_csr_info *ci;

-        ci = kmalloc(sizeof(struct nodemgr_csr_info), GFP_KERNEL);
+        ci = kmalloc(sizeof(*ci), GFP_KERNEL);
         if (!ci)
                 return;

@@ -1410,14 +1407,28 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
         struct hpsb_host *host = hi->host;
         struct class *class = &nodemgr_ne_class;
         struct class_device *cdev;
+        struct node_entry *ne;

         /* Do some processing of the nodes we've probed. This pulls them
          * into the sysfs layer if needed, and can result in processing of
          * unit-directories, or just updating the node and it's
-         * unit-directories. */
+         * unit-directories.
+         *
+         * Run updates before probes. Usually, updates are time-critical
+         * while probes are time-consuming. (Well, those probes need some
+         * improvement...) */
+
         down_read(&class->subsys.rwsem);
-        list_for_each_entry(cdev, &class->children, node)
-                nodemgr_probe_ne(hi, container_of(cdev, struct node_entry, class_dev), generation);
+        list_for_each_entry(cdev, &class->children, node) {
+                ne = container_of(cdev, struct node_entry, class_dev);
+                if (!ne->needs_probe)
+                        nodemgr_probe_ne(hi, ne, generation);
+        }
+        list_for_each_entry(cdev, &class->children, node) {
+                ne = container_of(cdev, struct node_entry, class_dev);
+                if (ne->needs_probe)
+                        nodemgr_probe_ne(hi, ne, generation);
+        }
         up_read(&class->subsys.rwsem);


@@ -1448,7 +1459,8 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host)
         int ret = 1;

         packet = hpsb_make_phypacket(host,
-                        0x003c0000 | NODEID_TO_NODE(host->node_id) << 24);
+                        EXTPHYPACKET_TYPE_RESUME |
+                        NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT);
         if (packet) {
                 packet->no_waiter = 1;
                 packet->generation = get_hpsb_generation(host);
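The nodemgr conversions above are instances of one mechanical cleanup that recurs throughout this merge: kmalloc() followed by memset(..., 0, ...) collapses into kzalloc(), and sizeof(struct foo) becomes sizeof(*ptr) so the size automatically tracks the pointer's type:

    /* before */
    ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL);
    if (!ne)
            return NULL;
    memset(ne, 0, sizeof(struct node_entry));

    /* after: one call, zeroed allocation, size tied to the pointer */
    ne = kzalloc(sizeof(*ne), GFP_KERNEL);
    if (!ne)
            return NULL;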
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 3a2f0c02fd08..0b26616e16c3 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -151,24 +151,6 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
 }

 /*
- * Returns a node entry (which has its reference count incremented) or NULL if
- * the GUID in question is not known. Getting a valid entry does not mean that
- * the node with this GUID is currently accessible (might be powered down).
- */
-struct node_entry *hpsb_guid_get_entry(u64 guid);
-
-/* Same as above, but use the nodeid to get an node entry. This is not
- * fool-proof by itself, since the nodeid can change. */
-struct node_entry *hpsb_nodeid_get_entry(struct hpsb_host *host, nodeid_t nodeid);
-
-/*
- * If the entry refers to a local host, this function will return the pointer
- * to the hpsb_host structure. It will return NULL otherwise. Once you have
- * established it is a local host, you can use that knowledge from then on (the
- * GUID won't wander to an external node). */
-struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
-
-/*
  * This will fill in the given, pre-initialised hpsb_packet with the current
  * information from the node entry (host, node ID, generation number). It will
  * return false if the node owning the GUID is not accessible (and not modify the
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 4cf9b8f3e336..b6b96fa04d62 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -161,9 +161,6 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
 #define PRINT(level, fmt, args...) \
 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

-static char version[] __devinitdata =
-        "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
-
 /* Module Parameters */
 static int phys_dma = 1;
 module_param(phys_dma, int, 0644);
@@ -587,12 +584,13 @@ static void ohci_initialize(struct ti_ohci *ohci)
         sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
 #endif
         PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
-              "MMIO=[%lx-%lx] Max Packet=[%d]",
+              "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
               ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
               ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
               pci_resource_start(ohci->dev, 0),
               pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
-              ohci->max_packet_size);
+              ohci->max_packet_size,
+              ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

         /* Check all of our ports to make sure that if anything is
          * connected, we enable that port. */
@@ -2960,28 +2958,23 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2960 d->ctrlClear = 0; 2958 d->ctrlClear = 0;
2961 d->cmdPtr = 0; 2959 d->cmdPtr = 0;
2962 2960
2963 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC); 2961 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2964 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC); 2962 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2965 2963
2966 if (d->buf_cpu == NULL || d->buf_bus == NULL) { 2964 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967 PRINT(KERN_ERR, "Failed to allocate dma buffer"); 2965 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968 free_dma_rcv_ctx(d); 2966 free_dma_rcv_ctx(d);
2969 return -ENOMEM; 2967 return -ENOMEM;
2970 } 2968 }
2971 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2972 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2973 2969
2974 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*), 2970 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2975 GFP_ATOMIC); 2971 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2976 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2977 2972
2978 if (d->prg_cpu == NULL || d->prg_bus == NULL) { 2973 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2979 PRINT(KERN_ERR, "Failed to allocate dma prg"); 2974 PRINT(KERN_ERR, "Failed to allocate dma prg");
2980 free_dma_rcv_ctx(d); 2975 free_dma_rcv_ctx(d);
2981 return -ENOMEM; 2976 return -ENOMEM;
2982 } 2977 }
2983 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2984 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2985 2978
2986 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC); 2979 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2987 2980
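
Both conversions in this hunk are the stock kmalloc()-plus-memset() to kzalloc() cleanup: one call both allocates and zeroes, and sizeof(*ptr) keeps the element size tied to the pointer's type instead of a type name that can drift out of sync. The shape of the change, on a hypothetical array rather than this driver's fields:

	/* before: allocate, check, then zero by hand */
	buf = kmalloc(n * sizeof(quadlet_t *), GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, n * sizeof(quadlet_t *));

	/* after: one call; the size expression follows the pointer type */
	buf = kzalloc(n * sizeof(*buf), GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;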
@@ -3093,17 +3086,14 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3093 d->ctrlClear = 0; 3086 d->ctrlClear = 0;
3094 d->cmdPtr = 0; 3087 d->cmdPtr = 0;
3095 3088
3096 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*), 3089 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3097 GFP_KERNEL); 3090 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3098 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3099 3091
3100 if (d->prg_cpu == NULL || d->prg_bus == NULL) { 3092 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3101 PRINT(KERN_ERR, "Failed to allocate at dma prg"); 3093 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3102 free_dma_trm_ctx(d); 3094 free_dma_trm_ctx(d);
3103 return -ENOMEM; 3095 return -ENOMEM;
3104 } 3096 }
3105 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3106 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3107 3097
3108 len = sprintf(pool_name, "ohci1394_trm_prg"); 3098 len = sprintf(pool_name, "ohci1394_trm_prg");
3109 sprintf(pool_name+len, "%d", num_allocs); 3099 sprintf(pool_name+len, "%d", num_allocs);
@@ -3201,8 +3191,6 @@ static struct hpsb_host_driver ohci1394_driver = {
3201 .hw_csr_reg = ohci_hw_csr_reg, 3191 .hw_csr_reg = ohci_hw_csr_reg,
3202}; 3192};
3203 3193
3204
3205
3206/*********************************** 3194/***********************************
3207 * PCI Driver Interface functions * 3195 * PCI Driver Interface functions *
3208 ***********************************/ 3196 ***********************************/
@@ -3217,15 +3205,10 @@ do { \
3217static int __devinit ohci1394_pci_probe(struct pci_dev *dev, 3205static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3218 const struct pci_device_id *ent) 3206 const struct pci_device_id *ent)
3219{ 3207{
3220 static int version_printed = 0;
3221
3222 struct hpsb_host *host; 3208 struct hpsb_host *host;
3223 struct ti_ohci *ohci; /* shortcut to currently handled device */ 3209 struct ti_ohci *ohci; /* shortcut to currently handled device */
3224 unsigned long ohci_base; 3210 unsigned long ohci_base;
3225 3211
3226 if (version_printed++ == 0)
3227 PRINT_G(KERN_INFO, "%s", version);
3228
3229 if (pci_enable_device(dev)) 3212 if (pci_enable_device(dev))
3230 FAIL(-ENXIO, "Failed to enable OHCI hardware"); 3213 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3231 pci_set_master(dev); 3214 pci_set_master(dev);
@@ -3369,13 +3352,8 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3369 /* Determine the number of available IR and IT contexts. */ 3352 /* Determine the number of available IR and IT contexts. */
3370 ohci->nb_iso_rcv_ctx = 3353 ohci->nb_iso_rcv_ctx =
3371 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet); 3354 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3372 DBGMSG("%d iso receive contexts available",
3373 ohci->nb_iso_rcv_ctx);
3374
3375 ohci->nb_iso_xmit_ctx = 3355 ohci->nb_iso_xmit_ctx =
3376 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet); 3356 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3377 DBGMSG("%d iso transmit contexts available",
3378 ohci->nb_iso_xmit_ctx);
3379 3357
3380 /* Set the usage bits for non-existent contexts so they can't 3358 /* Set the usage bits for non-existent contexts so they can't
3381 * be allocated */ 3359 * be allocated */
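
get_nb_iso_ctx() itself is outside this hunk; in ohci1394.c it probes the context count by setting every bit in the given interrupt mask-set register and counting the bits the hardware latches. A sketch of that trick, assuming the driver's reg_write()/reg_read() helpers:

	static int get_nb_iso_ctx_sketch(struct ti_ohci *ohci, int reg)
	{
		int i, ctx = 0;
		u32 tmp;

		/* Try to set all 32 mask bits; bits belonging to
		 * unimplemented contexts read back as zero. */
		reg_write(ohci, reg, 0xffffffff);
		tmp = reg_read(ohci, reg);

		for (i = 0; i < 32; i++) {
			if (tmp & 1)
				ctx++;
			tmp >>= 1;
		}
		return ctx;
	}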
@@ -3606,8 +3584,6 @@ static struct pci_driver ohci1394_pci_driver = {
3606 .suspend = ohci1394_pci_suspend, 3584 .suspend = ohci1394_pci_suspend,
3607}; 3585};
3608 3586
3609
3610
3611/*********************************** 3587/***********************************
3612 * OHCI1394 Video Interface * 3588 * OHCI1394 Video Interface *
3613 ***********************************/ 3589 ***********************************/
@@ -3714,7 +3690,6 @@ EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3714EXPORT_SYMBOL(ohci1394_register_iso_tasklet); 3690EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3715EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet); 3691EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3716 3692
3717
3718/*********************************** 3693/***********************************
3719 * General module initialization * 3694 * General module initialization *
3720 ***********************************/ 3695 ***********************************/
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
index cc66c1cae250..7df0962144e3 100644
--- a/drivers/ieee1394/ohci1394.h
+++ b/drivers/ieee1394/ohci1394.h
@@ -219,8 +219,8 @@ struct ti_ohci {
219 219
220 int self_id_errors; 220 int self_id_errors;
221 221
222 /* Tasklets for iso receive and transmit, used by video1394, 222 /* Tasklets for iso receive and transmit, used by video1394
223 * amdtp and dv1394 */ 223 * and dv1394 */
224 224
225 struct list_head iso_tasklet_list; 225 struct list_head iso_tasklet_list;
226 spinlock_t iso_tasklet_list_lock; 226 spinlock_t iso_tasklet_list_lock;
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 6b1ab875333b..e2edc41e1b6f 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1435,7 +1435,7 @@ static int __devinit add_card(struct pci_dev *dev,
1435 struct i2c_algo_bit_data i2c_adapter_data; 1435 struct i2c_algo_bit_data i2c_adapter_data;
1436 1436
1437 error = -ENOMEM; 1437 error = -ENOMEM;
1438 i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL); 1438 i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
1439 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1439 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1440 1440
1441 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter)); 1441 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 24411e666b21..b05235639918 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -102,12 +102,9 @@ static struct pending_request *__alloc_pending_request(gfp_t flags)
102{ 102{
103 struct pending_request *req; 103 struct pending_request *req;
104 104
105 req = (struct pending_request *)kmalloc(sizeof(struct pending_request), 105 req = kzalloc(sizeof(*req), flags);
106 flags); 106 if (req)
107 if (req != NULL) {
108 memset(req, 0, sizeof(struct pending_request));
109 INIT_LIST_HEAD(&req->list); 107 INIT_LIST_HEAD(&req->list);
110 }
111 108
112 return req; 109 return req;
113} 110}
@@ -192,9 +189,9 @@ static void add_host(struct hpsb_host *host)
192 struct host_info *hi; 189 struct host_info *hi;
193 unsigned long flags; 190 unsigned long flags;
194 191
195 hi = (struct host_info *)kmalloc(sizeof(struct host_info), GFP_KERNEL); 192 hi = kmalloc(sizeof(*hi), GFP_KERNEL);
196 193
197 if (hi != NULL) { 194 if (hi) {
198 INIT_LIST_HEAD(&hi->list); 195 INIT_LIST_HEAD(&hi->list);
199 hi->host = host; 196 hi->host = host;
200 INIT_LIST_HEAD(&hi->file_info_list); 197 INIT_LIST_HEAD(&hi->file_info_list);
@@ -315,8 +312,8 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
315 break; 312 break;
316 313
317 if (!ibs) { 314 if (!ibs) {
318 ibs = kmalloc(sizeof(struct iso_block_store) 315 ibs = kmalloc(sizeof(*ibs) + length,
319 + length, SLAB_ATOMIC); 316 SLAB_ATOMIC);
320 if (!ibs) { 317 if (!ibs) {
321 kfree(req); 318 kfree(req);
322 break; 319 break;
@@ -376,8 +373,8 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
376 break; 373 break;
377 374
378 if (!ibs) { 375 if (!ibs) {
379 ibs = kmalloc(sizeof(struct iso_block_store) 376 ibs = kmalloc(sizeof(*ibs) + length,
380 + length, SLAB_ATOMIC); 377 SLAB_ATOMIC);
381 if (!ibs) { 378 if (!ibs) {
382 kfree(req); 379 kfree(req);
383 break; 380 break;
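
The reflowed allocation here (and in iso_receive() above) is the header-plus-payload idiom: sizeof(*ibs) + length grabs the struct and its variable-sized data in a single kmalloc(), so one kfree() later releases both. Illustratively, with made-up field names:

	struct blob {			/* hypothetical */
		size_t data_size;
		unsigned char data[0];	/* payload follows the header */
	};

	struct blob *b = kmalloc(sizeof(*b) + length, SLAB_ATOMIC);
	if (!b)
		return -ENOMEM;
	b->data_size = length;
	memcpy(b->data, payload, length);
	/* ... later, a single kfree(b) frees header and payload */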
@@ -502,10 +499,9 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
502 switch (req->req.type) { 499 switch (req->req.type) {
503 case RAW1394_REQ_LIST_CARDS: 500 case RAW1394_REQ_LIST_CARDS:
504 spin_lock_irqsave(&host_info_lock, flags); 501 spin_lock_irqsave(&host_info_lock, flags);
505 khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count, 502 khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
506 SLAB_ATOMIC);
507 503
508 if (khl != NULL) { 504 if (khl) {
509 req->req.misc = host_count; 505 req->req.misc = host_count;
510 req->data = (quadlet_t *) khl; 506 req->data = (quadlet_t *) khl;
511 507
@@ -517,7 +513,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
517 } 513 }
518 spin_unlock_irqrestore(&host_info_lock, flags); 514 spin_unlock_irqrestore(&host_info_lock, flags);
519 515
520 if (khl != NULL) { 516 if (khl) {
521 req->req.error = RAW1394_ERROR_NONE; 517 req->req.error = RAW1394_ERROR_NONE;
522 req->req.length = min(req->req.length, 518 req->req.length = min(req->req.length,
523 (u32) (sizeof 519 (u32) (sizeof
@@ -1647,13 +1643,13 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
1647 return (-EINVAL); 1643 return (-EINVAL);
1648 } 1644 }
1649 /* addr-list-entry for fileinfo */ 1645 /* addr-list-entry for fileinfo */
1650 addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL); 1646 addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
1651 if (!addr) { 1647 if (!addr) {
1652 req->req.length = 0; 1648 req->req.length = 0;
1653 return (-ENOMEM); 1649 return (-ENOMEM);
1654 } 1650 }
1655 /* allocation of addr_space_buffer */ 1651 /* allocation of addr_space_buffer */
1656 addr->addr_space_buffer = (u8 *) vmalloc(req->req.length); 1652 addr->addr_space_buffer = vmalloc(req->req.length);
1657 if (!(addr->addr_space_buffer)) { 1653 if (!(addr->addr_space_buffer)) {
1658 kfree(addr); 1654 kfree(addr);
1659 req->req.length = 0; 1655 req->req.length = 0;
@@ -2122,8 +2118,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2122 return -ENOMEM; 2118 return -ENOMEM;
2123 } 2119 }
2124 2120
2125 cache->filled_head = 2121 cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
2126 kmalloc(sizeof(struct csr1212_cache_region), GFP_KERNEL);
2127 if (!cache->filled_head) { 2122 if (!cache->filled_head) {
2128 csr1212_release_keyval(fi->csr1212_dirs[dr]); 2123 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2129 fi->csr1212_dirs[dr] = NULL; 2124 fi->csr1212_dirs[dr] = NULL;
@@ -2136,7 +2131,6 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2136 req->req.length)) { 2131 req->req.length)) {
2137 csr1212_release_keyval(fi->csr1212_dirs[dr]); 2132 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2138 fi->csr1212_dirs[dr] = NULL; 2133 fi->csr1212_dirs[dr] = NULL;
2139 CSR1212_FREE(cache);
2140 ret = -EFAULT; 2134 ret = -EFAULT;
2141 } else { 2135 } else {
2142 cache->len = req->req.length; 2136 cache->len = req->req.length;
@@ -2172,7 +2166,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2172 } 2166 }
2173 } 2167 }
2174 kfree(cache->filled_head); 2168 kfree(cache->filled_head);
2175 kfree(cache); 2169 CSR1212_FREE(cache);
2176 2170
2177 if (ret >= 0) { 2171 if (ret >= 0) {
2178 /* we have to free the request, because we queue no response, 2172 /* we have to free the request, because we queue no response,
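
The two CSR1212_FREE() changes are a pairing fix: the cache comes from the csr1212 allocator, so the single teardown at the end of the function now releases it with CSR1212_FREE() instead of a bare kfree(), and the -EFAULT branch drops its own free and simply falls through to that teardown. Reduced to a sketch (constructor and field names assumed from the csr1212 layer, not guaranteed here):

	cache = csr1212_rom_cache_malloc(0, req->req.length);
	if (!cache)
		return -ENOMEM;

	if (copy_from_user(cache->data, ubuf, req->req.length))
		ret = -EFAULT;		/* no free here: fall through */
	else
		ret = use_cache(cache);	/* hypothetical */

	kfree(cache->filled_head);	/* plain kmalloc()ed: plain kfree() */
	CSR1212_FREE(cache);		/* csr1212-allocated: csr1212 free */
	return ret;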
@@ -2488,8 +2482,8 @@ static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
2488 2482
2489 /* ensure user-supplied buffer is accessible and big enough */ 2483 /* ensure user-supplied buffer is accessible and big enough */
2490 if (!access_ok(VERIFY_WRITE, upackets.infos, 2484 if (!access_ok(VERIFY_WRITE, upackets.infos,
2491 upackets.n_packets * 2485 upackets.n_packets *
2492 sizeof(struct raw1394_iso_packet_info))) 2486 sizeof(struct raw1394_iso_packet_info)))
2493 return -EFAULT; 2487 return -EFAULT;
2494 2488
2495 /* copy the packet_infos out */ 2489 /* copy the packet_infos out */
@@ -2522,8 +2516,8 @@ static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
2522 2516
2523 /* ensure user-supplied buffer is accessible and big enough */ 2517 /* ensure user-supplied buffer is accessible and big enough */
2524 if (!access_ok(VERIFY_READ, upackets.infos, 2518 if (!access_ok(VERIFY_READ, upackets.infos,
2525 upackets.n_packets * 2519 upackets.n_packets *
2526 sizeof(struct raw1394_iso_packet_info))) 2520 sizeof(struct raw1394_iso_packet_info)))
2527 return -EFAULT; 2521 return -EFAULT;
2528 2522
2529 /* copy the infos structs in and queue the packets */ 2523 /* copy the infos structs in and queue the packets */
@@ -2684,11 +2678,10 @@ static int raw1394_open(struct inode *inode, struct file *file)
2684{ 2678{
2685 struct file_info *fi; 2679 struct file_info *fi;
2686 2680
2687 fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL); 2681 fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
2688 if (fi == NULL) 2682 if (!fi)
2689 return -ENOMEM; 2683 return -ENOMEM;
2690 2684
2691 memset(fi, 0, sizeof(struct file_info));
2692 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */ 2685 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2693 2686
2694 INIT_LIST_HEAD(&fi->list); 2687 INIT_LIST_HEAD(&fi->list);
@@ -2748,8 +2741,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
2748 list) { 2741 list) {
2749 entry = fi_hlp->addr_list.next; 2742 entry = fi_hlp->addr_list.next;
2750 while (entry != &(fi_hlp->addr_list)) { 2743 while (entry != &(fi_hlp->addr_list)) {
2751 arm_addr = list_entry(entry, 2744 arm_addr = list_entry(entry, struct
2752 struct
2753 arm_addr, 2745 arm_addr,
2754 addr_list); 2746 addr_list);
2755 if (arm_addr->start == 2747 if (arm_addr->start ==
@@ -2912,16 +2904,17 @@ static int __init init_raw1394(void)
2912 2904
2913 hpsb_register_highlevel(&raw1394_highlevel); 2905 hpsb_register_highlevel(&raw1394_highlevel);
2914 2906
2915 if (IS_ERR(class_device_create(hpsb_protocol_class, NULL, MKDEV( 2907 if (IS_ERR
2916 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), 2908 (class_device_create
2917 NULL, RAW1394_DEVICE_NAME))) { 2909 (hpsb_protocol_class, NULL,
2910 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL,
2911 RAW1394_DEVICE_NAME))) {
2918 ret = -EFAULT; 2912 ret = -EFAULT;
2919 goto out_unreg; 2913 goto out_unreg;
2920 } 2914 }
2921 2915
2922 devfs_mk_cdev(MKDEV( 2916 devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
2923 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), 2917 S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
2924 S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
2925 2918
2926 cdev_init(&raw1394_cdev, &raw1394_fops); 2919 cdev_init(&raw1394_cdev, &raw1394_fops);
2927 raw1394_cdev.owner = THIS_MODULE; 2920 raw1394_cdev.owner = THIS_MODULE;
@@ -2943,20 +2936,22 @@ static int __init init_raw1394(void)
2943 2936
2944 goto out; 2937 goto out;
2945 2938
2946out_dev: 2939 out_dev:
2947 devfs_remove(RAW1394_DEVICE_NAME); 2940 devfs_remove(RAW1394_DEVICE_NAME);
2948 class_device_destroy(hpsb_protocol_class, 2941 class_device_destroy(hpsb_protocol_class,
2949 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)); 2942 MKDEV(IEEE1394_MAJOR,
2950out_unreg: 2943 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
2944 out_unreg:
2951 hpsb_unregister_highlevel(&raw1394_highlevel); 2945 hpsb_unregister_highlevel(&raw1394_highlevel);
2952out: 2946 out:
2953 return ret; 2947 return ret;
2954} 2948}
2955 2949
2956static void __exit cleanup_raw1394(void) 2950static void __exit cleanup_raw1394(void)
2957{ 2951{
2958 class_device_destroy(hpsb_protocol_class, 2952 class_device_destroy(hpsb_protocol_class,
2959 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)); 2953 MKDEV(IEEE1394_MAJOR,
2954 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
2960 cdev_del(&raw1394_cdev); 2955 cdev_del(&raw1394_cdev);
2961 devfs_remove(RAW1394_DEVICE_NAME); 2956 devfs_remove(RAW1394_DEVICE_NAME);
2962 hpsb_unregister_highlevel(&raw1394_highlevel); 2957 hpsb_unregister_highlevel(&raw1394_highlevel);
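
init_raw1394() keeps the usual reverse-order unwind: acquire the highlevel handler, class device, devfs node and cdev in order, and let each error label release everything acquired before the failing step. The skeleton, with hypothetical acquire/release pairs:

	static int acquire_a(void), acquire_b(void), acquire_c(void);
	static void release_a(void), release_b(void);

	static int __init init_sketch(void)
	{
		int ret;

		ret = acquire_a();	/* hypothetical, 0 on success */
		if (ret)
			goto out;
		ret = acquire_b();
		if (ret)
			goto out_a;
		ret = acquire_c();
		if (ret)
			goto out_b;
		return 0;

	out_b:				/* undo in reverse order */
		release_b();
	out_a:
		release_a();
	out:
		return ret;
	}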
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f7e18ccc5c0a..18d7eda38851 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -80,9 +80,6 @@
80#include "ieee1394_transactions.h" 80#include "ieee1394_transactions.h"
81#include "sbp2.h" 81#include "sbp2.h"
82 82
83static char version[] __devinitdata =
84 "$Rev: 1306 $ Ben Collins <bcollins@debian.org>";
85
86/* 83/*
87 * Module load parameter definitions 84 * Module load parameter definitions
88 */ 85 */
@@ -151,18 +148,15 @@ static int force_inquiry_hack;
151module_param(force_inquiry_hack, int, 0444); 148module_param(force_inquiry_hack, int, 0444);
152MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); 149MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
153 150
154
155/* 151/*
156 * Export information about protocols/devices supported by this driver. 152 * Export information about protocols/devices supported by this driver.
157 */ 153 */
158static struct ieee1394_device_id sbp2_id_table[] = { 154static struct ieee1394_device_id sbp2_id_table[] = {
159 { 155 {
160 .match_flags =IEEE1394_MATCH_SPECIFIER_ID | 156 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
161 IEEE1394_MATCH_VERSION, 157 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
162 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, 158 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
163 .version = SBP2_SW_VERSION_ENTRY & 0xffffff 159 {}
164 },
165 { }
166}; 160};
167 161
168MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); 162MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
@@ -221,7 +215,6 @@ static u32 global_outstanding_dmas = 0;
221 215
222#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args) 216#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
223 217
224
225/* 218/*
226 * Globals 219 * Globals
227 */ 220 */
@@ -254,8 +247,8 @@ static struct hpsb_address_ops sbp2_ops = {
254 247
255#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 248#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
256static struct hpsb_address_ops sbp2_physdma_ops = { 249static struct hpsb_address_ops sbp2_physdma_ops = {
257 .read = sbp2_handle_physdma_read, 250 .read = sbp2_handle_physdma_read,
258 .write = sbp2_handle_physdma_write, 251 .write = sbp2_handle_physdma_write,
259}; 252};
260#endif 253#endif
261 254
@@ -287,7 +280,6 @@ static u32 sbp2_broken_inquiry_list[] = {
287 * General utility functions 280 * General utility functions
288 **************************************/ 281 **************************************/
289 282
290
291#ifndef __BIG_ENDIAN 283#ifndef __BIG_ENDIAN
292/* 284/*
293 * Converts a buffer from be32 to cpu byte ordering. Length is in bytes. 285 * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
@@ -324,7 +316,8 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
324/* 316/*
325 * Debug packet dump routine. Length is in bytes. 317 * Debug packet dump routine. Length is in bytes.
326 */ 318 */
327static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr) 319static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
320 u32 dump_phys_addr)
328{ 321{
329 int i; 322 int i;
330 unsigned char *dump = buffer; 323 unsigned char *dump = buffer;
@@ -345,7 +338,7 @@ static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32
345 printk(" "); 338 printk(" ");
346 if ((i & 0xf) == 0) 339 if ((i & 0xf) == 0)
347 printk("\n "); 340 printk("\n ");
348 printk("%02x ", (int) dump[i]); 341 printk("%02x ", (int)dump[i]);
349 } 342 }
350 printk("\n"); 343 printk("\n");
351 344
@@ -364,9 +357,9 @@ static int sbp2util_down_timeout(atomic_t *done, int timeout)
364 357
365 for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) { 358 for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) {
366 if (msleep_interruptible(100)) /* 100ms */ 359 if (msleep_interruptible(100)) /* 100ms */
367 return(1); 360 return 1;
368 } 361 }
369 return ((i > 0) ? 0:1); 362 return (i > 0) ? 0 : 1;
370} 363}
371 364
372/* Frees an allocated packet */ 365/* Frees an allocated packet */
@@ -380,21 +373,22 @@ static void sbp2_free_packet(struct hpsb_packet *packet)
380 * subaction and returns immediately. Can be used from interrupts. 373 * subaction and returns immediately. Can be used from interrupts.
381 */ 374 */
382static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr, 375static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
383 quadlet_t *buffer, size_t length) 376 quadlet_t *buffer, size_t length)
384{ 377{
385 struct hpsb_packet *packet; 378 struct hpsb_packet *packet;
386 379
387 packet = hpsb_make_writepacket(ne->host, ne->nodeid, 380 packet = hpsb_make_writepacket(ne->host, ne->nodeid,
388 addr, buffer, length); 381 addr, buffer, length);
389 if (!packet) 382 if (!packet)
390 return -ENOMEM; 383 return -ENOMEM;
391 384
392 hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet, 385 hpsb_set_packet_complete_task(packet,
386 (void (*)(void *))sbp2_free_packet,
393 packet); 387 packet);
394 388
395 hpsb_node_fill_packet(ne, packet); 389 hpsb_node_fill_packet(ne, packet);
396 390
397 if (hpsb_send_packet(packet) < 0) { 391 if (hpsb_send_packet(packet) < 0) {
398 sbp2_free_packet(packet); 392 sbp2_free_packet(packet);
399 return -EIO; 393 return -EIO;
400 } 394 }
@@ -417,22 +411,22 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
417 411
418 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 412 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
419 for (i = 0; i < orbs; i++) { 413 for (i = 0; i < orbs; i++) {
420 command = (struct sbp2_command_info *) 414 command = kzalloc(sizeof(*command), GFP_ATOMIC);
421 kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC);
422 if (!command) { 415 if (!command) {
423 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 416 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock,
424 return(-ENOMEM); 417 flags);
418 return -ENOMEM;
425 } 419 }
426 memset(command, '\0', sizeof(struct sbp2_command_info));
427 command->command_orb_dma = 420 command->command_orb_dma =
428 pci_map_single (hi->host->pdev, &command->command_orb, 421 pci_map_single(hi->host->pdev, &command->command_orb,
429 sizeof(struct sbp2_command_orb), 422 sizeof(struct sbp2_command_orb),
430 PCI_DMA_BIDIRECTIONAL); 423 PCI_DMA_BIDIRECTIONAL);
431 SBP2_DMA_ALLOC("single command orb DMA"); 424 SBP2_DMA_ALLOC("single command orb DMA");
432 command->sge_dma = 425 command->sge_dma =
433 pci_map_single (hi->host->pdev, &command->scatter_gather_element, 426 pci_map_single(hi->host->pdev,
434 sizeof(command->scatter_gather_element), 427 &command->scatter_gather_element,
435 PCI_DMA_BIDIRECTIONAL); 428 sizeof(command->scatter_gather_element),
429 PCI_DMA_BIDIRECTIONAL);
436 SBP2_DMA_ALLOC("scatter_gather_element"); 430 SBP2_DMA_ALLOC("scatter_gather_element");
437 INIT_LIST_HEAD(&command->list); 431 INIT_LIST_HEAD(&command->list);
438 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed); 432 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
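
Each pooled command gets streaming DMA mappings via pci_map_single(): the call returns a bus address the OHCI controller can DMA through while the driver keeps using the kernel-virtual pointer, and the pool teardown (not in this hunk) must unmap with the same size and direction. The minimal pair, with a placeholder pdev:

	dma_addr_t dma;

	dma = pci_map_single(pdev, &command->command_orb,
			     sizeof(struct sbp2_command_orb),
			     PCI_DMA_BIDIRECTIONAL);
	/* ... controller reads and writes the ORB through 'dma' ... */
	pci_unmap_single(pdev, dma,
			 sizeof(struct sbp2_command_orb),
			 PCI_DMA_BIDIRECTIONAL);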
@@ -488,7 +482,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
488 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { 482 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
489 if (command->command_orb_dma == orb) { 483 if (command->command_orb_dma == orb) {
490 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 484 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
491 return (command); 485 return command;
492 } 486 }
493 } 487 }
494 } 488 }
@@ -496,7 +490,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
496 490
497 SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb); 491 SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
498 492
499 return(NULL); 493 return NULL;
500} 494}
501 495
502/* 496/*
@@ -513,12 +507,12 @@ static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_
513 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { 507 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
514 if (command->Current_SCpnt == SCpnt) { 508 if (command->Current_SCpnt == SCpnt) {
515 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 509 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
516 return (command); 510 return command;
517 } 511 }
518 } 512 }
519 } 513 }
520 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 514 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
521 return(NULL); 515 return NULL;
522} 516}
523 517
524/* 518/*
@@ -545,7 +539,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
545 SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!"); 539 SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
546 } 540 }
547 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 541 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
548 return (command); 542 return command;
549} 543}
550 544
551/* Free our DMAs */ 545/* Free our DMAs */
@@ -587,7 +581,8 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command)
587/* 581/*
588 * This function moves a command to the completed orb list. 582 * This function moves a command to the completed orb list.
589 */ 583 */
590static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command) 584static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
585 struct sbp2_command_info *command)
591{ 586{
592 unsigned long flags; 587 unsigned long flags;
593 588
@@ -606,8 +601,6 @@ static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_
606 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo; 601 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
607} 602}
608 603
609
610
611/********************************************* 604/*********************************************
612 * IEEE-1394 core driver stack related section 605 * IEEE-1394 core driver stack related section
613 *********************************************/ 606 *********************************************/
@@ -627,14 +620,14 @@ static int sbp2_probe(struct device *dev)
627 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY) 620 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
628 return -ENODEV; 621 return -ENODEV;
629 622
630 scsi_id = sbp2_alloc_device(ud); 623 scsi_id = sbp2_alloc_device(ud);
631 624
632 if (!scsi_id) 625 if (!scsi_id)
633 return -ENOMEM; 626 return -ENOMEM;
634 627
635 sbp2_parse_unit_directory(scsi_id, ud); 628 sbp2_parse_unit_directory(scsi_id, ud);
636 629
637 return sbp2_start_device(scsi_id); 630 return sbp2_start_device(scsi_id);
638} 631}
639 632
640static int sbp2_remove(struct device *dev) 633static int sbp2_remove(struct device *dev)
@@ -719,12 +712,11 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
719 712
720 SBP2_DEBUG("sbp2_alloc_device"); 713 SBP2_DEBUG("sbp2_alloc_device");
721 714
722 scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL); 715 scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL);
723 if (!scsi_id) { 716 if (!scsi_id) {
724 SBP2_ERR("failed to create scsi_id"); 717 SBP2_ERR("failed to create scsi_id");
725 goto failed_alloc; 718 goto failed_alloc;
726 } 719 }
727 memset(scsi_id, 0, sizeof(*scsi_id));
728 720
729 scsi_id->ne = ud->ne; 721 scsi_id->ne = ud->ne;
730 scsi_id->ud = ud; 722 scsi_id->ud = ud;
@@ -735,7 +727,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
735 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); 727 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
736 INIT_LIST_HEAD(&scsi_id->scsi_list); 728 INIT_LIST_HEAD(&scsi_id->scsi_list);
737 spin_lock_init(&scsi_id->sbp2_command_orb_lock); 729 spin_lock_init(&scsi_id->sbp2_command_orb_lock);
738 scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED; 730 scsi_id->sbp2_lun = 0;
739 731
740 ud->device.driver_data = scsi_id; 732 ud->device.driver_data = scsi_id;
741 733
@@ -769,7 +761,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
769 761
770 /* Register our host with the SCSI stack. */ 762 /* Register our host with the SCSI stack. */
771 scsi_host = scsi_host_alloc(&scsi_driver_template, 763 scsi_host = scsi_host_alloc(&scsi_driver_template,
772 sizeof (unsigned long)); 764 sizeof(unsigned long));
773 if (!scsi_host) { 765 if (!scsi_host) {
774 SBP2_ERR("failed to register scsi host"); 766 SBP2_ERR("failed to register scsi host");
775 goto failed_alloc; 767 goto failed_alloc;
@@ -790,7 +782,6 @@ failed_alloc:
790 return NULL; 782 return NULL;
791} 783}
792 784
793
794static void sbp2_host_reset(struct hpsb_host *host) 785static void sbp2_host_reset(struct hpsb_host *host)
795{ 786{
796 struct sbp2scsi_host_info *hi; 787 struct sbp2scsi_host_info *hi;
@@ -804,7 +795,6 @@ static void sbp2_host_reset(struct hpsb_host *host)
804 } 795 }
805} 796}
806 797
807
808/* 798/*
809 * This function is where we first pull the node unique ids, and then 799 * This function is where we first pull the node unique ids, and then
810 * allocate memory and register a SBP-2 device. 800 * allocate memory and register a SBP-2 device.
@@ -818,7 +808,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
818 808
819 /* Login FIFO DMA */ 809 /* Login FIFO DMA */
820 scsi_id->login_response = 810 scsi_id->login_response =
821 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response), 811 pci_alloc_consistent(hi->host->pdev,
812 sizeof(struct sbp2_login_response),
822 &scsi_id->login_response_dma); 813 &scsi_id->login_response_dma);
823 if (!scsi_id->login_response) 814 if (!scsi_id->login_response)
824 goto alloc_fail; 815 goto alloc_fail;
@@ -826,7 +817,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
826 817
827 /* Query logins ORB DMA */ 818 /* Query logins ORB DMA */
828 scsi_id->query_logins_orb = 819 scsi_id->query_logins_orb =
829 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb), 820 pci_alloc_consistent(hi->host->pdev,
821 sizeof(struct sbp2_query_logins_orb),
830 &scsi_id->query_logins_orb_dma); 822 &scsi_id->query_logins_orb_dma);
831 if (!scsi_id->query_logins_orb) 823 if (!scsi_id->query_logins_orb)
832 goto alloc_fail; 824 goto alloc_fail;
@@ -834,7 +826,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
834 826
835 /* Query logins response DMA */ 827 /* Query logins response DMA */
836 scsi_id->query_logins_response = 828 scsi_id->query_logins_response =
837 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response), 829 pci_alloc_consistent(hi->host->pdev,
830 sizeof(struct sbp2_query_logins_response),
838 &scsi_id->query_logins_response_dma); 831 &scsi_id->query_logins_response_dma);
839 if (!scsi_id->query_logins_response) 832 if (!scsi_id->query_logins_response)
840 goto alloc_fail; 833 goto alloc_fail;
@@ -842,7 +835,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
842 835
843 /* Reconnect ORB DMA */ 836 /* Reconnect ORB DMA */
844 scsi_id->reconnect_orb = 837 scsi_id->reconnect_orb =
845 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb), 838 pci_alloc_consistent(hi->host->pdev,
839 sizeof(struct sbp2_reconnect_orb),
846 &scsi_id->reconnect_orb_dma); 840 &scsi_id->reconnect_orb_dma);
847 if (!scsi_id->reconnect_orb) 841 if (!scsi_id->reconnect_orb)
848 goto alloc_fail; 842 goto alloc_fail;
@@ -850,7 +844,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
850 844
851 /* Logout ORB DMA */ 845 /* Logout ORB DMA */
852 scsi_id->logout_orb = 846 scsi_id->logout_orb =
853 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb), 847 pci_alloc_consistent(hi->host->pdev,
848 sizeof(struct sbp2_logout_orb),
854 &scsi_id->logout_orb_dma); 849 &scsi_id->logout_orb_dma);
855 if (!scsi_id->logout_orb) 850 if (!scsi_id->logout_orb)
856 goto alloc_fail; 851 goto alloc_fail;
@@ -858,58 +853,11 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
858 853
859 /* Login ORB DMA */ 854 /* Login ORB DMA */
860 scsi_id->login_orb = 855 scsi_id->login_orb =
861 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb), 856 pci_alloc_consistent(hi->host->pdev,
857 sizeof(struct sbp2_login_orb),
862 &scsi_id->login_orb_dma); 858 &scsi_id->login_orb_dma);
863 if (!scsi_id->login_orb) { 859 if (!scsi_id->login_orb)
864alloc_fail: 860 goto alloc_fail;
865 if (scsi_id->query_logins_response) {
866 pci_free_consistent(hi->host->pdev,
867 sizeof(struct sbp2_query_logins_response),
868 scsi_id->query_logins_response,
869 scsi_id->query_logins_response_dma);
870 SBP2_DMA_FREE("query logins response DMA");
871 }
872
873 if (scsi_id->query_logins_orb) {
874 pci_free_consistent(hi->host->pdev,
875 sizeof(struct sbp2_query_logins_orb),
876 scsi_id->query_logins_orb,
877 scsi_id->query_logins_orb_dma);
878 SBP2_DMA_FREE("query logins ORB DMA");
879 }
880
881 if (scsi_id->logout_orb) {
882 pci_free_consistent(hi->host->pdev,
883 sizeof(struct sbp2_logout_orb),
884 scsi_id->logout_orb,
885 scsi_id->logout_orb_dma);
886 SBP2_DMA_FREE("logout ORB DMA");
887 }
888
889 if (scsi_id->reconnect_orb) {
890 pci_free_consistent(hi->host->pdev,
891 sizeof(struct sbp2_reconnect_orb),
892 scsi_id->reconnect_orb,
893 scsi_id->reconnect_orb_dma);
894 SBP2_DMA_FREE("reconnect ORB DMA");
895 }
896
897 if (scsi_id->login_response) {
898 pci_free_consistent(hi->host->pdev,
899 sizeof(struct sbp2_login_response),
900 scsi_id->login_response,
901 scsi_id->login_response_dma);
902 SBP2_DMA_FREE("login FIFO DMA");
903 }
904
905 list_del(&scsi_id->scsi_list);
906
907 kfree(scsi_id);
908
909 SBP2_ERR ("Could not allocate memory for scsi_id");
910
911 return -ENOMEM;
912 }
913 SBP2_DMA_ALLOC("consistent DMA region for login ORB"); 861 SBP2_DMA_ALLOC("consistent DMA region for login ORB");
914 862
915 SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id); 863 SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
@@ -935,7 +883,7 @@ alloc_fail:
935 sbp2_remove_device(scsi_id); 883 sbp2_remove_device(scsi_id);
936 return -EINTR; 884 return -EINTR;
937 } 885 }
938 886
939 /* 887 /*
940 * Login to the sbp-2 device 888 * Login to the sbp-2 device
941 */ 889 */
@@ -964,10 +912,17 @@ alloc_fail:
964 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0); 912 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
965 if (error) { 913 if (error) {
966 SBP2_ERR("scsi_add_device failed"); 914 SBP2_ERR("scsi_add_device failed");
915 sbp2_logout_device(scsi_id);
916 sbp2_remove_device(scsi_id);
967 return error; 917 return error;
968 } 918 }
969 919
970 return 0; 920 return 0;
921
922alloc_fail:
923 SBP2_ERR("Could not allocate memory for scsi_id");
924 sbp2_remove_device(scsi_id);
925 return -ENOMEM;
971} 926}
972 927
973/* 928/*
@@ -1054,51 +1009,44 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
1054 * This function deals with physical dma write requests (for adapters that do not support 1009 * This function deals with physical dma write requests (for adapters that do not support
1055 * physical dma in hardware). Mostly just here for debugging... 1010 * physical dma in hardware). Mostly just here for debugging...
1056 */ 1011 */
1057static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data, 1012static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
1058 u64 addr, size_t length, u16 flags) 1013 int destid, quadlet_t *data, u64 addr,
1014 size_t length, u16 flags)
1059{ 1015{
1060 1016
1061 /* 1017 /*
1062 * Manually put the data in the right place. 1018 * Manually put the data in the right place.
1063 */ 1019 */
1064 memcpy(bus_to_virt((u32)addr), data, length); 1020 memcpy(bus_to_virt((u32) addr), data, length);
1065 sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr); 1021 sbp2util_packet_dump(data, length, "sbp2 phys dma write by device",
1066 return(RCODE_COMPLETE); 1022 (u32) addr);
1023 return RCODE_COMPLETE;
1067} 1024}
1068 1025
1069/* 1026/*
1070 * This function deals with physical dma read requests (for adapters that do not support 1027 * This function deals with physical dma read requests (for adapters that do not support
1071 * physical dma in hardware). Mostly just here for debugging... 1028 * physical dma in hardware). Mostly just here for debugging...
1072 */ 1029 */
1073static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data, 1030static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1074 u64 addr, size_t length, u16 flags) 1031 quadlet_t *data, u64 addr, size_t length,
1032 u16 flags)
1075{ 1033{
1076 1034
1077 /* 1035 /*
1078 * Grab data from memory and send a read response. 1036 * Grab data from memory and send a read response.
1079 */ 1037 */
1080 memcpy(data, bus_to_virt((u32)addr), length); 1038 memcpy(data, bus_to_virt((u32) addr), length);
1081 sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr); 1039 sbp2util_packet_dump(data, length, "sbp2 phys dma read by device",
1082 return(RCODE_COMPLETE); 1040 (u32) addr);
1041 return RCODE_COMPLETE;
1083} 1042}
1084#endif 1043#endif
1085 1044
1086
1087/************************************** 1045/**************************************
1088 * SBP-2 protocol related section 1046 * SBP-2 protocol related section
1089 **************************************/ 1047 **************************************/
1090 1048
1091/* 1049/*
1092 * This function determines if we should convert scsi commands for a particular sbp2 device type
1093 */
1094static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
1095{
1096 return (((device_type == TYPE_DISK) ||
1097 (device_type == TYPE_RBC) ||
1098 (device_type == TYPE_ROM)) ? 1:0);
1099}
1100
1101/*
1102 * This function queries the device for the maximum concurrent logins it 1050 * This function queries the device for the maximum concurrent logins it
1103 * supports. 1051 * supports.
1104 */ 1052 */
@@ -1120,11 +1068,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1120 1068
1121 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST); 1069 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1122 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1); 1070 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1123 if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { 1071 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
1124 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
1125 SBP2_DEBUG("sbp2_query_logins: set lun to %d",
1126 ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
1127 }
1128 SBP2_DEBUG("sbp2_query_logins: lun_misc initialized"); 1072 SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
1129 1073
1130 scsi_id->query_logins_orb->reserved_resp_length = 1074 scsi_id->query_logins_orb->reserved_resp_length =
@@ -1161,12 +1105,12 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1161 1105
1162 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) { 1106 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
1163 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1107 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1164 return(-EIO); 1108 return -EIO;
1165 } 1109 }
1166 1110
1167 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) { 1111 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
1168 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1112 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1169 return(-EIO); 1113 return -EIO;
1170 } 1114 }
1171 1115
1172 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || 1116 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
@@ -1174,7 +1118,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1174 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { 1118 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1175 1119
1176 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1120 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1177 return(-EIO); 1121 return -EIO;
1178 } 1122 }
1179 1123
1180 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response)); 1124 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
@@ -1191,7 +1135,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1191 SBP2_DEBUG("Number of active logins: %d", active_logins); 1135 SBP2_DEBUG("Number of active logins: %d", active_logins);
1192 1136
1193 if (active_logins >= max_logins) { 1137 if (active_logins >= max_logins) {
1194 return(-EIO); 1138 return -EIO;
1195 } 1139 }
1196 1140
1197 return 0; 1141 return 0;
@@ -1210,13 +1154,13 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1210 1154
1211 if (!scsi_id->login_orb) { 1155 if (!scsi_id->login_orb) {
1212 SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!"); 1156 SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
1213 return(-EIO); 1157 return -EIO;
1214 } 1158 }
1215 1159
1216 if (!exclusive_login) { 1160 if (!exclusive_login) {
1217 if (sbp2_query_logins(scsi_id)) { 1161 if (sbp2_query_logins(scsi_id)) {
1218 SBP2_INFO("Device does not support any more concurrent logins"); 1162 SBP2_INFO("Device does not support any more concurrent logins");
1219 return(-EIO); 1163 return -EIO;
1220 } 1164 }
1221 } 1165 }
1222 1166
@@ -1233,12 +1177,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1233 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */ 1177 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
1234 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */ 1178 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
1235 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */ 1179 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
1236 /* Set the lun if we were able to pull it from the device's unit directory */ 1180 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
1237 if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
1238 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
1239 SBP2_DEBUG("sbp2_query_logins: set lun to %d",
1240 ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
1241 }
1242 SBP2_DEBUG("sbp2_login_device: lun_misc initialized"); 1181 SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
1243 1182
1244 scsi_id->login_orb->passwd_resp_lengths = 1183 scsi_id->login_orb->passwd_resp_lengths =
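
Keeping scsi_id->sbp2_lun pre-encoded with ORB_SET_LUN() is what lets this hunk (and the query-logins one above) OR the value in unconditionally: LUN 0 encodes to 0, so the old uninitialised sentinel and its if-block disappear. The ORB_SET_*() macros are mask/shift field packers from sbp2.h; illustratively (the exact masks belong to the header, not to this sketch):

	#define EX_ORB_SET_LUN(v)	((v) & 0xffff)
	#define EX_ORB_SET_FUNCTION(v)	(((v) & 0xf) << 16)
	#define EX_ORB_SET_NOTIFY(v)	(((v) & 0x1) << 31)

	orb->lun_misc  = EX_ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
	orb->lun_misc |= EX_ORB_SET_NOTIFY(1);
	orb->lun_misc |= scsi_id->sbp2_lun;  /* already ORB_SET_LUN()-encoded */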
@@ -1288,7 +1227,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1288 */ 1227 */
1289 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) { 1228 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
1290 SBP2_ERR("Error logging into SBP-2 device - login timed-out"); 1229 SBP2_ERR("Error logging into SBP-2 device - login timed-out");
1291 return(-EIO); 1230 return -EIO;
1292 } 1231 }
1293 1232
1294 /* 1233 /*
@@ -1296,7 +1235,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1296 */ 1235 */
1297 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { 1236 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
1298 SBP2_ERR("Error logging into SBP-2 device - login timed-out"); 1237 SBP2_ERR("Error logging into SBP-2 device - login timed-out");
1299 return(-EIO); 1238 return -EIO;
1300 } 1239 }
1301 1240
1302 /* 1241 /*
@@ -1307,7 +1246,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1307 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { 1246 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1308 1247
1309 SBP2_ERR("Error logging into SBP-2 device - login failed"); 1248 SBP2_ERR("Error logging into SBP-2 device - login failed");
1310 return(-EIO); 1249 return -EIO;
1311 } 1250 }
1312 1251
1313 /* 1252 /*
@@ -1331,7 +1270,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1331 1270
1332 SBP2_INFO("Logged into SBP-2 device"); 1271 SBP2_INFO("Logged into SBP-2 device");
1333 1272
1334 return(0); 1273 return 0;
1335 1274
1336} 1275}
1337 1276
@@ -1385,8 +1324,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1385 atomic_set(&scsi_id->sbp2_login_complete, 0); 1324 atomic_set(&scsi_id->sbp2_login_complete, 0);
1386 1325
1387 error = hpsb_node_write(scsi_id->ne, 1326 error = hpsb_node_write(scsi_id->ne,
1388 scsi_id->sbp2_management_agent_addr, 1327 scsi_id->sbp2_management_agent_addr, data, 8);
1389 data, 8);
1390 if (error) 1328 if (error)
1391 return error; 1329 return error;
1392 1330
@@ -1396,7 +1334,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1396 1334
1397 SBP2_INFO("Logged out of SBP-2 device"); 1335 SBP2_INFO("Logged out of SBP-2 device");
1398 1336
1399 return(0); 1337 return 0;
1400 1338
1401} 1339}
1402 1340
@@ -1456,8 +1394,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1456 atomic_set(&scsi_id->sbp2_login_complete, 0); 1394 atomic_set(&scsi_id->sbp2_login_complete, 0);
1457 1395
1458 error = hpsb_node_write(scsi_id->ne, 1396 error = hpsb_node_write(scsi_id->ne,
1459 scsi_id->sbp2_management_agent_addr, 1397 scsi_id->sbp2_management_agent_addr, data, 8);
1460 data, 8);
1461 if (error) 1398 if (error)
1462 return error; 1399 return error;
1463 1400
@@ -1466,7 +1403,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1466 */ 1403 */
1467 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) { 1404 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
1468 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); 1405 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
1469 return(-EIO); 1406 return -EIO;
1470 } 1407 }
1471 1408
1472 /* 1409 /*
@@ -1474,7 +1411,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1474 */ 1411 */
1475 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { 1412 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
1476 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); 1413 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
1477 return(-EIO); 1414 return -EIO;
1478 } 1415 }
1479 1416
1480 /* 1417 /*
@@ -1485,12 +1422,12 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1485 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { 1422 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1486 1423
1487 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed"); 1424 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
1488 return(-EIO); 1425 return -EIO;
1489 } 1426 }
1490 1427
1491 HPSB_DEBUG("Reconnected to SBP-2 device"); 1428 HPSB_DEBUG("Reconnected to SBP-2 device");
1492 1429
1493 return(0); 1430 return 0;
1494 1431
1495} 1432}
1496 1433
@@ -1513,10 +1450,9 @@ static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
1513 SBP2_ERR("sbp2_set_busy_timeout error"); 1450 SBP2_ERR("sbp2_set_busy_timeout error");
1514 } 1451 }
1515 1452
1516 return(0); 1453 return 0;
1517} 1454}
1518 1455
1519
1520/* 1456/*
1521 * This function is called to parse sbp2 device's config rom unit 1457 * This function is called to parse sbp2 device's config rom unit
1522 * directory. Used to determine things like sbp2 management agent offset, 1458 * directory. Used to determine things like sbp2 management agent offset,
@@ -1529,7 +1465,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1529 struct csr1212_dentry *dentry; 1465 struct csr1212_dentry *dentry;
1530 u64 management_agent_addr; 1466 u64 management_agent_addr;
1531 u32 command_set_spec_id, command_set, unit_characteristics, 1467 u32 command_set_spec_id, command_set, unit_characteristics,
1532 firmware_revision, workarounds; 1468 firmware_revision, workarounds;
1533 int i; 1469 int i;
1534 1470
1535 SBP2_DEBUG("sbp2_parse_unit_directory"); 1471 SBP2_DEBUG("sbp2_parse_unit_directory");
@@ -1547,13 +1483,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1547 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) { 1483 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
1548 /* Save off the management agent address */ 1484 /* Save off the management agent address */
1549 management_agent_addr = 1485 management_agent_addr =
1550 CSR1212_REGISTER_SPACE_BASE + 1486 CSR1212_REGISTER_SPACE_BASE +
1551 (kv->value.csr_offset << 2); 1487 (kv->value.csr_offset << 2);
1552 1488
1553 SBP2_DEBUG("sbp2_management_agent_addr = %x", 1489 SBP2_DEBUG("sbp2_management_agent_addr = %x",
1554 (unsigned int) management_agent_addr); 1490 (unsigned int)management_agent_addr);
1555 } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { 1491 } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1556 scsi_id->sbp2_device_type_and_lun = kv->value.immediate; 1492 scsi_id->sbp2_lun =
1493 ORB_SET_LUN(kv->value.immediate);
1557 } 1494 }
1558 break; 1495 break;
1559 1496
@@ -1561,14 +1498,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1561 /* Command spec organization */ 1498 /* Command spec organization */
1562 command_set_spec_id = kv->value.immediate; 1499 command_set_spec_id = kv->value.immediate;
1563 SBP2_DEBUG("sbp2_command_set_spec_id = %x", 1500 SBP2_DEBUG("sbp2_command_set_spec_id = %x",
1564 (unsigned int) command_set_spec_id); 1501 (unsigned int)command_set_spec_id);
1565 break; 1502 break;
1566 1503
1567 case SBP2_COMMAND_SET_KEY: 1504 case SBP2_COMMAND_SET_KEY:
1568 /* Command set used by sbp2 device */ 1505 /* Command set used by sbp2 device */
1569 command_set = kv->value.immediate; 1506 command_set = kv->value.immediate;
1570 SBP2_DEBUG("sbp2_command_set = %x", 1507 SBP2_DEBUG("sbp2_command_set = %x",
1571 (unsigned int) command_set); 1508 (unsigned int)command_set);
1572 break; 1509 break;
1573 1510
1574 case SBP2_UNIT_CHARACTERISTICS_KEY: 1511 case SBP2_UNIT_CHARACTERISTICS_KEY:
@@ -1578,7 +1515,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1578 */ 1515 */
1579 unit_characteristics = kv->value.immediate; 1516 unit_characteristics = kv->value.immediate;
1580 SBP2_DEBUG("sbp2_unit_characteristics = %x", 1517 SBP2_DEBUG("sbp2_unit_characteristics = %x",
1581 (unsigned int) unit_characteristics); 1518 (unsigned int)unit_characteristics);
1582 break; 1519 break;
1583 1520
1584 case SBP2_FIRMWARE_REVISION_KEY: 1521 case SBP2_FIRMWARE_REVISION_KEY:
@@ -1586,9 +1523,10 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1586 firmware_revision = kv->value.immediate; 1523 firmware_revision = kv->value.immediate;
1587 if (force_inquiry_hack) 1524 if (force_inquiry_hack)
1588 SBP2_INFO("sbp2_firmware_revision = %x", 1525 SBP2_INFO("sbp2_firmware_revision = %x",
1589 (unsigned int) firmware_revision); 1526 (unsigned int)firmware_revision);
1590 else SBP2_DEBUG("sbp2_firmware_revision = %x", 1527 else
1591 (unsigned int) firmware_revision); 1528 SBP2_DEBUG("sbp2_firmware_revision = %x",
1529 (unsigned int)firmware_revision);
1592 break; 1530 break;
1593 1531
1594 default: 1532 default:
@@ -1646,7 +1584,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1646 scsi_id->sbp2_firmware_revision = firmware_revision; 1584 scsi_id->sbp2_firmware_revision = firmware_revision;
1647 scsi_id->workarounds = workarounds; 1585 scsi_id->workarounds = workarounds;
1648 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) 1586 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1649 scsi_id->sbp2_device_type_and_lun = ud->lun; 1587 scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun);
1650 } 1588 }
1651} 1589}
1652 1590
@@ -1666,8 +1604,9 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
1666 SBP2_DEBUG("sbp2_max_speed_and_size"); 1604 SBP2_DEBUG("sbp2_max_speed_and_size");
1667 1605
1668 /* Initial setting comes from the hosts speed map */ 1606 /* Initial setting comes from the hosts speed map */
1669 scsi_id->speed_code = hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 1607 scsi_id->speed_code =
1670 + NODEID_TO_NODE(scsi_id->ne->nodeid)]; 1608 hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 +
1609 NODEID_TO_NODE(scsi_id->ne->nodeid)];
1671 1610
1672 /* Bump down our speed if the user requested it */ 1611 /* Bump down our speed if the user requested it */
1673 if (scsi_id->speed_code > max_speed) { 1612 if (scsi_id->speed_code > max_speed) {
@@ -1678,15 +1617,16 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
1678 1617
1679 /* Payload size is the lesser of what our speed supports and what 1618 /* Payload size is the lesser of what our speed supports and what
1680 * our host supports. */ 1619 * our host supports. */
1681 scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code], 1620 scsi_id->max_payload_size =
1682 (u8)(hi->host->csr.max_rec - 1)); 1621 min(sbp2_speedto_max_payload[scsi_id->speed_code],
1622 (u8) (hi->host->csr.max_rec - 1));
1683 1623
1684 HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]", 1624 HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1685 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid), 1625 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
1686 hpsb_speedto_str[scsi_id->speed_code], 1626 hpsb_speedto_str[scsi_id->speed_code],
1687 1 << ((u32)scsi_id->max_payload_size + 2)); 1627 1 << ((u32) scsi_id->max_payload_size + 2));
1688 1628
1689 return(0); 1629 return 0;
1690} 1630}
1691 1631
1692/* 1632/*
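
The payload math at the tail of this hunk follows the 1394 size encoding: payload code p means 2^(p+2) bytes, which is exactly the 1 << (max_payload_size + 2) in the debug print, and the speed map is a flattened 64x64 table indexed by local node * 64 + remote node. A worked check with an illustrative code value:

	u8  code  = 7;				/* illustrative payload code */
	u32 bytes = 1 << ((u32) code + 2);	/* 7 -> 512 bytes */

	u8 speed = host->speed_map[NODEID_TO_NODE(host->node_id) * 64 +
				   NODEID_TO_NODE(remote_nodeid)];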
@@ -1721,30 +1661,187 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
1721 */ 1661 */
1722 scsi_id->last_orb = NULL; 1662 scsi_id->last_orb = NULL;
1723 1663
1724 return(0); 1664 return 0;
1665}
1666
1667static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1668 struct sbp2scsi_host_info *hi,
1669 struct sbp2_command_info *command,
1670 unsigned int scsi_use_sg,
1671 struct scatterlist *sgpnt,
1672 u32 orb_direction,
1673 enum dma_data_direction dma_dir)
1674{
1675 command->dma_dir = dma_dir;
1676 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1677 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1678
1679 /* Special case if only one element (and less than 64KB in size) */
1680 if ((scsi_use_sg == 1) &&
1681 (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1682
1683 SBP2_DEBUG("Only one s/g element");
1684 command->dma_size = sgpnt[0].length;
1685 command->dma_type = CMD_DMA_PAGE;
1686 command->cmd_dma = pci_map_page(hi->host->pdev,
1687 sgpnt[0].page,
1688 sgpnt[0].offset,
1689 command->dma_size,
1690 command->dma_dir);
1691 SBP2_DMA_ALLOC("single page scatter element");
1692
1693 orb->data_descriptor_lo = command->cmd_dma;
1694 orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
1695
1696 } else {
1697 struct sbp2_unrestricted_page_table *sg_element =
1698 &command->scatter_gather_element[0];
1699 u32 sg_count, sg_len;
1700 dma_addr_t sg_addr;
1701 int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
1702 dma_dir);
1703
1704 SBP2_DMA_ALLOC("scatter list");
1705
1706 command->dma_size = scsi_use_sg;
1707 command->sge_buffer = sgpnt;
1708
1709 /* use page tables (s/g) */
1710 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1711 orb->data_descriptor_lo = command->sge_dma;
1712
1713 /*
1714 * Loop through and fill out our sbp-2 page tables
1715 * (and split up anything too large)
1716 */
1717 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1718 sg_len = sg_dma_len(sgpnt);
1719 sg_addr = sg_dma_address(sgpnt);
1720 while (sg_len) {
1721 sg_element[sg_count].segment_base_lo = sg_addr;
1722 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1723 sg_element[sg_count].length_segment_base_hi =
1724 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1725 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1726 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1727 } else {
1728 sg_element[sg_count].length_segment_base_hi =
1729 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1730 sg_len = 0;
1731 }
1732 sg_count++;
1733 }
1734 }
1735
1736 /* Number of page table (s/g) elements */
1737 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1738
1739 sbp2util_packet_dump(sg_element,
1740 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1741 "sbp2 s/g list", command->sge_dma);
1742
1743 /* Byte swap page tables if necessary */
1744 sbp2util_cpu_to_be32_buffer(sg_element,
1745 (sizeof(struct sbp2_unrestricted_page_table)) *
1746 sg_count);
1747 }
1748}
1749
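
Both prep helpers split any DMA segment larger than SBP2_MAX_SG_ELEMENT_LENGTH (0xf000 bytes) into multiple page-table entries. A minimal user-space sketch of that loop; the constant is taken from sbp2.h, everything else here is fabricated:

#include <stdio.h>
#include <stdint.h>

#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000

/* One DMA segment of sg_len bytes becomes ceil(sg_len / 0xf000) entries.
 * The real driver writes length_segment_base_hi/lo pairs; here we just
 * print the base and length of each chunk. */
static unsigned int split_segment(uint32_t sg_addr, uint32_t sg_len)
{
	unsigned int count = 0;

	while (sg_len) {
		uint32_t chunk = sg_len > SBP2_MAX_SG_ELEMENT_LENGTH
			       ? SBP2_MAX_SG_ELEMENT_LENGTH : sg_len;
		printf("entry %u: base=0x%08x len=0x%x\n",
		       count, (unsigned)sg_addr, (unsigned)chunk);
		sg_addr += chunk;
		sg_len  -= chunk;
		count++;
	}
	return count;
}

int main(void)
{
	/* a 128 KiB buffer needs three entries: 0xf000 + 0xf000 + 0x2000 */
	printf("entries: %u\n", split_segment(0x10000000, 0x20000));
	return 0;
}
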
1750static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1751 struct sbp2scsi_host_info *hi,
1752 struct sbp2_command_info *command,
1753 struct scatterlist *sgpnt,
1754 u32 orb_direction,
1755 unsigned int scsi_request_bufflen,
1756 void *scsi_request_buffer,
1757 enum dma_data_direction dma_dir)
1758{
1759 command->dma_dir = dma_dir;
1760 command->dma_size = scsi_request_bufflen;
1761 command->dma_type = CMD_DMA_SINGLE;
1762 command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
1763 command->dma_size, command->dma_dir);
1764 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1765 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1766
1767 SBP2_DMA_ALLOC("single bulk");
1768
1769 /*
1770 * Handle case where we get a command w/o s/g enabled (but
1771 * check for transfers larger than 64K)
1772 */
1773 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1774
1775 orb->data_descriptor_lo = command->cmd_dma;
1776 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1777
1778 } else {
1779 struct sbp2_unrestricted_page_table *sg_element =
1780 &command->scatter_gather_element[0];
1781 u32 sg_count, sg_len;
1782 dma_addr_t sg_addr;
1783
1784 /*
1785 * Need to turn this into page tables, since the
1786 * buffer is too large.
1787 */
1788 orb->data_descriptor_lo = command->sge_dma;
1789
1790 /* Use page tables (s/g) */
1791 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1792
1793 /*
1794 * fill out our sbp-2 page tables (and split up
1795 * the large buffer)
1796 */
1797 sg_count = 0;
1798 sg_len = scsi_request_bufflen;
1799 sg_addr = command->cmd_dma;
1800 while (sg_len) {
1801 sg_element[sg_count].segment_base_lo = sg_addr;
1802 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1803 sg_element[sg_count].length_segment_base_hi =
1804 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1805 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1806 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1807 } else {
1808 sg_element[sg_count].length_segment_base_hi =
1809 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1810 sg_len = 0;
1811 }
1812 sg_count++;
1813 }
1814
1815 /* Number of page table (s/g) elements */
1816 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1817
1818 sbp2util_packet_dump(sg_element,
1819 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1820 "sbp2 s/g list", command->sge_dma);
1821
1822 /* Byte swap page tables if necessary */
1823 sbp2util_cpu_to_be32_buffer(sg_element,
1824 (sizeof(struct sbp2_unrestricted_page_table)) *
1825 sg_count);
1826 }
1725} 1827}
1726 1828
1727/* 1829/*
1728 * This function is called to create the actual command orb and s/g list 1830 * This function is called to create the actual command orb and s/g list
1729 * out of the scsi command itself. 1831 * out of the scsi command itself.
1730 */ 1832 */
1731static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, 1833static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1732 struct sbp2_command_info *command, 1834 struct sbp2_command_info *command,
1733 unchar *scsi_cmd, 1835 unchar *scsi_cmd,
1734 unsigned int scsi_use_sg, 1836 unsigned int scsi_use_sg,
1735 unsigned int scsi_request_bufflen, 1837 unsigned int scsi_request_bufflen,
1736 void *scsi_request_buffer, 1838 void *scsi_request_buffer,
1737 enum dma_data_direction dma_dir) 1839 enum dma_data_direction dma_dir)
1738
1739{ 1840{
1740 struct sbp2scsi_host_info *hi = scsi_id->hi; 1841 struct sbp2scsi_host_info *hi = scsi_id->hi;
1741 struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer; 1842 struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
1742 struct sbp2_command_orb *command_orb = &command->command_orb; 1843 struct sbp2_command_orb *command_orb = &command->command_orb;
1743 struct sbp2_unrestricted_page_table *scatter_gather_element = 1844 u32 orb_direction;
1744 &command->scatter_gather_element[0];
1745 u32 sg_count, sg_len, orb_direction;
1746 dma_addr_t sg_addr;
1747 int i;
1748 1845
1749 /* 1846 /*
1750 * Set-up our command ORB.. 1847 * Set-up our command ORB..
@@ -1758,222 +1855,42 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1758 command_orb->next_ORB_lo = 0x0; 1855 command_orb->next_ORB_lo = 0x0;
1759 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size); 1856 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
1760 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code); 1857 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
1761 command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */ 1858 command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
1762 1859
1763 /* 1860 if (dma_dir == DMA_NONE)
1764 * Get the direction of the transfer. If the direction is unknown, then use our 1861 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1765 * goofy table as a back-up. 1862 else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
1766 */ 1863 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1767 switch (dma_dir) { 1864 else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
1768 case DMA_NONE: 1865 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1769 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; 1866 else {
1770 break; 1867 SBP2_WARN("Falling back to DMA_NONE");
1771 case DMA_TO_DEVICE: 1868 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1772 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1773 break;
1774 case DMA_FROM_DEVICE:
1775 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1776 break;
1777 case DMA_BIDIRECTIONAL:
1778 default:
1779 SBP2_ERR("SCSI data transfer direction not specified. "
1780 "Update the SBP2 direction table in sbp2.h if "
1781 "necessary for your application");
1782 __scsi_print_command(scsi_cmd);
1783 orb_direction = sbp2scsi_direction_table[*scsi_cmd];
1784 break;
1785 } 1869 }
1786 1870
1787 /* 1871 /* Set-up our pagetable stuff */
1788 * Set-up our pagetable stuff... unfortunately, this has become
1789 * messier than I'd like. Need to clean this up a bit. ;-)
1790 */
1791 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) { 1872 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1792
1793 SBP2_DEBUG("No data transfer"); 1873 SBP2_DEBUG("No data transfer");
1794
1795 /*
1796 * Handle no data transfer
1797 */
1798 command_orb->data_descriptor_hi = 0x0; 1874 command_orb->data_descriptor_hi = 0x0;
1799 command_orb->data_descriptor_lo = 0x0; 1875 command_orb->data_descriptor_lo = 0x0;
1800 command_orb->misc |= ORB_SET_DIRECTION(1); 1876 command_orb->misc |= ORB_SET_DIRECTION(1);
1801
1802 } else if (scsi_use_sg) { 1877 } else if (scsi_use_sg) {
1803
1804 SBP2_DEBUG("Use scatter/gather"); 1878 SBP2_DEBUG("Use scatter/gather");
1805 1879 sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg,
1806 /* 1880 sgpnt, orb_direction, dma_dir);
1807 * Special case if only one element (and less than 64KB in size)
1808 */
1809 if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1810
1811 SBP2_DEBUG("Only one s/g element");
1812 command->dma_dir = dma_dir;
1813 command->dma_size = sgpnt[0].length;
1814 command->dma_type = CMD_DMA_PAGE;
1815 command->cmd_dma = pci_map_page(hi->host->pdev,
1816 sgpnt[0].page,
1817 sgpnt[0].offset,
1818 command->dma_size,
1819 command->dma_dir);
1820 SBP2_DMA_ALLOC("single page scatter element");
1821
1822 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1823 command_orb->data_descriptor_lo = command->cmd_dma;
1824 command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
1825 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1826
1827 } else {
1828 int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
1829 SBP2_DMA_ALLOC("scatter list");
1830
1831 command->dma_size = scsi_use_sg;
1832 command->dma_dir = dma_dir;
1833 command->sge_buffer = sgpnt;
1834
1835 /* use page tables (s/g) */
1836 command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1837 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1838 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1839 command_orb->data_descriptor_lo = command->sge_dma;
1840
1841 /*
1842 * Loop through and fill out our sbp-2 page tables
1843 * (and split up anything too large)
1844 */
1845 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1846 sg_len = sg_dma_len(sgpnt);
1847 sg_addr = sg_dma_address(sgpnt);
1848 while (sg_len) {
1849 scatter_gather_element[sg_count].segment_base_lo = sg_addr;
1850 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1851 scatter_gather_element[sg_count].length_segment_base_hi =
1852 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1853 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1854 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1855 } else {
1856 scatter_gather_element[sg_count].length_segment_base_hi =
1857 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1858 sg_len = 0;
1859 }
1860 sg_count++;
1861 }
1862 }
1863
1864 /* Number of page table (s/g) elements */
1865 command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1866
1867 sbp2util_packet_dump(scatter_gather_element,
1868 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1869 "sbp2 s/g list", command->sge_dma);
1870
1871 /*
1872 * Byte swap page tables if necessary
1873 */
1874 sbp2util_cpu_to_be32_buffer(scatter_gather_element,
1875 (sizeof(struct sbp2_unrestricted_page_table)) *
1876 sg_count);
1877
1878 }
1879
1880 } else { 1881 } else {
1881
1882 SBP2_DEBUG("No scatter/gather"); 1882 SBP2_DEBUG("No scatter/gather");
1883 1883 sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt,
1884 command->dma_dir = dma_dir; 1884 orb_direction, scsi_request_bufflen,
1885 command->dma_size = scsi_request_bufflen; 1885 scsi_request_buffer, dma_dir);
1886 command->dma_type = CMD_DMA_SINGLE;
1887 command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
1888 command->dma_size,
1889 command->dma_dir);
1890 SBP2_DMA_ALLOC("single bulk");
1891
1892 /*
1893 * Handle case where we get a command w/o s/g enabled (but
1894 * check for transfers larger than 64K)
1895 */
1896 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1897
1898 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1899 command_orb->data_descriptor_lo = command->cmd_dma;
1900 command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1901 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1902
1903 /*
1904 * Sanity, in case our direction table is not
1905 * up-to-date
1906 */
1907 if (!scsi_request_bufflen) {
1908 command_orb->data_descriptor_hi = 0x0;
1909 command_orb->data_descriptor_lo = 0x0;
1910 command_orb->misc |= ORB_SET_DIRECTION(1);
1911 }
1912
1913 } else {
1914 /*
1915 * Need to turn this into page tables, since the
1916 * buffer is too large.
1917 */
1918 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1919 command_orb->data_descriptor_lo = command->sge_dma;
1920
1921 /* Use page tables (s/g) */
1922 command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1923 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1924
1925 /*
1926 * fill out our sbp-2 page tables (and split up
1927 * the large buffer)
1928 */
1929 sg_count = 0;
1930 sg_len = scsi_request_bufflen;
1931 sg_addr = command->cmd_dma;
1932 while (sg_len) {
1933 scatter_gather_element[sg_count].segment_base_lo = sg_addr;
1934 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1935 scatter_gather_element[sg_count].length_segment_base_hi =
1936 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1937 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1938 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1939 } else {
1940 scatter_gather_element[sg_count].length_segment_base_hi =
1941 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1942 sg_len = 0;
1943 }
1944 sg_count++;
1945 }
1946
1947 /* Number of page table (s/g) elements */
1948 command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1949
1950 sbp2util_packet_dump(scatter_gather_element,
1951 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1952 "sbp2 s/g list", command->sge_dma);
1953
1954 /*
1955 * Byte swap page tables if necessary
1956 */
1957 sbp2util_cpu_to_be32_buffer(scatter_gather_element,
1958 (sizeof(struct sbp2_unrestricted_page_table)) *
1959 sg_count);
1960
1961 }
1962
1963 } 1886 }
1964 1887
1965 /* 1888 /* Byte swap command ORB if necessary */
1966 * Byte swap command ORB if necessary
1967 */
1968 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb)); 1889 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
1969 1890
1970 /* 1891 /* Put our scsi command in the command ORB */
1971 * Put our scsi command in the command ORB
1972 */
1973 memset(command_orb->cdb, 0, 12); 1892 memset(command_orb->cdb, 0, 12);
1974 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd)); 1893 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1975
1976 return(0);
1977} 1894}
1978 1895
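
The direction mapping that replaced the old per-opcode table reduces to a small pure function. The enum names below are illustrative stand-ins (suffixed _X) for the kernel's dma_data_direction and the ORB_DIRECTION_* values; zero-length DMA_TO_DEVICE/DMA_FROM_DEVICE requests fall through to the warn-and-no-data path, matching the code above:

#include <stdio.h>

enum dma_dir { DMA_BIDIRECTIONAL_X, DMA_TO_DEVICE_X,
	       DMA_FROM_DEVICE_X, DMA_NONE_X };
enum orb_dir { ORB_NO_DATA, ORB_WRITE_TO_MEDIA, ORB_READ_FROM_MEDIA };

static enum orb_dir map_direction(enum dma_dir d, unsigned int bufflen)
{
	if (d == DMA_NONE_X)
		return ORB_NO_DATA;
	if (d == DMA_TO_DEVICE_X && bufflen)
		return ORB_WRITE_TO_MEDIA;
	if (d == DMA_FROM_DEVICE_X && bufflen)
		return ORB_READ_FROM_MEDIA;
	return ORB_NO_DATA;	/* the driver's warn-and-fall-back path */
}

int main(void)
{
	printf("%d %d %d\n",
	       map_direction(DMA_TO_DEVICE_X, 512),
	       map_direction(DMA_FROM_DEVICE_X, 0),
	       map_direction(DMA_NONE_X, 0));
	return 0;
}
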
1979/* 1896/*
@@ -1989,7 +1906,7 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1989 1906
1990 outstanding_orb_incr; 1907 outstanding_orb_incr;
1991 SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", 1908 SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
1992 command_orb, global_outstanding_command_orbs); 1909 command_orb, global_outstanding_command_orbs);
1993 1910
1994 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, 1911 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
1995 sizeof(struct sbp2_command_orb), 1912 sizeof(struct sbp2_command_orb),
@@ -2034,10 +1951,11 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2034 * both by the sbp2 device and us. 1951 * both by the sbp2 device and us.
2035 */ 1952 */
2036 scsi_id->last_orb->next_ORB_lo = 1953 scsi_id->last_orb->next_ORB_lo =
2037 cpu_to_be32(command->command_orb_dma); 1954 cpu_to_be32(command->command_orb_dma);
2038 /* Tells hardware that this pointer is valid */ 1955 /* Tells hardware that this pointer is valid */
2039 scsi_id->last_orb->next_ORB_hi = 0x0; 1956 scsi_id->last_orb->next_ORB_hi = 0x0;
2040 pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma, 1957 pci_dma_sync_single_for_device(hi->host->pdev,
1958 scsi_id->last_orb_dma,
2041 sizeof(struct sbp2_command_orb), 1959 sizeof(struct sbp2_command_orb),
2042 PCI_DMA_BIDIRECTIONAL); 1960 PCI_DMA_BIDIRECTIONAL);
2043 1961
@@ -2051,14 +1969,14 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2051 1969
2052 if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) { 1970 if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) {
2053 SBP2_ERR("sbp2util_node_write_no_wait failed"); 1971 SBP2_ERR("sbp2util_node_write_no_wait failed");
2054 return(-EIO); 1972 return -EIO;
2055 } 1973 }
2056 1974
2057 scsi_id->last_orb = command_orb; 1975 scsi_id->last_orb = command_orb;
2058 scsi_id->last_orb_dma = command->command_orb_dma; 1976 scsi_id->last_orb_dma = command->command_orb_dma;
2059 1977
2060 } 1978 }
2061 return(0); 1979 return 0;
2062} 1980}
2063 1981
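
The chaining in sbp2_link_orb_command hinges on the next_ORB pointer convention: per SBP-2, as assumed here, a pointer whose high quadlet has its most significant bit set is null, and clearing that quadlet publishes the next ORB's address to the target's fetch agent. A toy model with fabricated 32-bit addresses:

#include <stdint.h>
#include <stdio.h>

struct orb {
	uint32_t next_hi;
	uint32_t next_lo;
};

static void append(struct orb *last, uint32_t next_dma)
{
	last->next_lo = next_dma;	/* real driver byte-swaps with cpu_to_be32 */
	last->next_hi = 0;		/* 0 = pointer valid, agent may follow it */
}

int main(void)
{
	struct orb prev = { 0x80000000u, 0 };	/* initially: null next pointer */

	append(&prev, 0x00c0ffee);
	printf("next = %08x:%08x\n", (unsigned)prev.next_hi, (unsigned)prev.next_lo);
	return 0;
}
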
2064/* 1982/*
@@ -2085,7 +2003,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2085 */ 2003 */
2086 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done); 2004 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
2087 if (!command) { 2005 if (!command) {
2088 return(-EIO); 2006 return -EIO;
2089 } 2007 }
2090 2008
2091 /* 2009 /*
@@ -2106,11 +2024,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2106 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg, 2024 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
2107 request_bufflen, SCpnt->request_buffer, 2025 request_bufflen, SCpnt->request_buffer,
2108 SCpnt->sc_data_direction); 2026 SCpnt->sc_data_direction);
2109 /*
2110 * Update our cdb if necessary (to handle sbp2 RBC command set
2111 * differences). This is where the command set hacks go! =)
2112 */
2113 sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
2114 2027
2115 sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb), 2028 sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
2116 "sbp2 command orb", command->command_orb_dma); 2029 "sbp2 command orb", command->command_orb_dma);
@@ -2125,112 +2038,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2125 */ 2038 */
2126 sbp2_link_orb_command(scsi_id, command); 2039 sbp2_link_orb_command(scsi_id, command);
2127 2040
2128 return(0); 2041 return 0;
2129}
2130
2131
2132/*
2133 * This function deals with command set differences between Linux scsi
2134 * command set and sbp2 RBC command set.
2135 */
2136static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
2137{
2138 unchar new_cmd[16];
2139 u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
2140
2141 SBP2_DEBUG("sbp2_check_sbp2_command");
2142
2143 switch (*cmd) {
2144
2145 case READ_6:
2146
2147 if (sbp2_command_conversion_device_type(device_type)) {
2148
2149 SBP2_DEBUG("Convert READ_6 to READ_10");
2150
2151 /*
2152 * Need to turn read_6 into read_10
2153 */
2154 new_cmd[0] = 0x28;
2155 new_cmd[1] = (cmd[1] & 0xe0);
2156 new_cmd[2] = 0x0;
2157 new_cmd[3] = (cmd[1] & 0x1f);
2158 new_cmd[4] = cmd[2];
2159 new_cmd[5] = cmd[3];
2160 new_cmd[6] = 0x0;
2161 new_cmd[7] = 0x0;
2162 new_cmd[8] = cmd[4];
2163 new_cmd[9] = cmd[5];
2164
2165 memcpy(cmd, new_cmd, 10);
2166
2167 }
2168
2169 break;
2170
2171 case WRITE_6:
2172
2173 if (sbp2_command_conversion_device_type(device_type)) {
2174
2175 SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
2176
2177 /*
2178 * Need to turn write_6 into write_10
2179 */
2180 new_cmd[0] = 0x2a;
2181 new_cmd[1] = (cmd[1] & 0xe0);
2182 new_cmd[2] = 0x0;
2183 new_cmd[3] = (cmd[1] & 0x1f);
2184 new_cmd[4] = cmd[2];
2185 new_cmd[5] = cmd[3];
2186 new_cmd[6] = 0x0;
2187 new_cmd[7] = 0x0;
2188 new_cmd[8] = cmd[4];
2189 new_cmd[9] = cmd[5];
2190
2191 memcpy(cmd, new_cmd, 10);
2192
2193 }
2194
2195 break;
2196
2197 case MODE_SENSE:
2198
2199 if (sbp2_command_conversion_device_type(device_type)) {
2200
2201 SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
2202
2203 /*
2204 * Need to turn mode_sense_6 into mode_sense_10
2205 */
2206 new_cmd[0] = 0x5a;
2207 new_cmd[1] = cmd[1];
2208 new_cmd[2] = cmd[2];
2209 new_cmd[3] = 0x0;
2210 new_cmd[4] = 0x0;
2211 new_cmd[5] = 0x0;
2212 new_cmd[6] = 0x0;
2213 new_cmd[7] = 0x0;
2214 new_cmd[8] = cmd[4];
2215 new_cmd[9] = cmd[5];
2216
2217 memcpy(cmd, new_cmd, 10);
2218
2219 }
2220
2221 break;
2222
2223 case MODE_SELECT:
2224
2225 /*
2226 * TODO. Probably need to change mode select to 10 byte version
2227 */
2228
2229 default:
2230 break;
2231 }
2232
2233 return;
2234} 2042}
2235 2043
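
The deleted helper's 6-to-10-byte CDB rewrite survives only as the midlayer feature it was replaced by (sdev->use_10_for_rw / use_10_for_ms in sbp2scsi_slave_configure further down). For reference, a stand-alone rendering of the READ_6 case exactly as the removed code laid out the bytes; the input CDB in main() is fabricated:

#include <stdio.h>
#include <string.h>

static void read6_to_read10(const unsigned char *cmd, unsigned char *out)
{
	memset(out, 0, 10);
	out[0] = 0x28;			/* READ_10 opcode */
	out[1] = cmd[1] & 0xe0;		/* keep LUN bits of byte 1 */
	out[3] = cmd[1] & 0x1f;		/* high 5 bits of the 21-bit LBA */
	out[4] = cmd[2];
	out[5] = cmd[3];		/* remaining LBA bytes */
	out[8] = cmd[4];		/* transfer length */
	out[9] = cmd[5];		/* control byte */
}

int main(void)
{
	unsigned char r6[6] = { 0x08, 0x01, 0x02, 0x03, 0x10, 0x00 };
	unsigned char r10[10];

	read6_to_read10(r6, r10);
	for (int i = 0; i < 10; i++)
		printf("%02x ", r10[i]);
	printf("\n");
	return 0;
}
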
2236/* 2044/*
@@ -2260,80 +2068,40 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
2260 sense_data[14] = sbp2_status[20]; 2068 sense_data[14] = sbp2_status[20];
2261 sense_data[15] = sbp2_status[21]; 2069 sense_data[15] = sbp2_status[21];
2262 2070
2263 return(sbp2_status[8] & 0x3f); /* return scsi status */ 2071 return sbp2_status[8] & 0x3f; /* return scsi status */
2264} 2072}
2265 2073
2266/* 2074/*
2267 * This function is called after a command is completed, in order to do any necessary SBP-2 2075 * This function is called after a command is completed, in order to do any necessary SBP-2
2268 * response data translations for the SCSI stack 2076 * response data translations for the SCSI stack
2269 */ 2077 */
2270static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, 2078static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
2271 struct scsi_cmnd *SCpnt) 2079 struct scsi_cmnd *SCpnt)
2272{ 2080{
2273 u8 *scsi_buf = SCpnt->request_buffer; 2081 u8 *scsi_buf = SCpnt->request_buffer;
2274 u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
2275 2082
2276 SBP2_DEBUG("sbp2_check_sbp2_response"); 2083 SBP2_DEBUG("sbp2_check_sbp2_response");
2277 2084
2278 switch (SCpnt->cmnd[0]) { 2085 switch (SCpnt->cmnd[0]) {
2279 2086
2280 case INQUIRY: 2087 case INQUIRY:
2281 2088 /*
2282 /* 2089 * Make sure data length is ok. Minimum length is 36 bytes
2283 * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill 2090 */
2284 * this information in from the inquiry response data. Lun is set to zero. 2091 if (scsi_buf[4] == 0) {
2285 */ 2092 scsi_buf[4] = 36 - 5;
2286 if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { 2093 }
2287 SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
2288 scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
2289 }
2290
2291 /*
2292 * Make sure data length is ok. Minimum length is 36 bytes
2293 */
2294 if (scsi_buf[4] == 0) {
2295 scsi_buf[4] = 36 - 5;
2296 }
2297
2298 /*
2299 * Check for Simple Direct Access Device and change it to TYPE_DISK
2300 */
2301 if ((scsi_buf[0] & 0x1f) == TYPE_RBC) {
2302 SBP2_DEBUG("Changing TYPE_RBC to TYPE_DISK");
2303 scsi_buf[0] &= 0xe0;
2304 }
2305
2306 /*
2307 * Fix ansi revision and response data format
2308 */
2309 scsi_buf[2] |= 2;
2310 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
2311
2312 break;
2313
2314 case MODE_SENSE:
2315
2316 if (sbp2_command_conversion_device_type(device_type)) {
2317
2318 SBP2_DEBUG("Modify mode sense response (10 byte version)");
2319
2320 scsi_buf[0] = scsi_buf[1]; /* Mode data length */
2321 scsi_buf[1] = scsi_buf[2]; /* Medium type */
2322 scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
2323 scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
2324 memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
2325 }
2326
2327 break;
2328 2094
2329 case MODE_SELECT: 2095 /*
2096 * Fix ansi revision and response data format
2097 */
2098 scsi_buf[2] |= 2;
2099 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
2330 2100
2331 /* 2101 break;
2332 * TODO. Probably need to change mode select to 10 byte version
2333 */
2334 2102
2335 default: 2103 default:
2336 break; 2104 break;
2337 } 2105 }
2338 return; 2106 return;
2339} 2107}
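
What remains of the response fixups is the INQUIRY case: pad the additional-length byte up to the 36-byte minimum and force the ANSI version and response-data-format fields to 2 (SCSI-2). A minimal stand-alone model; the zeroed buffer is fabricated:

#include <stdio.h>

int main(void)
{
	unsigned char inq[36] = { 0 };

	if (inq[4] == 0)
		inq[4] = 36 - 5;		/* additional length = total - 5 */
	inq[2] |= 2;				/* ANSI-approved version: SCSI-2 */
	inq[3] = (inq[3] & 0xf0) | 2;		/* response data format: SCSI-2 */

	printf("len=%u ver=%02x fmt=%02x\n", inq[4], inq[2], inq[3] & 0x0f);
	return 0;
}
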
@@ -2358,14 +2126,14 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2358 2126
2359 if (!host) { 2127 if (!host) {
2360 SBP2_ERR("host is NULL - this is bad!"); 2128 SBP2_ERR("host is NULL - this is bad!");
2361 return(RCODE_ADDRESS_ERROR); 2129 return RCODE_ADDRESS_ERROR;
2362 } 2130 }
2363 2131
2364 hi = hpsb_get_hostinfo(&sbp2_highlevel, host); 2132 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
2365 2133
2366 if (!hi) { 2134 if (!hi) {
2367 SBP2_ERR("host info is NULL - this is bad!"); 2135 SBP2_ERR("host info is NULL - this is bad!");
2368 return(RCODE_ADDRESS_ERROR); 2136 return RCODE_ADDRESS_ERROR;
2369 } 2137 }
2370 2138
2371 /* 2139 /*
@@ -2382,7 +2150,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2382 2150
2383 if (!scsi_id) { 2151 if (!scsi_id) {
2384 SBP2_ERR("scsi_id is NULL - device is gone?"); 2152 SBP2_ERR("scsi_id is NULL - device is gone?");
2385 return(RCODE_ADDRESS_ERROR); 2153 return RCODE_ADDRESS_ERROR;
2386 } 2154 }
2387 2155
2388 /* 2156 /*
@@ -2480,10 +2248,9 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2480 SBP2_ORB_DEBUG("command orb completed"); 2248 SBP2_ORB_DEBUG("command orb completed");
2481 } 2249 }
2482 2250
2483 return(RCODE_COMPLETE); 2251 return RCODE_COMPLETE;
2484} 2252}
2485 2253
2486
2487/************************************** 2254/**************************************
2488 * SCSI interface related section 2255 * SCSI interface related section
2489 **************************************/ 2256 **************************************/
@@ -2541,6 +2308,16 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2541 } 2308 }
2542 2309
2543 /* 2310 /*
2311 * Bidirectional commands are not yet implemented,
2312 * and unknown transfer direction not handled.
2313 */
2314 if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) {
2315 SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
2316 result = DID_ERROR << 16;
2317 goto done;
2318 }
2319
2320 /*
2544 * Try and send our SCSI command 2321 * Try and send our SCSI command
2545 */ 2322 */
2546 if (sbp2_send_command(scsi_id, SCpnt, done)) { 2323 if (sbp2_send_command(scsi_id, SCpnt, done)) {
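
The new rejection path encodes its status the usual SCSI way: the host byte lives in bits 16-23 of SCpnt->result, so DID_ERROR << 16 marks a host-level failure. A stand-alone check, assuming the era's DID_ERROR value of 0x07 from include/scsi/scsi.h:

#include <stdio.h>

#define DID_ERROR 0x07	/* assumed to match include/scsi/scsi.h of this era */

static unsigned int host_byte(unsigned int result)
{
	return (result >> 16) & 0xff;
}

int main(void)
{
	unsigned int result = DID_ERROR << 16;

	printf("host byte = 0x%02x\n", host_byte(result));
	return 0;
}
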
@@ -2616,55 +2393,56 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2616 * complete the command, just let it get retried at the end of the 2393 * complete the command, just let it get retried at the end of the
2617 * bus reset. 2394 * bus reset.
2618 */ 2395 */
2619 if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { 2396 if (!hpsb_node_entry_valid(scsi_id->ne)
2397 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2620 SBP2_ERR("Bus reset in progress - retry command later"); 2398 SBP2_ERR("Bus reset in progress - retry command later");
2621 return; 2399 return;
2622 } 2400 }
2623 2401
2624 /* 2402 /*
2625 * Switch on scsi status 2403 * Switch on scsi status
2626 */ 2404 */
2627 switch (scsi_status) { 2405 switch (scsi_status) {
2628 case SBP2_SCSI_STATUS_GOOD: 2406 case SBP2_SCSI_STATUS_GOOD:
2629 SCpnt->result = DID_OK; 2407 SCpnt->result = DID_OK;
2630 break; 2408 break;
2631 2409
2632 case SBP2_SCSI_STATUS_BUSY: 2410 case SBP2_SCSI_STATUS_BUSY:
2633 SBP2_ERR("SBP2_SCSI_STATUS_BUSY"); 2411 SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
2634 SCpnt->result = DID_BUS_BUSY << 16; 2412 SCpnt->result = DID_BUS_BUSY << 16;
2635 break; 2413 break;
2636 2414
2637 case SBP2_SCSI_STATUS_CHECK_CONDITION: 2415 case SBP2_SCSI_STATUS_CHECK_CONDITION:
2638 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION"); 2416 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
2639 SCpnt->result = CHECK_CONDITION << 1; 2417 SCpnt->result = CHECK_CONDITION << 1;
2640 2418
2641 /* 2419 /*
2642 * Debug stuff 2420 * Debug stuff
2643 */ 2421 */
2644#if CONFIG_IEEE1394_SBP2_DEBUG >= 1 2422#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
2645 scsi_print_command(SCpnt); 2423 scsi_print_command(SCpnt);
2646 scsi_print_sense("bh", SCpnt); 2424 scsi_print_sense("bh", SCpnt);
2647#endif 2425#endif
2648 2426
2649 break; 2427 break;
2650 2428
2651 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT: 2429 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
2652 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT"); 2430 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
2653 SCpnt->result = DID_NO_CONNECT << 16; 2431 SCpnt->result = DID_NO_CONNECT << 16;
2654 scsi_print_command(SCpnt); 2432 scsi_print_command(SCpnt);
2655 break; 2433 break;
2656 2434
2657 case SBP2_SCSI_STATUS_CONDITION_MET: 2435 case SBP2_SCSI_STATUS_CONDITION_MET:
2658 case SBP2_SCSI_STATUS_RESERVATION_CONFLICT: 2436 case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
2659 case SBP2_SCSI_STATUS_COMMAND_TERMINATED: 2437 case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
2660 SBP2_ERR("Bad SCSI status = %x", scsi_status); 2438 SBP2_ERR("Bad SCSI status = %x", scsi_status);
2661 SCpnt->result = DID_ERROR << 16; 2439 SCpnt->result = DID_ERROR << 16;
2662 scsi_print_command(SCpnt); 2440 scsi_print_command(SCpnt);
2663 break; 2441 break;
2664 2442
2665 default: 2443 default:
2666 SBP2_ERR("Unsupported SCSI status = %x", scsi_status); 2444 SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
2667 SCpnt->result = DID_ERROR << 16; 2445 SCpnt->result = DID_ERROR << 16;
2668 } 2446 }
2669 2447
2670 /* 2448 /*
@@ -2678,7 +2456,8 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2678 * If a bus reset is in progress and there was an error, complete 2456 * If a bus reset is in progress and there was an error, complete
2679 * the command as busy so that it will get retried. 2457 * the command as busy so that it will get retried.
2680 */ 2458 */
2681 if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { 2459 if (!hpsb_node_entry_valid(scsi_id->ne)
2460 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2682 SBP2_ERR("Completing command with busy (bus reset)"); 2461 SBP2_ERR("Completing command with busy (bus reset)");
2683 SCpnt->result = DID_BUS_BUSY << 16; 2462 SCpnt->result = DID_BUS_BUSY << 16;
2684 } 2463 }
@@ -2699,31 +2478,29 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2699 /* 2478 /*
2700 * Tell scsi stack that we're done with this command 2479 * Tell scsi stack that we're done with this command
2701 */ 2480 */
2702 done (SCpnt); 2481 done(SCpnt);
2703} 2482}
2704 2483
2705
2706static int sbp2scsi_slave_alloc(struct scsi_device *sdev) 2484static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2707{ 2485{
2708 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev; 2486 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
2709 return 0; 2487 return 0;
2710} 2488}
2711 2489
2712
2713static int sbp2scsi_slave_configure(struct scsi_device *sdev) 2490static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2714{ 2491{
2715 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2492 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2493 sdev->use_10_for_rw = 1;
2494 sdev->use_10_for_ms = 1;
2716 return 0; 2495 return 0;
2717} 2496}
2718 2497
2719
2720static void sbp2scsi_slave_destroy(struct scsi_device *sdev) 2498static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2721{ 2499{
2722 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL; 2500 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
2723 return; 2501 return;
2724} 2502}
2725 2503
2726
2727/* 2504/*
2728 * Called by scsi stack when something has really gone wrong. Usually 2505 * Called by scsi stack when something has really gone wrong. Usually
2729 * called when a command has timed-out for some reason. 2506 * called when a command has timed-out for some reason.
@@ -2769,7 +2546,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2769 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); 2546 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
2770 } 2547 }
2771 2548
2772 return(SUCCESS); 2549 return SUCCESS;
2773} 2550}
2774 2551
2775/* 2552/*
@@ -2779,28 +2556,20 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2779{ 2556{
2780 struct scsi_id_instance_data *scsi_id = 2557 struct scsi_id_instance_data *scsi_id =
2781 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2558 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2782 unsigned long flags;
2783 2559
2784 SBP2_ERR("reset requested"); 2560 SBP2_ERR("reset requested");
2785 2561
2786 spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
2787
2788 if (sbp2util_node_is_available(scsi_id)) { 2562 if (sbp2util_node_is_available(scsi_id)) {
2789 SBP2_ERR("Generating sbp2 fetch agent reset"); 2563 SBP2_ERR("Generating sbp2 fetch agent reset");
2790 sbp2_agent_reset(scsi_id, 0); 2564 sbp2_agent_reset(scsi_id, 0);
2791 } 2565 }
2792 2566
2793 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
2794
2795 return SUCCESS; 2567 return SUCCESS;
2796} 2568}
2797 2569
2798static const char *sbp2scsi_info (struct Scsi_Host *host) 2570static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
2799{ 2571 struct device_attribute *attr,
2800 return "SCSI emulation for IEEE-1394 SBP-2 Devices"; 2572 char *buf)
2801}
2802
2803static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, char *buf)
2804{ 2573{
2805 struct scsi_device *sdev; 2574 struct scsi_device *sdev;
2806 struct scsi_id_instance_data *scsi_id; 2575 struct scsi_id_instance_data *scsi_id;
@@ -2812,10 +2581,7 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_att
2812 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0])) 2581 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
2813 return 0; 2582 return 0;
2814 2583
2815 if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) 2584 lun = ORB_SET_LUN(scsi_id->sbp2_lun);
2816 lun = 0;
2817 else
2818 lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
2819 2585
2820 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid, 2586 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
2821 scsi_id->ud->id, lun); 2587 scsi_id->ud->id, lun);
@@ -2837,12 +2603,9 @@ static struct scsi_host_template scsi_driver_template = {
2837 .module = THIS_MODULE, 2603 .module = THIS_MODULE,
2838 .name = "SBP-2 IEEE-1394", 2604 .name = "SBP-2 IEEE-1394",
2839 .proc_name = SBP2_DEVICE_NAME, 2605 .proc_name = SBP2_DEVICE_NAME,
2840 .info = sbp2scsi_info,
2841 .queuecommand = sbp2scsi_queuecommand, 2606 .queuecommand = sbp2scsi_queuecommand,
2842 .eh_abort_handler = sbp2scsi_abort, 2607 .eh_abort_handler = sbp2scsi_abort,
2843 .eh_device_reset_handler = sbp2scsi_reset, 2608 .eh_device_reset_handler = sbp2scsi_reset,
2844 .eh_bus_reset_handler = sbp2scsi_reset,
2845 .eh_host_reset_handler = sbp2scsi_reset,
2846 .slave_alloc = sbp2scsi_slave_alloc, 2609 .slave_alloc = sbp2scsi_slave_alloc,
2847 .slave_configure = sbp2scsi_slave_configure, 2610 .slave_configure = sbp2scsi_slave_configure,
2848 .slave_destroy = sbp2scsi_slave_destroy, 2611 .slave_destroy = sbp2scsi_slave_destroy,
@@ -2861,8 +2624,6 @@ static int sbp2_module_init(void)
2861 2624
2862 SBP2_DEBUG("sbp2_module_init"); 2625 SBP2_DEBUG("sbp2_module_init");
2863 2626
2864 printk(KERN_INFO "sbp2: %s\n", version);
2865
2866 /* Module load debug option to force one command at a time (serializing I/O) */ 2627 /* Module load debug option to force one command at a time (serializing I/O) */
2867 if (serialize_io) { 2628 if (serialize_io) {
2868 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)"); 2629 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
@@ -2874,7 +2635,6 @@ static int sbp2_module_init(void)
2874 /* Set max sectors (module load option). Default is 255 sectors. */ 2635 /* Set max sectors (module load option). Default is 255 sectors. */
2875 scsi_driver_template.max_sectors = max_sectors; 2636 scsi_driver_template.max_sectors = max_sectors;
2876 2637
2877
2878 /* Register our high level driver with 1394 stack */ 2638 /* Register our high level driver with 1394 stack */
2879 hpsb_register_highlevel(&sbp2_highlevel); 2639 hpsb_register_highlevel(&sbp2_highlevel);
2880 2640
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index cd425be74841..900ea1d25e71 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -119,8 +119,8 @@ struct sbp2_query_logins_response {
119struct sbp2_reconnect_orb { 119struct sbp2_reconnect_orb {
120 u32 reserved1; 120 u32 reserved1;
121 u32 reserved2; 121 u32 reserved2;
122 u32 reserved3; 122 u32 reserved3;
123 u32 reserved4; 123 u32 reserved4;
124 u32 login_ID_misc; 124 u32 login_ID_misc;
125 u32 reserved5; 125 u32 reserved5;
126 u32 status_FIFO_hi; 126 u32 status_FIFO_hi;
@@ -130,8 +130,8 @@ struct sbp2_reconnect_orb {
130struct sbp2_logout_orb { 130struct sbp2_logout_orb {
131 u32 reserved1; 131 u32 reserved1;
132 u32 reserved2; 132 u32 reserved2;
133 u32 reserved3; 133 u32 reserved3;
134 u32 reserved4; 134 u32 reserved4;
135 u32 login_ID_misc; 135 u32 login_ID_misc;
136 u32 reserved5; 136 u32 reserved5;
137 u32 status_FIFO_hi; 137 u32 status_FIFO_hi;
@@ -188,7 +188,7 @@ struct sbp2_unrestricted_page_table {
188struct sbp2_status_block { 188struct sbp2_status_block {
189 u32 ORB_offset_hi_misc; 189 u32 ORB_offset_hi_misc;
190 u32 ORB_offset_lo; 190 u32 ORB_offset_lo;
191 u8 command_set_dependent[24]; 191 u8 command_set_dependent[24];
192}; 192};
193 193
194/* 194/*
@@ -211,7 +211,7 @@ struct sbp2_status_block {
211 * specified for write posting, where the ohci controller will 211 * specified for write posting, where the ohci controller will
212 * automatically send an ack_complete when the status is written by the 212 * automatically send an ack_complete when the status is written by the
213 * sbp2 device... saving a split transaction. =) 213 * sbp2 device... saving a split transaction. =)
214 */ 214 */
215#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL 215#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
216#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe 216#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
217#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0 217#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
@@ -229,9 +229,6 @@ struct sbp2_status_block {
229#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14 229#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
230#define SBP2_FIRMWARE_REVISION_KEY 0x3c 230#define SBP2_FIRMWARE_REVISION_KEY 0x3c
231 231
232#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f)
233#define SBP2_DEVICE_LUN(q) ((q) & 0xffff)
234
235#define SBP2_AGENT_STATE_OFFSET 0x00ULL 232#define SBP2_AGENT_STATE_OFFSET 0x00ULL
236#define SBP2_AGENT_RESET_OFFSET 0x04ULL 233#define SBP2_AGENT_RESET_OFFSET 0x04ULL
237#define SBP2_ORB_POINTER_OFFSET 0x08ULL 234#define SBP2_ORB_POINTER_OFFSET 0x08ULL
@@ -256,8 +253,6 @@ struct sbp2_status_block {
256 */ 253 */
257#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 254#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
258 255
259#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff
260
261/* 256/*
262 * SCSI specific stuff 257 * SCSI specific stuff
263 */ 258 */
@@ -265,45 +260,7 @@ struct sbp2_status_block {
265#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 260#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
266#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */ 261#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
267#define SBP2_MAX_SECTORS 255 /* Max sectors supported */ 262#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
268 263#define SBP2_MAX_CMDS 8 /* This should be safe */
269/*
270 * SCSI direction table...
271 * (now used as a back-up in case the direction passed down from above is "unknown")
272 *
273 * DIN = IN data direction
274 * DOU = OUT data direction
275 * DNO = No data transfer
276 * DUN = Unknown data direction
277 *
278 * Opcode 0xec (Teac specific "opc execute") possibly should be DNO,
279 * but we'll change it when somebody reports a problem with this.
280 */
281#define DIN ORB_DIRECTION_READ_FROM_MEDIA
282#define DOU ORB_DIRECTION_WRITE_TO_MEDIA
283#define DNO ORB_DIRECTION_NO_DATA_TRANSFER
284#define DUN DIN
285
286static unchar sbp2scsi_direction_table[0x100] = {
287 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
288 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
289 DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
290 DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
291 DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN,
292 DUN,DIN,DIN,DNO,DNO,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN,
293 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
294 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
295 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
296 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
297 DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU,
298 DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN,
299 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
300 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
301 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
302 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
303};
304
305/* This should be safe */
306#define SBP2_MAX_CMDS 8
307 264
308/* This is the two dma types we use for cmd_dma below */ 265/* This is the two dma types we use for cmd_dma below */
309enum cmd_dma_types { 266enum cmd_dma_types {
@@ -338,10 +295,8 @@ struct sbp2_command_info {
338#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 295#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
339#define SBP2_BREAKAGE_INQUIRY_HACK 0x2 296#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
340 297
341
342struct sbp2scsi_host_info; 298struct sbp2scsi_host_info;
343 299
344
345/* 300/*
346 * Information needed on a per scsi id basis (one for each sbp2 device) 301 * Information needed on a per scsi id basis (one for each sbp2 device)
347 */ 302 */
@@ -379,7 +334,7 @@ struct scsi_id_instance_data {
379 u32 sbp2_command_set_spec_id; 334 u32 sbp2_command_set_spec_id;
380 u32 sbp2_command_set; 335 u32 sbp2_command_set;
381 u32 sbp2_unit_characteristics; 336 u32 sbp2_unit_characteristics;
382 u32 sbp2_device_type_and_lun; 337 u32 sbp2_lun;
383 u32 sbp2_firmware_revision; 338 u32 sbp2_firmware_revision;
384 339
385 /* 340 /*
@@ -411,7 +366,6 @@ struct scsi_id_instance_data {
411 u32 workarounds; 366 u32 workarounds;
412}; 367};
413 368
414
415/* Sbp2 host data structure (one per IEEE1394 host) */ 369/* Sbp2 host data structure (one per IEEE1394 host) */
416struct sbp2scsi_host_info { 370struct sbp2scsi_host_info {
417 struct hpsb_host *host; /* IEEE1394 host */ 371 struct hpsb_host *host; /* IEEE1394 host */
@@ -456,20 +410,12 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
456static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, 410static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
457 quadlet_t *data, u64 addr, size_t length, u16 flags); 411 quadlet_t *data, u64 addr, size_t length, u16 flags);
458static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait); 412static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
459static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
460 struct sbp2_command_info *command,
461 unchar *scsi_cmd,
462 unsigned int scsi_use_sg,
463 unsigned int scsi_request_bufflen,
464 void *scsi_request_buffer,
465 enum dma_data_direction dma_dir);
466static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, 413static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
467 struct sbp2_command_info *command); 414 struct sbp2_command_info *command);
468static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, 415static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
469 struct scsi_cmnd *SCpnt, 416 struct scsi_cmnd *SCpnt,
470 void (*done)(struct scsi_cmnd *)); 417 void (*done)(struct scsi_cmnd *));
471static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data); 418static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
472static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd);
473static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, 419static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
474 struct scsi_cmnd *SCpnt); 420 struct scsi_cmnd *SCpnt);
475static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, 421static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 23911da50154..608479b2df14 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -19,12 +19,6 @@
19 * 19 *
20 * NOTES: 20 * NOTES:
21 * 21 *
22 * jds -- add private data to file to keep track of iso contexts associated
23 * with each open -- so release won't kill all iso transfers.
24 *
25 * Damien Douxchamps: Fix failure when the number of DMA pages per frame is
26 * one.
27 *
28 * ioctl return codes: 22 * ioctl return codes:
29 * EFAULT is only for invalid address for the argp 23 * EFAULT is only for invalid address for the argp
30 * EINVAL for out of range values 24 * EINVAL for out of range values
@@ -34,12 +28,6 @@
34 * ENOTTY for unsupported ioctl request 28 * ENOTTY for unsupported ioctl request
35 * 29 *
36 */ 30 */
37
38/* Markus Tavenrath <speedygoo@speedygoo.de> :
39 - fixed checks for valid buffer-numbers in video1394_icotl
40 - changed the ways the dma prg's are used, now it's possible to use
41 even a single dma buffer
42*/
43#include <linux/config.h> 31#include <linux/config.h>
44#include <linux/kernel.h> 32#include <linux/kernel.h>
45#include <linux/list.h> 33#include <linux/list.h>
@@ -77,14 +65,6 @@
77 65
78#define ISO_CHANNELS 64 66#define ISO_CHANNELS 64
79 67
80#ifndef virt_to_page
81#define virt_to_page(x) MAP_NR(x)
82#endif
83
84#ifndef vmalloc_32
85#define vmalloc_32(x) vmalloc(x)
86#endif
87
88struct it_dma_prg { 68struct it_dma_prg {
89 struct dma_cmd begin; 69 struct dma_cmd begin;
90 quadlet_t data[4]; 70 quadlet_t data[4];
@@ -206,14 +186,12 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
206 struct dma_iso_ctx *d; 186 struct dma_iso_ctx *d;
207 int i; 187 int i;
208 188
209 d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL); 189 d = kzalloc(sizeof(*d), GFP_KERNEL);
210 if (d == NULL) { 190 if (!d) {
211 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx"); 191 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
212 return NULL; 192 return NULL;
213 } 193 }
214 194
215 memset(d, 0, sizeof *d);
216
217 d->ohci = ohci; 195 d->ohci = ohci;
218 d->type = type; 196 d->type = type;
219 d->channel = channel; 197 d->channel = channel;
@@ -251,9 +229,8 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
251 } 229 }
252 d->ctx = d->iso_tasklet.context; 230 d->ctx = d->iso_tasklet.context;
253 231
254 d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region), 232 d->prg_reg = kmalloc(d->num_desc * sizeof(*d->prg_reg), GFP_KERNEL);
255 GFP_KERNEL); 233 if (!d->prg_reg) {
256 if (d->prg_reg == NULL) {
257 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs"); 234 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs");
258 free_dma_iso_ctx(d); 235 free_dma_iso_ctx(d);
259 return NULL; 236 return NULL;
@@ -268,15 +245,14 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
268 d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx; 245 d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
269 d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx; 246 d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
270 247
271 d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *), 248 d->ir_prg = kzalloc(d->num_desc * sizeof(*d->ir_prg),
272 GFP_KERNEL); 249 GFP_KERNEL);
273 250
274 if (d->ir_prg == NULL) { 251 if (!d->ir_prg) {
275 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg"); 252 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
276 free_dma_iso_ctx(d); 253 free_dma_iso_ctx(d);
277 return NULL; 254 return NULL;
278 } 255 }
279 memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
280 256
281 d->nb_cmd = d->buf_size / PAGE_SIZE + 1; 257 d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
282 d->left_size = (d->frame_size % PAGE_SIZE) ? 258 d->left_size = (d->frame_size % PAGE_SIZE) ?
@@ -297,16 +273,15 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
297 d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx; 273 d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
298 d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx; 274 d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
299 275
300 d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *), 276 d->it_prg = kzalloc(d->num_desc * sizeof(*d->it_prg),
301 GFP_KERNEL); 277 GFP_KERNEL);
302 278
303 if (d->it_prg == NULL) { 279 if (!d->it_prg) {
304 PRINT(KERN_ERR, ohci->host->id, 280 PRINT(KERN_ERR, ohci->host->id,
305 "Failed to allocate dma it prg"); 281 "Failed to allocate dma it prg");
306 free_dma_iso_ctx(d); 282 free_dma_iso_ctx(d);
307 return NULL; 283 return NULL;
308 } 284 }
309 memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
310 285
311 d->packet_size = packet_size; 286 d->packet_size = packet_size;
312 287
@@ -337,47 +312,24 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
337 } 312 }
338 } 313 }
339 314
340 d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int), 315 d->buffer_status =
341 GFP_KERNEL); 316 kzalloc(d->num_desc * sizeof(*d->buffer_status), GFP_KERNEL);
342 d->buffer_prg_assignment = kmalloc(d->num_desc * sizeof(unsigned int), 317 d->buffer_prg_assignment =
343 GFP_KERNEL); 318 kzalloc(d->num_desc * sizeof(*d->buffer_prg_assignment), GFP_KERNEL);
344 d->buffer_time = kmalloc(d->num_desc * sizeof(struct timeval), 319 d->buffer_time =
345 GFP_KERNEL); 320 kzalloc(d->num_desc * sizeof(*d->buffer_time), GFP_KERNEL);
346 d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int), 321 d->last_used_cmd =
347 GFP_KERNEL); 322 kzalloc(d->num_desc * sizeof(*d->last_used_cmd), GFP_KERNEL);
348 d->next_buffer = kmalloc(d->num_desc * sizeof(int), 323 d->next_buffer =
349 GFP_KERNEL); 324 kzalloc(d->num_desc * sizeof(*d->next_buffer), GFP_KERNEL);
350 325
351 if (d->buffer_status == NULL) { 326 if (!d->buffer_status || !d->buffer_prg_assignment || !d->buffer_time ||
352 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_status"); 327 !d->last_used_cmd || !d->next_buffer) {
353 free_dma_iso_ctx(d); 328 PRINT(KERN_ERR, ohci->host->id,
354 return NULL; 329 "Failed to allocate dma_iso_ctx member");
355 }
356 if (d->buffer_prg_assignment == NULL) {
357 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_prg_assignment");
358 free_dma_iso_ctx(d);
359 return NULL;
360 }
361 if (d->buffer_time == NULL) {
362 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_time");
363 free_dma_iso_ctx(d);
364 return NULL;
365 }
366 if (d->last_used_cmd == NULL) {
367 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate last_used_cmd");
368 free_dma_iso_ctx(d);
369 return NULL;
370 }
371 if (d->next_buffer == NULL) {
372 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate next_buffer");
373 free_dma_iso_ctx(d); 330 free_dma_iso_ctx(d);
374 return NULL; 331 return NULL;
375 } 332 }
376 memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
377 memset(d->buffer_prg_assignment, 0, d->num_desc * sizeof(unsigned int));
378 memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval));
379 memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
380 memset(d->next_buffer, -1, d->num_desc * sizeof(int));
381 333
382 spin_lock_init(&d->lock); 334 spin_lock_init(&d->lock);
383 335
@@ -539,7 +491,7 @@ static void wakeup_dma_ir_ctx(unsigned long l)
539 if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) { 491 if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
540 reset_ir_status(d, i); 492 reset_ir_status(d, i);
541 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY; 493 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
542 do_gettimeofday(&d->buffer_time[i]); 494 do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]);
543 } 495 }
544 } 496 }
545 497
@@ -1046,7 +998,6 @@ static int __video1394_ioctl(struct file *file,
1046 998
1047 /* set time of buffer */ 999 /* set time of buffer */
1048 v.filltime = d->buffer_time[v.buffer]; 1000 v.filltime = d->buffer_time[v.buffer];
1049// printk("Buffer %d time %d\n", v.buffer, (d->buffer_time[v.buffer]).tv_usec);
1050 1001
1051 /* 1002 /*
1052 * Look ahead to see how many more buffers have been received 1003 * Look ahead to see how many more buffers have been received
@@ -1085,7 +1036,7 @@ static int __video1394_ioctl(struct file *file,
1085 } 1036 }
1086 1037
1087 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) { 1038 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
1088 int buf_size = d->nb_cmd * sizeof(unsigned int); 1039 int buf_size = d->nb_cmd * sizeof(*psizes);
1089 struct video1394_queue_variable __user *p = argp; 1040 struct video1394_queue_variable __user *p = argp;
1090 unsigned int __user *qv; 1041 unsigned int __user *qv;
1091 1042
@@ -1104,7 +1055,7 @@ static int __video1394_ioctl(struct file *file,
1104 1055
1105 spin_lock_irqsave(&d->lock,flags); 1056 spin_lock_irqsave(&d->lock,flags);
1106 1057
1107 // last_buffer is last_prg 1058 /* last_buffer is last_prg */
1108 next_prg = (d->last_buffer + 1) % d->num_desc; 1059 next_prg = (d->last_buffer + 1) % d->num_desc;
1109 if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) { 1060 if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
1110 PRINT(KERN_ERR, ohci->host->id, 1061 PRINT(KERN_ERR, ohci->host->id,
@@ -1251,13 +1202,12 @@ static int video1394_open(struct inode *inode, struct file *file)
1251 if (ohci == NULL) 1202 if (ohci == NULL)
1252 return -EIO; 1203 return -EIO;
1253 1204
1254 ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL); 1205 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1255 if (ctx == NULL) { 1206 if (!ctx) {
1256 PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx"); 1207 PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
1257 return -ENOMEM; 1208 return -ENOMEM;
1258 } 1209 }
1259 1210
1260 memset(ctx, 0, sizeof(struct file_ctx));
1261 ctx->ohci = ohci; 1211 ctx->ohci = ohci;
1262 INIT_LIST_HEAD(&ctx->context_list); 1212 INIT_LIST_HEAD(&ctx->context_list);
1263 ctx->current_ctx = NULL; 1213 ctx->current_ctx = NULL;
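
The kzalloc(sizeof(*ctx), ...) form used here both zero-fills (dropping the explicit memset) and keeps the allocation size tied to the pointee's type if it is ever changed. A user-space equivalent with calloc and a fabricated stand-in struct:

#include <stdio.h>
#include <stdlib.h>

/* fabricated stand-in for struct file_ctx */
struct file_ctx_model {
	void *ohci;
	int listening_channels;
};

int main(void)
{
	struct file_ctx_model *ctx = calloc(1, sizeof(*ctx)); /* zeroed, like kzalloc */

	if (!ctx)
		return 1;
	printf("listening_channels = %d\n", ctx->listening_channels);
	free(ctx);
	return 0;
}
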
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9f2352bd8348..a1e660e3531d 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -157,7 +157,7 @@ struct input_event_compat {
157# define COMPAT_TEST test_thread_flag(TIF_IA32) 157# define COMPAT_TEST test_thread_flag(TIF_IA32)
158#elif defined(CONFIG_IA64) 158#elif defined(CONFIG_IA64)
159# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current)) 159# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
160#elif defined(CONFIG_ARCH_S390) 160#elif defined(CONFIG_S390)
161# define COMPAT_TEST test_thread_flag(TIF_31BIT) 161# define COMPAT_TEST test_thread_flag(TIF_31BIT)
162#elif defined(CONFIG_MIPS) 162#elif defined(CONFIG_MIPS)
163# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR) 163# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR)
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 02a3117ef92d..5ebfd1d138da 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -52,6 +52,7 @@ static char *sensor_location[3] = {NULL, NULL, NULL};
 
 static int limit_adjust = 0;
 static int fan_speed = -1;
+static int verbose = 0;
 
 MODULE_AUTHOR("Colin Leroy <colin@colino.net>");
 MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and "
@@ -66,6 +67,10 @@ module_param(fan_speed, int, 0644);
 MODULE_PARM_DESC(fan_speed,"Specify starting fan speed (0-255) "
 		 "(default 64)");
 
+module_param(verbose, bool, 0);
+MODULE_PARM_DESC(verbose,"Verbose log operations "
+		 "(default 0)");
+
 struct thermostat {
 	struct i2c_client	clt;
 	u8			temps[3];
@@ -149,13 +154,13 @@ detach_thermostat(struct i2c_adapter *adapter)
 	if (thread_therm != NULL) {
 		kthread_stop(thread_therm);
 	}
 
 	printk(KERN_INFO "adt746x: Putting max temperatures back from "
 			 "%d, %d, %d to %d, %d, %d\n",
 		th->limits[0], th->limits[1], th->limits[2],
 		th->initial_limits[0], th->initial_limits[1],
 		th->initial_limits[2]);
 
 	for (i = 0; i < 3; i++)
 		write_reg(th, LIMIT_REG[i], th->initial_limits[i]);
 
@@ -212,12 +217,14 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
 		return;
 
 	if (th->last_speed[fan] != speed) {
-		if (speed == -1)
-			printk(KERN_DEBUG "adt746x: Setting speed to automatic "
-				"for %s fan.\n", sensor_location[fan+1]);
-		else
-			printk(KERN_DEBUG "adt746x: Setting speed to %d "
-				"for %s fan.\n", speed, sensor_location[fan+1]);
+		if (verbose) {
+			if (speed == -1)
+				printk(KERN_DEBUG "adt746x: Setting speed to automatic "
+					"for %s fan.\n", sensor_location[fan+1]);
+			else
+				printk(KERN_DEBUG "adt746x: Setting speed to %d "
+					"for %s fan.\n", speed, sensor_location[fan+1]);
+		}
 	} else
 		return;
 
@@ -298,10 +305,11 @@ static void update_fans_speed (struct thermostat *th)
 			if (new_speed > 255)
 				new_speed = 255;
 
-			printk(KERN_DEBUG "adt746x: setting fans speed to %d "
-				"(limit exceeded by %d on %s) \n",
-				new_speed, var,
-				sensor_location[fan_number+1]);
+			if (verbose)
+				printk(KERN_DEBUG "adt746x: Setting fans speed to %d "
+					"(limit exceeded by %d on %s) \n",
+					new_speed, var,
+					sensor_location[fan_number+1]);
 			write_both_fan_speed(th, new_speed);
 			th->last_var[fan_number] = var;
 		} else if (var < -2) {
@@ -309,8 +317,9 @@ static void update_fans_speed (struct thermostat *th)
 			 * so cold (lastvar >= -1) */
 			if (i == 2 && lastvar < -1) {
 				if (th->last_speed[fan_number] != 0)
-					printk(KERN_DEBUG "adt746x: Stopping "
-						"fans.\n");
+					if (verbose)
+						printk(KERN_DEBUG "adt746x: Stopping "
+							"fans.\n");
 				write_both_fan_speed(th, 0);
 			}
 		}
@@ -406,7 +415,7 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
 		th->initial_limits[i] = read_reg(th, LIMIT_REG[i]);
 		set_limit(th, i);
 	}
 
 	printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d"
 			 " to %d, %d, %d\n",
 		th->initial_limits[0], th->initial_limits[1],
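The adt746x patch gates all of the driver's KERN_DEBUG chatter behind the new verbose module parameter, so fan-speed changes are silent by default but can be traced with something like `modprobe therm_adt746x verbose=1`. A self-contained sketch of the idiom (module name and messages are illustrative, not from the tree):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int verbose = 0;			/* off unless set at load time */
module_param(verbose, bool, 0);
MODULE_PARM_DESC(verbose, "Verbose log operations (default 0)");

static int __init demo_init(void)
{
	if (verbose)
		printk(KERN_DEBUG "demo: verbose logging enabled\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");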
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index cf72b782f60f..8d0958c38b6b 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -1988,18 +1988,13 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
 
 static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
 {
-	int rc;
-
 	state = state_detached;
 
 	/* Lookup the fans in the device tree */
 	fcu_lookup_fans(dev->node);
 
 	/* Add the driver */
-	rc = i2c_add_driver(&therm_pm72_driver);
-	if (rc < 0)
-		return rc;
-	return 0;
+	return i2c_add_driver(&therm_pm72_driver);
 }
 
 static int fcu_of_remove(struct of_device* dev)
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index fd16642d98ab..57460e46c89f 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -240,12 +240,7 @@ static int wf_lm75_detach(struct i2c_client *client)
 
 static int __init wf_lm75_sensor_init(void)
 {
-	int rc;
-
-	rc = i2c_add_driver(&wf_lm75_driver);
-	if (rc < 0)
-		return rc;
-	return 0;
+	return i2c_add_driver(&wf_lm75_driver);
 }
 
 static void __exit wf_lm75_sensor_exit(void)
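Both Macintosh sensor drivers get the same tail-call cleanup: when a function's last act is to forward another function's status, the temporary and the `if (rc < 0) return rc; return 0;` sequence are dead weight. Note the old shape also quietly mapped any positive return to 0, which the new shape does not; for i2c_add_driver(), which returns 0 or a negative errno, the two are equivalent. Schematically (demo_driver is a placeholder, not a symbol from the tree):

#include <linux/i2c.h>

static struct i2c_driver demo_driver;	/* placeholder for the real driver */

/* Before: rc exists only to be re-returned (and >0 became 0). */
static int __init demo_init_old(void)
{
	int rc;

	rc = i2c_add_driver(&demo_driver);
	if (rc < 0)
		return rc;
	return 0;
}

/* After: forward whatever the callee reports. */
static int __init demo_init(void)
{
	return i2c_add_driver(&demo_driver);
}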
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 252d55df9642..76a189ceb529 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -315,6 +315,8 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
 	if (bitmap->file == NULL)
 		return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
 
+	flush_dcache_page(page); /* make sure visible to anyone reading the file */
+
 	if (wait)
 		lock_page(page);
 	else {
@@ -341,7 +343,7 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
 		/* add to list to be waited for by daemon */
 		struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
 		item->page = page;
-		page_cache_get(page);
+		get_page(page);
 		spin_lock(&bitmap->write_lock);
 		list_add(&item->list, &bitmap->complete_pages);
 		spin_unlock(&bitmap->write_lock);
@@ -357,10 +359,10 @@ static struct page *read_page(struct file *file, unsigned long index,
 	struct inode *inode = file->f_mapping->host;
 	struct page *page = NULL;
 	loff_t isize = i_size_read(inode);
-	unsigned long end_index = isize >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = isize >> PAGE_SHIFT;
 
-	PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
-			(unsigned long long)index << PAGE_CACHE_SHIFT);
+	PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+			(unsigned long long)index << PAGE_SHIFT);
 
 	page = read_cache_page(inode->i_mapping, index,
 			(filler_t *)inode->i_mapping->a_ops->readpage, file);
@@ -368,7 +370,7 @@ static struct page *read_page(struct file *file, unsigned long index,
 		goto out;
 	wait_on_page_locked(page);
 	if (!PageUptodate(page) || PageError(page)) {
-		page_cache_release(page);
+		put_page(page);
 		page = ERR_PTR(-EIO);
 		goto out;
 	}
@@ -376,14 +378,14 @@ static struct page *read_page(struct file *file, unsigned long index,
 	if (index > end_index) /* we have read beyond EOF */
 		*bytes_read = 0;
 	else if (index == end_index) /* possible short read */
-		*bytes_read = isize & ~PAGE_CACHE_MASK;
+		*bytes_read = isize & ~PAGE_MASK;
 	else
-		*bytes_read = PAGE_CACHE_SIZE; /* got a full page */
+		*bytes_read = PAGE_SIZE; /* got a full page */
 out:
 	if (IS_ERR(page))
 		printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
-			(int)PAGE_CACHE_SIZE,
-			(unsigned long long)index << PAGE_CACHE_SHIFT,
+			(int)PAGE_SIZE,
+			(unsigned long long)index << PAGE_SHIFT,
 			PTR_ERR(page));
 	return page;
 }
@@ -406,11 +408,11 @@ int bitmap_update_sb(struct bitmap *bitmap)
 		return 0;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
 	sb->events = cpu_to_le64(bitmap->mddev->events);
 	if (!bitmap->mddev->degraded)
 		sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
-	kunmap(bitmap->sb_page);
+	kunmap_atomic(sb, KM_USER0);
 	return write_page(bitmap, bitmap->sb_page, 1);
 }
 
@@ -421,7 +423,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 
 	if (!bitmap || !bitmap->sb_page)
 		return;
-	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
 	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
 	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
 	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
@@ -440,7 +442,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 	printk(KERN_DEBUG "     sync size: %llu KB\n",
 			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
 	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
-	kunmap(bitmap->sb_page);
+	kunmap_atomic(sb, KM_USER0);
 }
 
 /* read the superblock from the bitmap file and initialize some bitmap fields */
@@ -466,7 +468,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		return err;
 	}
 
-	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
 
 	if (bytes_read < sizeof(*sb)) { /* short read */
 		printk(KERN_INFO "%s: bitmap file superblock truncated\n",
@@ -485,12 +487,12 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
 		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
 		reason = "unrecognized superblock version";
-	else if (chunksize < 512 || chunksize > (1024 * 1024 * 4))
-		reason = "bitmap chunksize out of range (512B - 4MB)";
+	else if (chunksize < PAGE_SIZE)
+		reason = "bitmap chunksize too small";
 	else if ((1 << ffz(~chunksize)) != chunksize)
 		reason = "bitmap chunksize not a power of 2";
-	else if (daemon_sleep < 1 || daemon_sleep > 15)
-		reason = "daemon sleep period out of range (1-15s)";
+	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ)
+		reason = "daemon sleep period out of range";
 	else if (write_behind > COUNTER_MAX)
 		reason = "write-behind limit out of range (0 - 16383)";
 	if (reason) {
@@ -535,7 +537,7 @@ success:
 	bitmap->events_cleared = bitmap->mddev->events;
 	err = 0;
 out:
-	kunmap(bitmap->sb_page);
+	kunmap_atomic(sb, KM_USER0);
 	if (err)
 		bitmap_print_sb(bitmap);
 	return err;
@@ -558,9 +560,9 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 		return;
 	}
-	page_cache_get(bitmap->sb_page);
+	get_page(bitmap->sb_page);
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
 	switch (op) {
 		case MASK_SET: sb->state |= bits;
 				break;
@@ -568,8 +570,8 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 				break;
 		default: BUG();
 	}
-	kunmap(bitmap->sb_page);
-	page_cache_release(bitmap->sb_page);
+	kunmap_atomic(sb, KM_USER0);
+	put_page(bitmap->sb_page);
 }
 
 /*
@@ -622,12 +624,11 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
 
 	while (pages--)
 		if (map[pages]->index != 0) /* 0 is sb_page, release it below */
-			page_cache_release(map[pages]);
+			put_page(map[pages]);
 	kfree(map);
 	kfree(attr);
 
-	if (sb_page)
-		page_cache_release(sb_page);
+	safe_put_page(sb_page);
 }
 
 static void bitmap_stop_daemon(struct bitmap *bitmap);
@@ -654,7 +655,7 @@ static void drain_write_queues(struct bitmap *bitmap)
 
 	while ((item = dequeue_page(bitmap))) {
 		/* don't bother to wait */
-		page_cache_release(item->page);
+		put_page(item->page);
 		mempool_free(item, bitmap->write_pool);
 	}
 
@@ -763,7 +764,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 
 	/* make sure the page stays cached until it gets written out */
 	if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
-		page_cache_get(page);
+		get_page(page);
 
 	/* set the bit */
 	kaddr = kmap_atomic(page, KM_USER0);
@@ -854,6 +855,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 	unsigned long bytes, offset, dummy;
 	int outofdate;
 	int ret = -ENOSPC;
+	void *paddr;
 
 	chunks = bitmap->chunks;
 	file = bitmap->file;
@@ -887,12 +889,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 	if (!bitmap->filemap)
 		goto out;
 
-	bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL);
+	bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL);
 	if (!bitmap->filemap_attr)
 		goto out;
 
-	memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages);
-
 	oldindex = ~0L;
 
 	for (i = 0; i < chunks; i++) {
@@ -901,8 +901,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 		bit = file_page_offset(i);
 		if (index != oldindex) { /* this is a new page, read it in */
 			/* unmap the old page, we're done with it */
-			if (oldpage != NULL)
-				kunmap(oldpage);
 			if (index == 0) {
 				/*
 				 * if we're here then the superblock page
@@ -925,30 +923,32 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 
 			oldindex = index;
 			oldpage = page;
-			kmap(page);
 
 			if (outofdate) {
 				/*
 				 * if bitmap is out of date, dirty the
 				 * whole page and write it out
 				 */
-				memset(page_address(page) + offset, 0xff,
+				paddr = kmap_atomic(page, KM_USER0);
+				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
+				kunmap_atomic(paddr, KM_USER0);
 				ret = write_page(bitmap, page, 1);
 				if (ret) {
-					kunmap(page);
 					/* release, page not in filemap yet */
-					page_cache_release(page);
+					put_page(page);
 					goto out;
 				}
 			}
 
 			bitmap->filemap[bitmap->file_pages++] = page;
 		}
+		paddr = kmap_atomic(page, KM_USER0);
 		if (bitmap->flags & BITMAP_HOSTENDIAN)
-			b = test_bit(bit, page_address(page));
+			b = test_bit(bit, paddr);
 		else
-			b = ext2_test_bit(bit, page_address(page));
+			b = ext2_test_bit(bit, paddr);
+		kunmap_atomic(paddr, KM_USER0);
 		if (b) {
 			/* if the disk bit is set, set the memory bit */
 			bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
@@ -963,9 +963,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 	ret = 0;
 	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
 
-	if (page) /* unmap the last page */
-		kunmap(page);
-
 	if (bit_cnt) { /* Kick recovery if any bits were set */
 		set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
 		md_wakeup_thread(bitmap->mddev->thread);
@@ -1021,6 +1018,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 	int err = 0;
 	int blocks;
 	int attr;
+	void *paddr;
 
 	if (bitmap == NULL)
 		return 0;
@@ -1043,7 +1041,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 		/* skip this page unless it's marked as needing cleaning */
 		if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
 			if (attr & BITMAP_PAGE_NEEDWRITE) {
-				page_cache_get(page);
+				get_page(page);
 				clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
 			}
 			spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1057,13 +1055,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 				default:
 					bitmap_file_kick(bitmap);
 				}
-				page_cache_release(page);
+				put_page(page);
 			}
 			continue;
 		}
 
 		/* grab the new page, sync and release the old */
-		page_cache_get(page);
+		get_page(page);
 		if (lastpage != NULL) {
 			if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
 				clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1077,14 +1075,12 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 				set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 				spin_unlock_irqrestore(&bitmap->lock, flags);
 			}
-			kunmap(lastpage);
-			page_cache_release(lastpage);
+			put_page(lastpage);
 			if (err)
 				bitmap_file_kick(bitmap);
 		} else
 			spin_unlock_irqrestore(&bitmap->lock, flags);
 		lastpage = page;
-		kmap(page);
 /*
 		printk("bitmap clean at page %lu\n", j);
 */
@@ -1107,10 +1103,12 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 						  -1);
 
 			/* clear the bit */
+			paddr = kmap_atomic(page, KM_USER0);
 			if (bitmap->flags & BITMAP_HOSTENDIAN)
-				clear_bit(file_page_offset(j), page_address(page));
+				clear_bit(file_page_offset(j), paddr);
 			else
-				ext2_clear_bit(file_page_offset(j), page_address(page));
+				ext2_clear_bit(file_page_offset(j), paddr);
+			kunmap_atomic(paddr, KM_USER0);
 		}
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1118,7 +1116,6 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 
 	/* now sync the final page */
 	if (lastpage != NULL) {
-		kunmap(lastpage);
 		spin_lock_irqsave(&bitmap->lock, flags);
 		if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) {
 			clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1133,7 +1130,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
 			spin_unlock_irqrestore(&bitmap->lock, flags);
 		}
 
-		page_cache_release(lastpage);
+		put_page(lastpage);
 	}
 
 	return err;
@@ -1184,7 +1181,7 @@ static void bitmap_writeback_daemon(mddev_t *mddev)
 			PRINTK("finished page writeback: %p\n", page);
 
 			err = PageError(page);
-			page_cache_release(page);
+			put_page(page);
 			if (err) {
 				printk(KERN_WARNING "%s: bitmap file writeback "
 					"failed (page %lu): %d\n",
@@ -1530,6 +1527,8 @@ void bitmap_destroy(mddev_t *mddev)
 		return;
 
 	mddev->bitmap = NULL; /* disconnect from the md device */
+	if (mddev->thread)
+		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
 
 	bitmap_free(bitmap);
 }
@@ -1555,12 +1554,10 @@ int bitmap_create(mddev_t *mddev)
 
 	BUG_ON(file && mddev->bitmap_offset);
 
-	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
+	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
 	if (!bitmap)
 		return -ENOMEM;
 
-	memset(bitmap, 0, sizeof(*bitmap));
-
 	spin_lock_init(&bitmap->lock);
 	bitmap->mddev = mddev;
 
@@ -1601,12 +1598,11 @@ int bitmap_create(mddev_t *mddev)
 #ifdef INJECT_FATAL_FAULT_1
 	bitmap->bp = NULL;
 #else
-	bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
+	bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
 #endif
 	err = -ENOMEM;
 	if (!bitmap->bp)
 		goto error;
-	memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp));
 
 	bitmap->flags |= BITMAP_ACTIVE;
 
@@ -1636,6 +1632,8 @@ int bitmap_create(mddev_t *mddev)
 
 	if (IS_ERR(bitmap->writeback_daemon))
 		return PTR_ERR(bitmap->writeback_daemon);
+	mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+
 	return bitmap_update_sb(bitmap);
 
  error:
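Three mechanical conversions run through the bitmap.c hunks: page_cache_get()/page_cache_release() become plain get_page()/put_page(), kmalloc()+memset() becomes kzalloc(), and long-lived kmap()/kunmap() pairs become kmap_atomic()/kunmap_atomic(). The atomic mapping is cheaper and cannot fail, but it forbids sleeping while held, which is why each hunk narrows the mapping to the few lines that actually touch the page. A sketch of the resulting access pattern under these 2.6-era APIs (when kmap_atomic still took a KM_USER0 slot):

#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Flip one bit in a (possibly highmem) page without sleeping. */
static void set_bit_in_page(struct page *page, unsigned long bit)
{
	void *paddr;

	get_page(page);			/* pin the page across the update */
	paddr = kmap_atomic(page, KM_USER0);
	set_bit(bit, paddr);		/* must not sleep between map/unmap */
	kunmap_atomic(paddr, KM_USER0);
	put_page(page);
}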
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cf6631056683..a601a427885c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -690,6 +690,8 @@ bad3:
 bad2:
 	crypto_free_tfm(tfm);
 bad1:
+	/* Must zero key material before freeing */
+	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
 	kfree(cc);
 	return -EINVAL;
 }
@@ -706,6 +708,9 @@ static void crypt_dtr(struct dm_target *ti)
 		cc->iv_gen_ops->dtr(cc);
 	crypto_free_tfm(cc->tfm);
 	dm_put_device(ti, cc->dev);
+
+	/* Must zero key material before freeing */
+	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
 	kfree(cc);
 }
 
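The dm-crypt hunks plug an information leak: crypt_config carries the raw key bytes at its tail, so both the constructor's error path and the destructor now wipe the allocation before kfree() hands the memory back, where the next allocator customer could otherwise read it. The idea in isolation (struct simplified; in dm-crypt the key follows the struct as a flexible array):

#include <linux/slab.h>
#include <linux/string.h>

struct secret_ctx {
	unsigned int key_size;
	u8 key[0];			/* key bytes allocated after the struct */
};

static void secret_ctx_free(struct secret_ctx *cc)
{
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}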
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index 1a77f3265706..f9035bfd1a9f 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -9,9 +9,6 @@
 
 #include "dm.h"
 
-/* FIXME make this configurable */
-#define DM_MAX_IO_REGIONS 8
-
 struct io_region {
 	struct block_device *bdev;
 	sector_t sector;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 07d44e19536e..561bda5011e0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -270,6 +270,7 @@ static int dm_hash_rename(const char *old, const char *new)
 {
 	char *new_name, *old_name;
 	struct hash_cell *hc;
+	struct dm_table *table;
 
 	/*
 	 * duplicate new.
@@ -317,6 +318,15 @@ static int dm_hash_rename(const char *old, const char *new)
 	/* rename the device node in devfs */
 	register_with_devfs(hc);
 
+	/*
+	 * Wake up any dm event waiters.
+	 */
+	table = dm_get_table(hc->md);
+	if (table) {
+		dm_table_event(table);
+		dm_table_put(table);
+	}
+
 	up_write(&_hash_lock);
 	kfree(old_name);
 	return 0;
@@ -683,14 +693,18 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
 static int do_suspend(struct dm_ioctl *param)
 {
 	int r = 0;
+	int do_lockfs = 1;
 	struct mapped_device *md;
 
 	md = find_device(param);
 	if (!md)
 		return -ENXIO;
 
+	if (param->flags & DM_SKIP_LOCKFS_FLAG)
+		do_lockfs = 0;
+
 	if (!dm_suspended(md))
-		r = dm_suspend(md);
+		r = dm_suspend(md, do_lockfs);
 
 	if (!r)
 		r = __dev_status(md, param);
@@ -702,6 +716,7 @@ static int do_suspend(struct dm_ioctl *param)
 static int do_resume(struct dm_ioctl *param)
 {
 	int r = 0;
+	int do_lockfs = 1;
 	struct hash_cell *hc;
 	struct mapped_device *md;
 	struct dm_table *new_map;
@@ -727,8 +742,10 @@ static int do_resume(struct dm_ioctl *param)
 	/* Do we need to load a new map ? */
 	if (new_map) {
 		/* Suspend if it isn't already suspended */
+		if (param->flags & DM_SKIP_LOCKFS_FLAG)
+			do_lockfs = 0;
 		if (!dm_suspended(md))
-			dm_suspend(md);
+			dm_suspend(md, do_lockfs);
 
 		r = dm_swap_table(md, new_map);
 		if (r) {
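Both ioctl paths now honour DM_SKIP_LOCKFS_FLAG, letting userspace ask dm_suspend() not to freeze the filesystem first; that is useful when the device is already quiesced and a freeze would only add latency or risk a deadlock. The flag-to-argument plumbing, reduced to a skeleton (the wrapper name is hypothetical):

/* Translate the userspace flag into dm_suspend()'s new argument. */
static int demo_do_suspend(struct dm_ioctl *param, struct mapped_device *md)
{
	int do_lockfs = 1;		/* freeze by default, as before */

	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		do_lockfs = 0;

	if (!dm_suspended(md))
		return dm_suspend(md, do_lockfs);
	return 0;
}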
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a76349cb10a5..efe4adf78530 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -573,7 +573,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
 				      lc->sync_search);
 		lc->sync_search = *region + 1;
 
-		if (*region == lc->region_count)
+		if (*region >= lc->region_count)
 			return 0;
 
 	} while (log_test_bit(lc->recovering_bits, *region));
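The one-character dm-log change hardens a bounds check: find_next_zero_bit() reports "nothing found" by returning the bitmap size, but once the cached search offset has advanced past the end, the result is not guaranteed to compare exactly equal to region_count on every implementation, so `>=` is the safer test. A hedged sketch of the guarded lookup (names are illustrative):

#include <linux/bitops.h>

static int demo_get_work(unsigned long *bits, unsigned long nr_regions,
			 unsigned long start, unsigned long *region)
{
	*region = find_next_zero_bit(bits, nr_regions, start);
	if (*region >= nr_regions)	/* '==' could miss start > nr_regions */
		return 0;		/* no region needs resync */
	return 1;
}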
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6b0fc1670929..6cfa8d435d55 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -562,6 +562,8 @@ struct mirror_set {
 	region_t nr_regions;
 	int in_sync;
 
+	struct mirror *default_mirror;	/* Default mirror */
+
 	unsigned int nr_mirrors;
 	struct mirror mirror[0];
 };
@@ -611,7 +613,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
 	unsigned long flags = 0;
 
 	/* fill in the source */
-	m = ms->mirror + DEFAULT_MIRROR;
+	m = ms->default_mirror;
 	from.bdev = m->dev->bdev;
 	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
 	if (reg->key == (ms->nr_regions - 1)) {
@@ -627,7 +629,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
 
 	/* fill in the destinations */
 	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
-		if (i == DEFAULT_MIRROR)
+		if (&ms->mirror[i] == ms->default_mirror)
 			continue;
 
 		m = ms->mirror + i;
@@ -682,7 +684,7 @@ static void do_recovery(struct mirror_set *ms)
 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
 {
 	/* FIXME: add read balancing */
-	return ms->mirror + DEFAULT_MIRROR;
+	return ms->default_mirror;
 }
 
 /*
@@ -709,7 +711,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
 		if (rh_in_sync(&ms->rh, region, 0))
 			m = choose_mirror(ms, bio->bi_sector);
 		else
-			m = ms->mirror + DEFAULT_MIRROR;
+			m = ms->default_mirror;
 
 		map_bio(ms, m, bio);
 		generic_make_request(bio);
@@ -833,7 +835,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 		rh_delay(&ms->rh, bio);
 
 	while ((bio = bio_list_pop(&nosync))) {
-		map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio);
+		map_bio(ms, ms->default_mirror, bio);
 		generic_make_request(bio);
 	}
 }
@@ -900,6 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 	ms->nr_mirrors = nr_mirrors;
 	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
 	ms->in_sync = 0;
+	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
 
 	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
 		ti->error = "dm-mirror: Error creating dirty region hash";
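Replacing the compile-time DEFAULT_MIRROR index with a ms->default_mirror pointer makes "which leg is the default" mutable state: a later change can re-point it (for instance when the primary leg fails) without touching every call site, and loops compare addresses rather than indices. A reduced sketch with a simplified struct (not the real dm-raid1 types):

struct mirror;				/* opaque here; real struct in dm-raid1.c */

struct mirror_set_demo {
	struct mirror *default_mirror;	/* re-pointable at runtime */
	unsigned int nr_mirrors;
	struct mirror *mirrors;		/* array of nr_mirrors legs */
};

/* Fan work out to every leg except the current default. */
static void for_each_secondary(struct mirror_set_demo *ms,
			       void (*fn)(struct mirror *))
{
	unsigned int i;

	for (i = 0; i < ms->nr_mirrors; i++) {
		if (&ms->mirrors[i] == ms->default_mirror)
			continue;	/* pointer compare, not index compare */
		fn(&ms->mirrors[i]);
	}
}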
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ab54f99b7c3b..4b9dd8fb1e5c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -371,6 +371,20 @@ static inline ulong round_up(ulong n, ulong size)
 	return (n + size) & ~size;
 }
 
+static void read_snapshot_metadata(struct dm_snapshot *s)
+{
+	if (s->have_metadata)
+		return;
+
+	if (s->store.read_metadata(&s->store)) {
+		down_write(&s->lock);
+		s->valid = 0;
+		up_write(&s->lock);
+	}
+
+	s->have_metadata = 1;
+}
+
 /*
  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  */
@@ -848,16 +862,7 @@ static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
 
-	if (s->have_metadata)
-		return;
-
-	if (s->store.read_metadata(&s->store)) {
-		down_write(&s->lock);
-		s->valid = 0;
-		up_write(&s->lock);
-	}
-
-	s->have_metadata = 1;
+	read_snapshot_metadata(s);
 }
 
 static int snapshot_status(struct dm_target *ti, status_type_t type,
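The snapshot change is a pure extract-function refactor: the metadata load moves out of snapshot_resume() into read_snapshot_metadata(), still guarded by the have_metadata latch, so future call sites can trigger the same one-shot load. Nothing behavioural changes; the shape is simply an idempotent helper (types and helper names below are simplified stand-ins, not the real dm-snap ones):

struct demo_store {
	int (*read_metadata)(struct demo_store *store);
};

struct demo_snapshot {
	int have_metadata;
	struct demo_store store;
};

static void demo_mark_invalid(struct demo_snapshot *s);

/* Idempotent: safe to call from any path that needs metadata loaded. */
static void read_metadata_once(struct demo_snapshot *s)
{
	if (s->have_metadata)
		return;

	if (s->store.read_metadata(&s->store))
		demo_mark_invalid(s);	/* error path: invalidate snapshot */

	s->have_metadata = 1;
}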
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 930b9fc27953..0e481512f918 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -55,6 +55,7 @@ union map_info *dm_get_mapinfo(struct bio *bio)
  */
 #define DMF_BLOCK_IO 0
 #define DMF_SUSPENDED 1
+#define DMF_FROZEN 2
 
 struct mapped_device {
 	struct rw_semaphore io_lock;
@@ -97,7 +98,7 @@ struct mapped_device {
 	 * freeze/thaw support require holding onto a super block
 	 */
 	struct super_block *frozen_sb;
-	struct block_device *frozen_bdev;
+	struct block_device *suspended_bdev;
 };
 
 #define MIN_IOS 256
@@ -836,9 +837,9 @@ static void __set_size(struct mapped_device *md, sector_t size)
 {
 	set_capacity(md->disk, size);
 
-	down(&md->frozen_bdev->bd_inode->i_sem);
-	i_size_write(md->frozen_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-	up(&md->frozen_bdev->bd_inode->i_sem);
+	down(&md->suspended_bdev->bd_inode->i_sem);
+	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+	up(&md->suspended_bdev->bd_inode->i_sem);
 }
 
 static int __bind(struct mapped_device *md, struct dm_table *t)
@@ -902,10 +903,9 @@ int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
 	return create_aux(minor, 1, result);
 }
 
-void *dm_get_mdptr(dev_t dev)
+static struct mapped_device *dm_find_md(dev_t dev)
 {
 	struct mapped_device *md;
-	void *mdptr = NULL;
 	unsigned minor = MINOR(dev);
 
 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
@@ -914,12 +914,32 @@ void *dm_get_mdptr(dev_t dev)
 	down(&_minor_lock);
 
 	md = idr_find(&_minor_idr, minor);
-
-	if (md && (dm_disk(md)->first_minor == minor))
-		mdptr = md->interface_ptr;
+	if (!md || (dm_disk(md)->first_minor != minor))
+		md = NULL;
 
 	up(&_minor_lock);
 
+	return md;
+}
+
+struct mapped_device *dm_get_md(dev_t dev)
+{
+	struct mapped_device *md = dm_find_md(dev);
+
+	if (md)
+		dm_get(md);
+
+	return md;
+}
+
+void *dm_get_mdptr(dev_t dev)
+{
+	struct mapped_device *md;
+	void *mdptr = NULL;
+
+	md = dm_find_md(dev);
+	if (md)
+		mdptr = md->interface_ptr;
 	return mdptr;
 }
 
@@ -991,43 +1011,33 @@ out:
  */
 static int lock_fs(struct mapped_device *md)
 {
-	int r = -ENOMEM;
-
-	md->frozen_bdev = bdget_disk(md->disk, 0);
-	if (!md->frozen_bdev) {
-		DMWARN("bdget failed in lock_fs");
-		goto out;
-	}
+	int r;
 
 	WARN_ON(md->frozen_sb);
 
-	md->frozen_sb = freeze_bdev(md->frozen_bdev);
+	md->frozen_sb = freeze_bdev(md->suspended_bdev);
 	if (IS_ERR(md->frozen_sb)) {
 		r = PTR_ERR(md->frozen_sb);
-		goto out_bdput;
+		md->frozen_sb = NULL;
+		return r;
 	}
 
+	set_bit(DMF_FROZEN, &md->flags);
+
 	/* don't bdput right now, we don't want the bdev
-	 * to go away while it is locked.  We'll bdput
-	 * in unlock_fs
+	 * to go away while it is locked.
 	 */
 	return 0;
-
-out_bdput:
-	bdput(md->frozen_bdev);
-	md->frozen_sb = NULL;
-	md->frozen_bdev = NULL;
-out:
-	return r;
 }
 
 static void unlock_fs(struct mapped_device *md)
 {
-	thaw_bdev(md->frozen_bdev, md->frozen_sb);
-	bdput(md->frozen_bdev);
+	if (!test_bit(DMF_FROZEN, &md->flags))
+		return;
 
+	thaw_bdev(md->suspended_bdev, md->frozen_sb);
 	md->frozen_sb = NULL;
-	md->frozen_bdev = NULL;
+	clear_bit(DMF_FROZEN, &md->flags);
 }
 
 /*
@@ -1037,7 +1047,7 @@ static void unlock_fs(struct mapped_device *md)
  * dm_bind_table, dm_suspend must be called to flush any in
  * flight bios and ensure that any further io gets deferred.
  */
-int dm_suspend(struct mapped_device *md)
+int dm_suspend(struct mapped_device *md, int do_lockfs)
 {
 	struct dm_table *map = NULL;
 	DECLARE_WAITQUEUE(wait, current);
@@ -1053,10 +1063,19 @@ int dm_suspend(struct mapped_device *md)
 	/* This does not get reverted if there's an error later. */
 	dm_table_presuspend_targets(map);
 
-	/* Flush I/O to the device. */
-	r = lock_fs(md);
-	if (r)
+	md->suspended_bdev = bdget_disk(md->disk, 0);
+	if (!md->suspended_bdev) {
+		DMWARN("bdget failed in dm_suspend");
+		r = -ENOMEM;
 		goto out;
+	}
+
+	/* Flush I/O to the device. */
+	if (do_lockfs) {
+		r = lock_fs(md);
+		if (r)
+			goto out;
+	}
 
 	/*
 	 * First we set the BLOCK_IO flag so no more ios will be mapped.
@@ -1105,6 +1124,11 @@ int dm_suspend(struct mapped_device *md)
 	r = 0;
 
 out:
+	if (r && md->suspended_bdev) {
+		bdput(md->suspended_bdev);
+		md->suspended_bdev = NULL;
+	}
+
 	dm_table_put(map);
 	up(&md->suspend_lock);
 	return r;
@@ -1135,6 +1159,9 @@ int dm_resume(struct mapped_device *md)
 
 	unlock_fs(md);
 
+	bdput(md->suspended_bdev);
+	md->suspended_bdev = NULL;
+
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
 	dm_table_unplug_all(map);
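The dm.c rework decouples the block-device reference from the filesystem freeze. dm_suspend() now always takes suspended_bdev (released in dm_resume() or on the suspend error path), while lock_fs()/unlock_fs() only freeze and thaw, with the new DMF_FROZEN bit recording whether a thaw is owed; that separation is what lets the do_lockfs argument skip the freeze without also skipping the bdev bookkeeping. The pairing as a skeleton (error handling and the I/O flushing elided):

static int demo_suspend(struct mapped_device *md, int do_lockfs)
{
	md->suspended_bdev = bdget_disk(md->disk, 0);	/* always taken */
	if (!md->suspended_bdev)
		return -ENOMEM;

	if (do_lockfs) {
		md->frozen_sb = freeze_bdev(md->suspended_bdev);
		set_bit(DMF_FROZEN, &md->flags);	/* remember to thaw */
	}

	/* ... block mapping, flush in-flight I/O, set DMF_SUSPENDED ... */
	return 0;
}

static void demo_resume(struct mapped_device *md)
{
	if (test_and_clear_bit(DMF_FROZEN, &md->flags))
		thaw_bdev(md->suspended_bdev, md->frozen_sb);

	bdput(md->suspended_bdev);			/* always released */
	md->suspended_bdev = NULL;
}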
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e38c3fc1a1db..4eaf075da217 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -28,7 +28,7 @@
  * in types.h.
  */
 #ifdef CONFIG_LBD
-#define SECTOR_FORMAT "%Lu"
+#define SECTOR_FORMAT "%llu"
 #else
 #define SECTOR_FORMAT "%lu"
 #endif
@@ -58,6 +58,7 @@ int dm_create(struct mapped_device **md);
 int dm_create_with_minor(unsigned int minor, struct mapped_device **md);
 void dm_set_mdptr(struct mapped_device *md, void *ptr);
 void *dm_get_mdptr(dev_t dev);
+struct mapped_device *dm_get_md(dev_t dev);
 
 /*
  * Reference counting for md.
@@ -68,7 +69,7 @@ void dm_put(struct mapped_device *md);
 /*
  * A device can still be used while suspended, but I/O is deferred.
  */
-int dm_suspend(struct mapped_device *md);
+int dm_suspend(struct mapped_device *md, int with_lockfs);
 int dm_resume(struct mapped_device *md);
 
 /*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 0248f8e7eac0..a7a5ab554338 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -316,9 +316,10 @@ static int stop(mddev_t *mddev)
 	return 0;
 }
 
-static mdk_personality_t faulty_personality =
+static struct mdk_personality faulty_personality =
 {
 	.name	  = "faulty",
+	.level	  = LEVEL_FAULTY,
 	.owner	  = THIS_MODULE,
 	.make_request = make_request,
 	.run	  = run,
@@ -329,15 +330,17 @@ static mdk_personality_t faulty_personality =
 
 static int __init raid_init(void)
 {
-	return register_md_personality(FAULTY, &faulty_personality);
+	return register_md_personality(&faulty_personality);
 }
 
 static void raid_exit(void)
 {
-	unregister_md_personality(FAULTY);
+	unregister_md_personality(&faulty_personality);
 }
 
 module_init(raid_init);
 module_exit(raid_exit);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("md-personality-10"); /* faulty */
+MODULE_ALIAS("md-faulty");
+MODULE_ALIAS("md-level--5");
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index eb7036485975..ca99979c868a 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -561,11 +561,13 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
  * Cancels a kcopyd job, eg. someone might be deactivating a
  * mirror.
  */
+#if 0
 int kcopyd_cancel(struct kcopyd_job *job, int block)
 {
 	/* FIXME: finish */
 	return -1;
 }
+#endif /* 0 */
 
 /*-----------------------------------------------------------------
  * Unit setup
@@ -684,4 +686,3 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
 EXPORT_SYMBOL(kcopyd_client_create);
 EXPORT_SYMBOL(kcopyd_client_destroy);
 EXPORT_SYMBOL(kcopyd_copy);
-EXPORT_SYMBOL(kcopyd_cancel);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 946efef3a8f5..777585458c85 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -121,11 +121,10 @@ static int linear_run (mddev_t *mddev)
 	sector_t curr_offset;
 	struct list_head *tmp;
 
-	conf = kmalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t),
+	conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t),
 			GFP_KERNEL);
 	if (!conf)
 		goto out;
-	memset(conf, 0, sizeof(*conf) + mddev->raid_disks*sizeof(dev_info_t));
 	mddev->private = conf;
 
 	cnt = 0;
@@ -352,9 +351,10 @@ static void linear_status (struct seq_file *seq, mddev_t *mddev)
 }
 
 
-static mdk_personality_t linear_personality=
+static struct mdk_personality linear_personality =
 {
 	.name		= "linear",
+	.level		= LEVEL_LINEAR,
 	.owner		= THIS_MODULE,
 	.make_request	= linear_make_request,
 	.run		= linear_run,
@@ -364,16 +364,18 @@ static mdk_personality_t linear_personality=
 
 static int __init linear_init (void)
 {
-	return register_md_personality (LINEAR, &linear_personality);
+	return register_md_personality (&linear_personality);
 }
 
 static void linear_exit (void)
 {
-	unregister_md_personality (LINEAR);
+	unregister_md_personality (&linear_personality);
 }
 
 
 module_init(linear_init);
 module_exit(linear_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("md-personality-1"); /* LINEAR */
+MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
+MODULE_ALIAS("md-linear");
+MODULE_ALIAS("md-level--1");
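faulty.c and linear.c adopt the registration scheme introduced in md.c below: a personality is no longer a numbered slot in a fixed pers[] array but a struct mdk_personality carrying its own .level, kept on a list and matched by level or by name (see find_pers()). The extra MODULE_ALIAS lines ("md-<name>", "md-level-<n>") let request_module() autoload a personality either way. The boilerplate a personality now carries, sketched for a hypothetical module:

static struct mdk_personality demo_personality =
{
	.name		= "demo",
	.level		= 42,		/* hypothetical level number */
	.owner		= THIS_MODULE,
	/* .make_request, .run, .stop, .status, ... */
};

static int __init demo_md_init(void)
{
	/* register the structure itself, not a numeric slot */
	return register_md_personality(&demo_personality);
}

static void demo_md_exit(void)
{
	unregister_md_personality(&demo_personality);
}

module_init(demo_md_init);
module_exit(demo_md_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-demo");	/* autoload by name */
MODULE_ALIAS("md-level-42");	/* autoload by level */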
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8175a2a222da..1b76fb29fb70 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -42,6 +42,7 @@
42#include <linux/devfs_fs_kernel.h> 42#include <linux/devfs_fs_kernel.h>
43#include <linux/buffer_head.h> /* for invalidate_bdev */ 43#include <linux/buffer_head.h> /* for invalidate_bdev */
44#include <linux/suspend.h> 44#include <linux/suspend.h>
45#include <linux/poll.h>
45 46
46#include <linux/init.h> 47#include <linux/init.h>
47 48
@@ -67,7 +68,7 @@
67static void autostart_arrays (int part); 68static void autostart_arrays (int part);
68#endif 69#endif
69 70
70static mdk_personality_t *pers[MAX_PERSONALITY]; 71static LIST_HEAD(pers_list);
71static DEFINE_SPINLOCK(pers_lock); 72static DEFINE_SPINLOCK(pers_lock);
72 73
73/* 74/*
@@ -80,10 +81,22 @@ static DEFINE_SPINLOCK(pers_lock);
80 * idle IO detection. 81 * idle IO detection.
81 * 82 *
82 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 83 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
84 * or /sys/block/mdX/md/sync_speed_{min,max}
83 */ 85 */
84 86
85static int sysctl_speed_limit_min = 1000; 87static int sysctl_speed_limit_min = 1000;
86static int sysctl_speed_limit_max = 200000; 88static int sysctl_speed_limit_max = 200000;
89static inline int speed_min(mddev_t *mddev)
90{
91 return mddev->sync_speed_min ?
92 mddev->sync_speed_min : sysctl_speed_limit_min;
93}
94
95static inline int speed_max(mddev_t *mddev)
96{
97 return mddev->sync_speed_max ?
98 mddev->sync_speed_max : sysctl_speed_limit_max;
99}
87 100
88static struct ctl_table_header *raid_table_header; 101static struct ctl_table_header *raid_table_header;
89 102
@@ -134,6 +147,24 @@ static struct block_device_operations md_fops;
134static int start_readonly; 147static int start_readonly;
135 148
136/* 149/*
150 * We have a system wide 'event count' that is incremented
151 * on any 'interesting' event, and readers of /proc/mdstat
152 * can use 'poll' or 'select' to find out when the event
153 * count increases.
154 *
155 * Events are:
156 * start array, stop array, error, add device, remove device,
157 * start build, activate spare
158 */
159static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
160static atomic_t md_event_count;
161static void md_new_event(mddev_t *mddev)
162{
163 atomic_inc(&md_event_count);
164 wake_up(&md_event_waiters);
165}
166
167/*
137 * Enables to iterate over all existing md arrays 168 * Enables to iterate over all existing md arrays
138 * all_mddevs_lock protects this list. 169 * all_mddevs_lock protects this list.
139 */ 170 */
@@ -209,12 +240,10 @@ static mddev_t * mddev_find(dev_t unit)
209 } 240 }
210 spin_unlock(&all_mddevs_lock); 241 spin_unlock(&all_mddevs_lock);
211 242
212 new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL); 243 new = kzalloc(sizeof(*new), GFP_KERNEL);
213 if (!new) 244 if (!new)
214 return NULL; 245 return NULL;
215 246
216 memset(new, 0, sizeof(*new));
217
218 new->unit = unit; 247 new->unit = unit;
219 if (MAJOR(unit) == MD_MAJOR) 248 if (MAJOR(unit) == MD_MAJOR)
220 new->md_minor = MINOR(unit); 249 new->md_minor = MINOR(unit);
@@ -262,7 +291,7 @@ static inline void mddev_unlock(mddev_t * mddev)
262 md_wakeup_thread(mddev->thread); 291 md_wakeup_thread(mddev->thread);
263} 292}
264 293
265mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 294static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
266{ 295{
267 mdk_rdev_t * rdev; 296 mdk_rdev_t * rdev;
268 struct list_head *tmp; 297 struct list_head *tmp;
@@ -286,6 +315,18 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
286 return NULL; 315 return NULL;
287} 316}
288 317
318static struct mdk_personality *find_pers(int level, char *clevel)
319{
320 struct mdk_personality *pers;
321 list_for_each_entry(pers, &pers_list, list) {
322 if (level != LEVEL_NONE && pers->level == level)
323 return pers;
324 if (strcmp(pers->name, clevel)==0)
325 return pers;
326 }
327 return NULL;
328}
329
289static inline sector_t calc_dev_sboffset(struct block_device *bdev) 330static inline sector_t calc_dev_sboffset(struct block_device *bdev)
290{ 331{
291 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 332 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
@@ -320,7 +361,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
320static void free_disk_sb(mdk_rdev_t * rdev) 361static void free_disk_sb(mdk_rdev_t * rdev)
321{ 362{
322 if (rdev->sb_page) { 363 if (rdev->sb_page) {
323 page_cache_release(rdev->sb_page); 364 put_page(rdev->sb_page);
324 rdev->sb_loaded = 0; 365 rdev->sb_loaded = 0;
325 rdev->sb_page = NULL; 366 rdev->sb_page = NULL;
326 rdev->sb_offset = 0; 367 rdev->sb_offset = 0;
@@ -461,6 +502,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
461 bio_put(bio); 502 bio_put(bio);
462 return ret; 503 return ret;
463} 504}
505EXPORT_SYMBOL_GPL(sync_page_io);
464 506
465static int read_disk_sb(mdk_rdev_t * rdev, int size) 507static int read_disk_sb(mdk_rdev_t * rdev, int size)
466{ 508{
@@ -665,6 +707,10 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
665 } 707 }
666 rdev->size = calc_dev_size(rdev, sb->chunk_size); 708 rdev->size = calc_dev_size(rdev, sb->chunk_size);
667 709
710 if (rdev->size < sb->size && sb->level > 1)
711 /* "this cannot possibly happen" ... */
712 ret = -EINVAL;
713
668 abort: 714 abort:
669 return ret; 715 return ret;
670} 716}
@@ -688,6 +734,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
688 mddev->ctime = sb->ctime; 734 mddev->ctime = sb->ctime;
689 mddev->utime = sb->utime; 735 mddev->utime = sb->utime;
690 mddev->level = sb->level; 736 mddev->level = sb->level;
737 mddev->clevel[0] = 0;
691 mddev->layout = sb->layout; 738 mddev->layout = sb->layout;
692 mddev->raid_disks = sb->raid_disks; 739 mddev->raid_disks = sb->raid_disks;
693 mddev->size = sb->size; 740 mddev->size = sb->size;
@@ -714,9 +761,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
714 761
715 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 762 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
716 mddev->bitmap_file == NULL) { 763 mddev->bitmap_file == NULL) {
717 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) { 764 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
765 && mddev->level != 10) {
718 /* FIXME use a better test */ 766 /* FIXME use a better test */
719 printk(KERN_WARNING "md: bitmaps only support for raid1\n"); 767 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
720 return -EINVAL; 768 return -EINVAL;
721 } 769 }
722 mddev->bitmap_offset = mddev->default_bitmap_offset; 770 mddev->bitmap_offset = mddev->default_bitmap_offset;
@@ -968,6 +1016,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
968 } 1016 }
969 rdev->preferred_minor = 0xffff; 1017 rdev->preferred_minor = 0xffff;
970 rdev->data_offset = le64_to_cpu(sb->data_offset); 1018 rdev->data_offset = le64_to_cpu(sb->data_offset);
1019 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
971 1020
972 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1021 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
973 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1022 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
@@ -1006,6 +1055,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1006 rdev->size = le64_to_cpu(sb->data_size)/2; 1055 rdev->size = le64_to_cpu(sb->data_size)/2;
1007 if (le32_to_cpu(sb->chunksize)) 1056 if (le32_to_cpu(sb->chunksize))
1008 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); 1057 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1058
1059 if (le32_to_cpu(sb->size) > rdev->size*2)
1060 return -EINVAL;
1009 return 0; 1061 return 0;
1010} 1062}
1011 1063
@@ -1023,6 +1075,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1023 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1075 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1024 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1076 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1025 mddev->level = le32_to_cpu(sb->level); 1077 mddev->level = le32_to_cpu(sb->level);
1078 mddev->clevel[0] = 0;
1026 mddev->layout = le32_to_cpu(sb->layout); 1079 mddev->layout = le32_to_cpu(sb->layout);
1027 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1080 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1028 mddev->size = le64_to_cpu(sb->size)/2; 1081 mddev->size = le64_to_cpu(sb->size)/2;
@@ -1037,8 +1090,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1037 1090
1038 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1091 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1039 mddev->bitmap_file == NULL ) { 1092 mddev->bitmap_file == NULL ) {
1040 if (mddev->level != 1) { 1093 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1041 printk(KERN_WARNING "md: bitmaps only supported for raid1\n"); 1094 && mddev->level != 10) {
1095 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1042 return -EINVAL; 1096 return -EINVAL;
1043 } 1097 }
1044 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1098 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
@@ -1105,6 +1159,8 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1105 else 1159 else
1106 sb->resync_offset = cpu_to_le64(0); 1160 sb->resync_offset = cpu_to_le64(0);
1107 1161
1162 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1163
1108 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1164 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1109 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1165 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1110 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1166 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -1187,6 +1243,14 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1187 MD_BUG(); 1243 MD_BUG();
1188 return -EINVAL; 1244 return -EINVAL;
1189 } 1245 }
1246 /* make sure rdev->size exceeds mddev->size */
1247 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1248 if (mddev->pers)
1249 /* Cannot change size, so fail */
1250 return -ENOSPC;
1251 else
1252 mddev->size = rdev->size;
1253 }
1190 same_pdev = match_dev_unit(mddev, rdev); 1254 same_pdev = match_dev_unit(mddev, rdev);
1191 if (same_pdev) 1255 if (same_pdev)
1192 printk(KERN_WARNING 1256 printk(KERN_WARNING
@@ -1496,6 +1560,26 @@ repeat:
1496 1560
1497} 1561}
1498 1562
1563/* words written to sysfs files may, or may not, be \n terminated.
1564 * We want to accept either case. For this we use cmd_match.
1565 */
1566static int cmd_match(const char *cmd, const char *str)
1567{
1568 /* See if cmd, written into a sysfs file, matches
1569 * str. They must either be the same, or cmd can
1570 * have a trailing newline
1571 */
1572 while (*cmd && *str && *cmd == *str) {
1573 cmd++;
1574 str++;
1575 }
1576 if (*cmd == '\n')
1577 cmd++;
1578 if (*str || *cmd)
1579 return 0;
1580 return 1;
1581}
1582
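cmd_match() is the helper the rest of this patch leans on for sysfs stores. A standalone sketch of its behaviour — the function body is lifted verbatim from the hunk above, the harness around it is illustrative only:

#include <stdio.h>

static int cmd_match(const char *cmd, const char *str)
{
        while (*cmd && *str && *cmd == *str) {
                cmd++;
                str++;
        }
        if (*cmd == '\n')
                cmd++;
        if (*str || *cmd)
                return 0;
        return 1;
}

int main(void)
{
        /* echo(1) appends a newline, so sysfs writes usually carry one */
        printf("%d\n", cmd_match("check\n", "check"));  /* 1: match */
        printf("%d\n", cmd_match("check", "check"));    /* 1: match */
        printf("%d\n", cmd_match("checker", "check"));  /* 0: mismatch */
        return 0;
}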
1499struct rdev_sysfs_entry { 1583struct rdev_sysfs_entry {
1500 struct attribute attr; 1584 struct attribute attr;
1501 ssize_t (*show)(mdk_rdev_t *, char *); 1585 ssize_t (*show)(mdk_rdev_t *, char *);
@@ -1538,9 +1622,113 @@ super_show(mdk_rdev_t *rdev, char *page)
1538} 1622}
1539static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1623static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1540 1624
1625static ssize_t
1626errors_show(mdk_rdev_t *rdev, char *page)
1627{
1628 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1629}
1630
1631static ssize_t
1632errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1633{
1634 char *e;
1635 unsigned long n = simple_strtoul(buf, &e, 10);
1636 if (*buf && (*e == 0 || *e == '\n')) {
1637 atomic_set(&rdev->corrected_errors, n);
1638 return len;
1639 }
1640 return -EINVAL;
1641}
1642static struct rdev_sysfs_entry rdev_errors =
1643__ATTR(errors, 0644, errors_show, errors_store);
1644
1645static ssize_t
1646slot_show(mdk_rdev_t *rdev, char *page)
1647{
1648 if (rdev->raid_disk < 0)
1649 return sprintf(page, "none\n");
1650 else
1651 return sprintf(page, "%d\n", rdev->raid_disk);
1652}
1653
1654static ssize_t
1655slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1656{
1657 char *e;
1658 int slot = simple_strtoul(buf, &e, 10);
1659 if (strncmp(buf, "none", 4)==0)
1660 slot = -1;
1661 else if (e==buf || (*e && *e!= '\n'))
1662 return -EINVAL;
1663 if (rdev->mddev->pers)
1664 /* Cannot set slot in active array (yet) */
1665 return -EBUSY;
1666 if (slot >= rdev->mddev->raid_disks)
1667 return -ENOSPC;
1668 rdev->raid_disk = slot;
1669 /* assume it is working */
1670 rdev->flags = 0;
1671 set_bit(In_sync, &rdev->flags);
1672 return len;
1673}
1674
1675
1676static struct rdev_sysfs_entry rdev_slot =
1677__ATTR(slot, 0644, slot_show, slot_store);
1678
1679static ssize_t
1680offset_show(mdk_rdev_t *rdev, char *page)
1681{
1682 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1683}
1684
1685static ssize_t
1686offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1687{
1688 char *e;
1689 unsigned long long offset = simple_strtoull(buf, &e, 10);
1690 if (e==buf || (*e && *e != '\n'))
1691 return -EINVAL;
1692 if (rdev->mddev->pers)
1693 return -EBUSY;
1694 rdev->data_offset = offset;
1695 return len;
1696}
1697
1698static struct rdev_sysfs_entry rdev_offset =
1699__ATTR(offset, 0644, offset_show, offset_store);
1700
1701static ssize_t
1702rdev_size_show(mdk_rdev_t *rdev, char *page)
1703{
1704 return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1705}
1706
1707static ssize_t
1708rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1709{
1710 char *e;
1711 unsigned long long size = simple_strtoull(buf, &e, 10);
1712 if (e==buf || (*e && *e != '\n'))
1713 return -EINVAL;
1714 if (rdev->mddev->pers)
1715 return -EBUSY;
1716 rdev->size = size;
1717 if (size < rdev->mddev->size || rdev->mddev->size == 0)
1718 rdev->mddev->size = size;
1719 return len;
1720}
1721
1722static struct rdev_sysfs_entry rdev_size =
1723__ATTR(size, 0644, rdev_size_show, rdev_size_store);
1724
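Taken together, errors, slot, offset and size give each member device its own sysfs controls. Assuming the conventional per-rdev kobject path /sys/block/mdX/md/dev-YYY/ (device names below are hypothetical), reading and resetting the corrected-read counter looks like:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/md0/md/dev-sdb1/errors";
        unsigned long n;
        FILE *f;

        f = fopen(path, "r");
        if (!f)
                return 1;
        if (fscanf(f, "%lu", &n) != 1)
                n = 0;
        fclose(f);
        printf("corrected read errors: %lu\n", n);

        f = fopen(path, "w");
        if (!f)
                return 1;
        fprintf(f, "0\n");      /* errors_store() accepts a decimal count */
        return fclose(f) ? 1 : 0;
}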
1541static struct attribute *rdev_default_attrs[] = { 1725static struct attribute *rdev_default_attrs[] = {
1542 &rdev_state.attr, 1726 &rdev_state.attr,
1543 &rdev_super.attr, 1727 &rdev_super.attr,
1728 &rdev_errors.attr,
1729 &rdev_slot.attr,
1730 &rdev_offset.attr,
1731 &rdev_size.attr,
1544 NULL, 1732 NULL,
1545}; 1733};
1546static ssize_t 1734static ssize_t
@@ -1598,12 +1786,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
1598 mdk_rdev_t *rdev; 1786 mdk_rdev_t *rdev;
1599 sector_t size; 1787 sector_t size;
1600 1788
1601 rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL); 1789 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1602 if (!rdev) { 1790 if (!rdev) {
1603 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1791 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1604 return ERR_PTR(-ENOMEM); 1792 return ERR_PTR(-ENOMEM);
1605 } 1793 }
1606 memset(rdev, 0, sizeof(*rdev));
1607 1794
1608 if ((err = alloc_disk_sb(rdev))) 1795 if ((err = alloc_disk_sb(rdev)))
1609 goto abort_free; 1796 goto abort_free;
@@ -1621,6 +1808,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
1621 rdev->data_offset = 0; 1808 rdev->data_offset = 0;
1622 atomic_set(&rdev->nr_pending, 0); 1809 atomic_set(&rdev->nr_pending, 0);
1623 atomic_set(&rdev->read_errors, 0); 1810 atomic_set(&rdev->read_errors, 0);
1811 atomic_set(&rdev->corrected_errors, 0);
1624 1812
1625 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1813 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1626 if (!size) { 1814 if (!size) {
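This is one of several hunks in the series that collapse a kmalloc()/memset() pair into kzalloc(), which returns already-zeroed memory. A kernel-context sketch of the equivalence (not standalone code):

#include <linux/slab.h>
#include <linux/string.h>

/* before: allocate, then zero by hand */
rdev = kmalloc(sizeof(*rdev), GFP_KERNEL);
if (rdev)
        memset(rdev, 0, sizeof(*rdev));

/* after: one call, and no window in which the structure
 * holds uninitialised data */
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);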
@@ -1725,16 +1913,37 @@ static void analyze_sbs(mddev_t * mddev)
1725static ssize_t 1913static ssize_t
1726level_show(mddev_t *mddev, char *page) 1914level_show(mddev_t *mddev, char *page)
1727{ 1915{
1728 mdk_personality_t *p = mddev->pers; 1916 struct mdk_personality *p = mddev->pers;
1729 if (p == NULL && mddev->raid_disks == 0) 1917 if (p)
1730 return 0;
1731 if (mddev->level >= 0)
1732 return sprintf(page, "raid%d\n", mddev->level);
1733 else
1734 return sprintf(page, "%s\n", p->name); 1918 return sprintf(page, "%s\n", p->name);
1919 else if (mddev->clevel[0])
1920 return sprintf(page, "%s\n", mddev->clevel);
1921 else if (mddev->level != LEVEL_NONE)
1922 return sprintf(page, "%d\n", mddev->level);
1923 else
1924 return 0;
1925}
1926
1927static ssize_t
1928level_store(mddev_t *mddev, const char *buf, size_t len)
1929{
1930 int rv = len;
1931 if (mddev->pers)
1932 return -EBUSY;
1933 if (len == 0)
1934 return 0;
1935 if (len >= sizeof(mddev->clevel))
1936 return -ENOSPC;
1937 strncpy(mddev->clevel, buf, len);
1938 if (mddev->clevel[len-1] == '\n')
1939 len--;
1940 mddev->clevel[len] = 0;
1941 mddev->level = LEVEL_NONE;
1942 return rv;
1735} 1943}
1736 1944
1737static struct md_sysfs_entry md_level = __ATTR_RO(level); 1945static struct md_sysfs_entry md_level =
1946__ATTR(level, 0644, level_show, level_store);
1738 1947
1739static ssize_t 1948static ssize_t
1740raid_disks_show(mddev_t *mddev, char *page) 1949raid_disks_show(mddev_t *mddev, char *page)
@@ -1744,7 +1953,197 @@ raid_disks_show(mddev_t *mddev, char *page)
1744 return sprintf(page, "%d\n", mddev->raid_disks); 1953 return sprintf(page, "%d\n", mddev->raid_disks);
1745} 1954}
1746 1955
1747static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); 1956static int update_raid_disks(mddev_t *mddev, int raid_disks);
1957
1958static ssize_t
1959raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
1960{
1961 /* can only set raid_disks if array is not yet active */
1962 char *e;
1963 int rv = 0;
1964 unsigned long n = simple_strtoul(buf, &e, 10);
1965
1966 if (!*buf || (*e && *e != '\n'))
1967 return -EINVAL;
1968
1969 if (mddev->pers)
1970 rv = update_raid_disks(mddev, n);
1971 else
1972 mddev->raid_disks = n;
1973 return rv ? rv : len;
1974}
1975static struct md_sysfs_entry md_raid_disks =
1976__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
1977
1978static ssize_t
1979chunk_size_show(mddev_t *mddev, char *page)
1980{
1981 return sprintf(page, "%d\n", mddev->chunk_size);
1982}
1983
1984static ssize_t
1985chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
1986{
1987 /* can only set chunk_size if array is not yet active */
1988 char *e;
1989 unsigned long n = simple_strtoul(buf, &e, 10);
1990
1991 if (mddev->pers)
1992 return -EBUSY;
1993 if (!*buf || (*e && *e != '\n'))
1994 return -EINVAL;
1995
1996 mddev->chunk_size = n;
1997 return len;
1998}
1999static struct md_sysfs_entry md_chunk_size =
2000__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
2001
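With level, raid_disks and chunk_size all writable while the array is inactive, an array can be staged entirely through sysfs before do_md_run() starts it. A hedged userspace sketch — /sys/block/md0/md/ is the conventional location, and error handling is trimmed for brevity:

#include <stdio.h>

static int md_set(const char *attr, const char *val)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%s\n", val);        /* the stores tolerate a trailing newline */
        return fclose(f);
}

int main(void)
{
        md_set("level", "raid5");       /* records clevel, resets level to LEVEL_NONE */
        md_set("raid_disks", "4");
        md_set("chunk_size", "65536");  /* in bytes */
        return 0;
}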
2002static ssize_t
2003null_show(mddev_t *mddev, char *page)
2004{
2005 return -EINVAL;
2006}
2007
2008static ssize_t
2009new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2010{
2011 /* buf must be "%d:%d", optionally \n terminated, giving major and minor numbers */
2012 /* The new device is added to the array.
2013 * If the array has a persistent superblock, we read the
2014 * superblock to initialise info and check validity.
2015 * Otherwise, the only checking done is that in bind_rdev_to_array,
2016 * which mainly checks size.
2017 */
2018 char *e;
2019 int major = simple_strtoul(buf, &e, 10);
2020 int minor;
2021 dev_t dev;
2022 mdk_rdev_t *rdev;
2023 int err;
2024
2025 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2026 return -EINVAL;
2027 minor = simple_strtoul(e+1, &e, 10);
2028 if (*e && *e != '\n')
2029 return -EINVAL;
2030 dev = MKDEV(major, minor);
2031 if (major != MAJOR(dev) ||
2032 minor != MINOR(dev))
2033 return -EOVERFLOW;
2034
2035
2036 if (mddev->persistent) {
2037 rdev = md_import_device(dev, mddev->major_version,
2038 mddev->minor_version);
2039 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2040 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2041 mdk_rdev_t, same_set);
2042 err = super_types[mddev->major_version]
2043 .load_super(rdev, rdev0, mddev->minor_version);
2044 if (err < 0)
2045 goto out;
2046 }
2047 } else
2048 rdev = md_import_device(dev, -1, -1);
2049
2050 if (IS_ERR(rdev))
2051 return PTR_ERR(rdev);
2052 err = bind_rdev_to_array(rdev, mddev);
2053 out:
2054 if (err)
2055 export_rdev(rdev);
2056 return err ? err : len;
2057}
2058
2059static struct md_sysfs_entry md_new_device =
2060__ATTR(new_dev, 0200, null_show, new_dev_store);
2061
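new_dev wants the textual "major:minor" of the device to add. One way to produce that from a device node (device names hypothetical, error handling minimal):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
        struct stat st;
        FILE *f;

        if (stat("/dev/sdb1", &st) != 0 || !S_ISBLK(st.st_mode))
                return 1;
        f = fopen("/sys/block/md0/md/new_dev", "w");
        if (!f)
                return 1;
        fprintf(f, "%u:%u\n", major(st.st_rdev), minor(st.st_rdev));
        return fclose(f) ? 1 : 0;
}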
2062static ssize_t
2063size_show(mddev_t *mddev, char *page)
2064{
2065 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2066}
2067
2068static int update_size(mddev_t *mddev, unsigned long size);
2069
2070static ssize_t
2071size_store(mddev_t *mddev, const char *buf, size_t len)
2072{
2073 /* If array is inactive, we can reduce the component size, but
2074 * not increase it (except from 0).
2075 * If array is active, we can try an on-line resize
2076 */
2077 char *e;
2078 int err = 0;
2079 unsigned long long size = simple_strtoull(buf, &e, 10);
2080 if (!*buf || *buf == '\n' ||
2081 (*e && *e != '\n'))
2082 return -EINVAL;
2083
2084 if (mddev->pers) {
2085 err = update_size(mddev, size);
2086 md_update_sb(mddev);
2087 } else {
2088 if (mddev->size == 0 ||
2089 mddev->size > size)
2090 mddev->size = size;
2091 else
2092 err = -ENOSPC;
2093 }
2094 return err ? err : len;
2095}
2096
2097static struct md_sysfs_entry md_size =
2098__ATTR(component_size, 0644, size_show, size_store);
2099
2100
2101/* Metadata version.
2102 * This is either 'none' for arrays with externally managed metadata,
2103 * or N.M for internally known formats
2104 */
2105static ssize_t
2106metadata_show(mddev_t *mddev, char *page)
2107{
2108 if (mddev->persistent)
2109 return sprintf(page, "%d.%d\n",
2110 mddev->major_version, mddev->minor_version);
2111 else
2112 return sprintf(page, "none\n");
2113}
2114
2115static ssize_t
2116metadata_store(mddev_t *mddev, const char *buf, size_t len)
2117{
2118 int major, minor;
2119 char *e;
2120 if (!list_empty(&mddev->disks))
2121 return -EBUSY;
2122
2123 if (cmd_match(buf, "none")) {
2124 mddev->persistent = 0;
2125 mddev->major_version = 0;
2126 mddev->minor_version = 90;
2127 return len;
2128 }
2129 major = simple_strtoul(buf, &e, 10);
2130 if (e==buf || *e != '.')
2131 return -EINVAL;
2132 buf = e+1;
2133 minor = simple_strtoul(buf, &e, 10);
2134 if (e==buf || *e != '\n')
2135 return -EINVAL;
2136 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2137 super_types[major].name == NULL)
2138 return -ENOENT;
2139 mddev->major_version = major;
2140 mddev->minor_version = minor;
2141 mddev->persistent = 1;
2142 return len;
2143}
2144
2145static struct md_sysfs_entry md_metadata =
2146__ATTR(metadata_version, 0644, metadata_show, metadata_store);
1748 2147
1749static ssize_t 2148static ssize_t
1750action_show(mddev_t *mddev, char *page) 2149action_show(mddev_t *mddev, char *page)
@@ -1771,31 +2170,27 @@ action_store(mddev_t *mddev, const char *page, size_t len)
1771 if (!mddev->pers || !mddev->pers->sync_request) 2170 if (!mddev->pers || !mddev->pers->sync_request)
1772 return -EINVAL; 2171 return -EINVAL;
1773 2172
1774 if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) { 2173 if (cmd_match(page, "idle")) {
1775 if (mddev->sync_thread) { 2174 if (mddev->sync_thread) {
1776 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2175 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1777 md_unregister_thread(mddev->sync_thread); 2176 md_unregister_thread(mddev->sync_thread);
1778 mddev->sync_thread = NULL; 2177 mddev->sync_thread = NULL;
1779 mddev->recovery = 0; 2178 mddev->recovery = 0;
1780 } 2179 }
1781 return len; 2180 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1782 } 2181 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
1783
1784 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1785 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
1786 return -EBUSY; 2182 return -EBUSY;
1787 if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 || 2183 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
1788 strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
1789 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2184 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1790 else { 2185 else {
1791 if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) 2186 if (cmd_match(page, "check"))
1792 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2187 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
1793 else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) 2188 else if (!cmd_match(page, "repair"))
1794 return -EINVAL; 2189 return -EINVAL;
1795 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2190 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
1796 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 2191 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
1797 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1798 } 2192 }
2193 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1799 md_wakeup_thread(mddev->thread); 2194 md_wakeup_thread(mddev->thread);
1800 return len; 2195 return len;
1801} 2196}
@@ -1814,15 +2209,107 @@ md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
1814static struct md_sysfs_entry 2209static struct md_sysfs_entry
1815md_mismatches = __ATTR_RO(mismatch_cnt); 2210md_mismatches = __ATTR_RO(mismatch_cnt);
1816 2211
2212static ssize_t
2213sync_min_show(mddev_t *mddev, char *page)
2214{
2215 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2216 mddev->sync_speed_min ? "local": "system");
2217}
2218
2219static ssize_t
2220sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2221{
2222 int min;
2223 char *e;
2224 if (strncmp(buf, "system", 6)==0) {
2225 mddev->sync_speed_min = 0;
2226 return len;
2227 }
2228 min = simple_strtoul(buf, &e, 10);
2229 if (buf == e || (*e && *e != '\n') || min <= 0)
2230 return -EINVAL;
2231 mddev->sync_speed_min = min;
2232 return len;
2233}
2234
2235static struct md_sysfs_entry md_sync_min =
2236__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2237
2238static ssize_t
2239sync_max_show(mddev_t *mddev, char *page)
2240{
2241 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2242 mddev->sync_speed_max ? "local": "system");
2243}
2244
2245static ssize_t
2246sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2247{
2248 int max;
2249 char *e;
2250 if (strncmp(buf, "system", 6)==0) {
2251 mddev->sync_speed_max = 0;
2252 return len;
2253 }
2254 max = simple_strtoul(buf, &e, 10);
2255 if (buf == e || (*e && *e != '\n') || max <= 0)
2256 return -EINVAL;
2257 mddev->sync_speed_max = max;
2258 return len;
2259}
2260
2261static struct md_sysfs_entry md_sync_max =
2262__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2263
2264
2265static ssize_t
2266sync_speed_show(mddev_t *mddev, char *page)
2267{
2268 unsigned long resync, dt, db;
2269 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2270 dt = ((jiffies - mddev->resync_mark) / HZ);
2271 if (!dt) dt++;
2272 db = resync - (mddev->resync_mark_cnt);
2273 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2274}
2275
2276static struct md_sysfs_entry
2277md_sync_speed = __ATTR_RO(sync_speed);
2278
2279static ssize_t
2280sync_completed_show(mddev_t *mddev, char *page)
2281{
2282 unsigned long max_blocks, resync;
2283
2284 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2285 max_blocks = mddev->resync_max_sectors;
2286 else
2287 max_blocks = mddev->size << 1;
2288
2289 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2290 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2291}
2292
2293static struct md_sysfs_entry
2294md_sync_completed = __ATTR_RO(sync_completed);
2295
1817static struct attribute *md_default_attrs[] = { 2296static struct attribute *md_default_attrs[] = {
1818 &md_level.attr, 2297 &md_level.attr,
1819 &md_raid_disks.attr, 2298 &md_raid_disks.attr,
2299 &md_chunk_size.attr,
2300 &md_size.attr,
2301 &md_metadata.attr,
2302 &md_new_device.attr,
1820 NULL, 2303 NULL,
1821}; 2304};
1822 2305
1823static struct attribute *md_redundancy_attrs[] = { 2306static struct attribute *md_redundancy_attrs[] = {
1824 &md_scan_mode.attr, 2307 &md_scan_mode.attr,
1825 &md_mismatches.attr, 2308 &md_mismatches.attr,
2309 &md_sync_min.attr,
2310 &md_sync_max.attr,
2311 &md_sync_speed.attr,
2312 &md_sync_completed.attr,
1826 NULL, 2313 NULL,
1827}; 2314};
1828static struct attribute_group md_redundancy_group = { 2315static struct attribute_group md_redundancy_group = {
@@ -1937,14 +2424,16 @@ static void md_safemode_timeout(unsigned long data)
1937 md_wakeup_thread(mddev->thread); 2424 md_wakeup_thread(mddev->thread);
1938} 2425}
1939 2426
2427static int start_dirty_degraded;
1940 2428
1941static int do_md_run(mddev_t * mddev) 2429static int do_md_run(mddev_t * mddev)
1942{ 2430{
1943 int pnum, err; 2431 int err;
1944 int chunk_size; 2432 int chunk_size;
1945 struct list_head *tmp; 2433 struct list_head *tmp;
1946 mdk_rdev_t *rdev; 2434 mdk_rdev_t *rdev;
1947 struct gendisk *disk; 2435 struct gendisk *disk;
2436 struct mdk_personality *pers;
1948 char b[BDEVNAME_SIZE]; 2437 char b[BDEVNAME_SIZE];
1949 2438
1950 if (list_empty(&mddev->disks)) 2439 if (list_empty(&mddev->disks))
@@ -1961,20 +2450,8 @@ static int do_md_run(mddev_t * mddev)
1961 analyze_sbs(mddev); 2450 analyze_sbs(mddev);
1962 2451
1963 chunk_size = mddev->chunk_size; 2452 chunk_size = mddev->chunk_size;
1964 pnum = level_to_pers(mddev->level);
1965 2453
1966 if ((pnum != MULTIPATH) && (pnum != RAID1)) { 2454 if (chunk_size) {
1967 if (!chunk_size) {
1968 /*
1969 * 'default chunksize' in the old md code used to
1970 * be PAGE_SIZE, baaad.
1971 * we abort here to be on the safe side. We don't
1972 * want to continue the bad practice.
1973 */
1974 printk(KERN_ERR
1975 "no chunksize specified, see 'man raidtab'\n");
1976 return -EINVAL;
1977 }
1978 if (chunk_size > MAX_CHUNK_SIZE) { 2455 if (chunk_size > MAX_CHUNK_SIZE) {
1979 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2456 printk(KERN_ERR "too big chunk_size: %d > %d\n",
1980 chunk_size, MAX_CHUNK_SIZE); 2457 chunk_size, MAX_CHUNK_SIZE);
@@ -2010,10 +2487,10 @@ static int do_md_run(mddev_t * mddev)
2010 } 2487 }
2011 2488
2012#ifdef CONFIG_KMOD 2489#ifdef CONFIG_KMOD
2013 if (!pers[pnum]) 2490 if (mddev->level != LEVEL_NONE)
2014 { 2491 request_module("md-level-%d", mddev->level);
2015 request_module("md-personality-%d", pnum); 2492 else if (mddev->clevel[0])
2016 } 2493 request_module("md-%s", mddev->clevel);
2017#endif 2494#endif
2018 2495
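These request_module() strings only work if each personality module advertises matching aliases; the raid0 and multipath hunks later in this patch add exactly such MODULE_ALIAS lines. The pattern, sketched for one personality source file:

/* raid5.c, for example: make the module reachable by number and by name */
MODULE_ALIAS("md-personality-4");       /* legacy numeric interface */
MODULE_ALIAS("md-raid5");               /* matches request_module("md-%s", clevel) */
MODULE_ALIAS("md-level-5");             /* matches request_module("md-level-%d", level) */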
2019 /* 2496 /*
@@ -2035,30 +2512,39 @@ static int do_md_run(mddev_t * mddev)
2035 return -ENOMEM; 2512 return -ENOMEM;
2036 2513
2037 spin_lock(&pers_lock); 2514 spin_lock(&pers_lock);
2038 if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) { 2515 pers = find_pers(mddev->level, mddev->clevel);
2516 if (!pers || !try_module_get(pers->owner)) {
2039 spin_unlock(&pers_lock); 2517 spin_unlock(&pers_lock);
2040 printk(KERN_WARNING "md: personality %d is not loaded!\n", 2518 if (mddev->level != LEVEL_NONE)
2041 pnum); 2519 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
2520 mddev->level);
2521 else
2522 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
2523 mddev->clevel);
2042 return -EINVAL; 2524 return -EINVAL;
2043 } 2525 }
2044 2526 mddev->pers = pers;
2045 mddev->pers = pers[pnum];
2046 spin_unlock(&pers_lock); 2527 spin_unlock(&pers_lock);
2528 mddev->level = pers->level;
2529 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2047 2530
2048 mddev->recovery = 0; 2531 mddev->recovery = 0;
2049 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2532 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
2050 mddev->barriers_work = 1; 2533 mddev->barriers_work = 1;
2534 mddev->ok_start_degraded = start_dirty_degraded;
2051 2535
2052 if (start_readonly) 2536 if (start_readonly)
2053 mddev->ro = 2; /* read-only, but switch on first write */ 2537 mddev->ro = 2; /* read-only, but switch on first write */
2054 2538
2055 /* before we start the array running, initialise the bitmap */ 2539 err = mddev->pers->run(mddev);
2056 err = bitmap_create(mddev); 2540 if (!err && mddev->pers->sync_request) {
2057 if (err) 2541 err = bitmap_create(mddev);
2058 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2542 if (err) {
2059 mdname(mddev), err); 2543 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
2060 else 2544 mdname(mddev), err);
2061 err = mddev->pers->run(mddev); 2545 mddev->pers->stop(mddev);
2546 }
2547 }
2062 if (err) { 2548 if (err) {
2063 printk(KERN_ERR "md: pers->run() failed ...\n"); 2549 printk(KERN_ERR "md: pers->run() failed ...\n");
2064 module_put(mddev->pers->owner); 2550 module_put(mddev->pers->owner);
@@ -2104,6 +2590,7 @@ static int do_md_run(mddev_t * mddev)
2104 mddev->queue->make_request_fn = mddev->pers->make_request; 2590 mddev->queue->make_request_fn = mddev->pers->make_request;
2105 2591
2106 mddev->changed = 1; 2592 mddev->changed = 1;
2593 md_new_event(mddev);
2107 return 0; 2594 return 0;
2108} 2595}
2109 2596
@@ -2231,6 +2718,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
2231 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2718 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2232 mdname(mddev)); 2719 mdname(mddev));
2233 err = 0; 2720 err = 0;
2721 md_new_event(mddev);
2234out: 2722out:
2235 return err; 2723 return err;
2236} 2724}
@@ -2668,12 +3156,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2668 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3156 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2669 set_bit(WriteMostly, &rdev->flags); 3157 set_bit(WriteMostly, &rdev->flags);
2670 3158
2671 err = bind_rdev_to_array(rdev, mddev);
2672 if (err) {
2673 export_rdev(rdev);
2674 return err;
2675 }
2676
2677 if (!mddev->persistent) { 3159 if (!mddev->persistent) {
2678 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3160 printk(KERN_INFO "md: nonpersistent superblock ...\n");
2679 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3161 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
@@ -2681,8 +3163,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2681 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3163 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2682 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3164 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2683 3165
2684 if (!mddev->size || (mddev->size > rdev->size)) 3166 err = bind_rdev_to_array(rdev, mddev);
2685 mddev->size = rdev->size; 3167 if (err) {
3168 export_rdev(rdev);
3169 return err;
3170 }
2686 } 3171 }
2687 3172
2688 return 0; 3173 return 0;
@@ -2705,6 +3190,7 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2705 3190
2706 kick_rdev_from_array(rdev); 3191 kick_rdev_from_array(rdev);
2707 md_update_sb(mddev); 3192 md_update_sb(mddev);
3193 md_new_event(mddev);
2708 3194
2709 return 0; 3195 return 0;
2710busy: 3196busy:
@@ -2753,15 +3239,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
2753 size = calc_dev_size(rdev, mddev->chunk_size); 3239 size = calc_dev_size(rdev, mddev->chunk_size);
2754 rdev->size = size; 3240 rdev->size = size;
2755 3241
2756 if (size < mddev->size) {
2757 printk(KERN_WARNING
2758 "%s: disk size %llu blocks < array size %llu\n",
2759 mdname(mddev), (unsigned long long)size,
2760 (unsigned long long)mddev->size);
2761 err = -ENOSPC;
2762 goto abort_export;
2763 }
2764
2765 if (test_bit(Faulty, &rdev->flags)) { 3242 if (test_bit(Faulty, &rdev->flags)) {
2766 printk(KERN_WARNING 3243 printk(KERN_WARNING
2767 "md: can not hot-add faulty %s disk to %s!\n", 3244 "md: can not hot-add faulty %s disk to %s!\n",
@@ -2771,7 +3248,9 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
2771 } 3248 }
2772 clear_bit(In_sync, &rdev->flags); 3249 clear_bit(In_sync, &rdev->flags);
2773 rdev->desc_nr = -1; 3250 rdev->desc_nr = -1;
2774 bind_rdev_to_array(rdev, mddev); 3251 err = bind_rdev_to_array(rdev, mddev);
3252 if (err)
3253 goto abort_export;
2775 3254
2776 /* 3255 /*
2777 * The rest should better be atomic, we can have disk failures 3256 * The rest should better be atomic, we can have disk failures
@@ -2795,7 +3274,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
2795 */ 3274 */
2796 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3275 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2797 md_wakeup_thread(mddev->thread); 3276 md_wakeup_thread(mddev->thread);
2798 3277 md_new_event(mddev);
2799 return 0; 3278 return 0;
2800 3279
2801abort_unbind_export: 3280abort_unbind_export:
@@ -2942,6 +3421,81 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2942 return 0; 3421 return 0;
2943} 3422}
2944 3423
3424static int update_size(mddev_t *mddev, unsigned long size)
3425{
3426 mdk_rdev_t * rdev;
3427 int rv;
3428 struct list_head *tmp;
3429
3430 if (mddev->pers->resize == NULL)
3431 return -EINVAL;
3432 /* The "size" is the amount of each device that is used.
3433 * This can only make sense for arrays with redundancy.
3434 * linear and raid0 always use whatever space is available
3435 * We can only consider changing the size if no resync
3436 * or reconstruction is happening, and if the new size
3437 * is acceptable. It must fit before the sb_offset or,
3438 * if that is <data_offset, it must fit before the
3439 * size of each device.
3440 * If size is zero, we find the largest size that fits.
3441 */
3442 if (mddev->sync_thread)
3443 return -EBUSY;
3444 ITERATE_RDEV(mddev,rdev,tmp) {
3445 sector_t avail;
3446 int fit = (size == 0);
3447 if (rdev->sb_offset > rdev->data_offset)
3448 avail = (rdev->sb_offset*2) - rdev->data_offset;
3449 else
3450 avail = get_capacity(rdev->bdev->bd_disk)
3451 - rdev->data_offset;
3452 if (fit && (size == 0 || size > avail/2))
3453 size = avail/2;
3454 if (avail < ((sector_t)size << 1))
3455 return -ENOSPC;
3456 }
3457 rv = mddev->pers->resize(mddev, (sector_t)size *2);
3458 if (!rv) {
3459 struct block_device *bdev;
3460
3461 bdev = bdget_disk(mddev->gendisk, 0);
3462 if (bdev) {
3463 down(&bdev->bd_inode->i_sem);
3464 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3465 up(&bdev->bd_inode->i_sem);
3466 bdput(bdev);
3467 }
3468 }
3469 return rv;
3470}
3471
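A unit note on update_size(): the "size" argument is in 1K blocks, while the sb_offset comparison and get_capacity() work in 512-byte sectors, which is where the recurring *2 and /2 come from. A standalone recap of the fit computation, with invented values for illustration:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
        sector_t sb_offset = 1048576;   /* superblock offset, 1K blocks */
        sector_t data_offset = 0;       /* data start, sectors */
        unsigned long size = 0;         /* requested size, 1K blocks; 0 = use maximum */

        /* superblock sits after the data start, so data must end before it */
        sector_t avail = sb_offset * 2 - data_offset;   /* sectors */

        if (size == 0 || size > avail / 2)
                size = avail / 2;       /* largest size that fits, 1K blocks */

        printf("component size %lu KB (%llu sectors available)\n", size, avail);
        return 0;
}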
3472static int update_raid_disks(mddev_t *mddev, int raid_disks)
3473{
3474 int rv;
3475 /* change the number of raid disks */
3476 if (mddev->pers->reshape == NULL)
3477 return -EINVAL;
3478 if (raid_disks <= 0 ||
3479 raid_disks >= mddev->max_disks)
3480 return -EINVAL;
3481 if (mddev->sync_thread)
3482 return -EBUSY;
3483 rv = mddev->pers->reshape(mddev, raid_disks);
3484 if (!rv) {
3485 struct block_device *bdev;
3486
3487 bdev = bdget_disk(mddev->gendisk, 0);
3488 if (bdev) {
3489 down(&bdev->bd_inode->i_sem);
3490 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3491 up(&bdev->bd_inode->i_sem);
3492 bdput(bdev);
3493 }
3494 }
3495 return rv;
3496}
3497
3498
2945/* 3499/*
2946 * update_array_info is used to change the configuration of an 3500 * update_array_info is used to change the configuration of an
2947 * on-line array. 3501 * on-line array.
@@ -2990,71 +3544,12 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2990 else 3544 else
2991 return mddev->pers->reconfig(mddev, info->layout, -1); 3545 return mddev->pers->reconfig(mddev, info->layout, -1);
2992 } 3546 }
2993 if (mddev->size != info->size) { 3547 if (mddev->size != info->size)
2994 mdk_rdev_t * rdev; 3548 rv = update_size(mddev, info->size);
2995 struct list_head *tmp; 3549
2996 if (mddev->pers->resize == NULL) 3550 if (mddev->raid_disks != info->raid_disks)
2997 return -EINVAL; 3551 rv = update_raid_disks(mddev, info->raid_disks);
2998 /* The "size" is the amount of each device that is used. 3552
2999 * This can only make sense for arrays with redundancy.
3000 * linear and raid0 always use whatever space is available
3001 * We can only consider changing the size if no resync
3002 * or reconstruction is happening, and if the new size
3003 * is acceptable. It must fit before the sb_offset or,
3004 * if that is <data_offset, it must fit before the
3005 * size of each device.
3006 * If size is zero, we find the largest size that fits.
3007 */
3008 if (mddev->sync_thread)
3009 return -EBUSY;
3010 ITERATE_RDEV(mddev,rdev,tmp) {
3011 sector_t avail;
3012 int fit = (info->size == 0);
3013 if (rdev->sb_offset > rdev->data_offset)
3014 avail = (rdev->sb_offset*2) - rdev->data_offset;
3015 else
3016 avail = get_capacity(rdev->bdev->bd_disk)
3017 - rdev->data_offset;
3018 if (fit && (info->size == 0 || info->size > avail/2))
3019 info->size = avail/2;
3020 if (avail < ((sector_t)info->size << 1))
3021 return -ENOSPC;
3022 }
3023 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
3024 if (!rv) {
3025 struct block_device *bdev;
3026
3027 bdev = bdget_disk(mddev->gendisk, 0);
3028 if (bdev) {
3029 down(&bdev->bd_inode->i_sem);
3030 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3031 up(&bdev->bd_inode->i_sem);
3032 bdput(bdev);
3033 }
3034 }
3035 }
3036 if (mddev->raid_disks != info->raid_disks) {
3037 /* change the number of raid disks */
3038 if (mddev->pers->reshape == NULL)
3039 return -EINVAL;
3040 if (info->raid_disks <= 0 ||
3041 info->raid_disks >= mddev->max_disks)
3042 return -EINVAL;
3043 if (mddev->sync_thread)
3044 return -EBUSY;
3045 rv = mddev->pers->reshape(mddev, info->raid_disks);
3046 if (!rv) {
3047 struct block_device *bdev;
3048
3049 bdev = bdget_disk(mddev->gendisk, 0);
3050 if (bdev) {
3051 down(&bdev->bd_inode->i_sem);
3052 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3053 up(&bdev->bd_inode->i_sem);
3054 bdput(bdev);
3055 }
3056 }
3057 }
3058 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3553 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3059 if (mddev->pers->quiesce == NULL) 3554 if (mddev->pers->quiesce == NULL)
3060 return -EINVAL; 3555 return -EINVAL;
@@ -3476,11 +3971,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3476{ 3971{
3477 mdk_thread_t *thread; 3972 mdk_thread_t *thread;
3478 3973
3479 thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL); 3974 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
3480 if (!thread) 3975 if (!thread)
3481 return NULL; 3976 return NULL;
3482 3977
3483 memset(thread, 0, sizeof(mdk_thread_t));
3484 init_waitqueue_head(&thread->wqueue); 3978 init_waitqueue_head(&thread->wqueue);
3485 3979
3486 thread->run = run; 3980 thread->run = run;
@@ -3524,6 +4018,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3524 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4018 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3525 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4019 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3526 md_wakeup_thread(mddev->thread); 4020 md_wakeup_thread(mddev->thread);
4021 md_new_event(mddev);
3527} 4022}
3528 4023
3529/* seq_file implementation /proc/mdstat */ 4024/* seq_file implementation /proc/mdstat */
@@ -3664,24 +4159,29 @@ static void md_seq_stop(struct seq_file *seq, void *v)
3664 mddev_put(mddev); 4159 mddev_put(mddev);
3665} 4160}
3666 4161
4162struct mdstat_info {
4163 int event;
4164};
4165
3667static int md_seq_show(struct seq_file *seq, void *v) 4166static int md_seq_show(struct seq_file *seq, void *v)
3668{ 4167{
3669 mddev_t *mddev = v; 4168 mddev_t *mddev = v;
3670 sector_t size; 4169 sector_t size;
3671 struct list_head *tmp2; 4170 struct list_head *tmp2;
3672 mdk_rdev_t *rdev; 4171 mdk_rdev_t *rdev;
3673 int i; 4172 struct mdstat_info *mi = seq->private;
3674 struct bitmap *bitmap; 4173 struct bitmap *bitmap;
3675 4174
3676 if (v == (void*)1) { 4175 if (v == (void*)1) {
4176 struct mdk_personality *pers;
3677 seq_printf(seq, "Personalities : "); 4177 seq_printf(seq, "Personalities : ");
3678 spin_lock(&pers_lock); 4178 spin_lock(&pers_lock);
3679 for (i = 0; i < MAX_PERSONALITY; i++) 4179 list_for_each_entry(pers, &pers_list, list)
3680 if (pers[i]) 4180 seq_printf(seq, "[%s] ", pers->name);
3681 seq_printf(seq, "[%s] ", pers[i]->name);
3682 4181
3683 spin_unlock(&pers_lock); 4182 spin_unlock(&pers_lock);
3684 seq_printf(seq, "\n"); 4183 seq_printf(seq, "\n");
4184 mi->event = atomic_read(&md_event_count);
3685 return 0; 4185 return 0;
3686 } 4186 }
3687 if (v == (void*)2) { 4187 if (v == (void*)2) {
@@ -3790,47 +4290,68 @@ static struct seq_operations md_seq_ops = {
3790static int md_seq_open(struct inode *inode, struct file *file) 4290static int md_seq_open(struct inode *inode, struct file *file)
3791{ 4291{
3792 int error; 4292 int error;
4293 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4294 if (mi == NULL)
4295 return -ENOMEM;
3793 4296
3794 error = seq_open(file, &md_seq_ops); 4297 error = seq_open(file, &md_seq_ops);
4298 if (error)
4299 kfree(mi);
4300 else {
4301 struct seq_file *p = file->private_data;
4302 p->private = mi;
4303 mi->event = atomic_read(&md_event_count);
4304 }
3795 return error; 4305 return error;
3796} 4306}
3797 4307
4308static int md_seq_release(struct inode *inode, struct file *file)
4309{
4310 struct seq_file *m = file->private_data;
4311 struct mdstat_info *mi = m->private;
4312 m->private = NULL;
4313 kfree(mi);
4314 return seq_release(inode, file);
4315}
4316
4317static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4318{
4319 struct seq_file *m = filp->private_data;
4320 struct mdstat_info *mi = m->private;
4321 int mask;
4322
4323 poll_wait(filp, &md_event_waiters, wait);
4324
4325 /* always allow read */
4326 mask = POLLIN | POLLRDNORM;
4327
4328 if (mi->event != atomic_read(&md_event_count))
4329 mask |= POLLERR | POLLPRI;
4330 return mask;
4331}
4332
3798static struct file_operations md_seq_fops = { 4333static struct file_operations md_seq_fops = {
3799 .open = md_seq_open, 4334 .open = md_seq_open,
3800 .read = seq_read, 4335 .read = seq_read,
3801 .llseek = seq_lseek, 4336 .llseek = seq_lseek,
3802 .release = seq_release, 4337 .release = md_seq_release,
4338 .poll = mdstat_poll,
3803}; 4339};
3804 4340
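md_seq_release() and mdstat_poll() together let a monitor block on /proc/mdstat instead of re-reading it on a timer: each md_new_event() bumps md_event_count, and poll() reports POLLERR|POLLPRI until the file is read again. A minimal consumer, illustrative only — a real monitor would parse the contents after each wakeup:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
        char buf[4096];
        struct pollfd pfd;

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        if (pfd.fd < 0)
                return 1;
        pfd.events = POLLPRI;

        for (;;) {
                /* read the current contents; this also rearms the event */
                lseek(pfd.fd, 0, SEEK_SET);
                while (read(pfd.fd, buf, sizeof(buf)) > 0)
                        ;
                if (poll(&pfd, 1, -1) < 0)
                        break;
                printf("mdstat changed\n");
        }
        return close(pfd.fd);
}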
3805int register_md_personality(int pnum, mdk_personality_t *p) 4341int register_md_personality(struct mdk_personality *p)
3806{ 4342{
3807 if (pnum >= MAX_PERSONALITY) {
3808 printk(KERN_ERR
3809 "md: tried to install personality %s as nr %d, but max is %lu\n",
3810 p->name, pnum, MAX_PERSONALITY-1);
3811 return -EINVAL;
3812 }
3813
3814 spin_lock(&pers_lock); 4343 spin_lock(&pers_lock);
3815 if (pers[pnum]) { 4344 list_add_tail(&p->list, &pers_list);
3816 spin_unlock(&pers_lock); 4345 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
3817 return -EBUSY;
3818 }
3819
3820 pers[pnum] = p;
3821 printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3822 spin_unlock(&pers_lock); 4346 spin_unlock(&pers_lock);
3823 return 0; 4347 return 0;
3824} 4348}
3825 4349
3826int unregister_md_personality(int pnum) 4350int unregister_md_personality(struct mdk_personality *p)
3827{ 4351{
3828 if (pnum >= MAX_PERSONALITY) 4352 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
3829 return -EINVAL;
3830
3831 printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3832 spin_lock(&pers_lock); 4353 spin_lock(&pers_lock);
3833 pers[pnum] = NULL; 4354 list_del_init(&p->list);
3834 spin_unlock(&pers_lock); 4355 spin_unlock(&pers_lock);
3835 return 0; 4356 return 0;
3836} 4357}
@@ -4012,10 +4533,10 @@ static void md_do_sync(mddev_t *mddev)
4012 4533
4013 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); 4534 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
4014 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" 4535 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
4015 " %d KB/sec/disc.\n", sysctl_speed_limit_min); 4536 " %d KB/sec/disc.\n", speed_min(mddev));
4016 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 4537 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
4017 "(but not more than %d KB/sec) for reconstruction.\n", 4538 "(but not more than %d KB/sec) for reconstruction.\n",
4018 sysctl_speed_limit_max); 4539 speed_max(mddev));
4019 4540
4020 is_mddev_idle(mddev); /* this also initializes IO event counters */ 4541 is_mddev_idle(mddev); /* this also initializes IO event counters */
4021 /* we don't use the checkpoint if there's a bitmap */ 4542 /* we don't use the checkpoint if there's a bitmap */
@@ -4056,7 +4577,7 @@ static void md_do_sync(mddev_t *mddev)
4056 4577
4057 skipped = 0; 4578 skipped = 0;
4058 sectors = mddev->pers->sync_request(mddev, j, &skipped, 4579 sectors = mddev->pers->sync_request(mddev, j, &skipped,
4059 currspeed < sysctl_speed_limit_min); 4580 currspeed < speed_min(mddev));
4060 if (sectors == 0) { 4581 if (sectors == 0) {
4061 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 4582 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4062 goto out; 4583 goto out;
@@ -4069,7 +4590,11 @@ static void md_do_sync(mddev_t *mddev)
4069 4590
4070 j += sectors; 4591 j += sectors;
4071 if (j>1) mddev->curr_resync = j; 4592 if (j>1) mddev->curr_resync = j;
4072 4593 if (last_check == 0)
4594 /* this is the earliest that rebuild will be
4595 * visible in /proc/mdstat
4596 */
4597 md_new_event(mddev);
4073 4598
4074 if (last_check + window > io_sectors || j == max_sectors) 4599 if (last_check + window > io_sectors || j == max_sectors)
4075 continue; 4600 continue;
@@ -4117,8 +4642,8 @@ static void md_do_sync(mddev_t *mddev)
4117 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 4642 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4118 /((jiffies-mddev->resync_mark)/HZ +1) +1; 4643 /((jiffies-mddev->resync_mark)/HZ +1) +1;
4119 4644
4120 if (currspeed > sysctl_speed_limit_min) { 4645 if (currspeed > speed_min(mddev)) {
4121 if ((currspeed > sysctl_speed_limit_max) || 4646 if ((currspeed > speed_max(mddev)) ||
4122 !is_mddev_idle(mddev)) { 4647 !is_mddev_idle(mddev)) {
4123 msleep(500); 4648 msleep(500);
4124 goto repeat; 4649 goto repeat;
@@ -4255,6 +4780,7 @@ void md_check_recovery(mddev_t *mddev)
4255 mddev->recovery = 0; 4780 mddev->recovery = 0;
4256 /* flag recovery needed just to double check */ 4781 /* flag recovery needed just to double check */
4257 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4782 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4783 md_new_event(mddev);
4258 goto unlock; 4784 goto unlock;
4259 } 4785 }
4260 /* Clear some bits that don't mean anything, but 4786 /* Clear some bits that don't mean anything, but
@@ -4292,6 +4818,7 @@ void md_check_recovery(mddev_t *mddev)
4292 sprintf(nm, "rd%d", rdev->raid_disk); 4818 sprintf(nm, "rd%d", rdev->raid_disk);
4293 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 4819 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
4294 spares++; 4820 spares++;
4821 md_new_event(mddev);
4295 } else 4822 } else
4296 break; 4823 break;
4297 } 4824 }
@@ -4324,9 +4851,9 @@ void md_check_recovery(mddev_t *mddev)
4324 mdname(mddev)); 4851 mdname(mddev));
4325 /* leave the spares where they are, it shouldn't hurt */ 4852 /* leave the spares where they are, it shouldn't hurt */
4326 mddev->recovery = 0; 4853 mddev->recovery = 0;
4327 } else { 4854 } else
4328 md_wakeup_thread(mddev->sync_thread); 4855 md_wakeup_thread(mddev->sync_thread);
4329 } 4856 md_new_event(mddev);
4330 } 4857 }
4331 unlock: 4858 unlock:
4332 mddev_unlock(mddev); 4859 mddev_unlock(mddev);
@@ -4503,12 +5030,14 @@ static int set_ro(const char *val, struct kernel_param *kp)
4503 int num = simple_strtoul(val, &e, 10); 5030 int num = simple_strtoul(val, &e, 10);
4504 if (*val && (*e == '\0' || *e == '\n')) { 5031 if (*val && (*e == '\0' || *e == '\n')) {
4505 start_readonly = num; 5032 start_readonly = num;
4506 return 0;; 5033 return 0;
4507 } 5034 }
4508 return -EINVAL; 5035 return -EINVAL;
4509} 5036}
4510 5037
4511module_param_call(start_ro, set_ro, get_ro, NULL, 0600); 5038module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
5039module_param(start_dirty_degraded, int, 0644);
5040
4512 5041
4513EXPORT_SYMBOL(register_md_personality); 5042EXPORT_SYMBOL(register_md_personality);
4514EXPORT_SYMBOL(unregister_md_personality); 5043EXPORT_SYMBOL(unregister_md_personality);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 145cdc5ad008..e6aa309a66d7 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -35,15 +35,10 @@
35#define NR_RESERVED_BUFS 32 35#define NR_RESERVED_BUFS 32
36 36
37 37
38static mdk_personality_t multipath_personality;
39
40
41static void *mp_pool_alloc(gfp_t gfp_flags, void *data) 38static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
42{ 39{
43 struct multipath_bh *mpb; 40 struct multipath_bh *mpb;
44 mpb = kmalloc(sizeof(*mpb), gfp_flags); 41 mpb = kzalloc(sizeof(*mpb), gfp_flags);
45 if (mpb)
46 memset(mpb, 0, sizeof(*mpb));
47 return mpb; 42 return mpb;
48} 43}
49 44
@@ -444,7 +439,7 @@ static int multipath_run (mddev_t *mddev)
444 * should be freed in multipath_stop()] 439 * should be freed in multipath_stop()]
445 */ 440 */
446 441
447 conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL); 442 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
448 mddev->private = conf; 443 mddev->private = conf;
449 if (!conf) { 444 if (!conf) {
450 printk(KERN_ERR 445 printk(KERN_ERR
@@ -452,9 +447,8 @@ static int multipath_run (mddev_t *mddev)
452 mdname(mddev)); 447 mdname(mddev));
453 goto out; 448 goto out;
454 } 449 }
455 memset(conf, 0, sizeof(*conf));
456 450
457 conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks, 451 conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
458 GFP_KERNEL); 452 GFP_KERNEL);
459 if (!conf->multipaths) { 453 if (!conf->multipaths) {
460 printk(KERN_ERR 454 printk(KERN_ERR
@@ -462,7 +456,6 @@ static int multipath_run (mddev_t *mddev)
462 mdname(mddev)); 456 mdname(mddev));
463 goto out_free_conf; 457 goto out_free_conf;
464 } 458 }
465 memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks);
466 459
467 conf->working_disks = 0; 460 conf->working_disks = 0;
468 ITERATE_RDEV(mddev,rdev,tmp) { 461 ITERATE_RDEV(mddev,rdev,tmp) {
@@ -557,9 +550,10 @@ static int multipath_stop (mddev_t *mddev)
557 return 0; 550 return 0;
558} 551}
559 552
560static mdk_personality_t multipath_personality= 553static struct mdk_personality multipath_personality =
561{ 554{
562 .name = "multipath", 555 .name = "multipath",
556 .level = LEVEL_MULTIPATH,
563 .owner = THIS_MODULE, 557 .owner = THIS_MODULE,
564 .make_request = multipath_make_request, 558 .make_request = multipath_make_request,
565 .run = multipath_run, 559 .run = multipath_run,
@@ -572,15 +566,17 @@ static mdk_personality_t multipath_personality=
572 566
573static int __init multipath_init (void) 567static int __init multipath_init (void)
574{ 568{
575 return register_md_personality (MULTIPATH, &multipath_personality); 569 return register_md_personality (&multipath_personality);
576} 570}
577 571
578static void __exit multipath_exit (void) 572static void __exit multipath_exit (void)
579{ 573{
580 unregister_md_personality (MULTIPATH); 574 unregister_md_personality (&multipath_personality);
581} 575}
582 576
583module_init(multipath_init); 577module_init(multipath_init);
584module_exit(multipath_exit); 578module_exit(multipath_exit);
585MODULE_LICENSE("GPL"); 579MODULE_LICENSE("GPL");
586MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ 580MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
581MODULE_ALIAS("md-multipath");
582MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index fece3277c2a5..abbca150202b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -113,21 +113,16 @@ static int create_strip_zones (mddev_t *mddev)
113 } 113 }
114 printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); 114 printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
115 115
116 conf->strip_zone = kmalloc(sizeof(struct strip_zone)* 116 conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
117 conf->nr_strip_zones, GFP_KERNEL); 117 conf->nr_strip_zones, GFP_KERNEL);
118 if (!conf->strip_zone) 118 if (!conf->strip_zone)
119 return 1; 119 return 1;
120 conf->devlist = kmalloc(sizeof(mdk_rdev_t*)* 120 conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
121 conf->nr_strip_zones*mddev->raid_disks, 121 conf->nr_strip_zones*mddev->raid_disks,
122 GFP_KERNEL); 122 GFP_KERNEL);
123 if (!conf->devlist) 123 if (!conf->devlist)
124 return 1; 124 return 1;
125 125
126 memset(conf->strip_zone, 0,sizeof(struct strip_zone)*
127 conf->nr_strip_zones);
128 memset(conf->devlist, 0,
129 sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks);
130
131 /* The first zone must contain all devices, so here we check that 126 /* The first zone must contain all devices, so here we check that
132 * there is a proper alignment of slots to devices and find them all 127 * there is a proper alignment of slots to devices and find them all
133 */ 128 */
@@ -280,7 +275,11 @@ static int raid0_run (mddev_t *mddev)
280 mdk_rdev_t *rdev; 275 mdk_rdev_t *rdev;
281 struct list_head *tmp; 276 struct list_head *tmp;
282 277
283 printk("%s: setting max_sectors to %d, segment boundary to %d\n", 278 if (mddev->chunk_size == 0) {
279 printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
280 return -EINVAL;
281 }
282 printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
284 mdname(mddev), 283 mdname(mddev),
285 mddev->chunk_size >> 9, 284 mddev->chunk_size >> 9,
286 (mddev->chunk_size>>1)-1); 285 (mddev->chunk_size>>1)-1);
@@ -361,7 +360,7 @@ static int raid0_run (mddev_t *mddev)
361 * chunksize should be used in that case. 360 * chunksize should be used in that case.
362 */ 361 */
363 { 362 {
364 int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; 363 int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
365 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 364 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
366 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 365 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
367 } 366 }
@@ -512,9 +511,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
512 return; 511 return;
513} 512}
514 513
515static mdk_personality_t raid0_personality= 514static struct mdk_personality raid0_personality=
516{ 515{
517 .name = "raid0", 516 .name = "raid0",
517 .level = 0,
518 .owner = THIS_MODULE, 518 .owner = THIS_MODULE,
519 .make_request = raid0_make_request, 519 .make_request = raid0_make_request,
520 .run = raid0_run, 520 .run = raid0_run,
@@ -524,15 +524,17 @@ static mdk_personality_t raid0_personality=
524 524
525static int __init raid0_init (void) 525static int __init raid0_init (void)
526{ 526{
527 return register_md_personality (RAID0, &raid0_personality); 527 return register_md_personality (&raid0_personality);
528} 528}
529 529
530static void raid0_exit (void) 530static void raid0_exit (void)
531{ 531{
532 unregister_md_personality (RAID0); 532 unregister_md_personality (&raid0_personality);
533} 533}
534 534
535module_init(raid0_init); 535module_init(raid0_init);
536module_exit(raid0_exit); 536module_exit(raid0_exit);
537MODULE_LICENSE("GPL"); 537MODULE_LICENSE("GPL");
538MODULE_ALIAS("md-personality-2"); /* RAID0 */ 538MODULE_ALIAS("md-personality-2"); /* RAID0 */
539MODULE_ALIAS("md-raid0");
540MODULE_ALIAS("md-level-0");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 229d7b204297..a06ff91f27e2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -47,10 +47,11 @@
47 */ 47 */
48#define NR_RAID1_BIOS 256 48#define NR_RAID1_BIOS 256
49 49
50static mdk_personality_t raid1_personality;
51 50
52static void unplug_slaves(mddev_t *mddev); 51static void unplug_slaves(mddev_t *mddev);
53 52
53static void allow_barrier(conf_t *conf);
54static void lower_barrier(conf_t *conf);
54 55
55static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 56static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
56{ 57{
@@ -59,10 +60,8 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
59 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 60 int size = offsetof(r1bio_t, bios[pi->raid_disks]);
60 61
61 /* allocate a r1bio with room for raid_disks entries in the bios array */ 62 /* allocate a r1bio with room for raid_disks entries in the bios array */
62 r1_bio = kmalloc(size, gfp_flags); 63 r1_bio = kzalloc(size, gfp_flags);
63 if (r1_bio) 64 if (!r1_bio)
64 memset(r1_bio, 0, size);
65 else
66 unplug_slaves(pi->mddev); 65 unplug_slaves(pi->mddev);
67 66
68 return r1_bio; 67 return r1_bio;
@@ -104,15 +103,30 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
104 } 103 }
105 /* 104 /*
106 * Allocate RESYNC_PAGES data pages and attach them to 105 * Allocate RESYNC_PAGES data pages and attach them to
107 * the first bio; 106 * the first bio.
107 * If this is a user-requested check/repair, allocate
108 * RESYNC_PAGES for each bio.
108 */ 109 */
109 bio = r1_bio->bios[0]; 110 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
110 for (i = 0; i < RESYNC_PAGES; i++) { 111 j = pi->raid_disks;
111 page = alloc_page(gfp_flags); 112 else
112 if (unlikely(!page)) 113 j = 1;
113 goto out_free_pages; 114 while(j--) {
114 115 bio = r1_bio->bios[j];
115 bio->bi_io_vec[i].bv_page = page; 116 for (i = 0; i < RESYNC_PAGES; i++) {
117 page = alloc_page(gfp_flags);
118 if (unlikely(!page))
119 goto out_free_pages;
120
121 bio->bi_io_vec[i].bv_page = page;
122 }
123 }
124 /* If not user-requested, copy the page pointers to all bios */
125 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
126 for (i=0; i<RESYNC_PAGES ; i++)
127 for (j=1; j<pi->raid_disks; j++)
128 r1_bio->bios[j]->bi_io_vec[i].bv_page =
129 r1_bio->bios[0]->bi_io_vec[i].bv_page;
116 } 130 }
117 131
118 r1_bio->master_bio = NULL; 132 r1_bio->master_bio = NULL;
@@ -120,8 +134,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
120 return r1_bio; 134 return r1_bio;
121 135
122out_free_pages: 136out_free_pages:
123 for ( ; i > 0 ; i--) 137 for (i=0; i < RESYNC_PAGES ; i++)
124 __free_page(bio->bi_io_vec[i-1].bv_page); 138 for (j=0 ; j < pi->raid_disks; j++)
139 safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
140 j = -1;
125out_free_bio: 141out_free_bio:
126 while ( ++j < pi->raid_disks ) 142 while ( ++j < pi->raid_disks )
127 bio_put(r1_bio->bios[j]); 143 bio_put(r1_bio->bios[j]);
@@ -132,14 +148,16 @@ out_free_bio:
132static void r1buf_pool_free(void *__r1_bio, void *data) 148static void r1buf_pool_free(void *__r1_bio, void *data)
133{ 149{
134 struct pool_info *pi = data; 150 struct pool_info *pi = data;
135 int i; 151 int i,j;
136 r1bio_t *r1bio = __r1_bio; 152 r1bio_t *r1bio = __r1_bio;
137 struct bio *bio = r1bio->bios[0];
138 153
139 for (i = 0; i < RESYNC_PAGES; i++) { 154 for (i = 0; i < RESYNC_PAGES; i++)
140 __free_page(bio->bi_io_vec[i].bv_page); 155 for (j = pi->raid_disks; j-- ;) {
141 bio->bi_io_vec[i].bv_page = NULL; 156 if (j == 0 ||
142 } 157 r1bio->bios[j]->bi_io_vec[i].bv_page !=
158 r1bio->bios[0]->bi_io_vec[i].bv_page)
159 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
160 }
143 for (i=0 ; i < pi->raid_disks; i++) 161 for (i=0 ; i < pi->raid_disks; i++)
144 bio_put(r1bio->bios[i]); 162 bio_put(r1bio->bios[i]);
145 163
@@ -152,7 +170,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
152 170
153 for (i = 0; i < conf->raid_disks; i++) { 171 for (i = 0; i < conf->raid_disks; i++) {
154 struct bio **bio = r1_bio->bios + i; 172 struct bio **bio = r1_bio->bios + i;
155 if (*bio) 173 if (*bio && *bio != IO_BLOCKED)
156 bio_put(*bio); 174 bio_put(*bio);
157 *bio = NULL; 175 *bio = NULL;
158 } 176 }
@@ -160,20 +178,13 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
160 178
161static inline void free_r1bio(r1bio_t *r1_bio) 179static inline void free_r1bio(r1bio_t *r1_bio)
162{ 180{
163 unsigned long flags;
164
165 conf_t *conf = mddev_to_conf(r1_bio->mddev); 181 conf_t *conf = mddev_to_conf(r1_bio->mddev);
166 182
167 /* 183 /*
168 * Wake up any possible resync thread that waits for the device 184 * Wake up any possible resync thread that waits for the device
169 * to go idle. 185 * to go idle.
170 */ 186 */
171 spin_lock_irqsave(&conf->resync_lock, flags); 187 allow_barrier(conf);
172 if (!--conf->nr_pending) {
173 wake_up(&conf->wait_idle);
174 wake_up(&conf->wait_resume);
175 }
176 spin_unlock_irqrestore(&conf->resync_lock, flags);
177 188
178 put_all_bios(conf, r1_bio); 189 put_all_bios(conf, r1_bio);
179 mempool_free(r1_bio, conf->r1bio_pool); 190 mempool_free(r1_bio, conf->r1bio_pool);
@@ -182,22 +193,17 @@ static inline void free_r1bio(r1bio_t *r1_bio)
182static inline void put_buf(r1bio_t *r1_bio) 193static inline void put_buf(r1bio_t *r1_bio)
183{ 194{
184 conf_t *conf = mddev_to_conf(r1_bio->mddev); 195 conf_t *conf = mddev_to_conf(r1_bio->mddev);
185 unsigned long flags; 196 int i;
186 197
187 mempool_free(r1_bio, conf->r1buf_pool); 198 for (i=0; i<conf->raid_disks; i++) {
199 struct bio *bio = r1_bio->bios[i];
200 if (bio->bi_end_io)
201 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
202 }
188 203
189 spin_lock_irqsave(&conf->resync_lock, flags); 204 mempool_free(r1_bio, conf->r1buf_pool);
190 if (!conf->barrier)
191 BUG();
192 --conf->barrier;
193 wake_up(&conf->wait_resume);
194 wake_up(&conf->wait_idle);
195 205
196 if (!--conf->nr_pending) { 206 lower_barrier(conf);
197 wake_up(&conf->wait_idle);
198 wake_up(&conf->wait_resume);
199 }
200 spin_unlock_irqrestore(&conf->resync_lock, flags);
201} 207}
202 208
203static void reschedule_retry(r1bio_t *r1_bio) 209static void reschedule_retry(r1bio_t *r1_bio)
@@ -208,8 +214,10 @@ static void reschedule_retry(r1bio_t *r1_bio)
208 214
209 spin_lock_irqsave(&conf->device_lock, flags); 215 spin_lock_irqsave(&conf->device_lock, flags);
210 list_add(&r1_bio->retry_list, &conf->retry_list); 216 list_add(&r1_bio->retry_list, &conf->retry_list);
217 conf->nr_queued ++;
211 spin_unlock_irqrestore(&conf->device_lock, flags); 218 spin_unlock_irqrestore(&conf->device_lock, flags);
212 219
220 wake_up(&conf->wait_barrier);
213 md_wakeup_thread(mddev->thread); 221 md_wakeup_thread(mddev->thread);
214} 222}
215 223
@@ -261,9 +269,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
261 /* 269 /*
262 * this branch is our 'one mirror IO has finished' event handler: 270 * this branch is our 'one mirror IO has finished' event handler:
263 */ 271 */
264 if (!uptodate) 272 update_head_pos(mirror, r1_bio);
265 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); 273
266 else 274 if (uptodate || conf->working_disks <= 1) {
267 /* 275 /*
268 * Set R1BIO_Uptodate in our master bio, so that 276 * Set R1BIO_Uptodate in our master bio, so that
269 * we will return a good error code to the higher 277
@@ -273,16 +281,11 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
273 * user-side. So if something waits for IO, then it will 281 * user-side. So if something waits for IO, then it will
274 * wait for the 'master' bio. 282 * wait for the 'master' bio.
275 */ 283 */
276 set_bit(R1BIO_Uptodate, &r1_bio->state); 284 if (uptodate)
277 285 set_bit(R1BIO_Uptodate, &r1_bio->state);
278 update_head_pos(mirror, r1_bio);
279 286
280 /*
281 * we have only one bio on the read side
282 */
283 if (uptodate)
284 raid_end_bio_io(r1_bio); 287 raid_end_bio_io(r1_bio);
285 else { 288 } else {
286 /* 289 /*
287 * oops, read error: 290 * oops, read error:
288 */ 291 */
@@ -378,7 +381,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
378 /* free extra copy of the data pages */ 381 /* free extra copy of the data pages */
379 int i = bio->bi_vcnt; 382 int i = bio->bi_vcnt;
380 while (i--) 383 while (i--)
381 __free_page(bio->bi_io_vec[i].bv_page); 384 safe_put_page(bio->bi_io_vec[i].bv_page);
382 } 385 }
383 /* clear the bitmap if all writes complete successfully */ 386 /* clear the bitmap if all writes complete successfully */
384 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 387 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -433,11 +436,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
433 new_disk = 0; 436 new_disk = 0;
434 437
435 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 438 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
439 r1_bio->bios[new_disk] == IO_BLOCKED ||
436 !rdev || !test_bit(In_sync, &rdev->flags) 440 !rdev || !test_bit(In_sync, &rdev->flags)
437 || test_bit(WriteMostly, &rdev->flags); 441 || test_bit(WriteMostly, &rdev->flags);
438 rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { 442 rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
439 443
440 if (rdev && test_bit(In_sync, &rdev->flags)) 444 if (rdev && test_bit(In_sync, &rdev->flags) &&
445 r1_bio->bios[new_disk] != IO_BLOCKED)
441 wonly_disk = new_disk; 446 wonly_disk = new_disk;
442 447
443 if (new_disk == conf->raid_disks - 1) { 448 if (new_disk == conf->raid_disks - 1) {
@@ -451,11 +456,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
451 456
452 /* make sure the disk is operational */ 457 /* make sure the disk is operational */
453 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 458 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
459 r1_bio->bios[new_disk] == IO_BLOCKED ||
454 !rdev || !test_bit(In_sync, &rdev->flags) || 460 !rdev || !test_bit(In_sync, &rdev->flags) ||
455 test_bit(WriteMostly, &rdev->flags); 461 test_bit(WriteMostly, &rdev->flags);
456 rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { 462 rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
457 463
458 if (rdev && test_bit(In_sync, &rdev->flags)) 464 if (rdev && test_bit(In_sync, &rdev->flags) &&
465 r1_bio->bios[new_disk] != IO_BLOCKED)
459 wonly_disk = new_disk; 466 wonly_disk = new_disk;
460 467
461 if (new_disk <= 0) 468 if (new_disk <= 0)
@@ -492,7 +499,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
492 499
493 rdev = rcu_dereference(conf->mirrors[disk].rdev); 500 rdev = rcu_dereference(conf->mirrors[disk].rdev);
494 501
495 if (!rdev || 502 if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
496 !test_bit(In_sync, &rdev->flags) || 503 !test_bit(In_sync, &rdev->flags) ||
497 test_bit(WriteMostly, &rdev->flags)) 504 test_bit(WriteMostly, &rdev->flags))
498 continue; 505 continue;
@@ -520,7 +527,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
520 /* cannot risk returning a device that failed 527 /* cannot risk returning a device that failed
521 * before we inc'ed nr_pending 528 * before we inc'ed nr_pending
522 */ 529 */
523 atomic_dec(&rdev->nr_pending); 530 rdev_dec_pending(rdev, conf->mddev);
524 goto retry; 531 goto retry;
525 } 532 }
526 conf->next_seq_sect = this_sector + sectors; 533 conf->next_seq_sect = this_sector + sectors;
@@ -593,42 +600,119 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
593 return ret; 600 return ret;
594} 601}
595 602
596/* 603/* Barriers....
597 * Throttle resync depth, so that we can both get proper overlapping of 604 * Sometimes we need to suspend IO while we do something else,
598 * requests, but are still able to handle normal requests quickly. 605 * either some resync/recovery, or reconfigure the array.
606 * To do this we raise a 'barrier'.
607 * The 'barrier' is a counter that can be raised multiple times
608 * to count how many activities are happening which preclude
609 * normal IO.
610 * We can only raise the barrier if there is no pending IO.
611 * i.e. if nr_pending == 0.
612 * We choose only to raise the barrier if no-one is waiting for the
613 * barrier to go down. This means that as soon as an IO request
614 * is ready, no other operations which require a barrier will start
615 * until the IO request has had a chance.
616 *
617 * So: regular IO calls 'wait_barrier'. When that returns there
618 * is no backgroup IO happening, It must arrange to call
619 * allow_barrier when it has finished its IO.
620 * backgroup IO calls must call raise_barrier. Once that returns
621 * there is no normal IO happeing. It must arrange to call
622 * lower_barrier when the particular background IO completes.
599 */ 623 */
600#define RESYNC_DEPTH 32 624#define RESYNC_DEPTH 32
601 625
602static void device_barrier(conf_t *conf, sector_t sect) 626static void raise_barrier(conf_t *conf)
603{ 627{
604 spin_lock_irq(&conf->resync_lock); 628 spin_lock_irq(&conf->resync_lock);
605 wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), 629
606 conf->resync_lock, raid1_unplug(conf->mddev->queue)); 630 /* Wait until no block IO is waiting */
607 631 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
608 if (!conf->barrier++) { 632 conf->resync_lock,
609 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, 633 raid1_unplug(conf->mddev->queue));
610 conf->resync_lock, raid1_unplug(conf->mddev->queue)); 634
611 if (conf->nr_pending) 635 /* block any new IO from starting */
612 BUG(); 636 conf->barrier++;
637
638 /* Now wait for all pending IO to complete */
639 wait_event_lock_irq(conf->wait_barrier,
640 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
641 conf->resync_lock,
642 raid1_unplug(conf->mddev->queue));
643
644 spin_unlock_irq(&conf->resync_lock);
645}
646
647static void lower_barrier(conf_t *conf)
648{
649 unsigned long flags;
650 spin_lock_irqsave(&conf->resync_lock, flags);
651 conf->barrier--;
652 spin_unlock_irqrestore(&conf->resync_lock, flags);
653 wake_up(&conf->wait_barrier);
654}
655
656static void wait_barrier(conf_t *conf)
657{
658 spin_lock_irq(&conf->resync_lock);
659 if (conf->barrier) {
660 conf->nr_waiting++;
661 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
662 conf->resync_lock,
663 raid1_unplug(conf->mddev->queue));
664 conf->nr_waiting--;
613 } 665 }
614 wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, 666 conf->nr_pending++;
615 conf->resync_lock, raid1_unplug(conf->mddev->queue)); 667 spin_unlock_irq(&conf->resync_lock);
616 conf->next_resync = sect; 668}
669
670static void allow_barrier(conf_t *conf)
671{
672 unsigned long flags;
673 spin_lock_irqsave(&conf->resync_lock, flags);
674 conf->nr_pending--;
675 spin_unlock_irqrestore(&conf->resync_lock, flags);
676 wake_up(&conf->wait_barrier);
677}
678
679static void freeze_array(conf_t *conf)
680{
681 /* stop sync IO and normal IO and wait for everything to
682 * go quiet.
683 * We increment barrier and nr_waiting, and then
684 * wait until barrier+nr_pending matches nr_queued+2
685 */
686 spin_lock_irq(&conf->resync_lock);
687 conf->barrier++;
688 conf->nr_waiting++;
689 wait_event_lock_irq(conf->wait_barrier,
690 conf->barrier+conf->nr_pending == conf->nr_queued+2,
691 conf->resync_lock,
692 raid1_unplug(conf->mddev->queue));
693 spin_unlock_irq(&conf->resync_lock);
694}
695static void unfreeze_array(conf_t *conf)
696{
697 /* reverse the effect of the freeze */
698 spin_lock_irq(&conf->resync_lock);
699 conf->barrier--;
700 conf->nr_waiting--;
701 wake_up(&conf->wait_barrier);
617 spin_unlock_irq(&conf->resync_lock); 702 spin_unlock_irq(&conf->resync_lock);
618} 703}
619 704
705
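The four helpers above pair up: wait_barrier()/allow_barrier() bracket every regular IO, raise_barrier()/lower_barrier() bracket each resync/recovery pass, and raise_barrier() defers to any queued regular IO before draining nr_pending. A minimal user-space sketch of that counter discipline, assuming only that a pthread mutex and condition variable stand in for resync_lock and wait_barrier (illustrative, not kernel code):

#include <pthread.h>

#define RESYNC_DEPTH 32

struct barrier_ctl {
	pthread_mutex_t lock;	/* stands in for conf->resync_lock */
	pthread_cond_t wait;	/* stands in for conf->wait_barrier */
	int barrier;		/* background passes in progress */
	int nr_pending;		/* regular IO in flight */
	int nr_waiting;		/* regular IO blocked on the barrier */
};

/* regular IO: wait for the barrier to drop, then count ourselves in */
static void wait_barrier(struct barrier_ctl *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->barrier) {
		c->nr_waiting++;
		while (c->barrier)
			pthread_cond_wait(&c->wait, &c->lock);
		c->nr_waiting--;
		pthread_cond_broadcast(&c->wait);
	}
	c->nr_pending++;
	pthread_mutex_unlock(&c->lock);
}

static void allow_barrier(struct barrier_ctl *c)
{
	pthread_mutex_lock(&c->lock);
	c->nr_pending--;
	pthread_cond_broadcast(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

/* background IO: let queued regular IO go first, then drain it */
static void raise_barrier(struct barrier_ctl *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->nr_waiting)
		pthread_cond_wait(&c->wait, &c->lock);
	c->barrier++;
	while (c->nr_pending || c->barrier >= RESYNC_DEPTH)
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void lower_barrier(struct barrier_ctl *c)
{
	pthread_mutex_lock(&c->lock);
	c->barrier--;
	pthread_cond_broadcast(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

Yielding to queued regular IO inside raise_barrier() is what keeps a long resync from starving normal requests.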
620/* duplicate the data pages for behind I/O */ 706/* duplicate the data pages for behind I/O */
621static struct page **alloc_behind_pages(struct bio *bio) 707static struct page **alloc_behind_pages(struct bio *bio)
622{ 708{
623 int i; 709 int i;
624 struct bio_vec *bvec; 710 struct bio_vec *bvec;
625 struct page **pages = kmalloc(bio->bi_vcnt * sizeof(struct page *), 711 struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
626 GFP_NOIO); 712 GFP_NOIO);
627 if (unlikely(!pages)) 713 if (unlikely(!pages))
628 goto do_sync_io; 714 goto do_sync_io;
629 715
630 memset(pages, 0, bio->bi_vcnt * sizeof(struct page *));
631
632 bio_for_each_segment(bvec, bio, i) { 716 bio_for_each_segment(bvec, bio, i) {
633 pages[i] = alloc_page(GFP_NOIO); 717 pages[i] = alloc_page(GFP_NOIO);
634 if (unlikely(!pages[i])) 718 if (unlikely(!pages[i]))
@@ -644,7 +728,7 @@ static struct page **alloc_behind_pages(struct bio *bio)
644do_sync_io: 728do_sync_io:
645 if (pages) 729 if (pages)
646 for (i = 0; i < bio->bi_vcnt && pages[i]; i++) 730 for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
647 __free_page(pages[i]); 731 put_page(pages[i]);
648 kfree(pages); 732 kfree(pages);
649 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 733 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
650 return NULL; 734 return NULL;
@@ -678,10 +762,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
678 */ 762 */
679 md_write_start(mddev, bio); /* wait on superblock update early */ 763 md_write_start(mddev, bio); /* wait on superblock update early */
680 764
681 spin_lock_irq(&conf->resync_lock); 765 wait_barrier(conf);
682 wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
683 conf->nr_pending++;
684 spin_unlock_irq(&conf->resync_lock);
685 766
686 disk_stat_inc(mddev->gendisk, ios[rw]); 767 disk_stat_inc(mddev->gendisk, ios[rw]);
687 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 768 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
@@ -749,7 +830,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
749 !test_bit(Faulty, &rdev->flags)) { 830 !test_bit(Faulty, &rdev->flags)) {
750 atomic_inc(&rdev->nr_pending); 831 atomic_inc(&rdev->nr_pending);
751 if (test_bit(Faulty, &rdev->flags)) { 832 if (test_bit(Faulty, &rdev->flags)) {
752 atomic_dec(&rdev->nr_pending); 833 rdev_dec_pending(rdev, mddev);
753 r1_bio->bios[i] = NULL; 834 r1_bio->bios[i] = NULL;
754 } else 835 } else
755 r1_bio->bios[i] = bio; 836 r1_bio->bios[i] = bio;
@@ -909,13 +990,8 @@ static void print_conf(conf_t *conf)
909 990
910static void close_sync(conf_t *conf) 991static void close_sync(conf_t *conf)
911{ 992{
912 spin_lock_irq(&conf->resync_lock); 993 wait_barrier(conf);
913 wait_event_lock_irq(conf->wait_resume, !conf->barrier, 994 allow_barrier(conf);
914 conf->resync_lock, raid1_unplug(conf->mddev->queue));
915 spin_unlock_irq(&conf->resync_lock);
916
917 if (conf->barrier) BUG();
918 if (waitqueue_active(&conf->wait_idle)) BUG();
919 995
920 mempool_destroy(conf->r1buf_pool); 996 mempool_destroy(conf->r1buf_pool);
921 conf->r1buf_pool = NULL; 997 conf->r1buf_pool = NULL;
@@ -1015,28 +1091,27 @@ abort:
1015 1091
1016static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1092static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1017{ 1093{
1018 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1019 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1094 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1020 conf_t *conf = mddev_to_conf(r1_bio->mddev); 1095 int i;
1021 1096
1022 if (bio->bi_size) 1097 if (bio->bi_size)
1023 return 1; 1098 return 1;
1024 1099
1025 if (r1_bio->bios[r1_bio->read_disk] != bio) 1100 for (i=r1_bio->mddev->raid_disks; i--; )
1026 BUG(); 1101 if (r1_bio->bios[i] == bio)
1027 update_head_pos(r1_bio->read_disk, r1_bio); 1102 break;
1103 BUG_ON(i < 0);
1104 update_head_pos(i, r1_bio);
1028 /* 1105 /*
1029 * we have read a block, now it needs to be re-written, 1106 * we have read a block, now it needs to be re-written,
1030 * or re-read if the read failed. 1107 * or re-read if the read failed.
1031 * We don't do much here, just schedule handling by raid1d 1108 * We don't do much here, just schedule handling by raid1d
1032 */ 1109 */
1033 if (!uptodate) { 1110 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1034 md_error(r1_bio->mddev,
1035 conf->mirrors[r1_bio->read_disk].rdev);
1036 } else
1037 set_bit(R1BIO_Uptodate, &r1_bio->state); 1111 set_bit(R1BIO_Uptodate, &r1_bio->state);
1038 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); 1112
1039 reschedule_retry(r1_bio); 1113 if (atomic_dec_and_test(&r1_bio->remaining))
1114 reschedule_retry(r1_bio);
1040 return 0; 1115 return 0;
1041} 1116}
1042 1117
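With reads now outstanding on every readable device, completion is reference-counted: remaining is primed to the number of reads issued (see the sync_request changes later in this patch) and only the final end_sync_read schedules the retry work. A hedged stand-alone form of that last-completer test, with C11 atomics standing in for the kernel's atomic_dec_and_test():

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative only: true for exactly one caller, the one whose
 * decrement takes the counter to zero, mirroring
 * atomic_dec_and_test(&r1_bio->remaining) in end_sync_read above. */
static bool last_completer(atomic_int *remaining)
{
	return atomic_fetch_sub(remaining, 1) == 1;
}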
@@ -1066,7 +1141,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1066 md_done_sync(mddev, r1_bio->sectors, uptodate); 1141 md_done_sync(mddev, r1_bio->sectors, uptodate);
1067 put_buf(r1_bio); 1142 put_buf(r1_bio);
1068 } 1143 }
1069 rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
1070 return 0; 1144 return 0;
1071} 1145}
1072 1146
@@ -1079,34 +1153,173 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1079 1153
1080 bio = r1_bio->bios[r1_bio->read_disk]; 1154 bio = r1_bio->bios[r1_bio->read_disk];
1081 1155
1082/* 1156
1083 if (r1_bio->sector == 0) printk("First sync write startss\n"); 1157 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1084*/ 1158 /* We have read all readable devices. If we haven't
1085 /* 1159 * got the block, then there is no hope left.
1086 * schedule writes 1160 * If we have, then we want to do a comparison
1087 */ 1161 * and skip the write if everything is the same.
1162 * If any blocks failed to read, then we need to
1163 * attempt an over-write
1164 */
1165 int primary;
1166 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1167 for (i=0; i<mddev->raid_disks; i++)
1168 if (r1_bio->bios[i]->bi_end_io == end_sync_read)
1169 md_error(mddev, conf->mirrors[i].rdev);
1170
1171 md_done_sync(mddev, r1_bio->sectors, 1);
1172 put_buf(r1_bio);
1173 return;
1174 }
1175 for (primary=0; primary<mddev->raid_disks; primary++)
1176 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1177 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1178 r1_bio->bios[primary]->bi_end_io = NULL;
1179 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1180 break;
1181 }
1182 r1_bio->read_disk = primary;
1183 for (i=0; i<mddev->raid_disks; i++)
1184 if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
1185 test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
1186 int j;
1187 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
1188 struct bio *pbio = r1_bio->bios[primary];
1189 struct bio *sbio = r1_bio->bios[i];
1190 for (j = vcnt; j-- ; )
1191 if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
1192 page_address(sbio->bi_io_vec[j].bv_page),
1193 PAGE_SIZE))
1194 break;
1195 if (j >= 0)
1196 mddev->resync_mismatches += r1_bio->sectors;
1197 if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
1198 sbio->bi_end_io = NULL;
1199 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1200 } else {
1201 /* fixup the bio for reuse */
1202 sbio->bi_vcnt = vcnt;
1203 sbio->bi_size = r1_bio->sectors << 9;
1204 sbio->bi_idx = 0;
1205 sbio->bi_phys_segments = 0;
1206 sbio->bi_hw_segments = 0;
1207 sbio->bi_hw_front_size = 0;
1208 sbio->bi_hw_back_size = 0;
1209 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1210 sbio->bi_flags |= 1 << BIO_UPTODATE;
1211 sbio->bi_next = NULL;
1212 sbio->bi_sector = r1_bio->sector +
1213 conf->mirrors[i].rdev->data_offset;
1214 sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1215 }
1216 }
1217 }
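Taken on its own, the comparison the check pass just performed reduces to a per-page memcmp(); pages_differ() below is a hypothetical helper, not part of the patch, with plain buffers standing in for the bv_page contents:

#include <string.h>

/* Hypothetical helper: compare the resync pages of a secondary bio
 * against the primary, as the loop above does over
 * vcnt = r1_bio->sectors >> (PAGE_SHIFT - 9) pages; a nonzero return
 * is accounted in resync_mismatches, and the copy is rewritten unless
 * this is a pure MD_RECOVERY_CHECK pass. */
static int pages_differ(void *const *primary, void *const *secondary,
			int vcnt, size_t page_size)
{
	int j;

	for (j = vcnt - 1; j >= 0; j--)
		if (memcmp(primary[j], secondary[j], page_size))
			return 1;
	return 0;
}

Assuming 4KiB pages and the 64KiB resync window raid1 uses, each r1_bio covers 128 sectors, so vcnt = 128 >> 3 = 16 page comparisons per buffer.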
1088 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { 1218 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1089 /* 1219 /* ouch - failed to read all of that.
1090 * There is no point trying a read-for-reconstruct as 1220 * Try some synchronous reads of other devices to get
1091 * reconstruct is about to be aborted 1221 * good data, much like with normal read errors. Only
1222 * read into the pages we already have so that we don't
1223 * need to re-issue the read request.
1224 * We don't need to freeze the array, because while we are in
1225 * an active sync request there is no normal IO, and
1226 * no overlapping syncs.
1092 */ 1227 */
1093 char b[BDEVNAME_SIZE]; 1228 sector_t sect = r1_bio->sector;
1094 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" 1229 int sectors = r1_bio->sectors;
1095 " for block %llu\n", 1230 int idx = 0;
1096 bdevname(bio->bi_bdev,b), 1231
1097 (unsigned long long)r1_bio->sector); 1232 while(sectors) {
1098 md_done_sync(mddev, r1_bio->sectors, 0); 1233 int s = sectors;
1099 put_buf(r1_bio); 1234 int d = r1_bio->read_disk;
1100 return; 1235 int success = 0;
1236 mdk_rdev_t *rdev;
1237
1238 if (s > (PAGE_SIZE>>9))
1239 s = PAGE_SIZE >> 9;
1240 do {
1241 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1242 rdev = conf->mirrors[d].rdev;
1243 if (sync_page_io(rdev->bdev,
1244 sect + rdev->data_offset,
1245 s<<9,
1246 bio->bi_io_vec[idx].bv_page,
1247 READ)) {
1248 success = 1;
1249 break;
1250 }
1251 }
1252 d++;
1253 if (d == conf->raid_disks)
1254 d = 0;
1255 } while (!success && d != r1_bio->read_disk);
1256
1257 if (success) {
1258 int start = d;
1259 /* write it back and re-read */
1260 set_bit(R1BIO_Uptodate, &r1_bio->state);
1261 while (d != r1_bio->read_disk) {
1262 if (d == 0)
1263 d = conf->raid_disks;
1264 d--;
1265 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1266 continue;
1267 rdev = conf->mirrors[d].rdev;
1268 atomic_add(s, &rdev->corrected_errors);
1269 if (sync_page_io(rdev->bdev,
1270 sect + rdev->data_offset,
1271 s<<9,
1272 bio->bi_io_vec[idx].bv_page,
1273 WRITE) == 0)
1274 md_error(mddev, rdev);
1275 }
1276 d = start;
1277 while (d != r1_bio->read_disk) {
1278 if (d == 0)
1279 d = conf->raid_disks;
1280 d--;
1281 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1282 continue;
1283 rdev = conf->mirrors[d].rdev;
1284 if (sync_page_io(rdev->bdev,
1285 sect + rdev->data_offset,
1286 s<<9,
1287 bio->bi_io_vec[idx].bv_page,
1288 READ) == 0)
1289 md_error(mddev, rdev);
1290 }
1291 } else {
1292 char b[BDEVNAME_SIZE];
1293 /* Cannot read from anywhere, array is toast */
1294 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1295 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
1296 " for block %llu\n",
1297 bdevname(bio->bi_bdev,b),
1298 (unsigned long long)r1_bio->sector);
1299 md_done_sync(mddev, r1_bio->sectors, 0);
1300 put_buf(r1_bio);
1301 return;
1302 }
1303 sectors -= s;
1304 sect += s;
1305 idx ++;
1306 }
1101 } 1307 }
1102 1308
1309 /*
1310 * schedule writes
1311 */
1103 atomic_set(&r1_bio->remaining, 1); 1312 atomic_set(&r1_bio->remaining, 1);
1104 for (i = 0; i < disks ; i++) { 1313 for (i = 0; i < disks ; i++) {
1105 wbio = r1_bio->bios[i]; 1314 wbio = r1_bio->bios[i];
1106 if (wbio->bi_end_io != end_sync_write) 1315 if (wbio->bi_end_io == NULL ||
1316 (wbio->bi_end_io == end_sync_read &&
1317 (i == r1_bio->read_disk ||
1318 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1107 continue; 1319 continue;
1108 1320
1109 atomic_inc(&conf->mirrors[i].rdev->nr_pending); 1321 wbio->bi_rw = WRITE;
1322 wbio->bi_end_io = end_sync_write;
1110 atomic_inc(&r1_bio->remaining); 1323 atomic_inc(&r1_bio->remaining);
1111 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); 1324 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1112 1325
@@ -1167,6 +1380,7 @@ static void raid1d(mddev_t *mddev)
1167 break; 1380 break;
1168 r1_bio = list_entry(head->prev, r1bio_t, retry_list); 1381 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1169 list_del(head->prev); 1382 list_del(head->prev);
1383 conf->nr_queued--;
1170 spin_unlock_irqrestore(&conf->device_lock, flags); 1384 spin_unlock_irqrestore(&conf->device_lock, flags);
1171 1385
1172 mddev = r1_bio->mddev; 1386 mddev = r1_bio->mddev;
@@ -1206,6 +1420,86 @@ static void raid1d(mddev_t *mddev)
1206 } 1420 }
1207 } else { 1421 } else {
1208 int disk; 1422 int disk;
1423
1424 /* we got a read error. Maybe the drive is bad. Maybe just
1425 * the block and we can fix it.
1426 * We freeze all other IO, and try reading the block from
1427 * other devices. When we find one, we re-write
1428 * and re-read to check that this fixes the read error.
1429 * This is all done synchronously while the array is
1430 * frozen.
1431 */
1432 sector_t sect = r1_bio->sector;
1433 int sectors = r1_bio->sectors;
1434 freeze_array(conf);
1435 if (mddev->ro == 0) while(sectors) {
1436 int s = sectors;
1437 int d = r1_bio->read_disk;
1438 int success = 0;
1439
1440 if (s > (PAGE_SIZE>>9))
1441 s = PAGE_SIZE >> 9;
1442
1443 do {
1444 rdev = conf->mirrors[d].rdev;
1445 if (rdev &&
1446 test_bit(In_sync, &rdev->flags) &&
1447 sync_page_io(rdev->bdev,
1448 sect + rdev->data_offset,
1449 s<<9,
1450 conf->tmppage, READ))
1451 success = 1;
1452 else {
1453 d++;
1454 if (d == conf->raid_disks)
1455 d = 0;
1456 }
1457 } while (!success && d != r1_bio->read_disk);
1458
1459 if (success) {
1460 /* write it back and re-read */
1461 int start = d;
1462 while (d != r1_bio->read_disk) {
1463 if (d==0)
1464 d = conf->raid_disks;
1465 d--;
1466 rdev = conf->mirrors[d].rdev;
1467 if (rdev &&
1468 test_bit(In_sync, &rdev->flags)) {
1469 atomic_add(s, &rdev->corrected_errors);
1470 if (sync_page_io(rdev->bdev,
1471 sect + rdev->data_offset,
1472 s<<9, conf->tmppage, WRITE) == 0)
1473 /* Well, this device is dead */
1474 md_error(mddev, rdev);
1475 }
1476 }
1477 d = start;
1478 while (d != r1_bio->read_disk) {
1479 if (d==0)
1480 d = conf->raid_disks;
1481 d--;
1482 rdev = conf->mirrors[d].rdev;
1483 if (rdev &&
1484 test_bit(In_sync, &rdev->flags)) {
1485 if (sync_page_io(rdev->bdev,
1486 sect + rdev->data_offset,
1487 s<<9, conf->tmppage, READ) == 0)
1488 /* Well, this device is dead */
1489 md_error(mddev, rdev);
1490 }
1491 }
1492 } else {
1493 /* Cannot read from anywhere -- bye bye array */
1494 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1495 break;
1496 }
1497 sectors -= s;
1498 sect += s;
1499 }
1500
1501 unfreeze_array(conf);
1502
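Both fix-up loops above walk the mirrors backwards from the disk where a good copy was found, wrapping at zero, and end on read_disk itself, so the corrected data is written back to the failing drive last. The step in isolation (prev_disk() is a hypothetical helper, not in the patch):

/* hypothetical helper mirroring the "if (d==0) d = conf->raid_disks;
 * d--;" step above: previous disk index, wrapping below zero */
static int prev_disk(int d, int raid_disks)
{
	if (d == 0)
		d = raid_disks;
	return d - 1;
}

/* usage sketch:
 *	d = start;
 *	while (d != read_disk) {
 *		d = prev_disk(d, raid_disks);
 *		write_back_then_reread(d);	// hypothetical
 *	}
 */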
1209 bio = r1_bio->bios[r1_bio->read_disk]; 1503 bio = r1_bio->bios[r1_bio->read_disk];
1210 if ((disk=read_balance(conf, r1_bio)) == -1) { 1504 if ((disk=read_balance(conf, r1_bio)) == -1) {
1211 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1505 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
@@ -1214,7 +1508,8 @@ static void raid1d(mddev_t *mddev)
1214 (unsigned long long)r1_bio->sector); 1508 (unsigned long long)r1_bio->sector);
1215 raid_end_bio_io(r1_bio); 1509 raid_end_bio_io(r1_bio);
1216 } else { 1510 } else {
1217 r1_bio->bios[r1_bio->read_disk] = NULL; 1511 r1_bio->bios[r1_bio->read_disk] =
1512 mddev->ro ? IO_BLOCKED : NULL;
1218 r1_bio->read_disk = disk; 1513 r1_bio->read_disk = disk;
1219 bio_put(bio); 1514 bio_put(bio);
1220 bio = bio_clone(r1_bio->master_bio, GFP_NOIO); 1515 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
@@ -1269,14 +1564,13 @@ static int init_resync(conf_t *conf)
1269static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1564static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1270{ 1565{
1271 conf_t *conf = mddev_to_conf(mddev); 1566 conf_t *conf = mddev_to_conf(mddev);
1272 mirror_info_t *mirror;
1273 r1bio_t *r1_bio; 1567 r1bio_t *r1_bio;
1274 struct bio *bio; 1568 struct bio *bio;
1275 sector_t max_sector, nr_sectors; 1569 sector_t max_sector, nr_sectors;
1276 int disk; 1570 int disk = -1;
1277 int i; 1571 int i;
1278 int wonly; 1572 int wonly = -1;
1279 int write_targets = 0; 1573 int write_targets = 0, read_targets = 0;
1280 int sync_blocks; 1574 int sync_blocks;
1281 int still_degraded = 0; 1575 int still_degraded = 0;
1282 1576
@@ -1317,55 +1611,35 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1317 return sync_blocks; 1611 return sync_blocks;
1318 } 1612 }
1319 /* 1613 /*
1320 * If there is non-resync activity waiting for us then 1614 * If there is non-resync activity waiting for a turn,
1321 * put in a delay to throttle resync. 1615 * and resync is going fast enough,
1616 * then let it through before starting on this new sync request.
1322 */ 1617 */
1323 if (!go_faster && waitqueue_active(&conf->wait_resume)) 1618 if (!go_faster && conf->nr_waiting)
1324 msleep_interruptible(1000); 1619 msleep_interruptible(1000);
1325 device_barrier(conf, sector_nr + RESYNC_SECTORS);
1326
1327 /*
1328 * If reconstructing, and >1 working disc,
1329 * could dedicate one to rebuild and others to
1330 * service read requests ..
1331 */
1332 disk = conf->last_used;
1333 /* make sure disk is operational */
1334 wonly = disk;
1335 while (conf->mirrors[disk].rdev == NULL ||
1336 !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) ||
1337 test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags)
1338 ) {
1339 if (conf->mirrors[disk].rdev &&
1340 test_bit(In_sync, &conf->mirrors[disk].rdev->flags))
1341 wonly = disk;
1342 if (disk <= 0)
1343 disk = conf->raid_disks;
1344 disk--;
1345 if (disk == conf->last_used) {
1346 disk = wonly;
1347 break;
1348 }
1349 }
1350 conf->last_used = disk;
1351 atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
1352 1620
1621 raise_barrier(conf);
1353 1622
1354 mirror = conf->mirrors + disk; 1623 conf->next_resync = sector_nr;
1355 1624
1356 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 1625 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
1357 1626 rcu_read_lock();
1358 spin_lock_irq(&conf->resync_lock); 1627 /*
1359 conf->nr_pending++; 1628 * If we get a correctable read error during resync or recovery,
1360 spin_unlock_irq(&conf->resync_lock); 1629 * we might want to read from a different device. So we
1630 * flag all drives that could conceivably be read from for READ,
1631 * and any others (which will be non-In_sync devices) for WRITE.
1632 * If a read fails, we try reading from something else for which READ
1633 * is OK.
1634 */
1361 1635
1362 r1_bio->mddev = mddev; 1636 r1_bio->mddev = mddev;
1363 r1_bio->sector = sector_nr; 1637 r1_bio->sector = sector_nr;
1364 r1_bio->state = 0; 1638 r1_bio->state = 0;
1365 set_bit(R1BIO_IsSync, &r1_bio->state); 1639 set_bit(R1BIO_IsSync, &r1_bio->state);
1366 r1_bio->read_disk = disk;
1367 1640
1368 for (i=0; i < conf->raid_disks; i++) { 1641 for (i=0; i < conf->raid_disks; i++) {
1642 mdk_rdev_t *rdev;
1369 bio = r1_bio->bios[i]; 1643 bio = r1_bio->bios[i];
1370 1644
1371 /* take from bio_init */ 1645 /* take from bio_init */
@@ -1380,35 +1654,49 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1380 bio->bi_end_io = NULL; 1654 bio->bi_end_io = NULL;
1381 bio->bi_private = NULL; 1655 bio->bi_private = NULL;
1382 1656
1383 if (i == disk) { 1657 rdev = rcu_dereference(conf->mirrors[i].rdev);
1384 bio->bi_rw = READ; 1658 if (rdev == NULL ||
1385 bio->bi_end_io = end_sync_read; 1659 test_bit(Faulty, &rdev->flags)) {
1386 } else if (conf->mirrors[i].rdev == NULL ||
1387 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
1388 still_degraded = 1; 1660 still_degraded = 1;
1389 continue; 1661 continue;
1390 } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || 1662 } else if (!test_bit(In_sync, &rdev->flags)) {
1391 sector_nr + RESYNC_SECTORS > mddev->recovery_cp ||
1392 test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1393 bio->bi_rw = WRITE; 1663 bio->bi_rw = WRITE;
1394 bio->bi_end_io = end_sync_write; 1664 bio->bi_end_io = end_sync_write;
1395 write_targets ++; 1665 write_targets ++;
1396 } else 1666 } else {
1397 /* no need to read or write here */ 1667 /* may need to read from here */
1398 continue; 1668 bio->bi_rw = READ;
1399 bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset; 1669 bio->bi_end_io = end_sync_read;
1400 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1670 if (test_bit(WriteMostly, &rdev->flags)) {
1671 if (wonly < 0)
1672 wonly = i;
1673 } else {
1674 if (disk < 0)
1675 disk = i;
1676 }
1677 read_targets++;
1678 }
1679 atomic_inc(&rdev->nr_pending);
1680 bio->bi_sector = sector_nr + rdev->data_offset;
1681 bio->bi_bdev = rdev->bdev;
1401 bio->bi_private = r1_bio; 1682 bio->bi_private = r1_bio;
1402 } 1683 }
1684 rcu_read_unlock();
1685 if (disk < 0)
1686 disk = wonly;
1687 r1_bio->read_disk = disk;
1688
1689 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1690 /* extra read targets are also write targets */
1691 write_targets += read_targets-1;
1403 1692
1404 if (write_targets == 0) { 1693 if (write_targets == 0 || read_targets == 0) {
1405 /* There is nowhere to write, so all non-sync 1694 /* There is nowhere to write, so all non-sync
1406 * drives must be failed - so we are finished 1695 * drives must be failed - so we are finished
1407 */ 1696 */
1408 sector_t rv = max_sector - sector_nr; 1697 sector_t rv = max_sector - sector_nr;
1409 *skipped = 1; 1698 *skipped = 1;
1410 put_buf(r1_bio); 1699 put_buf(r1_bio);
1411 rdev_dec_pending(conf->mirrors[disk].rdev, mddev);
1412 return rv; 1700 return rv;
1413 } 1701 }
1414 1702
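To see the new target accounting concretely: the loop above turns each mirror's flags into a role. A hedged illustration only; the enum and count_targets() are stand-ins for the rdev flag tests, not kernel API:

/* Illustrative only: classify mirrors the way the rewritten loop
 * does.  Missing or Faulty devices leave the array degraded,
 * non-In_sync devices get end_sync_write, In_sync devices get
 * end_sync_read (WriteMostly ones kept only as a fallback choice). */
enum mirror_state { ABSENT, FAULTY, OUT_OF_SYNC, IN_SYNC };

static void count_targets(const enum mirror_state *state, int n,
			  int *read_targets, int *write_targets,
			  int *still_degraded)
{
	int i;

	*read_targets = *write_targets = *still_degraded = 0;
	for (i = 0; i < n; i++) {
		if (state[i] == ABSENT || state[i] == FAULTY)
			*still_degraded = 1;
		else if (state[i] == OUT_OF_SYNC)
			(*write_targets)++;
		else
			(*read_targets)++;
	}
}

A two-disk mirror rebuilding a fresh member gives read_targets = 1 and write_targets = 1; a plain MD_RECOVERY_SYNC pass over two In_sync disks gives read_targets = 2 and write_targets = 0, and the write_targets += read_targets - 1 adjustment then counts the spare readable copy as a write target, so the nowhere-to-write check still fires only when nothing is writable.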
@@ -1436,10 +1724,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1436 for (i=0 ; i < conf->raid_disks; i++) { 1724 for (i=0 ; i < conf->raid_disks; i++) {
1437 bio = r1_bio->bios[i]; 1725 bio = r1_bio->bios[i];
1438 if (bio->bi_end_io) { 1726 if (bio->bi_end_io) {
1439 page = r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page; 1727 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1440 if (bio_add_page(bio, page, len, 0) == 0) { 1728 if (bio_add_page(bio, page, len, 0) == 0) {
1441 /* stop here */ 1729 /* stop here */
1442 r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page = page; 1730 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1443 while (i > 0) { 1731 while (i > 0) {
1444 i--; 1732 i--;
1445 bio = r1_bio->bios[i]; 1733 bio = r1_bio->bios[i];
@@ -1459,12 +1747,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1459 sync_blocks -= (len>>9); 1747 sync_blocks -= (len>>9);
1460 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 1748 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1461 bio_full: 1749 bio_full:
1462 bio = r1_bio->bios[disk];
1463 r1_bio->sectors = nr_sectors; 1750 r1_bio->sectors = nr_sectors;
1464 1751
1465 md_sync_acct(mirror->rdev->bdev, nr_sectors); 1752 /* For a user-requested sync, we read all readable devices and do a
1753 * compare
1754 */
1755 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1756 atomic_set(&r1_bio->remaining, read_targets);
1757 for (i=0; i<conf->raid_disks; i++) {
1758 bio = r1_bio->bios[i];
1759 if (bio->bi_end_io == end_sync_read) {
1760 md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
1761 generic_make_request(bio);
1762 }
1763 }
1764 } else {
1765 atomic_set(&r1_bio->remaining, 1);
1766 bio = r1_bio->bios[r1_bio->read_disk];
1767 md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
1768 nr_sectors);
1769 generic_make_request(bio);
1466 1770
1467 generic_make_request(bio); 1771 }
1468 1772
1469 return nr_sectors; 1773 return nr_sectors;
1470} 1774}
@@ -1487,18 +1791,19 @@ static int run(mddev_t *mddev)
1487 * bookkeeping area. [whatever we allocate in run(), 1791 * bookkeeping area. [whatever we allocate in run(),
1488 * should be freed in stop()] 1792 * should be freed in stop()]
1489 */ 1793 */
1490 conf = kmalloc(sizeof(conf_t), GFP_KERNEL); 1794 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1491 mddev->private = conf; 1795 mddev->private = conf;
1492 if (!conf) 1796 if (!conf)
1493 goto out_no_mem; 1797 goto out_no_mem;
1494 1798
1495 memset(conf, 0, sizeof(*conf)); 1799 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1496 conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1497 GFP_KERNEL); 1800 GFP_KERNEL);
1498 if (!conf->mirrors) 1801 if (!conf->mirrors)
1499 goto out_no_mem; 1802 goto out_no_mem;
1500 1803
1501 memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); 1804 conf->tmppage = alloc_page(GFP_KERNEL);
1805 if (!conf->tmppage)
1806 goto out_no_mem;
1502 1807
1503 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 1808 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1504 if (!conf->poolinfo) 1809 if (!conf->poolinfo)
@@ -1542,8 +1847,7 @@ static int run(mddev_t *mddev)
1542 mddev->recovery_cp = MaxSector; 1847 mddev->recovery_cp = MaxSector;
1543 1848
1544 spin_lock_init(&conf->resync_lock); 1849 spin_lock_init(&conf->resync_lock);
1545 init_waitqueue_head(&conf->wait_idle); 1850 init_waitqueue_head(&conf->wait_barrier);
1546 init_waitqueue_head(&conf->wait_resume);
1547 1851
1548 bio_list_init(&conf->pending_bio_list); 1852 bio_list_init(&conf->pending_bio_list);
1549 bio_list_init(&conf->flushing_bio_list); 1853 bio_list_init(&conf->flushing_bio_list);
@@ -1583,7 +1887,6 @@ static int run(mddev_t *mddev)
1583 mdname(mddev)); 1887 mdname(mddev));
1584 goto out_free_conf; 1888 goto out_free_conf;
1585 } 1889 }
1586 if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1587 1890
1588 printk(KERN_INFO 1891 printk(KERN_INFO
1589 "raid1: raid set %s active with %d out of %d mirrors\n", 1892 "raid1: raid set %s active with %d out of %d mirrors\n",
@@ -1608,6 +1911,7 @@ out_free_conf:
1608 if (conf->r1bio_pool) 1911 if (conf->r1bio_pool)
1609 mempool_destroy(conf->r1bio_pool); 1912 mempool_destroy(conf->r1bio_pool);
1610 kfree(conf->mirrors); 1913 kfree(conf->mirrors);
1914 safe_put_page(conf->tmppage);
1611 kfree(conf->poolinfo); 1915 kfree(conf->poolinfo);
1612 kfree(conf); 1916 kfree(conf);
1613 mddev->private = NULL; 1917 mddev->private = NULL;
@@ -1706,19 +2010,14 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
1706 kfree(newpoolinfo); 2010 kfree(newpoolinfo);
1707 return -ENOMEM; 2011 return -ENOMEM;
1708 } 2012 }
1709 newmirrors = kmalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); 2013 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
1710 if (!newmirrors) { 2014 if (!newmirrors) {
1711 kfree(newpoolinfo); 2015 kfree(newpoolinfo);
1712 mempool_destroy(newpool); 2016 mempool_destroy(newpool);
1713 return -ENOMEM; 2017 return -ENOMEM;
1714 } 2018 }
1715 memset(newmirrors, 0, sizeof(struct mirror_info)*raid_disks);
1716 2019
1717 spin_lock_irq(&conf->resync_lock); 2020 raise_barrier(conf);
1718 conf->barrier++;
1719 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
1720 conf->resync_lock, raid1_unplug(mddev->queue));
1721 spin_unlock_irq(&conf->resync_lock);
1722 2021
1723 /* ok, everything is stopped */ 2022 /* ok, everything is stopped */
1724 oldpool = conf->r1bio_pool; 2023 oldpool = conf->r1bio_pool;
@@ -1738,12 +2037,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
1738 conf->raid_disks = mddev->raid_disks = raid_disks; 2037 conf->raid_disks = mddev->raid_disks = raid_disks;
1739 2038
1740 conf->last_used = 0; /* just make sure it is in-range */ 2039 conf->last_used = 0; /* just make sure it is in-range */
1741 spin_lock_irq(&conf->resync_lock); 2040 lower_barrier(conf);
1742 conf->barrier--;
1743 spin_unlock_irq(&conf->resync_lock);
1744 wake_up(&conf->wait_resume);
1745 wake_up(&conf->wait_idle);
1746
1747 2041
1748 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2042 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1749 md_wakeup_thread(mddev->thread); 2043 md_wakeup_thread(mddev->thread);
@@ -1758,33 +2052,19 @@ static void raid1_quiesce(mddev_t *mddev, int state)
1758 2052
1759 switch(state) { 2053 switch(state) {
1760 case 1: 2054 case 1:
1761 spin_lock_irq(&conf->resync_lock); 2055 raise_barrier(conf);
1762 conf->barrier++;
1763 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
1764 conf->resync_lock, raid1_unplug(mddev->queue));
1765 spin_unlock_irq(&conf->resync_lock);
1766 break; 2056 break;
1767 case 0: 2057 case 0:
1768 spin_lock_irq(&conf->resync_lock); 2058 lower_barrier(conf);
1769 conf->barrier--;
1770 spin_unlock_irq(&conf->resync_lock);
1771 wake_up(&conf->wait_resume);
1772 wake_up(&conf->wait_idle);
1773 break; 2059 break;
1774 } 2060 }
1775 if (mddev->thread) {
1776 if (mddev->bitmap)
1777 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1778 else
1779 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1780 md_wakeup_thread(mddev->thread);
1781 }
1782} 2061}
1783 2062
1784 2063
1785static mdk_personality_t raid1_personality = 2064static struct mdk_personality raid1_personality =
1786{ 2065{
1787 .name = "raid1", 2066 .name = "raid1",
2067 .level = 1,
1788 .owner = THIS_MODULE, 2068 .owner = THIS_MODULE,
1789 .make_request = make_request, 2069 .make_request = make_request,
1790 .run = run, 2070 .run = run,
@@ -1802,15 +2082,17 @@ static mdk_personality_t raid1_personality =
1802 2082
1803static int __init raid_init(void) 2083static int __init raid_init(void)
1804{ 2084{
1805 return register_md_personality(RAID1, &raid1_personality); 2085 return register_md_personality(&raid1_personality);
1806} 2086}
1807 2087
1808static void raid_exit(void) 2088static void raid_exit(void)
1809{ 2089{
1810 unregister_md_personality(RAID1); 2090 unregister_md_personality(&raid1_personality);
1811} 2091}
1812 2092
1813module_init(raid_init); 2093module_init(raid_init);
1814module_exit(raid_exit); 2094module_exit(raid_exit);
1815MODULE_LICENSE("GPL"); 2095MODULE_LICENSE("GPL");
1816MODULE_ALIAS("md-personality-3"); /* RAID1 */ 2096MODULE_ALIAS("md-personality-3"); /* RAID1 */
2097MODULE_ALIAS("md-raid1");
2098MODULE_ALIAS("md-level-1");
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 713dc9c2c730..9e658e519a27 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -18,7 +18,9 @@
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20 20
21#include "dm-bio-list.h"
21#include <linux/raid/raid10.h> 22#include <linux/raid/raid10.h>
23#include <linux/raid/bitmap.h>
22 24
23/* 25/*
24 * RAID10 provides a combination of RAID0 and RAID1 functionality. 26 * RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -47,6 +49,9 @@
47 49
48static void unplug_slaves(mddev_t *mddev); 50static void unplug_slaves(mddev_t *mddev);
49 51
52static void allow_barrier(conf_t *conf);
53static void lower_barrier(conf_t *conf);
54
50static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 55static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
51{ 56{
52 conf_t *conf = data; 57 conf_t *conf = data;
@@ -54,10 +59,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
54 int size = offsetof(struct r10bio_s, devs[conf->copies]); 59 int size = offsetof(struct r10bio_s, devs[conf->copies]);
55 60
56 /* allocate a r10bio with room for raid_disks entries in the bios array */ 61 /* allocate a r10bio with room for raid_disks entries in the bios array */
57 r10_bio = kmalloc(size, gfp_flags); 62 r10_bio = kzalloc(size, gfp_flags);
58 if (r10_bio) 63 if (!r10_bio)
59 memset(r10_bio, 0, size);
60 else
61 unplug_slaves(conf->mddev); 64 unplug_slaves(conf->mddev);
62 65
63 return r10_bio; 66 return r10_bio;
@@ -129,10 +132,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
129 132
130out_free_pages: 133out_free_pages:
131 for ( ; i > 0 ; i--) 134 for ( ; i > 0 ; i--)
132 __free_page(bio->bi_io_vec[i-1].bv_page); 135 safe_put_page(bio->bi_io_vec[i-1].bv_page);
133 while (j--) 136 while (j--)
134 for (i = 0; i < RESYNC_PAGES ; i++) 137 for (i = 0; i < RESYNC_PAGES ; i++)
135 __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); 138 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
136 j = -1; 139 j = -1;
137out_free_bio: 140out_free_bio:
138 while ( ++j < nalloc ) 141 while ( ++j < nalloc )
@@ -152,7 +155,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
152 struct bio *bio = r10bio->devs[j].bio; 155 struct bio *bio = r10bio->devs[j].bio;
153 if (bio) { 156 if (bio) {
154 for (i = 0; i < RESYNC_PAGES; i++) { 157 for (i = 0; i < RESYNC_PAGES; i++) {
155 __free_page(bio->bi_io_vec[i].bv_page); 158 safe_put_page(bio->bi_io_vec[i].bv_page);
156 bio->bi_io_vec[i].bv_page = NULL; 159 bio->bi_io_vec[i].bv_page = NULL;
157 } 160 }
158 bio_put(bio); 161 bio_put(bio);
@@ -167,7 +170,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
167 170
168 for (i = 0; i < conf->copies; i++) { 171 for (i = 0; i < conf->copies; i++) {
169 struct bio **bio = & r10_bio->devs[i].bio; 172 struct bio **bio = & r10_bio->devs[i].bio;
170 if (*bio) 173 if (*bio && *bio != IO_BLOCKED)
171 bio_put(*bio); 174 bio_put(*bio);
172 *bio = NULL; 175 *bio = NULL;
173 } 176 }
@@ -175,20 +178,13 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
175 178
176static inline void free_r10bio(r10bio_t *r10_bio) 179static inline void free_r10bio(r10bio_t *r10_bio)
177{ 180{
178 unsigned long flags;
179
180 conf_t *conf = mddev_to_conf(r10_bio->mddev); 181 conf_t *conf = mddev_to_conf(r10_bio->mddev);
181 182
182 /* 183 /*
183 * Wake up any possible resync thread that waits for the device 184 * Wake up any possible resync thread that waits for the device
184 * to go idle. 185 * to go idle.
185 */ 186 */
186 spin_lock_irqsave(&conf->resync_lock, flags); 187 allow_barrier(conf);
187 if (!--conf->nr_pending) {
188 wake_up(&conf->wait_idle);
189 wake_up(&conf->wait_resume);
190 }
191 spin_unlock_irqrestore(&conf->resync_lock, flags);
192 188
193 put_all_bios(conf, r10_bio); 189 put_all_bios(conf, r10_bio);
194 mempool_free(r10_bio, conf->r10bio_pool); 190 mempool_free(r10_bio, conf->r10bio_pool);
@@ -197,22 +193,10 @@ static inline void free_r10bio(r10bio_t *r10_bio)
197static inline void put_buf(r10bio_t *r10_bio) 193static inline void put_buf(r10bio_t *r10_bio)
198{ 194{
199 conf_t *conf = mddev_to_conf(r10_bio->mddev); 195 conf_t *conf = mddev_to_conf(r10_bio->mddev);
200 unsigned long flags;
201 196
202 mempool_free(r10_bio, conf->r10buf_pool); 197 mempool_free(r10_bio, conf->r10buf_pool);
203 198
204 spin_lock_irqsave(&conf->resync_lock, flags); 199 lower_barrier(conf);
205 if (!conf->barrier)
206 BUG();
207 --conf->barrier;
208 wake_up(&conf->wait_resume);
209 wake_up(&conf->wait_idle);
210
211 if (!--conf->nr_pending) {
212 wake_up(&conf->wait_idle);
213 wake_up(&conf->wait_resume);
214 }
215 spin_unlock_irqrestore(&conf->resync_lock, flags);
216} 200}
217 201
218static void reschedule_retry(r10bio_t *r10_bio) 202static void reschedule_retry(r10bio_t *r10_bio)
@@ -223,6 +207,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
223 207
224 spin_lock_irqsave(&conf->device_lock, flags); 208 spin_lock_irqsave(&conf->device_lock, flags);
225 list_add(&r10_bio->retry_list, &conf->retry_list); 209 list_add(&r10_bio->retry_list, &conf->retry_list);
210 conf->nr_queued ++;
226 spin_unlock_irqrestore(&conf->device_lock, flags); 211 spin_unlock_irqrestore(&conf->device_lock, flags);
227 212
228 md_wakeup_thread(mddev->thread); 213 md_wakeup_thread(mddev->thread);
@@ -268,9 +253,9 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
268 /* 253 /*
269 * this branch is our 'one mirror IO has finished' event handler: 254 * this branch is our 'one mirror IO has finished' event handler:
270 */ 255 */
271 if (!uptodate) 256 update_head_pos(slot, r10_bio);
272 md_error(r10_bio->mddev, conf->mirrors[dev].rdev); 257
273 else 258 if (uptodate) {
274 /* 259 /*
275 * Set R10BIO_Uptodate in our master bio, so that 260 * Set R10BIO_Uptodate in our master bio, so that
276 * we will return a good error code to the higher 261 * we will return a good error code to the higher
@@ -281,15 +266,8 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
281 * wait for the 'master' bio. 266 * wait for the 'master' bio.
282 */ 267 */
283 set_bit(R10BIO_Uptodate, &r10_bio->state); 268 set_bit(R10BIO_Uptodate, &r10_bio->state);
284
285 update_head_pos(slot, r10_bio);
286
287 /*
288 * we have only one bio on the read side
289 */
290 if (uptodate)
291 raid_end_bio_io(r10_bio); 269 raid_end_bio_io(r10_bio);
292 else { 270 } else {
293 /* 271 /*
294 * oops, read error: 272 * oops, read error:
295 */ 273 */
@@ -322,9 +300,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
322 /* 300 /*
323 * this branch is our 'one mirror IO has finished' event handler: 301 * this branch is our 'one mirror IO has finished' event handler:
324 */ 302 */
325 if (!uptodate) 303 if (!uptodate) {
326 md_error(r10_bio->mddev, conf->mirrors[dev].rdev); 304 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
327 else 305 /* an I/O failed, we can't clear the bitmap */
306 set_bit(R10BIO_Degraded, &r10_bio->state);
307 } else
328 /* 308 /*
329 * Set R10BIO_Uptodate in our master bio, so that 309 * Set R10BIO_Uptodate in our master bio, so that
330 * we will return a good error code to the higher 310
@@ -344,6 +324,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
344 * already. 324 * already.
345 */ 325 */
346 if (atomic_dec_and_test(&r10_bio->remaining)) { 326 if (atomic_dec_and_test(&r10_bio->remaining)) {
327 /* clear the bitmap if all writes complete successfully */
328 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
329 r10_bio->sectors,
330 !test_bit(R10BIO_Degraded, &r10_bio->state),
331 0);
347 md_write_end(r10_bio->mddev); 332 md_write_end(r10_bio->mddev);
348 raid_end_bio_io(r10_bio); 333 raid_end_bio_io(r10_bio);
349 } 334 }
@@ -502,8 +487,9 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
502 rcu_read_lock(); 487 rcu_read_lock();
503 /* 488 /*
504 * Check if we can balance. We can balance on the whole 489 * Check if we can balance. We can balance on the whole
505 * device if no resync is going on, or below the resync window. 490 * device if no resync is going on (recovery is ok), or below
506 * We take the first readable disk when above the resync window. 491 * the resync window. We take the first readable disk when
492 * above the resync window.
507 */ 493 */
508 if (conf->mddev->recovery_cp < MaxSector 494 if (conf->mddev->recovery_cp < MaxSector
509 && (this_sector + sectors >= conf->next_resync)) { 495 && (this_sector + sectors >= conf->next_resync)) {
@@ -512,6 +498,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
512 disk = r10_bio->devs[slot].devnum; 498 disk = r10_bio->devs[slot].devnum;
513 499
514 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || 500 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
501 r10_bio->devs[slot].bio == IO_BLOCKED ||
515 !test_bit(In_sync, &rdev->flags)) { 502 !test_bit(In_sync, &rdev->flags)) {
516 slot++; 503 slot++;
517 if (slot == conf->copies) { 504 if (slot == conf->copies) {
@@ -529,6 +516,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
529 slot = 0; 516 slot = 0;
530 disk = r10_bio->devs[slot].devnum; 517 disk = r10_bio->devs[slot].devnum;
531 while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || 518 while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
519 r10_bio->devs[slot].bio == IO_BLOCKED ||
532 !test_bit(In_sync, &rdev->flags)) { 520 !test_bit(In_sync, &rdev->flags)) {
533 slot ++; 521 slot ++;
534 if (slot == conf->copies) { 522 if (slot == conf->copies) {
@@ -549,6 +537,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
549 537
550 538
551 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || 539 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
540 r10_bio->devs[nslot].bio == IO_BLOCKED ||
552 !test_bit(In_sync, &rdev->flags)) 541 !test_bit(In_sync, &rdev->flags))
553 continue; 542 continue;
554 543
@@ -607,7 +596,10 @@ static void unplug_slaves(mddev_t *mddev)
607 596
608static void raid10_unplug(request_queue_t *q) 597static void raid10_unplug(request_queue_t *q)
609{ 598{
599 mddev_t *mddev = q->queuedata;
600
610 unplug_slaves(q->queuedata); 601 unplug_slaves(q->queuedata);
602 md_wakeup_thread(mddev->thread);
611} 603}
612 604
613static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, 605static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -640,27 +632,107 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
640 return ret; 632 return ret;
641} 633}
642 634
643/* 635/* Barriers....
644 * Throttle resync depth, so that we can both get proper overlapping of 636 * Sometimes we need to suspend IO while we do something else,
645 * requests, but are still able to handle normal requests quickly. 637 * either some resync/recovery, or reconfigure the array.
638 * To do this we raise a 'barrier'.
639 * The 'barrier' is a counter that can be raised multiple times
640 * to count how many activities are happening which preclude
641 * normal IO.
642 * We can only raise the barrier if there is no pending IO.
643 * i.e. if nr_pending == 0.
644 * We choose only to raise the barrier if no-one is waiting for the
645 * barrier to go down. This means that as soon as an IO request
646 * is ready, no other operations which require a barrier will start
647 * until the IO request has had a chance.
648 *
649 * So: regular IO calls 'wait_barrier'. When that returns there
650 * is no background IO happening. It must arrange to call
651 * allow_barrier when it has finished its IO.
652 * Background IO calls must call raise_barrier. Once that returns
653 * there is no normal IO happening. It must arrange to call
654 * lower_barrier when the particular background IO completes.
646 */ 655 */
647#define RESYNC_DEPTH 32 656#define RESYNC_DEPTH 32
648 657
649static void device_barrier(conf_t *conf, sector_t sect) 658static void raise_barrier(conf_t *conf, int force)
659{
660 BUG_ON(force && !conf->barrier);
661 spin_lock_irq(&conf->resync_lock);
662
663 /* Wait until no block IO is waiting (unless 'force') */
664 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
665 conf->resync_lock,
666 raid10_unplug(conf->mddev->queue));
667
668 /* block any new IO from starting */
669 conf->barrier++;
670
671 /* Now wait for all pending IO to complete */
672 wait_event_lock_irq(conf->wait_barrier,
673 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
674 conf->resync_lock,
675 raid10_unplug(conf->mddev->queue));
676
677 spin_unlock_irq(&conf->resync_lock);
678}
679
680static void lower_barrier(conf_t *conf)
681{
682 unsigned long flags;
683 spin_lock_irqsave(&conf->resync_lock, flags);
684 conf->barrier--;
685 spin_unlock_irqrestore(&conf->resync_lock, flags);
686 wake_up(&conf->wait_barrier);
687}
688
689static void wait_barrier(conf_t *conf)
650{ 690{
651 spin_lock_irq(&conf->resync_lock); 691 spin_lock_irq(&conf->resync_lock);
652 wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), 692 if (conf->barrier) {
653 conf->resync_lock, unplug_slaves(conf->mddev)); 693 conf->nr_waiting++;
654 694 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
655 if (!conf->barrier++) { 695 conf->resync_lock,
656 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, 696 raid10_unplug(conf->mddev->queue));
657 conf->resync_lock, unplug_slaves(conf->mddev)); 697 conf->nr_waiting--;
658 if (conf->nr_pending)
659 BUG();
660 } 698 }
661 wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, 699 conf->nr_pending++;
662 conf->resync_lock, unplug_slaves(conf->mddev)); 700 spin_unlock_irq(&conf->resync_lock);
663 conf->next_resync = sect; 701}
702
703static void allow_barrier(conf_t *conf)
704{
705 unsigned long flags;
706 spin_lock_irqsave(&conf->resync_lock, flags);
707 conf->nr_pending--;
708 spin_unlock_irqrestore(&conf->resync_lock, flags);
709 wake_up(&conf->wait_barrier);
710}
711
712static void freeze_array(conf_t *conf)
713{
714 /* stop sync IO and normal IO and wait for everything to
715 * go quiet.
716 * We increment barrier and nr_waiting, and then
717 * wait until barrier+nr_pending matches nr_queued+2
718 */
719 spin_lock_irq(&conf->resync_lock);
720 conf->barrier++;
721 conf->nr_waiting++;
722 wait_event_lock_irq(conf->wait_barrier,
723 conf->barrier+conf->nr_pending == conf->nr_queued+2,
724 conf->resync_lock,
725 raid10_unplug(conf->mddev->queue));
726 spin_unlock_irq(&conf->resync_lock);
727}
728
729static void unfreeze_array(conf_t *conf)
730{
731 /* reverse the effect of the freeze */
732 spin_lock_irq(&conf->resync_lock);
733 conf->barrier--;
734 conf->nr_waiting--;
735 wake_up(&conf->wait_barrier);
664 spin_unlock_irq(&conf->resync_lock); 736 spin_unlock_irq(&conf->resync_lock);
665} 737}
666 738
@@ -674,6 +746,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
674 int i; 746 int i;
675 int chunk_sects = conf->chunk_mask + 1; 747 int chunk_sects = conf->chunk_mask + 1;
676 const int rw = bio_data_dir(bio); 748 const int rw = bio_data_dir(bio);
749 struct bio_list bl;
750 unsigned long flags;
677 751
678 if (unlikely(bio_barrier(bio))) { 752 if (unlikely(bio_barrier(bio))) {
679 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 753 bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -719,10 +793,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
719 * thread has put up a bar for new requests. 793 * thread has put up a bar for new requests.
720 * Continue immediately if no resync is active currently. 794 * Continue immediately if no resync is active currently.
721 */ 795 */
722 spin_lock_irq(&conf->resync_lock); 796 wait_barrier(conf);
723 wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
724 conf->nr_pending++;
725 spin_unlock_irq(&conf->resync_lock);
726 797
727 disk_stat_inc(mddev->gendisk, ios[rw]); 798 disk_stat_inc(mddev->gendisk, ios[rw]);
728 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 799 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
@@ -734,6 +805,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
734 805
735 r10_bio->mddev = mddev; 806 r10_bio->mddev = mddev;
736 r10_bio->sector = bio->bi_sector; 807 r10_bio->sector = bio->bi_sector;
808 r10_bio->state = 0;
737 809
738 if (rw == READ) { 810 if (rw == READ) {
739 /* 811 /*
@@ -778,13 +850,16 @@ static int make_request(request_queue_t *q, struct bio * bio)
778 !test_bit(Faulty, &rdev->flags)) { 850 !test_bit(Faulty, &rdev->flags)) {
779 atomic_inc(&rdev->nr_pending); 851 atomic_inc(&rdev->nr_pending);
780 r10_bio->devs[i].bio = bio; 852 r10_bio->devs[i].bio = bio;
781 } else 853 } else {
782 r10_bio->devs[i].bio = NULL; 854 r10_bio->devs[i].bio = NULL;
855 set_bit(R10BIO_Degraded, &r10_bio->state);
856 }
783 } 857 }
784 rcu_read_unlock(); 858 rcu_read_unlock();
785 859
786 atomic_set(&r10_bio->remaining, 1); 860 atomic_set(&r10_bio->remaining, 0);
787 861
862 bio_list_init(&bl);
788 for (i = 0; i < conf->copies; i++) { 863 for (i = 0; i < conf->copies; i++) {
789 struct bio *mbio; 864 struct bio *mbio;
790 int d = r10_bio->devs[i].devnum; 865 int d = r10_bio->devs[i].devnum;
@@ -802,13 +877,14 @@ static int make_request(request_queue_t *q, struct bio * bio)
802 mbio->bi_private = r10_bio; 877 mbio->bi_private = r10_bio;
803 878
804 atomic_inc(&r10_bio->remaining); 879 atomic_inc(&r10_bio->remaining);
805 generic_make_request(mbio); 880 bio_list_add(&bl, mbio);
806 } 881 }
807 882
808 if (atomic_dec_and_test(&r10_bio->remaining)) { 883 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
809 md_write_end(mddev); 884 spin_lock_irqsave(&conf->device_lock, flags);
810 raid_end_bio_io(r10_bio); 885 bio_list_merge(&conf->pending_bio_list, &bl);
811 } 886 blk_plug_device(mddev->queue);
887 spin_unlock_irqrestore(&conf->device_lock, flags);
812 888
813 return 0; 889 return 0;
814} 890}
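Rather than calling generic_make_request() once per mirror, the write path above now collects the mirror bios on a local bio_list, records the write intent in the bitmap, splices the batch onto conf->pending_bio_list under device_lock, and plugs the queue; raid10d (in a later hunk) flushes the bitmap with bitmap_unplug() before submitting the queued writes, so the intent record always reaches disk first. A minimal sketch of that hand-off, with illustrative names standing in for the bio_list API:

    #include <stddef.h>
    #include <pthread.h>

    struct req { struct req *next; /* payload elided */ };
    struct req_list { struct req *head, *tail; };

    static void list_add_tail(struct req_list *l, struct req *r)
    {
            r->next = NULL;
            if (l->tail) l->tail->next = r; else l->head = r;
            l->tail = r;
    }

    /* splice src onto dst in O(1), like bio_list_merge() */
    static void list_merge(struct req_list *dst, struct req_list *src)
    {
            if (!src->head) return;
            if (dst->tail) dst->tail->next = src->head; else dst->head = src->head;
            dst->tail = src->tail;
            src->head = src->tail = NULL;
    }

    /* detach the whole chain, like bio_list_get() */
    static struct req *list_get(struct req_list *l)
    {
            struct req *r = l->head;
            l->head = l->tail = NULL;
            return r;
    }

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct req_list pending;

    /* producer side (make_request): batch locally, publish once */
    static void submit_writes(struct req_list *batch)
    {
            pthread_mutex_lock(&device_lock);
            list_merge(&pending, batch);
            /* kernel: blk_plug_device() so the daemon sees a batch */
            pthread_mutex_unlock(&device_lock);
    }

    /* consumer side (raid10d): flush metadata first, then issue */
    static void drain_writes(void (*issue)(struct req *))
    {
            struct req *r;

            pthread_mutex_lock(&device_lock);
            r = list_get(&pending);
            pthread_mutex_unlock(&device_lock);
            /* kernel: bitmap_unplug() runs here, before any data write */
            while (r) {
                    struct req *next = r->next;
                    r->next = NULL;
                    issue(r);
                    r = next;
            }
    }

The indirection exists purely for ordering: a bitmap "write intent" bit must be on disk before any data write it covers is issued.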
@@ -897,13 +973,8 @@ static void print_conf(conf_t *conf)
897 973
898static void close_sync(conf_t *conf) 974static void close_sync(conf_t *conf)
899{ 975{
900 spin_lock_irq(&conf->resync_lock); 976 wait_barrier(conf);
901 wait_event_lock_irq(conf->wait_resume, !conf->barrier, 977 allow_barrier(conf);
902 conf->resync_lock, unplug_slaves(conf->mddev));
903 spin_unlock_irq(&conf->resync_lock);
904
905 if (conf->barrier) BUG();
906 if (waitqueue_active(&conf->wait_idle)) BUG();
907 978
908 mempool_destroy(conf->r10buf_pool); 979 mempool_destroy(conf->r10buf_pool);
909 conf->r10buf_pool = NULL; 980 conf->r10buf_pool = NULL;
@@ -971,7 +1042,12 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
971 if (!enough(conf)) 1042 if (!enough(conf))
972 return 0; 1043 return 0;
973 1044
974 for (mirror=0; mirror < mddev->raid_disks; mirror++) 1045 if (rdev->saved_raid_disk >= 0 &&
1046 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1047 mirror = rdev->saved_raid_disk;
1048 else
1049 mirror = 0;
1050 for ( ; mirror < mddev->raid_disks; mirror++)
975 if ( !(p=conf->mirrors+mirror)->rdev) { 1051 if ( !(p=conf->mirrors+mirror)->rdev) {
976 1052
977 blk_queue_stack_limits(mddev->queue, 1053 blk_queue_stack_limits(mddev->queue,
@@ -987,6 +1063,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
987 p->head_position = 0; 1063 p->head_position = 0;
988 rdev->raid_disk = mirror; 1064 rdev->raid_disk = mirror;
989 found = 1; 1065 found = 1;
1066 if (rdev->saved_raid_disk != mirror)
1067 conf->fullsync = 1;
990 rcu_assign_pointer(p->rdev, rdev); 1068 rcu_assign_pointer(p->rdev, rdev);
991 break; 1069 break;
992 } 1070 }
@@ -1027,7 +1105,6 @@ abort:
1027 1105
1028static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1106static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1029{ 1107{
1030 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1031 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1108 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1032 conf_t *conf = mddev_to_conf(r10_bio->mddev); 1109 conf_t *conf = mddev_to_conf(r10_bio->mddev);
1033 int i,d; 1110 int i,d;
@@ -1042,9 +1119,16 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1042 BUG(); 1119 BUG();
1043 update_head_pos(i, r10_bio); 1120 update_head_pos(i, r10_bio);
1044 d = r10_bio->devs[i].devnum; 1121 d = r10_bio->devs[i].devnum;
1045 if (!uptodate) 1122
1046 md_error(r10_bio->mddev, 1123 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1047 conf->mirrors[d].rdev); 1124 set_bit(R10BIO_Uptodate, &r10_bio->state);
1125 else {
1126 atomic_add(r10_bio->sectors,
1127 &conf->mirrors[d].rdev->corrected_errors);
1128 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1129 md_error(r10_bio->mddev,
1130 conf->mirrors[d].rdev);
1131 }
1048 1132
1049 /* for reconstruct, we always reschedule after a read. 1133 /* for reconstruct, we always reschedule after a read.
1050 * for resync, only after all reads 1134 * for resync, only after all reads
@@ -1132,23 +1216,32 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1132 fbio = r10_bio->devs[i].bio; 1216 fbio = r10_bio->devs[i].bio;
1133 1217
1134 /* now find blocks with errors */ 1218 /* now find blocks with errors */
1135 for (i=first+1 ; i < conf->copies ; i++) { 1219 for (i=0 ; i < conf->copies ; i++) {
1136 int vcnt, j, d; 1220 int j, d;
1221 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1137 1222
1138 if (!test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1139 continue;
1140 /* We know that the bi_io_vec layout is the same for
1141 * both 'first' and 'i', so we just compare them.
1142 * All vec entries are PAGE_SIZE;
1143 */
1144 tbio = r10_bio->devs[i].bio; 1223 tbio = r10_bio->devs[i].bio;
1145 vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); 1224
1146 for (j = 0; j < vcnt; j++) 1225 if (tbio->bi_end_io != end_sync_read)
1147 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 1226 continue;
1148 page_address(tbio->bi_io_vec[j].bv_page), 1227 if (i == first)
1149 PAGE_SIZE)) 1228 continue;
1150 break; 1229 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1151 if (j == vcnt) 1230 /* We know that the bi_io_vec layout is the same for
1231 * both 'first' and 'i', so we just compare them.
1232 * All vec entries are PAGE_SIZE;
1233 */
1234 for (j = 0; j < vcnt; j++)
1235 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1236 page_address(tbio->bi_io_vec[j].bv_page),
1237 PAGE_SIZE))
1238 break;
1239 if (j == vcnt)
1240 continue;
1241 mddev->resync_mismatches += r10_bio->sectors;
1242 }
1243 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1244 /* Don't fix anything. */
1152 continue; 1245 continue;
1153 /* Ok, we need to write this bio 1246 /* Ok, we need to write this bio
1154 * First we need to fixup bv_offset, bv_len and 1247 * First we need to fixup bv_offset, bv_len and
@@ -1227,7 +1320,10 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1227 1320
1228 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 1321 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1229 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); 1322 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1230 generic_make_request(wbio); 1323 if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1324 generic_make_request(wbio);
1325 else
1326 bio_endio(wbio, wbio->bi_size, -EIO);
1231} 1327}
1232 1328
1233 1329
@@ -1254,10 +1350,31 @@ static void raid10d(mddev_t *mddev)
1254 for (;;) { 1350 for (;;) {
1255 char b[BDEVNAME_SIZE]; 1351 char b[BDEVNAME_SIZE];
1256 spin_lock_irqsave(&conf->device_lock, flags); 1352 spin_lock_irqsave(&conf->device_lock, flags);
1353
1354 if (conf->pending_bio_list.head) {
1355 bio = bio_list_get(&conf->pending_bio_list);
1356 blk_remove_plug(mddev->queue);
1357 spin_unlock_irqrestore(&conf->device_lock, flags);
1358 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1359 if (bitmap_unplug(mddev->bitmap) != 0)
1360 printk("%s: bitmap file write failed!\n", mdname(mddev));
1361
1362 while (bio) { /* submit pending writes */
1363 struct bio *next = bio->bi_next;
1364 bio->bi_next = NULL;
1365 generic_make_request(bio);
1366 bio = next;
1367 }
1368 unplug = 1;
1369
1370 continue;
1371 }
1372
1257 if (list_empty(head)) 1373 if (list_empty(head))
1258 break; 1374 break;
1259 r10_bio = list_entry(head->prev, r10bio_t, retry_list); 1375 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1260 list_del(head->prev); 1376 list_del(head->prev);
1377 conf->nr_queued--;
1261 spin_unlock_irqrestore(&conf->device_lock, flags); 1378 spin_unlock_irqrestore(&conf->device_lock, flags);
1262 1379
1263 mddev = r10_bio->mddev; 1380 mddev = r10_bio->mddev;
@@ -1270,8 +1387,96 @@ static void raid10d(mddev_t *mddev)
1270 unplug = 1; 1387 unplug = 1;
1271 } else { 1388 } else {
1272 int mirror; 1389 int mirror;
1390 /* we got a read error. Maybe the drive is bad. Maybe just
1391 * the block and we can fix it.
1392 * We freeze all other IO, and try reading the block from
 1393 * other devices. When we find one, we re-write
 1394 * and re-read to check that this fixes the read error.
1395 * This is all done synchronously while the array is
1396 * frozen.
1397 */
1398 int sect = 0; /* Offset from r10_bio->sector */
1399 int sectors = r10_bio->sectors;
1400 freeze_array(conf);
1401 if (mddev->ro == 0) while(sectors) {
1402 int s = sectors;
1403 int sl = r10_bio->read_slot;
1404 int success = 0;
1405
1406 if (s > (PAGE_SIZE>>9))
1407 s = PAGE_SIZE >> 9;
1408
1409 do {
1410 int d = r10_bio->devs[sl].devnum;
1411 rdev = conf->mirrors[d].rdev;
1412 if (rdev &&
1413 test_bit(In_sync, &rdev->flags) &&
1414 sync_page_io(rdev->bdev,
1415 r10_bio->devs[sl].addr +
1416 sect + rdev->data_offset,
1417 s<<9,
1418 conf->tmppage, READ))
1419 success = 1;
1420 else {
1421 sl++;
1422 if (sl == conf->copies)
1423 sl = 0;
1424 }
1425 } while (!success && sl != r10_bio->read_slot);
1426
1427 if (success) {
1428 int start = sl;
1429 /* write it back and re-read */
1430 while (sl != r10_bio->read_slot) {
1431 int d;
1432 if (sl==0)
1433 sl = conf->copies;
1434 sl--;
1435 d = r10_bio->devs[sl].devnum;
1436 rdev = conf->mirrors[d].rdev;
1437 atomic_add(s, &rdev->corrected_errors);
1438 if (rdev &&
1439 test_bit(In_sync, &rdev->flags)) {
1440 if (sync_page_io(rdev->bdev,
1441 r10_bio->devs[sl].addr +
1442 sect + rdev->data_offset,
1443 s<<9, conf->tmppage, WRITE) == 0)
1444 /* Well, this device is dead */
1445 md_error(mddev, rdev);
1446 }
1447 }
1448 sl = start;
1449 while (sl != r10_bio->read_slot) {
1450 int d;
1451 if (sl==0)
1452 sl = conf->copies;
1453 sl--;
1454 d = r10_bio->devs[sl].devnum;
1455 rdev = conf->mirrors[d].rdev;
1456 if (rdev &&
1457 test_bit(In_sync, &rdev->flags)) {
1458 if (sync_page_io(rdev->bdev,
1459 r10_bio->devs[sl].addr +
1460 sect + rdev->data_offset,
1461 s<<9, conf->tmppage, READ) == 0)
1462 /* Well, this device is dead */
1463 md_error(mddev, rdev);
1464 }
1465 }
1466 } else {
1467 /* Cannot read from anywhere -- bye bye array */
1468 md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
1469 break;
1470 }
1471 sectors -= s;
1472 sect += s;
1473 }
1474
1475 unfreeze_array(conf);
1476
1273 bio = r10_bio->devs[r10_bio->read_slot].bio; 1477 bio = r10_bio->devs[r10_bio->read_slot].bio;
1274 r10_bio->devs[r10_bio->read_slot].bio = NULL; 1478 r10_bio->devs[r10_bio->read_slot].bio =
1479 mddev->ro ? IO_BLOCKED : NULL;
1275 bio_put(bio); 1480 bio_put(bio);
1276 mirror = read_balance(conf, r10_bio); 1481 mirror = read_balance(conf, r10_bio);
1277 if (mirror == -1) { 1482 if (mirror == -1) {
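The repair walk in the hunk above, in isolation: starting at the slot that failed, try each copy in turn until one reads cleanly, then walk backwards (wrapping) from the good slot down to and including the failed slot, first rewriting the good data and then re-reading to verify. A standalone sketch under the assumption of COPIES equal mirrors; read_copy/write_copy/fail_device are illustrative stand-ins for sync_page_io() and md_error(), stubbed so the sketch compiles:

    #include <stdbool.h>

    #define COPIES 3

    static bool read_copy(int sl, long sect, void *buf)        { (void)sl; (void)sect; (void)buf; return true; }
    static bool write_copy(int sl, long sect, const void *buf) { (void)sl; (void)sect; (void)buf; return true; }
    static void fail_device(int sl)                            { (void)sl; }

    static bool repair_sector(int read_slot, long sect, void *tmp)
    {
            int sl = read_slot, start;

            for (;;) {                          /* 1. find a good copy */
                    if (read_copy(sl, sect, tmp))
                            break;
                    sl = (sl + 1) % COPIES;
                    if (sl == read_slot)
                            return false;       /* nowhere left to read */
            }
            start = sl;

            sl = start;                         /* 2. write it back */
            while (sl != read_slot) {
                    sl = (sl == 0 ? COPIES : sl) - 1;
                    if (!write_copy(sl, sect, tmp))
                            fail_device(sl);    /* write failed: kick device */
            }

            sl = start;                         /* 3. re-read to verify */
            while (sl != read_slot) {
                    sl = (sl == 0 ? COPIES : sl) - 1;
                    if (!read_copy(sl, sect, tmp))
                            fail_device(sl);
            }
            return true;
    }

Note the decrement-before-act shape of steps 2 and 3: the good copy itself is never rewritten, but the slot that originally failed is, which is the whole point of the exercise.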
@@ -1360,6 +1565,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1360 sector_t max_sector, nr_sectors; 1565 sector_t max_sector, nr_sectors;
1361 int disk; 1566 int disk;
1362 int i; 1567 int i;
1568 int max_sync;
1569 int sync_blocks;
1363 1570
1364 sector_t sectors_skipped = 0; 1571 sector_t sectors_skipped = 0;
1365 int chunks_skipped = 0; 1572 int chunks_skipped = 0;
@@ -1373,6 +1580,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1373 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 1580 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1374 max_sector = mddev->resync_max_sectors; 1581 max_sector = mddev->resync_max_sectors;
1375 if (sector_nr >= max_sector) { 1582 if (sector_nr >= max_sector) {
1583 /* If we aborted, we need to abort the
 1584 * sync on the 'current' bitmap chunks (there can
 1585 * be several when recovering multiple devices),
 1586 * as we may have started syncing them but not finished.
1587 * We can find the current address in
1588 * mddev->curr_resync, but for recovery,
1589 * we need to convert that to several
1590 * virtual addresses.
1591 */
1592 if (mddev->curr_resync < max_sector) { /* aborted */
1593 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1594 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1595 &sync_blocks, 1);
1596 else for (i=0; i<conf->raid_disks; i++) {
1597 sector_t sect =
1598 raid10_find_virt(conf, mddev->curr_resync, i);
1599 bitmap_end_sync(mddev->bitmap, sect,
1600 &sync_blocks, 1);
1601 }
1602 } else /* completed sync */
1603 conf->fullsync = 0;
1604
1605 bitmap_close_sync(mddev->bitmap);
1376 close_sync(conf); 1606 close_sync(conf);
1377 *skipped = 1; 1607 *skipped = 1;
1378 return sectors_skipped; 1608 return sectors_skipped;
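sync_request is now gated by the write-intent bitmap: bitmap_start_sync() reports whether a stretch of blocks needs syncing and how long the equally-marked stretch is (sync_blocks), so clean stretches are skipped outright (*skipped = 1) and dirty ones bound how much a single pass handles (max_sync). A self-contained sketch of that scan; the flat in-memory bitmap layout here is an assumption for illustration only:

    #include <stdint.h>
    #include <stdbool.h>

    #define CHUNK_SECTORS 128        /* sectors covered by one bitmap bit */

    /* returns true if 'sector' needs syncing; *run is set to the number
     * of sectors sharing that state (models bitmap_start_sync and the
     * sync_blocks out-parameter) */
    static bool start_sync(const uint64_t *bits, uint64_t sector,
                           uint64_t max_sector, uint64_t *run)
    {
            uint64_t chunk = sector / CHUNK_SECTORS;
            bool dirty = (bits[chunk / 64] >> (chunk % 64)) & 1;
            uint64_t s = (chunk + 1) * CHUNK_SECTORS;

            while (s < max_sector) {  /* extend while the state matches */
                    uint64_t c = s / CHUNK_SECTORS;
                    if (((bits[c / 64] >> (c % 64)) & 1) != (uint64_t)dirty)
                            break;
                    s += CHUNK_SECTORS;
            }
            *run = (s < max_sector ? s : max_sector) - sector;
            return dirty;
    }

    static uint64_t resync(const uint64_t *bits, uint64_t max_sector)
    {
            uint64_t sector = 0, synced = 0;

            while (sector < max_sector) {
                    uint64_t run;
                    if (!start_sync(bits, sector, max_sector, &run)) {
                            sector += run;  /* clean: skip, as *skipped = 1 does */
                            continue;
                    }
                    synced += run;          /* dirty: one pass is capped at */
                    sector += run;          /* 'run' sectors (max_sync)     */
            }
            return synced;
    }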
@@ -1395,9 +1625,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1395 * If there is non-resync activity waiting for us then 1625 * If there is non-resync activity waiting for us then
1396 * put in a delay to throttle resync. 1626 * put in a delay to throttle resync.
1397 */ 1627 */
1398 if (!go_faster && waitqueue_active(&conf->wait_resume)) 1628 if (!go_faster && conf->nr_waiting)
1399 msleep_interruptible(1000); 1629 msleep_interruptible(1000);
1400 device_barrier(conf, sector_nr + RESYNC_SECTORS);
1401 1630
1402 /* Again, very different code for resync and recovery. 1631 /* Again, very different code for resync and recovery.
1403 * Both must result in an r10bio with a list of bios that 1632 * Both must result in an r10bio with a list of bios that
@@ -1414,6 +1643,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1414 * end_sync_write if we will want to write. 1643 * end_sync_write if we will want to write.
1415 */ 1644 */
1416 1645
1646 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1417 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 1647 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1418 /* recovery... the complicated one */ 1648 /* recovery... the complicated one */
1419 int i, j, k; 1649 int i, j, k;
@@ -1422,14 +1652,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1422 for (i=0 ; i<conf->raid_disks; i++) 1652 for (i=0 ; i<conf->raid_disks; i++)
1423 if (conf->mirrors[i].rdev && 1653 if (conf->mirrors[i].rdev &&
1424 !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { 1654 !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1655 int still_degraded = 0;
1425 /* want to reconstruct this device */ 1656 /* want to reconstruct this device */
1426 r10bio_t *rb2 = r10_bio; 1657 r10bio_t *rb2 = r10_bio;
1658 sector_t sect = raid10_find_virt(conf, sector_nr, i);
1659 int must_sync;
1660 /* Unless we are doing a full sync, we only need
1661 * to recover the block if it is set in the bitmap
1662 */
1663 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1664 &sync_blocks, 1);
1665 if (sync_blocks < max_sync)
1666 max_sync = sync_blocks;
1667 if (!must_sync &&
1668 !conf->fullsync) {
1669 /* yep, skip the sync_blocks here, but don't assume
1670 * that there will never be anything to do here
1671 */
1672 chunks_skipped = -1;
1673 continue;
1674 }
1427 1675
1428 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 1676 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1429 spin_lock_irq(&conf->resync_lock); 1677 raise_barrier(conf, rb2 != NULL);
1430 conf->nr_pending++;
1431 if (rb2) conf->barrier++;
1432 spin_unlock_irq(&conf->resync_lock);
1433 atomic_set(&r10_bio->remaining, 0); 1678 atomic_set(&r10_bio->remaining, 0);
1434 1679
1435 r10_bio->master_bio = (struct bio*)rb2; 1680 r10_bio->master_bio = (struct bio*)rb2;
@@ -1437,8 +1682,23 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1437 atomic_inc(&rb2->remaining); 1682 atomic_inc(&rb2->remaining);
1438 r10_bio->mddev = mddev; 1683 r10_bio->mddev = mddev;
1439 set_bit(R10BIO_IsRecover, &r10_bio->state); 1684 set_bit(R10BIO_IsRecover, &r10_bio->state);
1440 r10_bio->sector = raid10_find_virt(conf, sector_nr, i); 1685 r10_bio->sector = sect;
1686
1441 raid10_find_phys(conf, r10_bio); 1687 raid10_find_phys(conf, r10_bio);
1688 /* Need to check if this section will still be
1689 * degraded
1690 */
1691 for (j=0; j<conf->copies;j++) {
1692 int d = r10_bio->devs[j].devnum;
1693 if (conf->mirrors[d].rdev == NULL ||
1694 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
1695 still_degraded = 1;
1696 break;
1697 }
1698 }
1699 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1700 &sync_blocks, still_degraded);
1701
1442 for (j=0; j<conf->copies;j++) { 1702 for (j=0; j<conf->copies;j++) {
1443 int d = r10_bio->devs[j].devnum; 1703 int d = r10_bio->devs[j].devnum;
1444 if (conf->mirrors[d].rdev && 1704 if (conf->mirrors[d].rdev &&
@@ -1498,14 +1758,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1498 } else { 1758 } else {
1499 /* resync. Schedule a read for every block at this virt offset */ 1759 /* resync. Schedule a read for every block at this virt offset */
1500 int count = 0; 1760 int count = 0;
1501 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1502 1761
1503 spin_lock_irq(&conf->resync_lock); 1762 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1504 conf->nr_pending++; 1763 &sync_blocks, mddev->degraded) &&
1505 spin_unlock_irq(&conf->resync_lock); 1764 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1765 /* We can skip this block */
1766 *skipped = 1;
1767 return sync_blocks + sectors_skipped;
1768 }
1769 if (sync_blocks < max_sync)
1770 max_sync = sync_blocks;
1771 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1506 1772
1507 r10_bio->mddev = mddev; 1773 r10_bio->mddev = mddev;
1508 atomic_set(&r10_bio->remaining, 0); 1774 atomic_set(&r10_bio->remaining, 0);
1775 raise_barrier(conf, 0);
1776 conf->next_resync = sector_nr;
1509 1777
1510 r10_bio->master_bio = NULL; 1778 r10_bio->master_bio = NULL;
1511 r10_bio->sector = sector_nr; 1779 r10_bio->sector = sector_nr;
@@ -1558,6 +1826,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1558 } 1826 }
1559 1827
1560 nr_sectors = 0; 1828 nr_sectors = 0;
1829 if (sector_nr + max_sync < max_sector)
1830 max_sector = sector_nr + max_sync;
1561 do { 1831 do {
1562 struct page *page; 1832 struct page *page;
1563 int len = PAGE_SIZE; 1833 int len = PAGE_SIZE;
@@ -1632,11 +1902,11 @@ static int run(mddev_t *mddev)
1632 int nc, fc; 1902 int nc, fc;
1633 sector_t stride, size; 1903 sector_t stride, size;
1634 1904
1635 if (mddev->level != 10) { 1905 if (mddev->chunk_size == 0) {
1636 printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n", 1906 printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
1637 mdname(mddev), mddev->level); 1907 return -EINVAL;
1638 goto out;
1639 } 1908 }
1909
1640 nc = mddev->layout & 255; 1910 nc = mddev->layout & 255;
1641 fc = (mddev->layout >> 8) & 255; 1911 fc = (mddev->layout >> 8) & 255;
1642 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || 1912 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
@@ -1650,22 +1920,24 @@ static int run(mddev_t *mddev)
1650 * bookkeeping area. [whatever we allocate in run(), 1920 * bookkeeping area. [whatever we allocate in run(),
1651 * should be freed in stop()] 1921 * should be freed in stop()]
1652 */ 1922 */
1653 conf = kmalloc(sizeof(conf_t), GFP_KERNEL); 1923 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1654 mddev->private = conf; 1924 mddev->private = conf;
1655 if (!conf) { 1925 if (!conf) {
1656 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", 1926 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1657 mdname(mddev)); 1927 mdname(mddev));
1658 goto out; 1928 goto out;
1659 } 1929 }
1660 memset(conf, 0, sizeof(*conf)); 1930 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1661 conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1662 GFP_KERNEL); 1931 GFP_KERNEL);
1663 if (!conf->mirrors) { 1932 if (!conf->mirrors) {
1664 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", 1933 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1665 mdname(mddev)); 1934 mdname(mddev));
1666 goto out_free_conf; 1935 goto out_free_conf;
1667 } 1936 }
1668 memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); 1937
1938 conf->tmppage = alloc_page(GFP_KERNEL);
1939 if (!conf->tmppage)
1940 goto out_free_conf;
1669 1941
1670 conf->near_copies = nc; 1942 conf->near_copies = nc;
1671 conf->far_copies = fc; 1943 conf->far_copies = fc;
@@ -1713,8 +1985,7 @@ static int run(mddev_t *mddev)
1713 INIT_LIST_HEAD(&conf->retry_list); 1985 INIT_LIST_HEAD(&conf->retry_list);
1714 1986
1715 spin_lock_init(&conf->resync_lock); 1987 spin_lock_init(&conf->resync_lock);
1716 init_waitqueue_head(&conf->wait_idle); 1988 init_waitqueue_head(&conf->wait_barrier);
1717 init_waitqueue_head(&conf->wait_resume);
1718 1989
1719 /* need to check that every block has at least one working mirror */ 1990 /* need to check that every block has at least one working mirror */
1720 if (!enough(conf)) { 1991 if (!enough(conf)) {
@@ -1763,7 +2034,7 @@ static int run(mddev_t *mddev)
1763 * maybe... 2034 * maybe...
1764 */ 2035 */
1765 { 2036 {
1766 int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; 2037 int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
1767 stripe /= conf->near_copies; 2038 stripe /= conf->near_copies;
1768 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 2039 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
1769 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2040 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
@@ -1776,6 +2047,7 @@ static int run(mddev_t *mddev)
1776out_free_conf: 2047out_free_conf:
1777 if (conf->r10bio_pool) 2048 if (conf->r10bio_pool)
1778 mempool_destroy(conf->r10bio_pool); 2049 mempool_destroy(conf->r10bio_pool);
2050 safe_put_page(conf->tmppage);
1779 kfree(conf->mirrors); 2051 kfree(conf->mirrors);
1780 kfree(conf); 2052 kfree(conf);
1781 mddev->private = NULL; 2053 mddev->private = NULL;
@@ -1798,10 +2070,31 @@ static int stop(mddev_t *mddev)
1798 return 0; 2070 return 0;
1799} 2071}
1800 2072
2073static void raid10_quiesce(mddev_t *mddev, int state)
2074{
2075 conf_t *conf = mddev_to_conf(mddev);
2076
2077 switch(state) {
2078 case 1:
2079 raise_barrier(conf, 0);
2080 break;
2081 case 0:
2082 lower_barrier(conf);
2083 break;
2084 }
2085 if (mddev->thread) {
2086 if (mddev->bitmap)
2087 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2088 else
2089 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2090 md_wakeup_thread(mddev->thread);
2091 }
2092}
1801 2093
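raid10_quiesce() implements the personality quiesce hook with the same barrier: state 1 drains and blocks all IO via raise_barrier(), state 0 releases it via lower_barrier(), and the daemon timeout is retuned for the bitmap on the way out. The caller shape, hypothetically, since the md-core side is not part of this hunk:

    /* Hypothetical caller shape, not the actual md API surface: md core
     * quiesces the personality around operations that must see no
     * in-flight IO, e.g. attaching or detaching a bitmap. */
    static void with_array_quiesced(mddev_t *mddev, void (*op)(mddev_t *))
    {
            mddev->pers->quiesce(mddev, 1);  /* raise barrier: drain and block */
            op(mddev);
            mddev->pers->quiesce(mddev, 0);  /* lower barrier: IO resumes */
    }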
1802static mdk_personality_t raid10_personality = 2094static struct mdk_personality raid10_personality =
1803{ 2095{
1804 .name = "raid10", 2096 .name = "raid10",
2097 .level = 10,
1805 .owner = THIS_MODULE, 2098 .owner = THIS_MODULE,
1806 .make_request = make_request, 2099 .make_request = make_request,
1807 .run = run, 2100 .run = run,
@@ -1812,19 +2105,22 @@ static mdk_personality_t raid10_personality =
1812 .hot_remove_disk= raid10_remove_disk, 2105 .hot_remove_disk= raid10_remove_disk,
1813 .spare_active = raid10_spare_active, 2106 .spare_active = raid10_spare_active,
1814 .sync_request = sync_request, 2107 .sync_request = sync_request,
2108 .quiesce = raid10_quiesce,
1815}; 2109};
1816 2110
1817static int __init raid_init(void) 2111static int __init raid_init(void)
1818{ 2112{
1819 return register_md_personality(RAID10, &raid10_personality); 2113 return register_md_personality(&raid10_personality);
1820} 2114}
1821 2115
1822static void raid_exit(void) 2116static void raid_exit(void)
1823{ 2117{
1824 unregister_md_personality(RAID10); 2118 unregister_md_personality(&raid10_personality);
1825} 2119}
1826 2120
1827module_init(raid_init); 2121module_init(raid_init);
1828module_exit(raid_exit); 2122module_exit(raid_exit);
1829MODULE_LICENSE("GPL"); 2123MODULE_LICENSE("GPL");
1830MODULE_ALIAS("md-personality-9"); /* RAID10 */ 2124MODULE_ALIAS("md-personality-9"); /* RAID10 */
2125MODULE_ALIAS("md-raid10");
2126MODULE_ALIAS("md-level-10");
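register_md_personality() now takes the personality struct itself, which carries .level, replacing registration by a fixed table index; the md-level-10 alias then lets md core autoload the module by level. A sketch of the lookup this enables on the md-core side; pers_list and the list member are assumptions, as that code is not shown in this diff:

    /* Hypothetical md-core lookup keyed on ->level */
    static LIST_HEAD(pers_list);

    static struct mdk_personality *find_pers(int level)
    {
            struct mdk_personality *pers;

            list_for_each_entry(pers, &pers_list, list)
                    if (pers->level == level)
                            return pers;
            return NULL;  /* caller can request_module("md-level-%d", level)
                           * and retry, thanks to the new aliases */
    }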
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fafc4bc045f7..54f4a9847e38 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -35,12 +35,10 @@
35#define STRIPE_SHIFT (PAGE_SHIFT - 9) 35#define STRIPE_SHIFT (PAGE_SHIFT - 9)
36#define STRIPE_SECTORS (STRIPE_SIZE>>9) 36#define STRIPE_SECTORS (STRIPE_SIZE>>9)
37#define IO_THRESHOLD 1 37#define IO_THRESHOLD 1
38#define HASH_PAGES 1 38#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
39#define HASH_PAGES_ORDER 0
40#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
41#define HASH_MASK (NR_HASH - 1) 39#define HASH_MASK (NR_HASH - 1)
42 40
43#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) 41#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
44 42
45/* bio's attached to a stripe+device for I/O are linked together in bi_sector 43/* bio's attached to a stripe+device for I/O are linked together in bi_sector
46 * order without overlap. There may be several bio's per stripe+device, and 44 * order without overlap. There may be several bio's per stripe+device, and
@@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh)
113 spin_unlock_irqrestore(&conf->device_lock, flags); 111 spin_unlock_irqrestore(&conf->device_lock, flags);
114} 112}
115 113
116static void remove_hash(struct stripe_head *sh) 114static inline void remove_hash(struct stripe_head *sh)
117{ 115{
118 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); 116 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
119 117
120 if (sh->hash_pprev) { 118 hlist_del_init(&sh->hash);
121 if (sh->hash_next)
122 sh->hash_next->hash_pprev = sh->hash_pprev;
123 *sh->hash_pprev = sh->hash_next;
124 sh->hash_pprev = NULL;
125 }
126} 119}
127 120
128static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 121static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
129{ 122{
130 struct stripe_head **shp = &stripe_hash(conf, sh->sector); 123 struct hlist_head *hp = stripe_hash(conf, sh->sector);
131 124
132 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); 125 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
133 126
134 CHECK_DEVLOCK(); 127 CHECK_DEVLOCK();
135 if ((sh->hash_next = *shp) != NULL) 128 hlist_add_head(&sh->hash, hp);
136 (*shp)->hash_pprev = &sh->hash_next;
137 *shp = sh;
138 sh->hash_pprev = shp;
139} 129}
140 130
141 131
@@ -167,7 +157,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
167 if (!p) 157 if (!p)
168 continue; 158 continue;
169 sh->dev[i].page = NULL; 159 sh->dev[i].page = NULL;
170 page_cache_release(p); 160 put_page(p);
171 } 161 }
172} 162}
173 163
@@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
228static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) 218static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
229{ 219{
230 struct stripe_head *sh; 220 struct stripe_head *sh;
221 struct hlist_node *hn;
231 222
232 CHECK_DEVLOCK(); 223 CHECK_DEVLOCK();
233 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 224 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
234 for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) 225 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
235 if (sh->sector == sector) 226 if (sh->sector == sector)
236 return sh; 227 return sh;
237 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 228 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
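The stripe hash conversion above swaps the hand-rolled hash_next/hash_pprev chains for the generic hlist; the bucket count stays at one page of single-pointer heads, and the gain is the standard iteration macros plus O(1) unhashing via the pprev pointer each node stores. The essence of that trick, as a standalone sketch rather than <linux/list.h>:

    #include <stddef.h>

    struct hnode { struct hnode *next, **pprev; };
    struct hhead { struct hnode *first; };

    static void hnode_add_head(struct hnode *n, struct hhead *h)
    {
            n->next = h->first;
            if (h->first)
                    h->first->pprev = &n->next;
            h->first = n;
            n->pprev = &h->first;       /* address of whatever points at us */
    }

    static void hnode_del_init(struct hnode *n)
    {
            if (n->pprev) {             /* currently hashed? */
                    *n->pprev = n->next;        /* unlink in O(1), no head needed */
                    if (n->next)
                            n->next->pprev = n->pprev;
                    n->next = NULL;
                    n->pprev = NULL;    /* mark unhashed, like the old
                                           sh->hash_pprev = NULL */
            }
    }

Because deletion never needs the bucket head, remove_hash() stays a one-liner even though stripe_hash() must now hash the sector to find the bucket only on insert and lookup.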
@@ -417,7 +408,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
417 set_bit(R5_UPTODATE, &sh->dev[i].flags); 408 set_bit(R5_UPTODATE, &sh->dev[i].flags);
418#endif 409#endif
419 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 410 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
420 printk("R5: read error corrected!!\n"); 411 printk(KERN_INFO "raid5: read error corrected!!\n");
421 clear_bit(R5_ReadError, &sh->dev[i].flags); 412 clear_bit(R5_ReadError, &sh->dev[i].flags);
422 clear_bit(R5_ReWrite, &sh->dev[i].flags); 413 clear_bit(R5_ReWrite, &sh->dev[i].flags);
423 } 414 }
@@ -428,13 +419,14 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
428 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 419 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
429 atomic_inc(&conf->disks[i].rdev->read_errors); 420 atomic_inc(&conf->disks[i].rdev->read_errors);
430 if (conf->mddev->degraded) 421 if (conf->mddev->degraded)
431 printk("R5: read error not correctable.\n"); 422 printk(KERN_WARNING "raid5: read error not correctable.\n");
432 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 423 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
433 /* Oh, no!!! */ 424 /* Oh, no!!! */
434 printk("R5: read error NOT corrected!!\n"); 425 printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
435 else if (atomic_read(&conf->disks[i].rdev->read_errors) 426 else if (atomic_read(&conf->disks[i].rdev->read_errors)
436 > conf->max_nr_stripes) 427 > conf->max_nr_stripes)
437 printk("raid5: Too many read errors, failing device.\n"); 428 printk(KERN_WARNING
429 "raid5: Too many read errors, failing device.\n");
438 else 430 else
439 retry = 1; 431 retry = 1;
440 if (retry) 432 if (retry)
@@ -604,7 +596,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
604 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 596 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
605 break; 597 break;
606 default: 598 default:
607 printk("raid5: unsupported algorithm %d\n", 599 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
608 conf->algorithm); 600 conf->algorithm);
609 } 601 }
610 602
@@ -645,7 +637,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
645 i -= (sh->pd_idx + 1); 637 i -= (sh->pd_idx + 1);
646 break; 638 break;
647 default: 639 default:
648 printk("raid5: unsupported algorithm %d\n", 640 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
649 conf->algorithm); 641 conf->algorithm);
650 } 642 }
651 643
@@ -654,7 +646,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
654 646
655 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 647 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
656 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 648 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
657 printk("compute_blocknr: map not correct\n"); 649 printk(KERN_ERR "compute_blocknr: map not correct\n");
658 return 0; 650 return 0;
659 } 651 }
660 return r_sector; 652 return r_sector;
@@ -737,7 +729,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
737 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 729 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
738 ptr[count++] = p; 730 ptr[count++] = p;
739 else 731 else
740 printk("compute_block() %d, stripe %llu, %d" 732 printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
741 " not present\n", dd_idx, 733 " not present\n", dd_idx,
742 (unsigned long long)sh->sector, i); 734 (unsigned long long)sh->sector, i);
743 735
@@ -960,11 +952,11 @@ static void handle_stripe(struct stripe_head *sh)
960 syncing = test_bit(STRIPE_SYNCING, &sh->state); 952 syncing = test_bit(STRIPE_SYNCING, &sh->state);
961 /* Now to look around and see what can be done */ 953 /* Now to look around and see what can be done */
962 954
955 rcu_read_lock();
963 for (i=disks; i--; ) { 956 for (i=disks; i--; ) {
964 mdk_rdev_t *rdev; 957 mdk_rdev_t *rdev;
965 dev = &sh->dev[i]; 958 dev = &sh->dev[i];
966 clear_bit(R5_Insync, &dev->flags); 959 clear_bit(R5_Insync, &dev->flags);
967 clear_bit(R5_Syncio, &dev->flags);
968 960
969 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 961 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
970 i, dev->flags, dev->toread, dev->towrite, dev->written); 962 i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -1003,9 +995,9 @@ static void handle_stripe(struct stripe_head *sh)
1003 non_overwrite++; 995 non_overwrite++;
1004 } 996 }
1005 if (dev->written) written++; 997 if (dev->written) written++;
1006 rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ 998 rdev = rcu_dereference(conf->disks[i].rdev);
1007 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 999 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1008 /* The ReadError flag wil just be confusing now */ 1000 /* The ReadError flag will just be confusing now */
1009 clear_bit(R5_ReadError, &dev->flags); 1001 clear_bit(R5_ReadError, &dev->flags);
1010 clear_bit(R5_ReWrite, &dev->flags); 1002 clear_bit(R5_ReWrite, &dev->flags);
1011 } 1003 }
@@ -1016,6 +1008,7 @@ static void handle_stripe(struct stripe_head *sh)
1016 } else 1008 } else
1017 set_bit(R5_Insync, &dev->flags); 1009 set_bit(R5_Insync, &dev->flags);
1018 } 1010 }
1011 rcu_read_unlock();
1019 PRINTK("locked=%d uptodate=%d to_read=%d" 1012 PRINTK("locked=%d uptodate=%d to_read=%d"
1020 " to_write=%d failed=%d failed_num=%d\n", 1013 " to_write=%d failed=%d failed_num=%d\n",
1021 locked, uptodate, to_read, to_write, failed, failed_num); 1014 locked, uptodate, to_read, to_write, failed, failed_num);
@@ -1027,10 +1020,13 @@ static void handle_stripe(struct stripe_head *sh)
1027 int bitmap_end = 0; 1020 int bitmap_end = 0;
1028 1021
1029 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1022 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1030 mdk_rdev_t *rdev = conf->disks[i].rdev; 1023 mdk_rdev_t *rdev;
1024 rcu_read_lock();
1025 rdev = rcu_dereference(conf->disks[i].rdev);
1031 if (rdev && test_bit(In_sync, &rdev->flags)) 1026 if (rdev && test_bit(In_sync, &rdev->flags))
1032 /* multiple read failures in one stripe */ 1027 /* multiple read failures in one stripe */
1033 md_error(conf->mddev, rdev); 1028 md_error(conf->mddev, rdev);
1029 rcu_read_unlock();
1034 } 1030 }
1035 1031
1036 spin_lock_irq(&conf->device_lock); 1032 spin_lock_irq(&conf->device_lock);
@@ -1179,9 +1175,6 @@ static void handle_stripe(struct stripe_head *sh)
1179 locked++; 1175 locked++;
1180 PRINTK("Reading block %d (sync=%d)\n", 1176 PRINTK("Reading block %d (sync=%d)\n",
1181 i, syncing); 1177 i, syncing);
1182 if (syncing)
1183 md_sync_acct(conf->disks[i].rdev->bdev,
1184 STRIPE_SECTORS);
1185 } 1178 }
1186 } 1179 }
1187 } 1180 }
@@ -1288,7 +1281,7 @@ static void handle_stripe(struct stripe_head *sh)
1288 * is available 1281 * is available
1289 */ 1282 */
1290 if (syncing && locked == 0 && 1283 if (syncing && locked == 0 &&
1291 !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) { 1284 !test_bit(STRIPE_INSYNC, &sh->state)) {
1292 set_bit(STRIPE_HANDLE, &sh->state); 1285 set_bit(STRIPE_HANDLE, &sh->state);
1293 if (failed == 0) { 1286 if (failed == 0) {
1294 char *pagea; 1287 char *pagea;
@@ -1306,27 +1299,25 @@ static void handle_stripe(struct stripe_head *sh)
1306 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 1299 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1307 /* don't try to repair!! */ 1300 /* don't try to repair!! */
1308 set_bit(STRIPE_INSYNC, &sh->state); 1301 set_bit(STRIPE_INSYNC, &sh->state);
1302 else {
1303 compute_block(sh, sh->pd_idx);
1304 uptodate++;
1305 }
1309 } 1306 }
1310 } 1307 }
1311 if (!test_bit(STRIPE_INSYNC, &sh->state)) { 1308 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1309 /* either failed parity check, or recovery is happening */
1312 if (failed==0) 1310 if (failed==0)
1313 failed_num = sh->pd_idx; 1311 failed_num = sh->pd_idx;
1314 /* should be able to compute the missing block and write it to spare */
1315 if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
1316 if (uptodate+1 != disks)
1317 BUG();
1318 compute_block(sh, failed_num);
1319 uptodate++;
1320 }
1321 if (uptodate != disks)
1322 BUG();
1323 dev = &sh->dev[failed_num]; 1312 dev = &sh->dev[failed_num];
1313 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1314 BUG_ON(uptodate != disks);
1315
1324 set_bit(R5_LOCKED, &dev->flags); 1316 set_bit(R5_LOCKED, &dev->flags);
1325 set_bit(R5_Wantwrite, &dev->flags); 1317 set_bit(R5_Wantwrite, &dev->flags);
1326 clear_bit(STRIPE_DEGRADED, &sh->state); 1318 clear_bit(STRIPE_DEGRADED, &sh->state);
1327 locked++; 1319 locked++;
1328 set_bit(STRIPE_INSYNC, &sh->state); 1320 set_bit(STRIPE_INSYNC, &sh->state);
1329 set_bit(R5_Syncio, &dev->flags);
1330 } 1321 }
1331 } 1322 }
1332 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1323 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
@@ -1392,7 +1383,7 @@ static void handle_stripe(struct stripe_head *sh)
1392 rcu_read_unlock(); 1383 rcu_read_unlock();
1393 1384
1394 if (rdev) { 1385 if (rdev) {
1395 if (test_bit(R5_Syncio, &sh->dev[i].flags)) 1386 if (syncing)
1396 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1387 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1397 1388
1398 bi->bi_bdev = rdev->bdev; 1389 bi->bi_bdev = rdev->bdev;
@@ -1409,6 +1400,9 @@ static void handle_stripe(struct stripe_head *sh)
1409 bi->bi_io_vec[0].bv_offset = 0; 1400 bi->bi_io_vec[0].bv_offset = 0;
1410 bi->bi_size = STRIPE_SIZE; 1401 bi->bi_size = STRIPE_SIZE;
1411 bi->bi_next = NULL; 1402 bi->bi_next = NULL;
1403 if (rw == WRITE &&
1404 test_bit(R5_ReWrite, &sh->dev[i].flags))
1405 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1412 generic_make_request(bi); 1406 generic_make_request(bi);
1413 } else { 1407 } else {
1414 if (rw == 1) 1408 if (rw == 1)
@@ -1822,21 +1816,21 @@ static int run(mddev_t *mddev)
1822 struct list_head *tmp; 1816 struct list_head *tmp;
1823 1817
1824 if (mddev->level != 5 && mddev->level != 4) { 1818 if (mddev->level != 5 && mddev->level != 4) {
1825 printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level); 1819 printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
1820 mdname(mddev), mddev->level);
1826 return -EIO; 1821 return -EIO;
1827 } 1822 }
1828 1823
1829 mddev->private = kmalloc (sizeof (raid5_conf_t) 1824 mddev->private = kzalloc(sizeof (raid5_conf_t)
1830 + mddev->raid_disks * sizeof(struct disk_info), 1825 + mddev->raid_disks * sizeof(struct disk_info),
1831 GFP_KERNEL); 1826 GFP_KERNEL);
1832 if ((conf = mddev->private) == NULL) 1827 if ((conf = mddev->private) == NULL)
1833 goto abort; 1828 goto abort;
1834 memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) ); 1829
1835 conf->mddev = mddev; 1830 conf->mddev = mddev;
1836 1831
1837 if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) 1832 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1838 goto abort; 1833 goto abort;
1839 memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1840 1834
1841 spin_lock_init(&conf->device_lock); 1835 spin_lock_init(&conf->device_lock);
1842 init_waitqueue_head(&conf->wait_for_stripe); 1836 init_waitqueue_head(&conf->wait_for_stripe);
@@ -1903,10 +1897,17 @@ static int run(mddev_t *mddev)
1903 1897
1904 if (mddev->degraded == 1 && 1898 if (mddev->degraded == 1 &&
1905 mddev->recovery_cp != MaxSector) { 1899 mddev->recovery_cp != MaxSector) {
1906 printk(KERN_ERR 1900 if (mddev->ok_start_degraded)
1907 "raid5: cannot start dirty degraded array for %s\n", 1901 printk(KERN_WARNING
1908 mdname(mddev)); 1902 "raid5: starting dirty degraded array: %s"
1909 goto abort; 1903 "- data corruption possible.\n",
1904 mdname(mddev));
1905 else {
1906 printk(KERN_ERR
1907 "raid5: cannot start dirty degraded array for %s\n",
1908 mdname(mddev));
1909 goto abort;
1910 }
1910 } 1911 }
1911 1912
1912 { 1913 {
@@ -1948,7 +1949,7 @@ static int run(mddev_t *mddev)
1948 */ 1949 */
1949 { 1950 {
1950 int stripe = (mddev->raid_disks-1) * mddev->chunk_size 1951 int stripe = (mddev->raid_disks-1) * mddev->chunk_size
1951 / PAGE_CACHE_SIZE; 1952 / PAGE_SIZE;
1952 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 1953 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1953 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 1954 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1954 } 1955 }
@@ -1956,9 +1957,6 @@ static int run(mddev_t *mddev)
1956 /* Ok, everything is just fine now */ 1957 /* Ok, everything is just fine now */
1957 sysfs_create_group(&mddev->kobj, &raid5_attrs_group); 1958 sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
1958 1959
1959 if (mddev->bitmap)
1960 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1961
1962 mddev->queue->unplug_fn = raid5_unplug_device; 1960 mddev->queue->unplug_fn = raid5_unplug_device;
1963 mddev->queue->issue_flush_fn = raid5_issue_flush; 1961 mddev->queue->issue_flush_fn = raid5_issue_flush;
1964 1962
@@ -1967,9 +1965,7 @@ static int run(mddev_t *mddev)
1967abort: 1965abort:
1968 if (conf) { 1966 if (conf) {
1969 print_raid5_conf(conf); 1967 print_raid5_conf(conf);
1970 if (conf->stripe_hashtbl) 1968 kfree(conf->stripe_hashtbl);
1971 free_pages((unsigned long) conf->stripe_hashtbl,
1972 HASH_PAGES_ORDER);
1973 kfree(conf); 1969 kfree(conf);
1974 } 1970 }
1975 mddev->private = NULL; 1971 mddev->private = NULL;
@@ -1986,7 +1982,7 @@ static int stop(mddev_t *mddev)
1986 md_unregister_thread(mddev->thread); 1982 md_unregister_thread(mddev->thread);
1987 mddev->thread = NULL; 1983 mddev->thread = NULL;
1988 shrink_stripes(conf); 1984 shrink_stripes(conf);
1989 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); 1985 kfree(conf->stripe_hashtbl);
1990 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 1986 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
1991 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 1987 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
1992 kfree(conf); 1988 kfree(conf);
@@ -2014,12 +2010,12 @@ static void print_sh (struct stripe_head *sh)
2014static void printall (raid5_conf_t *conf) 2010static void printall (raid5_conf_t *conf)
2015{ 2011{
2016 struct stripe_head *sh; 2012 struct stripe_head *sh;
2013 struct hlist_node *hn;
2017 int i; 2014 int i;
2018 2015
2019 spin_lock_irq(&conf->device_lock); 2016 spin_lock_irq(&conf->device_lock);
2020 for (i = 0; i < NR_HASH; i++) { 2017 for (i = 0; i < NR_HASH; i++) {
2021 sh = conf->stripe_hashtbl[i]; 2018 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2022 for (; sh; sh = sh->hash_next) {
2023 if (sh->raid_conf != conf) 2019 if (sh->raid_conf != conf)
2024 continue; 2020 continue;
2025 print_sh(sh); 2021 print_sh(sh);
@@ -2192,17 +2188,12 @@ static void raid5_quiesce(mddev_t *mddev, int state)
2192 spin_unlock_irq(&conf->device_lock); 2188 spin_unlock_irq(&conf->device_lock);
2193 break; 2189 break;
2194 } 2190 }
2195 if (mddev->thread) {
2196 if (mddev->bitmap)
2197 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2198 else
2199 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2200 md_wakeup_thread(mddev->thread);
2201 }
2202} 2191}
2203static mdk_personality_t raid5_personality= 2192
2193static struct mdk_personality raid5_personality =
2204{ 2194{
2205 .name = "raid5", 2195 .name = "raid5",
2196 .level = 5,
2206 .owner = THIS_MODULE, 2197 .owner = THIS_MODULE,
2207 .make_request = make_request, 2198 .make_request = make_request,
2208 .run = run, 2199 .run = run,
@@ -2217,17 +2208,42 @@ static mdk_personality_t raid5_personality=
2217 .quiesce = raid5_quiesce, 2208 .quiesce = raid5_quiesce,
2218}; 2209};
2219 2210
2220static int __init raid5_init (void) 2211static struct mdk_personality raid4_personality =
2221{ 2212{
2222 return register_md_personality (RAID5, &raid5_personality); 2213 .name = "raid4",
2214 .level = 4,
2215 .owner = THIS_MODULE,
2216 .make_request = make_request,
2217 .run = run,
2218 .stop = stop,
2219 .status = status,
2220 .error_handler = error,
2221 .hot_add_disk = raid5_add_disk,
2222 .hot_remove_disk= raid5_remove_disk,
2223 .spare_active = raid5_spare_active,
2224 .sync_request = sync_request,
2225 .resize = raid5_resize,
2226 .quiesce = raid5_quiesce,
2227};
2228
2229static int __init raid5_init(void)
2230{
2231 register_md_personality(&raid5_personality);
2232 register_md_personality(&raid4_personality);
2233 return 0;
2223} 2234}
2224 2235
2225static void raid5_exit (void) 2236static void raid5_exit(void)
2226{ 2237{
2227 unregister_md_personality (RAID5); 2238 unregister_md_personality(&raid5_personality);
2239 unregister_md_personality(&raid4_personality);
2228} 2240}
2229 2241
2230module_init(raid5_init); 2242module_init(raid5_init);
2231module_exit(raid5_exit); 2243module_exit(raid5_exit);
2232MODULE_LICENSE("GPL"); 2244MODULE_LICENSE("GPL");
2233MODULE_ALIAS("md-personality-4"); /* RAID5 */ 2245MODULE_ALIAS("md-personality-4"); /* RAID5 */
2246MODULE_ALIAS("md-raid5");
2247MODULE_ALIAS("md-raid4");
2248MODULE_ALIAS("md-level-5");
2249MODULE_ALIAS("md-level-4");
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 0000d162d198..8c823d686a60 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -40,12 +40,10 @@
40#define STRIPE_SHIFT (PAGE_SHIFT - 9) 40#define STRIPE_SHIFT (PAGE_SHIFT - 9)
41#define STRIPE_SECTORS (STRIPE_SIZE>>9) 41#define STRIPE_SECTORS (STRIPE_SIZE>>9)
42#define IO_THRESHOLD 1 42#define IO_THRESHOLD 1
43#define HASH_PAGES 1 43#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
44#define HASH_PAGES_ORDER 0
45#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
46#define HASH_MASK (NR_HASH - 1) 44#define HASH_MASK (NR_HASH - 1)
47 45
48#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) 46#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
49 47
50/* bio's attached to a stripe+device for I/O are linked together in bi_sector 48/* bio's attached to a stripe+device for I/O are linked together in bi_sector
51 * order without overlap. There may be several bio's per stripe+device, and 49 * order without overlap. There may be several bio's per stripe+device, and
@@ -132,29 +130,21 @@ static void release_stripe(struct stripe_head *sh)
132 spin_unlock_irqrestore(&conf->device_lock, flags); 130 spin_unlock_irqrestore(&conf->device_lock, flags);
133} 131}
134 132
135static void remove_hash(struct stripe_head *sh) 133static inline void remove_hash(struct stripe_head *sh)
136{ 134{
137 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); 135 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
138 136
139 if (sh->hash_pprev) { 137 hlist_del_init(&sh->hash);
140 if (sh->hash_next)
141 sh->hash_next->hash_pprev = sh->hash_pprev;
142 *sh->hash_pprev = sh->hash_next;
143 sh->hash_pprev = NULL;
144 }
145} 138}
146 139
147static __inline__ void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) 140static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
148{ 141{
149 struct stripe_head **shp = &stripe_hash(conf, sh->sector); 142 struct hlist_head *hp = stripe_hash(conf, sh->sector);
150 143
151 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); 144 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
152 145
153 CHECK_DEVLOCK(); 146 CHECK_DEVLOCK();
154 if ((sh->hash_next = *shp) != NULL) 147 hlist_add_head(&sh->hash, hp);
155 (*shp)->hash_pprev = &sh->hash_next;
156 *shp = sh;
157 sh->hash_pprev = shp;
158} 148}
159 149
160 150
@@ -186,7 +176,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
186 if (!p) 176 if (!p)
187 continue; 177 continue;
188 sh->dev[i].page = NULL; 178 sh->dev[i].page = NULL;
189 page_cache_release(p); 179 put_page(p);
190 } 180 }
191} 181}
192 182
@@ -247,10 +237,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
247static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector) 237static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector)
248{ 238{
249 struct stripe_head *sh; 239 struct stripe_head *sh;
240 struct hlist_node *hn;
250 241
251 CHECK_DEVLOCK(); 242 CHECK_DEVLOCK();
252 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 243 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
253 for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) 244 hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash)
254 if (sh->sector == sector) 245 if (sh->sector == sector)
255 return sh; 246 return sh;
256 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 247 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -367,8 +358,8 @@ static void shrink_stripes(raid6_conf_t *conf)
367 conf->slab_cache = NULL; 358 conf->slab_cache = NULL;
368} 359}
369 360
370static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done, 361static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done,
371 int error) 362 int error)
372{ 363{
373 struct stripe_head *sh = bi->bi_private; 364 struct stripe_head *sh = bi->bi_private;
374 raid6_conf_t *conf = sh->raid_conf; 365 raid6_conf_t *conf = sh->raid_conf;
@@ -420,9 +411,35 @@ static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done,
420#else 411#else
421 set_bit(R5_UPTODATE, &sh->dev[i].flags); 412 set_bit(R5_UPTODATE, &sh->dev[i].flags);
422#endif 413#endif
414 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
415 printk(KERN_INFO "raid6: read error corrected!!\n");
416 clear_bit(R5_ReadError, &sh->dev[i].flags);
417 clear_bit(R5_ReWrite, &sh->dev[i].flags);
418 }
419 if (atomic_read(&conf->disks[i].rdev->read_errors))
420 atomic_set(&conf->disks[i].rdev->read_errors, 0);
423 } else { 421 } else {
424 md_error(conf->mddev, conf->disks[i].rdev); 422 int retry = 0;
425 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 423 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
424 atomic_inc(&conf->disks[i].rdev->read_errors);
425 if (conf->mddev->degraded)
426 printk(KERN_WARNING "raid6: read error not correctable.\n");
427 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
428 /* Oh, no!!! */
429 printk(KERN_WARNING "raid6: read error NOT corrected!!\n");
430 else if (atomic_read(&conf->disks[i].rdev->read_errors)
431 > conf->max_nr_stripes)
432 printk(KERN_WARNING
433 "raid6: Too many read errors, failing device.\n");
434 else
435 retry = 1;
436 if (retry)
437 set_bit(R5_ReadError, &sh->dev[i].flags);
438 else {
439 clear_bit(R5_ReadError, &sh->dev[i].flags);
440 clear_bit(R5_ReWrite, &sh->dev[i].flags);
441 md_error(conf->mddev, conf->disks[i].rdev);
442 }
426 } 443 }
427 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 444 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
428#if 0 445#if 0
@@ -805,7 +822,7 @@ static void compute_parity(struct stripe_head *sh, int method)
805} 822}
806 823
807/* Compute one missing block */ 824/* Compute one missing block */
808static void compute_block_1(struct stripe_head *sh, int dd_idx) 825static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
809{ 826{
810 raid6_conf_t *conf = sh->raid_conf; 827 raid6_conf_t *conf = sh->raid_conf;
811 int i, count, disks = conf->raid_disks; 828 int i, count, disks = conf->raid_disks;
@@ -821,7 +838,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx)
821 compute_parity(sh, UPDATE_PARITY); 838 compute_parity(sh, UPDATE_PARITY);
822 } else { 839 } else {
823 ptr[0] = page_address(sh->dev[dd_idx].page); 840 ptr[0] = page_address(sh->dev[dd_idx].page);
824 memset(ptr[0], 0, STRIPE_SIZE); 841 if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
825 count = 1; 842 count = 1;
826 for (i = disks ; i--; ) { 843 for (i = disks ; i--; ) {
827 if (i == dd_idx || i == qd_idx) 844 if (i == dd_idx || i == qd_idx)
@@ -838,7 +855,8 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx)
838 } 855 }
839 if (count != 1) 856 if (count != 1)
840 xor_block(count, STRIPE_SIZE, ptr); 857 xor_block(count, STRIPE_SIZE, ptr);
841 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 858 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
859 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
842 } 860 }
843} 861}
844 862
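The new nozero argument turns compute_block_1() into a parity checker as well as a reconstructor: with nozero set, the target page is not cleared first, so the data blocks XOR against the parity already read from disk, and the result is all-zero exactly when parity was consistent, which page_is_zero() (added just below) then tests. R5_UPTODATE is cleared in that mode because the page now holds a residue, not valid data. A simplified standalone demonstration of the check:

    #include <assert.h>
    #include <string.h>

    #define NDATA 4
    #define SZ    16                      /* stands in for STRIPE_SIZE */

    static void xor_into(unsigned char *dst, const unsigned char *src)
    {
            for (int i = 0; i < SZ; i++)
                    dst[i] ^= src[i];
    }

    int main(void)
    {
            unsigned char data[NDATA][SZ], parity[SZ] = {0}, check[SZ];
            int d, i, zero;

            memset(data, 0x5a, sizeof(data));
            data[2][7] = 0x13;

            for (d = 0; d < NDATA; d++)   /* write path: build parity */
                    xor_into(parity, data[d]);

            /* check path, "nozero": start from the on-disk parity
             * instead of a cleared page, XOR the data over it */
            memcpy(check, parity, SZ);
            for (d = 0; d < NDATA; d++)
                    xor_into(check, data[d]);
            for (i = 0; i < SZ; i++)
                    assert(check[i] == 0);  /* residue all zero: in sync */

            data[1][3] ^= 0xff;             /* corrupt one data block */
            memcpy(check, parity, SZ);
            for (d = 0; d < NDATA; d++)
                    xor_into(check, data[d]);
            for (zero = 1, i = 0; i < SZ; i++)
                    if (check[i])
                            zero = 0;
            assert(!zero);                  /* mismatch detected */
            return 0;
    }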
@@ -871,7 +889,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
871 return; 889 return;
872 } else { 890 } else {
873 /* We're missing D+Q; recompute D from P */ 891 /* We're missing D+Q; recompute D from P */
874 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1); 892 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
875 compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */ 893 compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */
876 return; 894 return;
877 } 895 }
@@ -982,6 +1000,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
982} 1000}
983 1001
984 1002
1003static int page_is_zero(struct page *p)
1004{
1005 char *a = page_address(p);
1006 return ((*(u32*)a) == 0 &&
1007 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1008}
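page_is_zero() uses the overlapping-compare idiom: if the first 32-bit word is zero and the buffer equals itself shifted by four bytes, then by induction every byte is zero, with no second zeroed buffer needed (overlapping memcmp is fine, since it only reads). A standalone demonstration, assuming n >= 4 and a word-aligned buffer, which holds for a page:

    #include <assert.h>
    #include <string.h>
    #include <stddef.h>

    static int is_zero(const void *p, size_t n)
    {
            const char *a = p;
            return *(const unsigned int *)a == 0 &&
                   memcmp(a, a + 4, n - 4) == 0;
    }

    int main(void)
    {
            unsigned int page[1024] = {0};   /* stands in for one page */

            assert(is_zero(page, sizeof(page)));
            ((char *)page)[4095] = 1;        /* dirty the last byte */
            assert(!is_zero(page, sizeof(page)));
            return 0;
    }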
985/* 1009/*
986 * handle_stripe - do things to a stripe. 1010 * handle_stripe - do things to a stripe.
987 * 1011 *
@@ -1000,7 +1024,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
1000 * 1024 *
1001 */ 1025 */
1002 1026
1003static void handle_stripe(struct stripe_head *sh) 1027static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
1004{ 1028{
1005 raid6_conf_t *conf = sh->raid_conf; 1029 raid6_conf_t *conf = sh->raid_conf;
1006 int disks = conf->raid_disks; 1030 int disks = conf->raid_disks;
@@ -1027,11 +1051,11 @@ static void handle_stripe(struct stripe_head *sh)
1027 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1051 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1028 /* Now to look around and see what can be done */ 1052 /* Now to look around and see what can be done */
1029 1053
1054 rcu_read_lock();
1030 for (i=disks; i--; ) { 1055 for (i=disks; i--; ) {
1031 mdk_rdev_t *rdev; 1056 mdk_rdev_t *rdev;
1032 dev = &sh->dev[i]; 1057 dev = &sh->dev[i];
1033 clear_bit(R5_Insync, &dev->flags); 1058 clear_bit(R5_Insync, &dev->flags);
1034 clear_bit(R5_Syncio, &dev->flags);
1035 1059
1036 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 1060 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1037 i, dev->flags, dev->toread, dev->towrite, dev->written); 1061 i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -1070,14 +1094,21 @@ static void handle_stripe(struct stripe_head *sh)
1070 non_overwrite++; 1094 non_overwrite++;
1071 } 1095 }
1072 if (dev->written) written++; 1096 if (dev->written) written++;
1073 rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ 1097 rdev = rcu_dereference(conf->disks[i].rdev);
1074 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 1098 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1099 /* The ReadError flag will just be confusing now */
1100 clear_bit(R5_ReadError, &dev->flags);
1101 clear_bit(R5_ReWrite, &dev->flags);
1102 }
1103 if (!rdev || !test_bit(In_sync, &rdev->flags)
1104 || test_bit(R5_ReadError, &dev->flags)) {
1075 if ( failed < 2 ) 1105 if ( failed < 2 )
1076 failed_num[failed] = i; 1106 failed_num[failed] = i;
1077 failed++; 1107 failed++;
1078 } else 1108 } else
1079 set_bit(R5_Insync, &dev->flags); 1109 set_bit(R5_Insync, &dev->flags);
1080 } 1110 }
1111 rcu_read_unlock();
1081 PRINTK("locked=%d uptodate=%d to_read=%d" 1112 PRINTK("locked=%d uptodate=%d to_read=%d"
1082 " to_write=%d failed=%d failed_num=%d,%d\n", 1113 " to_write=%d failed=%d failed_num=%d,%d\n",
1083 locked, uptodate, to_read, to_write, failed, 1114 locked, uptodate, to_read, to_write, failed,
@@ -1088,6 +1119,17 @@ static void handle_stripe(struct stripe_head *sh)
1088 if (failed > 2 && to_read+to_write+written) { 1119 if (failed > 2 && to_read+to_write+written) {
1089 for (i=disks; i--; ) { 1120 for (i=disks; i--; ) {
1090 int bitmap_end = 0; 1121 int bitmap_end = 0;
1122
1123 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1124 mdk_rdev_t *rdev;
1125 rcu_read_lock();
1126 rdev = rcu_dereference(conf->disks[i].rdev);
1127 if (rdev && test_bit(In_sync, &rdev->flags))
1128 /* multiple read failures in one stripe */
1129 md_error(conf->mddev, rdev);
1130 rcu_read_unlock();
1131 }
1132
1091 spin_lock_irq(&conf->device_lock); 1133 spin_lock_irq(&conf->device_lock);
1092 /* fail all writes first */ 1134 /* fail all writes first */
1093 bi = sh->dev[i].towrite; 1135 bi = sh->dev[i].towrite;
@@ -1123,7 +1165,8 @@ static void handle_stripe(struct stripe_head *sh)
1123 } 1165 }
1124 1166
1125 /* fail any reads if this device is non-operational */ 1167 /* fail any reads if this device is non-operational */
1126 if (!test_bit(R5_Insync, &sh->dev[i].flags)) { 1168 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1169 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1127 bi = sh->dev[i].toread; 1170 bi = sh->dev[i].toread;
1128 sh->dev[i].toread = NULL; 1171 sh->dev[i].toread = NULL;
1129 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1172 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
@@ -1228,7 +1271,7 @@ static void handle_stripe(struct stripe_head *sh)
1228 if (uptodate == disks-1) { 1271 if (uptodate == disks-1) {
1229 PRINTK("Computing stripe %llu block %d\n", 1272 PRINTK("Computing stripe %llu block %d\n",
1230 (unsigned long long)sh->sector, i); 1273 (unsigned long long)sh->sector, i);
1231 compute_block_1(sh, i); 1274 compute_block_1(sh, i, 0);
1232 uptodate++; 1275 uptodate++;
1233 } else if ( uptodate == disks-2 && failed >= 2 ) { 1276 } else if ( uptodate == disks-2 && failed >= 2 ) {
1234 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ 1277 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
@@ -1259,9 +1302,6 @@ static void handle_stripe(struct stripe_head *sh)
1259 locked++; 1302 locked++;
1260 PRINTK("Reading block %d (sync=%d)\n", 1303 PRINTK("Reading block %d (sync=%d)\n",
1261 i, syncing); 1304 i, syncing);
1262 if (syncing)
1263 md_sync_acct(conf->disks[i].rdev->bdev,
1264 STRIPE_SECTORS);
1265 } 1305 }
1266 } 1306 }
1267 } 1307 }
@@ -1323,7 +1363,7 @@ static void handle_stripe(struct stripe_head *sh)
1323 /* We have failed blocks and need to compute them */ 1363 /* We have failed blocks and need to compute them */
1324 switch ( failed ) { 1364 switch ( failed ) {
1325 case 0: BUG(); 1365 case 0: BUG();
1326 case 1: compute_block_1(sh, failed_num[0]); break; 1366 case 1: compute_block_1(sh, failed_num[0], 0); break;
1327 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; 1367 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
1328 default: BUG(); /* This request should have been failed? */ 1368 default: BUG(); /* This request should have been failed? */
1329 } 1369 }
@@ -1338,12 +1378,10 @@ static void handle_stripe(struct stripe_head *sh)
1338 (unsigned long long)sh->sector, i); 1378 (unsigned long long)sh->sector, i);
1339 locked++; 1379 locked++;
1340 set_bit(R5_Wantwrite, &sh->dev[i].flags); 1380 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1341#if 0 /**** FIX: I don't understand the logic here... ****/
1342 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1343 || ((i==pd_idx || i==qd_idx) && failed == 0)) /* FIX? */
1344 set_bit(STRIPE_INSYNC, &sh->state);
1345#endif
1346 } 1381 }
1382 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
1383 set_bit(STRIPE_INSYNC, &sh->state);
1384
1347 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 1385 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1348 atomic_dec(&conf->preread_active_stripes); 1386 atomic_dec(&conf->preread_active_stripes);
1349 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 1387 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -1356,84 +1394,119 @@ static void handle_stripe(struct stripe_head *sh)
1356 * Any reads will already have been scheduled, so we just see if enough data 1394 * Any reads will already have been scheduled, so we just see if enough data
1357 * is available 1395 * is available
1358 */ 1396 */
1359 if (syncing && locked == 0 && 1397 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
1360 !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 2) { 1398 int update_p = 0, update_q = 0;
1361 set_bit(STRIPE_HANDLE, &sh->state); 1399 struct r5dev *dev;
1362#if 0 /* RAID-6: Don't support CHECK PARITY yet */
1363 if (failed == 0) {
1364 char *pagea;
1365 if (uptodate != disks)
1366 BUG();
1367 compute_parity(sh, CHECK_PARITY);
1368 uptodate--;
1369 pagea = page_address(sh->dev[pd_idx].page);
1370 if ((*(u32*)pagea) == 0 &&
1371 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1372 /* parity is correct (on disc, not in buffer any more) */
1373 set_bit(STRIPE_INSYNC, &sh->state);
1374 }
1375 }
1376#endif
1377 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1378 int failed_needupdate[2];
1379 struct r5dev *adev, *bdev;
1380
1381 if ( failed < 1 )
1382 failed_num[0] = pd_idx;
1383 if ( failed < 2 )
1384 failed_num[1] = (failed_num[0] == qd_idx) ? pd_idx : qd_idx;
1385 1400
1386 failed_needupdate[0] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[0]].flags); 1401 set_bit(STRIPE_HANDLE, &sh->state);
1387 failed_needupdate[1] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[1]].flags);
1388 1402
1389 PRINTK("sync: failed=%d num=%d,%d fnu=%u%u\n", 1403 BUG_ON(failed>2);
1390 failed, failed_num[0], failed_num[1], failed_needupdate[0], failed_needupdate[1]); 1404 BUG_ON(uptodate < disks);
1405 /* Want to check and possibly repair P and Q.
1406 * However there could be one 'failed' device, in which
1407 * case we can only check one of them, possibly using the
1408 * other to generate missing data
1409 */
1391 1410
1392#if 0 /* RAID-6: This code seems to require that CHECK_PARITY destroys the uptodateness of the parity */ 1411 /* If !tmp_page, we cannot do the calculations,
1393 /* should be able to compute the missing block(s) and write to spare */ 1412 * but as we have set STRIPE_HANDLE, we will soon be called
 1394 if ( failed_needupdate[0] ^ failed_needupdate[1] ) { 1413 * by handle_stripe with a tmp_page - just wait until then.
1395 if (uptodate+1 != disks) 1414 */
1396 BUG(); 1415 if (tmp_page) {
1397 compute_block_1(sh, failed_needupdate[0] ? failed_num[0] : failed_num[1]); 1416 if (failed == q_failed) {
1398 uptodate++; 1417 /* The only possible failed device holds 'Q', so it makes
1399 } else if ( failed_needupdate[0] & failed_needupdate[1] ) { 1418 * sense to check P (If anything else were failed, we would
1400 if (uptodate+2 != disks) 1419 * have used P to recreate it).
1401 BUG(); 1420 */
1402 compute_block_2(sh, failed_num[0], failed_num[1]); 1421 compute_block_1(sh, pd_idx, 1);
1403 uptodate += 2; 1422 if (!page_is_zero(sh->dev[pd_idx].page)) {
1423 compute_block_1(sh,pd_idx,0);
1424 update_p = 1;
1425 }
1426 }
1427 if (!q_failed && failed < 2) {
1428 /* q is not failed, and we didn't use it to generate
1429 * anything, so it makes sense to check it
1430 */
1431 memcpy(page_address(tmp_page),
1432 page_address(sh->dev[qd_idx].page),
1433 STRIPE_SIZE);
1434 compute_parity(sh, UPDATE_PARITY);
1435 if (memcmp(page_address(tmp_page),
1436 page_address(sh->dev[qd_idx].page),
1437 STRIPE_SIZE)!= 0) {
1438 clear_bit(STRIPE_INSYNC, &sh->state);
1439 update_q = 1;
1440 }
1441 }
1442 if (update_p || update_q) {
1443 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1444 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1445 /* don't try to repair!! */
1446 update_p = update_q = 0;
1404 } 1447 }
1405#else
1406 compute_block_2(sh, failed_num[0], failed_num[1]);
1407 uptodate += failed_needupdate[0] + failed_needupdate[1];
1408#endif
1409 1448
1410 if (uptodate != disks) 1449 /* now write out any block on a failed drive,
1411 BUG(); 1450 * or P or Q if they need it
1451 */
1412 1452
1413 PRINTK("Marking for sync stripe %llu blocks %d,%d\n", 1453 if (failed == 2) {
1414 (unsigned long long)sh->sector, failed_num[0], failed_num[1]); 1454 dev = &sh->dev[failed_num[1]];
1455 locked++;
1456 set_bit(R5_LOCKED, &dev->flags);
1457 set_bit(R5_Wantwrite, &dev->flags);
1458 }
1459 if (failed >= 1) {
1460 dev = &sh->dev[failed_num[0]];
1461 locked++;
1462 set_bit(R5_LOCKED, &dev->flags);
1463 set_bit(R5_Wantwrite, &dev->flags);
1464 }
1415 1465
1416 /**** FIX: Should we really do both of these unconditionally? ****/ 1466 if (update_p) {
1417 adev = &sh->dev[failed_num[0]]; 1467 dev = &sh->dev[pd_idx];
1418 locked += !test_bit(R5_LOCKED, &adev->flags); 1468 locked ++;
1419 set_bit(R5_LOCKED, &adev->flags); 1469 set_bit(R5_LOCKED, &dev->flags);
1420 set_bit(R5_Wantwrite, &adev->flags); 1470 set_bit(R5_Wantwrite, &dev->flags);
1421 bdev = &sh->dev[failed_num[1]]; 1471 }
1422 locked += !test_bit(R5_LOCKED, &bdev->flags); 1472 if (update_q) {
1423 set_bit(R5_LOCKED, &bdev->flags); 1473 dev = &sh->dev[qd_idx];
1474 locked++;
1475 set_bit(R5_LOCKED, &dev->flags);
1476 set_bit(R5_Wantwrite, &dev->flags);
1477 }
1424 clear_bit(STRIPE_DEGRADED, &sh->state); 1478 clear_bit(STRIPE_DEGRADED, &sh->state);
1425 set_bit(R5_Wantwrite, &bdev->flags);
1426 1479
1427 set_bit(STRIPE_INSYNC, &sh->state); 1480 set_bit(STRIPE_INSYNC, &sh->state);
1428 set_bit(R5_Syncio, &adev->flags);
1429 set_bit(R5_Syncio, &bdev->flags);
1430 } 1481 }
1431 } 1482 }
1483
1432 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1484 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1433 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 1485 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1434 clear_bit(STRIPE_SYNCING, &sh->state); 1486 clear_bit(STRIPE_SYNCING, &sh->state);
1435 } 1487 }
1436 1488
1489 /* If the failed drives are just a ReadError, then we might need
1490 * to progress the repair/check process
1491 */
1492 if (failed <= 2 && ! conf->mddev->ro)
1493 for (i=0; i<failed;i++) {
1494 dev = &sh->dev[failed_num[i]];
1495 if (test_bit(R5_ReadError, &dev->flags)
1496 && !test_bit(R5_LOCKED, &dev->flags)
1497 && test_bit(R5_UPTODATE, &dev->flags)
1498 ) {
1499 if (!test_bit(R5_ReWrite, &dev->flags)) {
1500 set_bit(R5_Wantwrite, &dev->flags);
1501 set_bit(R5_ReWrite, &dev->flags);
1502 set_bit(R5_LOCKED, &dev->flags);
1503 } else {
1504 /* let's read it back */
1505 set_bit(R5_Wantread, &dev->flags);
1506 set_bit(R5_LOCKED, &dev->flags);
1507 }
1508 }
1509 }
1437 spin_unlock(&sh->lock); 1510 spin_unlock(&sh->lock);
1438 1511
1439 while ((bi=return_bi)) { 1512 while ((bi=return_bi)) {
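
The new check/repair path in the hunk above is easiest to read as one helper. A condensed restatement, assuming the raid6main.c helpers compute_block_1() (whose new third argument means "don't zero the target first"), compute_parity() and page_is_zero():

	/* sketch: verify P and Q for a fully up-to-date stripe */
	static void check_pq(struct stripe_head *sh, struct page *tmp_page,
			     int pd_idx, int qd_idx, int *update_p, int *update_q)
	{
		/* P: XOR the data blocks into the current P without zeroing it
		 * first; a correct parity block cancels itself out to zero. */
		compute_block_1(sh, pd_idx, 1);
		if (!page_is_zero(sh->dev[pd_idx].page)) {
			compute_block_1(sh, pd_idx, 0);	/* regenerate a good P */
			*update_p = 1;
		}

		/* Q: stash the on-disk Q, recompute Q from the data blocks,
		 * then compare the two copies. */
		memcpy(page_address(tmp_page),
		       page_address(sh->dev[qd_idx].page), STRIPE_SIZE);
		compute_parity(sh, UPDATE_PARITY);
		if (memcmp(page_address(tmp_page),
			   page_address(sh->dev[qd_idx].page), STRIPE_SIZE) != 0)
			*update_q = 1;	/* dev[qd_idx].page now holds a good Q */
	}
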
@@ -1472,7 +1545,7 @@ static void handle_stripe(struct stripe_head *sh)
1472 rcu_read_unlock(); 1545 rcu_read_unlock();
1473 1546
1474 if (rdev) { 1547 if (rdev) {
1475 if (test_bit(R5_Syncio, &sh->dev[i].flags)) 1548 if (syncing)
1476 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1549 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1477 1550
1478 bi->bi_bdev = rdev->bdev; 1551 bi->bi_bdev = rdev->bdev;
@@ -1489,6 +1562,9 @@ static void handle_stripe(struct stripe_head *sh)
1489 bi->bi_io_vec[0].bv_offset = 0; 1562 bi->bi_io_vec[0].bv_offset = 0;
1490 bi->bi_size = STRIPE_SIZE; 1563 bi->bi_size = STRIPE_SIZE;
1491 bi->bi_next = NULL; 1564 bi->bi_next = NULL;
1565 if (rw == WRITE &&
1566 test_bit(R5_ReWrite, &sh->dev[i].flags))
1567 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1492 generic_make_request(bi); 1568 generic_make_request(bi);
1493 } else { 1569 } else {
1494 if (rw == 1) 1570 if (rw == 1)
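
The corrected_errors accounting above pairs with the two-step ReadError recovery added earlier in handle_stripe(). The state machine, written out on its own as a sketch:

	/* sketch: progress one device through rewrite-then-verify recovery */
	static void progress_read_error(struct r5dev *dev)
	{
		if (!test_bit(R5_ReadError, &dev->flags) ||
		    test_bit(R5_LOCKED, &dev->flags) ||
		    !test_bit(R5_UPTODATE, &dev->flags))
			return;		/* no error, or I/O still in flight */

		if (!test_bit(R5_ReWrite, &dev->flags)) {
			/* step 1: write the reconstructed block back */
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
		} else {
			/* step 2: read it back to confirm the sector is good */
			set_bit(R5_Wantread, &dev->flags);
		}
		set_bit(R5_LOCKED, &dev->flags);
	}
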
@@ -1664,7 +1740,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
1664 } 1740 }
1665 finish_wait(&conf->wait_for_overlap, &w); 1741 finish_wait(&conf->wait_for_overlap, &w);
1666 raid6_plug_device(conf); 1742 raid6_plug_device(conf);
1667 handle_stripe(sh); 1743 handle_stripe(sh, NULL);
1668 release_stripe(sh); 1744 release_stripe(sh);
1669 } else { 1745 } else {
1670 /* cannot get stripe for read-ahead, just give-up */ 1746 /* cannot get stripe for read-ahead, just give-up */
@@ -1728,6 +1804,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1728 return rv; 1804 return rv;
1729 } 1805 }
1730 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1806 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1807 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1731 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 1808 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1732 /* we can skip this block, and probably more */ 1809 /* we can skip this block, and probably more */
1733 sync_blocks /= STRIPE_SECTORS; 1810 sync_blocks /= STRIPE_SECTORS;
@@ -1765,7 +1842,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1765 clear_bit(STRIPE_INSYNC, &sh->state); 1842 clear_bit(STRIPE_INSYNC, &sh->state);
1766 spin_unlock(&sh->lock); 1843 spin_unlock(&sh->lock);
1767 1844
1768 handle_stripe(sh); 1845 handle_stripe(sh, NULL);
1769 release_stripe(sh); 1846 release_stripe(sh);
1770 1847
1771 return STRIPE_SECTORS; 1848 return STRIPE_SECTORS;
@@ -1821,7 +1898,7 @@ static void raid6d (mddev_t *mddev)
1821 spin_unlock_irq(&conf->device_lock); 1898 spin_unlock_irq(&conf->device_lock);
1822 1899
1823 handled++; 1900 handled++;
1824 handle_stripe(sh); 1901 handle_stripe(sh, conf->spare_page);
1825 release_stripe(sh); 1902 release_stripe(sh);
1826 1903
1827 spin_lock_irq(&conf->device_lock); 1904 spin_lock_irq(&conf->device_lock);
@@ -1848,17 +1925,19 @@ static int run(mddev_t *mddev)
1848 return -EIO; 1925 return -EIO;
1849 } 1926 }
1850 1927
1851 mddev->private = kmalloc (sizeof (raid6_conf_t) 1928 mddev->private = kzalloc(sizeof (raid6_conf_t)
1852 + mddev->raid_disks * sizeof(struct disk_info), 1929 + mddev->raid_disks * sizeof(struct disk_info),
1853 GFP_KERNEL); 1930 GFP_KERNEL);
1854 if ((conf = mddev->private) == NULL) 1931 if ((conf = mddev->private) == NULL)
1855 goto abort; 1932 goto abort;
1856 memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
1857 conf->mddev = mddev; 1933 conf->mddev = mddev;
1858 1934
1859 if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) 1935 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1936 goto abort;
1937
1938 conf->spare_page = alloc_page(GFP_KERNEL);
1939 if (!conf->spare_page)
1860 goto abort; 1940 goto abort;
1861 memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1862 1941
1863 spin_lock_init(&conf->device_lock); 1942 spin_lock_init(&conf->device_lock);
1864 init_waitqueue_head(&conf->wait_for_stripe); 1943 init_waitqueue_head(&conf->wait_for_stripe);
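
The allocation changes in run() follow the tree-wide kzalloc() conversion. The idiom, as a sketch using the raid6 types:

	#include <linux/slab.h>

	static raid6_conf_t *alloc_conf(int raid_disks)
	{
		size_t size = sizeof(raid6_conf_t)
			    + raid_disks * sizeof(struct disk_info);

		/* old: conf = kmalloc(size, GFP_KERNEL); memset(conf, 0, size); */
		return kzalloc(size, GFP_KERNEL);  /* allocate and zero in one call */
	}
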
@@ -1929,13 +2008,18 @@ static int run(mddev_t *mddev)
1929 goto abort; 2008 goto abort;
1930 } 2009 }
1931 2010
1932#if 0 /* FIX: For now */
1933 if (mddev->degraded > 0 && 2011 if (mddev->degraded > 0 &&
1934 mddev->recovery_cp != MaxSector) { 2012 mddev->recovery_cp != MaxSector) {
1935 printk(KERN_ERR "raid6: cannot start dirty degraded array for %s\n", mdname(mddev)); 2013 if (mddev->ok_start_degraded)
1936 goto abort; 2014 printk(KERN_WARNING "raid6: starting dirty degraded array:%s"
2015 "- data corruption possible.\n",
2016 mdname(mddev));
2017 else {
2018 printk(KERN_ERR "raid6: cannot start dirty degraded array"
2019 " for %s\n", mdname(mddev));
2020 goto abort;
2021 }
1937 } 2022 }
1938#endif
1939 2023
1940 { 2024 {
1941 mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6"); 2025 mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6");
@@ -1977,7 +2061,7 @@ static int run(mddev_t *mddev)
1977 */ 2061 */
1978 { 2062 {
1979 int stripe = (mddev->raid_disks-2) * mddev->chunk_size 2063 int stripe = (mddev->raid_disks-2) * mddev->chunk_size
1980 / PAGE_CACHE_SIZE; 2064 / PAGE_SIZE;
1981 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 2065 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1982 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 2066 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1983 } 2067 }
@@ -1985,18 +2069,14 @@ static int run(mddev_t *mddev)
1985 /* Ok, everything is just fine now */ 2069 /* Ok, everything is just fine now */
1986 mddev->array_size = mddev->size * (mddev->raid_disks - 2); 2070 mddev->array_size = mddev->size * (mddev->raid_disks - 2);
1987 2071
1988 if (mddev->bitmap)
1989 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1990
1991 mddev->queue->unplug_fn = raid6_unplug_device; 2072 mddev->queue->unplug_fn = raid6_unplug_device;
1992 mddev->queue->issue_flush_fn = raid6_issue_flush; 2073 mddev->queue->issue_flush_fn = raid6_issue_flush;
1993 return 0; 2074 return 0;
1994abort: 2075abort:
1995 if (conf) { 2076 if (conf) {
1996 print_raid6_conf(conf); 2077 print_raid6_conf(conf);
1997 if (conf->stripe_hashtbl) 2078 safe_put_page(conf->spare_page);
1998 free_pages((unsigned long) conf->stripe_hashtbl, 2079 kfree(conf->stripe_hashtbl);
1999 HASH_PAGES_ORDER);
2000 kfree(conf); 2080 kfree(conf);
2001 } 2081 }
2002 mddev->private = NULL; 2082 mddev->private = NULL;
@@ -2013,7 +2093,7 @@ static int stop (mddev_t *mddev)
2013 md_unregister_thread(mddev->thread); 2093 md_unregister_thread(mddev->thread);
2014 mddev->thread = NULL; 2094 mddev->thread = NULL;
2015 shrink_stripes(conf); 2095 shrink_stripes(conf);
2016 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); 2096 kfree(conf->stripe_hashtbl);
2017 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2097 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2018 kfree(conf); 2098 kfree(conf);
2019 mddev->private = NULL; 2099 mddev->private = NULL;
@@ -2040,12 +2120,13 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh)
2040static void printall (struct seq_file *seq, raid6_conf_t *conf) 2120static void printall (struct seq_file *seq, raid6_conf_t *conf)
2041{ 2121{
2042 struct stripe_head *sh; 2122 struct stripe_head *sh;
2123 struct hlist_node *hn;
2043 int i; 2124 int i;
2044 2125
2045 spin_lock_irq(&conf->device_lock); 2126 spin_lock_irq(&conf->device_lock);
2046 for (i = 0; i < NR_HASH; i++) { 2127 for (i = 0; i < NR_HASH; i++) {
2047 sh = conf->stripe_hashtbl[i]; 2128 sh = conf->stripe_hashtbl[i];
2048 for (; sh; sh = sh->hash_next) { 2129 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2049 if (sh->raid_conf != conf) 2130 if (sh->raid_conf != conf)
2050 continue; 2131 continue;
2051 print_sh(seq, sh); 2132 print_sh(seq, sh);
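
printall() now walks a real hlist instead of a hand-rolled hash_next chain. In this kernel hlist_for_each_entry() still takes an explicit hlist_node cursor, so the loop shape is (sketch):

	#include <linux/list.h>

	/* sketch: visit every stripe_head in the hash table */
	static void walk_stripe_hash(raid6_conf_t *conf)
	{
		struct stripe_head *sh;
		struct hlist_node *hn;
		int i;

		for (i = 0; i < NR_HASH; i++)
			hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
				/* per-entry work on sh goes here */
			}
	}
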
@@ -2223,17 +2304,12 @@ static void raid6_quiesce(mddev_t *mddev, int state)
2223 spin_unlock_irq(&conf->device_lock); 2304 spin_unlock_irq(&conf->device_lock);
2224 break; 2305 break;
2225 } 2306 }
2226 if (mddev->thread) {
2227 if (mddev->bitmap)
2228 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2229 else
2230 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2231 md_wakeup_thread(mddev->thread);
2232 }
2233} 2307}
2234static mdk_personality_t raid6_personality= 2308
2309static struct mdk_personality raid6_personality =
2235{ 2310{
2236 .name = "raid6", 2311 .name = "raid6",
2312 .level = 6,
2237 .owner = THIS_MODULE, 2313 .owner = THIS_MODULE,
2238 .make_request = make_request, 2314 .make_request = make_request,
2239 .run = run, 2315 .run = run,
@@ -2248,7 +2324,7 @@ static mdk_personality_t raid6_personality=
2248 .quiesce = raid6_quiesce, 2324 .quiesce = raid6_quiesce,
2249}; 2325};
2250 2326
2251static int __init raid6_init (void) 2327static int __init raid6_init(void)
2252{ 2328{
2253 int e; 2329 int e;
2254 2330
@@ -2256,15 +2332,17 @@ static int __init raid6_init (void)
2256 if ( e ) 2332 if ( e )
2257 return e; 2333 return e;
2258 2334
2259 return register_md_personality (RAID6, &raid6_personality); 2335 return register_md_personality(&raid6_personality);
2260} 2336}
2261 2337
2262static void raid6_exit (void) 2338static void raid6_exit (void)
2263{ 2339{
2264 unregister_md_personality (RAID6); 2340 unregister_md_personality(&raid6_personality);
2265} 2341}
2266 2342
2267module_init(raid6_init); 2343module_init(raid6_init);
2268module_exit(raid6_exit); 2344module_exit(raid6_exit);
2269MODULE_LICENSE("GPL"); 2345MODULE_LICENSE("GPL");
2270MODULE_ALIAS("md-personality-8"); /* RAID6 */ 2346MODULE_ALIAS("md-personality-8"); /* RAID6 */
2347MODULE_ALIAS("md-raid6");
2348MODULE_ALIAS("md-level-6");
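
Registration is now keyed by RAID level instead of a fixed personality slot, and the extra aliases let modprobe autoload the module as "md-level-6" or "md-raid6". Sketch of the shape (the remaining mdk_personality methods are elided here):

	static struct mdk_personality raid6_personality = {
		.name  = "raid6",
		.level = 6,		/* lookup key; replaces the old RAID6 slot number */
		.owner = THIS_MODULE,
		/* .make_request, .run, .stop, ... as in the full driver */
	};

	static int __init raid6_init(void)
	{
		return register_md_personality(&raid6_personality);
	}
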
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index ddf184f95d80..6861d408f1b3 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -170,16 +170,9 @@ static size_t cpia_read_nibble (struct parport *port,
170 /* Does the error line indicate end of data? */ 170 /* Does the error line indicate end of data? */
171 if (((i /*& 1*/) == 0) && 171 if (((i /*& 1*/) == 0) &&
172 (parport_read_status(port) & PARPORT_STATUS_ERROR)) { 172 (parport_read_status(port) & PARPORT_STATUS_ERROR)) {
173 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 173 DBG("%s: No more nibble data (%d bytes)\n",
174 DBG("%s: No more nibble data (%d bytes)\n", 174 port->name, i/2);
175 port->name, i/2); 175 goto end_of_data;
176
177 /* Go to reverse idle phase. */
178 parport_frob_control (port,
179 PARPORT_CONTROL_AUTOFD,
180 PARPORT_CONTROL_AUTOFD);
181 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
182 break;
183 } 176 }
184 177
185 /* Event 7: Set nAutoFd low. */ 178 /* Event 7: Set nAutoFd low. */
@@ -227,18 +220,21 @@ static size_t cpia_read_nibble (struct parport *port,
227 byte = nibble; 220 byte = nibble;
228 } 221 }
229 222
230 i /= 2; /* i is now in bytes */
231
232 if (i == len) { 223 if (i == len) {
233 /* Read the last nibble without checking data avail. */ 224 /* Read the last nibble without checking data avail. */
234 port = port->physport; 225 if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
235 if (parport_read_status (port) & PARPORT_STATUS_ERROR) 226 end_of_data:
236 port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 227 /* Go to reverse idle phase. */
228 parport_frob_control (port,
229 PARPORT_CONTROL_AUTOFD,
230 PARPORT_CONTROL_AUTOFD);
231 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
232 }
237 else 233 else
238 port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; 234 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
239 } 235 }
240 236
241 return i; 237 return i/2;
242} 238}
243 239
244/* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1) 240/* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1)
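
The cpia_read_nibble() change above is subtle: the counter i now stays in nibbles for the whole loop (so the i == len end-test keeps working), it is converted to bytes exactly once at the return, and both end-of-data paths share one reverse-idle transition. The counting half of that, as a self-contained sketch (read_one_nibble() is a hypothetical stand-in for the IEEE 1284 handshake):

	extern int read_one_nibble(void);	/* hypothetical: returns 0-15, or <0 at end of data */

	static size_t read_nibbles(unsigned char *buf, size_t len /* in nibbles */)
	{
		size_t i;

		for (i = 0; i < len; i++) {
			int nib = read_one_nibble();
			if (nib < 0)
				break;			/* end of data */
			if (i & 1)
				buf[i / 2] |= nib << 4;	/* high nibble completes the byte */
			else
				buf[i / 2] = nib;	/* low nibble starts a new byte */
		}
		return i / 2;	/* convert to bytes once, at the end */
	}
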
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 43a942a29c2e..fef677103880 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,18 @@ config I2O
24 24
25 If unsure, say N. 25 If unsure, say N.
26 26
27config I2O_LCT_NOTIFY_ON_CHANGES
28 bool "Enable LCT notification"
29 depends on I2O
30 default y
31 ---help---
 32 Only say N here if you have an I2O controller from SUN. The SUN
 33 firmware doesn't support LCT notification on changes. If this option
 34 is enabled on such a controller, the driver will hang in an endless
35 loop. On all other controllers say Y.
36
37 If unsure, say Y.
38
27config I2O_EXT_ADAPTEC 39config I2O_EXT_ADAPTEC
28 bool "Enable Adaptec extensions" 40 bool "Enable Adaptec extensions"
29 depends on I2O 41 depends on I2O
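
The new I2O_LCT_NOTIFY_ON_CHANGES option above is consumed with a plain compile-time guard (the matching hunk is in drivers/message/i2o/exec-osm.c further down): when it is off, the driver simply never re-arms LCT notification, which is what the SUN firmware cannot handle. Sketch:

	/* sketch: the only consumer of CONFIG_I2O_LCT_NOTIFY_ON_CHANGES */
	static void lct_modified(struct i2o_controller *c, u32 change_ind)
	{
	#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
		i2o_exec_lct_notify(c, change_ind);	/* ask the IOP to notify again */
	#endif
	}
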
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb3..ac06f10c54ec 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -17,7 +17,7 @@
17#include <linux/i2o.h> 17#include <linux/i2o.h>
18 18
19#define OSM_NAME "bus-osm" 19#define OSM_NAME "bus-osm"
20#define OSM_VERSION "$Rev$" 20#define OSM_VERSION "1.317"
21#define OSM_DESCRIPTION "I2O Bus Adapter OSM" 21#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
22 22
23static struct i2o_driver i2o_bus_driver; 23static struct i2o_driver i2o_bus_driver;
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
39 */ 39 */
40static int i2o_bus_scan(struct i2o_device *dev) 40static int i2o_bus_scan(struct i2o_device *dev)
41{ 41{
42 struct i2o_message __iomem *msg; 42 struct i2o_message *msg;
43 u32 m;
44 43
45 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 44 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
46 if (m == I2O_QUEUE_EMPTY) 45 if (IS_ERR(msg))
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 48 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
50 writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, 49 msg->u.head[1] =
51 &msg->u.head[1]); 50 cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
51 tid);
52 52
53 return i2o_msg_post_wait(dev->iop, m, 60); 53 return i2o_msg_post_wait(dev->iop, msg, 60);
54}; 54};
55 55
56/** 56/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
59 * 59 *
60 * Returns count. 60 * Returns count.
61 */ 61 */
62static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, 62static ssize_t i2o_bus_store_scan(struct device *d,
63 size_t count) 63 struct device_attribute *attr,
64 const char *buf, size_t count)
64{ 65{
65 struct i2o_device *i2o_dev = to_i2o_device(d); 66 struct i2o_device *i2o_dev = to_i2o_device(d);
66 int rc; 67 int rc;
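
This hunk shows the message-interface conversion that repeats through the rest of the I2O diff: messages are built in ordinary DMA-able memory with explicit cpu_to_le32() conversion instead of writel() through an __iomem window, and i2o_msg_get_wait() now returns a struct i2o_message pointer with ERR_PTR-style errors instead of a frame handle. The pattern in isolation, as a sketch:

	#include <linux/err.h>
	#include <linux/i2o.h>

	/* sketch: post a minimal five-word request and wait for the reply */
	static int post_simple_msg(struct i2o_controller *c, u32 tid, u32 cmd)
	{
		struct i2o_message *msg;

		msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
		if (IS_ERR(msg))
			return PTR_ERR(msg);	/* was: m == I2O_QUEUE_EMPTY */

		msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
		msg->u.head[1] = cpu_to_le32(cmd << 24 | HOST_TID << 12 | tid);

		return i2o_msg_post_wait(c, msg, 60);	/* timeout in seconds */
	}
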
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index 10432f665201..3bba7aa82e58 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -22,7 +22,7 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23 23
24#define OSM_NAME "config-osm" 24#define OSM_NAME "config-osm"
25#define OSM_VERSION "1.248" 25#define OSM_VERSION "1.323"
26#define OSM_DESCRIPTION "I2O Configuration OSM" 26#define OSM_DESCRIPTION "I2O Configuration OSM"
27 27
28/* access mode user rw */ 28/* access mode user rw */
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 9eefedb16211..90628562851e 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -14,8 +14,6 @@
14 */ 14 */
15 15
16/* Exec-OSM */ 16/* Exec-OSM */
17extern struct bus_type i2o_bus_type;
18
19extern struct i2o_driver i2o_exec_driver; 17extern struct i2o_driver i2o_exec_driver;
20extern int i2o_exec_lct_get(struct i2o_controller *); 18extern int i2o_exec_lct_get(struct i2o_controller *);
21 19
@@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void);
23extern void __exit i2o_exec_exit(void); 21extern void __exit i2o_exec_exit(void);
24 22
25/* driver */ 23/* driver */
24extern struct bus_type i2o_bus_type;
25
26extern int i2o_driver_dispatch(struct i2o_controller *, u32); 26extern int i2o_driver_dispatch(struct i2o_controller *, u32);
27 27
28extern int __init i2o_driver_init(void); 28extern int __init i2o_driver_init(void);
@@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void);
33extern void __exit i2o_pci_exit(void); 33extern void __exit i2o_pci_exit(void);
34 34
35/* device */ 35/* device */
36extern struct device_attribute i2o_device_attrs[];
37
36extern void i2o_device_remove(struct i2o_device *); 38extern void i2o_device_remove(struct i2o_device *);
37extern int i2o_device_parse_lct(struct i2o_controller *); 39extern int i2o_device_parse_lct(struct i2o_controller *);
38 40
39/* IOP */ 41/* IOP */
40extern struct i2o_controller *i2o_iop_alloc(void); 42extern struct i2o_controller *i2o_iop_alloc(void);
41extern void i2o_iop_free(struct i2o_controller *); 43
44/**
45 * i2o_iop_free - Free the i2o_controller struct
46 * @c: I2O controller to free
47 */
48static inline void i2o_iop_free(struct i2o_controller *c)
49{
50 i2o_pool_free(&c->in_msg);
51 kfree(c);
52}
42 53
43extern int i2o_iop_add(struct i2o_controller *); 54extern int i2o_iop_add(struct i2o_controller *);
44extern void i2o_iop_remove(struct i2o_controller *); 55extern void i2o_iop_remove(struct i2o_controller *);
45 56
46/* config */
47extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
48
49/* control registers relative to c->base */ 57/* control registers relative to c->base */
50#define I2O_IRQ_STATUS 0x30 58#define I2O_IRQ_STATUS 0x30
51#define I2O_IRQ_MASK 0x34 59#define I2O_IRQ_MASK 0x34
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae1..ee183053fa23 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, 35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
36 u32 type) 36 u32 type)
37{ 37{
38 struct i2o_message __iomem *msg; 38 struct i2o_message *msg;
39 u32 m;
40 39
41 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 40 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
42 if (m == I2O_QUEUE_EMPTY) 41 if (IS_ERR(msg))
43 return -ETIMEDOUT; 42 return PTR_ERR(msg);
44 43
45 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 44 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
46 writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); 45 msg->u.head[1] =
47 writel(type, &msg->body[0]); 46 cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
47 msg->body[0] = cpu_to_le32(type);
48 48
49 return i2o_msg_post_wait(dev->iop, m, 60); 49 return i2o_msg_post_wait(dev->iop, msg, 60);
50} 50}
51 51
52/** 52/**
@@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev)
123 return rc; 123 return rc;
124} 124}
125 125
126
127/** 126/**
128 * i2o_device_release - release the memory for a I2O device 127 * i2o_device_release - release the memory for a I2O device
129 * @dev: I2O device which should be released 128 * @dev: I2O device which should be released
@@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev)
140 kfree(i2o_dev); 139 kfree(i2o_dev);
141} 140}
142 141
143
144/** 142/**
145 * i2o_device_class_show_class_id - Displays class id of I2O device 143 * i2o_device_show_class_id - Displays class id of I2O device
146 * @cd: class device of which the class id should be displayed 144 * @dev: device of which the class id should be displayed
145 * @attr: pointer to device attribute
147 * @buf: buffer into which the class id should be printed 146 * @buf: buffer into which the class id should be printed
148 * 147 *
149 * Returns the number of bytes which are printed into the buffer. 148 * Returns the number of bytes which are printed into the buffer.
@@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev,
159} 158}
160 159
161/** 160/**
162 * i2o_device_class_show_tid - Displays TID of I2O device 161 * i2o_device_show_tid - Displays TID of I2O device
163 * @cd: class device of which the TID should be displayed 162 * @dev: device of which the TID should be displayed
164 * @buf: buffer into which the class id should be printed 163 * @attr: pointer to device attribute
164 * @buf: buffer into which the TID should be printed
165 * 165 *
166 * Returns the number of bytes which are printed into the buffer. 166 * Returns the number of bytes which are printed into the buffer.
167 */ 167 */
168static ssize_t i2o_device_show_tid(struct device *dev, 168static ssize_t i2o_device_show_tid(struct device *dev,
169 struct device_attribute *attr, 169 struct device_attribute *attr, char *buf)
170 char *buf)
171{ 170{
172 struct i2o_device *i2o_dev = to_i2o_device(dev); 171 struct i2o_device *i2o_dev = to_i2o_device(dev);
173 172
@@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev,
175 return strlen(buf) + 1; 174 return strlen(buf) + 1;
176} 175}
177 176
177/* I2O device attributes */
178struct device_attribute i2o_device_attrs[] = { 178struct device_attribute i2o_device_attrs[] = {
179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL), 179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL), 180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
@@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void)
193{ 193{
194 struct i2o_device *dev; 194 struct i2o_device *dev;
195 195
196 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 196 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
197 if (!dev) 197 if (!dev)
198 return ERR_PTR(-ENOMEM); 198 return ERR_PTR(-ENOMEM);
199 199
200 memset(dev, 0, sizeof(*dev));
201
202 INIT_LIST_HEAD(&dev->list); 200 INIT_LIST_HEAD(&dev->list);
203 init_MUTEX(&dev->lock); 201 init_MUTEX(&dev->lock);
204 202
@@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void)
209} 207}
210 208
211/** 209/**
212 * i2o_setup_sysfs_links - Adds attributes to the I2O device
213 * @cd: I2O class device which is added to the I2O device class
214 *
215 * This function get called when a I2O device is added to the class. It
216 * creates the attributes for each device and creates user/parent symlink
217 * if necessary.
218 *
219 * Returns 0 on success or negative error code on failure.
220 */
221static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev)
222{
223 struct i2o_controller *c = i2o_dev->iop;
224 struct i2o_device *tmp;
225
226 /* create user entries for this device */
227 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
228 if (tmp && tmp != i2o_dev)
229 sysfs_create_link(&i2o_dev->device.kobj,
230 &tmp->device.kobj, "user");
231
232 /* create user entries refering to this device */
233 list_for_each_entry(tmp, &c->devices, list)
234 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid &&
235 tmp != i2o_dev)
236 sysfs_create_link(&tmp->device.kobj,
237 &i2o_dev->device.kobj, "user");
238
239 /* create parent entries for this device */
240 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
241 if (tmp && tmp != i2o_dev)
242 sysfs_create_link(&i2o_dev->device.kobj,
243 &tmp->device.kobj, "parent");
244
245 /* create parent entries refering to this device */
246 list_for_each_entry(tmp, &c->devices, list)
247 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid &&
248 tmp != i2o_dev)
249 sysfs_create_link(&tmp->device.kobj,
250 &i2o_dev->device.kobj, "parent");
251}
252
253static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
254{
255 struct i2o_controller *c = i2o_dev->iop;
256 struct i2o_device *tmp;
257
258 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
259 sysfs_remove_link(&i2o_dev->device.kobj, "user");
260
261 list_for_each_entry(tmp, &c->devices, list) {
262 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
263 sysfs_remove_link(&tmp->device.kobj, "parent");
264 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
265 sysfs_remove_link(&tmp->device.kobj, "user");
266 }
267}
268
269
270
271/**
272 * i2o_device_add - allocate a new I2O device and add it to the IOP 210 * i2o_device_add - allocate a new I2O device and add it to the IOP
273 * @iop: I2O controller where the device is on 211 * @iop: I2O controller where the device is on
274 * @entry: LCT entry of the I2O device 212 * @entry: LCT entry of the I2O device
@@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
282static struct i2o_device *i2o_device_add(struct i2o_controller *c, 220static struct i2o_device *i2o_device_add(struct i2o_controller *c,
283 i2o_lct_entry * entry) 221 i2o_lct_entry * entry)
284{ 222{
285 struct i2o_device *dev; 223 struct i2o_device *i2o_dev, *tmp;
286 224
287 dev = i2o_device_alloc(); 225 i2o_dev = i2o_device_alloc();
288 if (IS_ERR(dev)) { 226 if (IS_ERR(i2o_dev)) {
289 printk(KERN_ERR "i2o: unable to allocate i2o device\n"); 227 printk(KERN_ERR "i2o: unable to allocate i2o device\n");
290 return dev; 228 return i2o_dev;
291 } 229 }
292 230
293 dev->lct_data = *entry; 231 i2o_dev->lct_data = *entry;
294 dev->iop = c;
295 232
296 snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, 233 snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
297 dev->lct_data.tid); 234 i2o_dev->lct_data.tid);
298 235
299 dev->device.parent = &c->device; 236 i2o_dev->iop = c;
237 i2o_dev->device.parent = &c->device;
300 238
301 device_register(&dev->device); 239 device_register(&i2o_dev->device);
302 240
303 list_add_tail(&dev->list, &c->devices); 241 list_add_tail(&i2o_dev->list, &c->devices);
304 242
305 i2o_setup_sysfs_links(dev); 243 /* create user entries for this device */
244 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
245 if (tmp && (tmp != i2o_dev))
246 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
247 "user");
306 248
307 i2o_driver_notify_device_add_all(dev); 249 /* create user entries refering to this device */
250 list_for_each_entry(tmp, &c->devices, list)
251 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
252 && (tmp != i2o_dev))
253 sysfs_create_link(&tmp->device.kobj,
254 &i2o_dev->device.kobj, "user");
308 255
309 pr_debug("i2o: device %s added\n", dev->device.bus_id); 256 /* create parent entries for this device */
257 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
258 if (tmp && (tmp != i2o_dev))
259 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
260 "parent");
310 261
311 return dev; 262 /* create parent entries refering to this device */
263 list_for_each_entry(tmp, &c->devices, list)
264 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
265 && (tmp != i2o_dev))
266 sysfs_create_link(&tmp->device.kobj,
267 &i2o_dev->device.kobj, "parent");
268
269 i2o_driver_notify_device_add_all(i2o_dev);
270
271 pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
272
273 return i2o_dev;
312} 274}
313 275
314/** 276/**
@@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
321 */ 283 */
322void i2o_device_remove(struct i2o_device *i2o_dev) 284void i2o_device_remove(struct i2o_device *i2o_dev)
323{ 285{
286 struct i2o_device *tmp;
287 struct i2o_controller *c = i2o_dev->iop;
288
324 i2o_driver_notify_device_remove_all(i2o_dev); 289 i2o_driver_notify_device_remove_all(i2o_dev);
325 i2o_remove_sysfs_links(i2o_dev); 290
291 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
292 sysfs_remove_link(&i2o_dev->device.kobj, "user");
293
294 list_for_each_entry(tmp, &c->devices, list) {
295 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
296 sysfs_remove_link(&tmp->device.kobj, "parent");
297 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
298 sysfs_remove_link(&tmp->device.kobj, "user");
299 }
326 list_del(&i2o_dev->list); 300 list_del(&i2o_dev->list);
301
327 device_unregister(&i2o_dev->device); 302 device_unregister(&i2o_dev->device);
328} 303}
329 304
@@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c)
341{ 316{
342 struct i2o_device *dev, *tmp; 317 struct i2o_device *dev, *tmp;
343 i2o_lct *lct; 318 i2o_lct *lct;
344 int i; 319 u32 *dlct = c->dlct.virt;
345 int max; 320 int max = 0, i = 0;
321 u16 table_size;
322 u32 buf;
346 323
347 down(&c->lct_lock); 324 down(&c->lct_lock);
348 325
349 kfree(c->lct); 326 kfree(c->lct);
350 327
351 lct = c->dlct.virt; 328 buf = le32_to_cpu(*dlct++);
329 table_size = buf & 0xffff;
352 330
353 c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); 331 lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
354 if (!c->lct) { 332 if (!lct) {
355 up(&c->lct_lock); 333 up(&c->lct_lock);
356 return -ENOMEM; 334 return -ENOMEM;
357 } 335 }
358 336
359 if (lct->table_size * 4 > c->dlct.len) { 337 lct->lct_ver = buf >> 28;
360 memcpy(c->lct, c->dlct.virt, c->dlct.len); 338 lct->boot_tid = buf >> 16 & 0xfff;
361 up(&c->lct_lock); 339 lct->table_size = table_size;
362 return -EAGAIN; 340 lct->change_ind = le32_to_cpu(*dlct++);
363 } 341 lct->iop_flags = le32_to_cpu(*dlct++);
364 342
365 memcpy(c->lct, c->dlct.virt, lct->table_size * 4); 343 table_size -= 3;
366
367 lct = c->lct;
368
369 max = (lct->table_size - 3) / 9;
370 344
371 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, 345 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
372 lct->table_size); 346 lct->table_size);
373 347
374 /* remove devices, which are not in the LCT anymore */ 348 while (table_size > 0) {
375 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 349 i2o_lct_entry *entry = &lct->lct_entry[max];
376 int found = 0; 350 int found = 0;
377 351
378 for (i = 0; i < max; i++) { 352 buf = le32_to_cpu(*dlct++);
379 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 353 entry->entry_size = buf & 0xffff;
354 entry->tid = buf >> 16 & 0xfff;
355
356 entry->change_ind = le32_to_cpu(*dlct++);
357 entry->device_flags = le32_to_cpu(*dlct++);
358
359 buf = le32_to_cpu(*dlct++);
360 entry->class_id = buf & 0xfff;
361 entry->version = buf >> 12 & 0xf;
362 entry->vendor_id = buf >> 16;
363
364 entry->sub_class = le32_to_cpu(*dlct++);
365
366 buf = le32_to_cpu(*dlct++);
367 entry->user_tid = buf & 0xfff;
368 entry->parent_tid = buf >> 12 & 0xfff;
369 entry->bios_info = buf >> 24;
370
371 memcpy(&entry->identity_tag, dlct, 8);
372 dlct += 2;
373
374 entry->event_capabilities = le32_to_cpu(*dlct++);
375
376 /* add new devices, which are new in the LCT */
377 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
378 if (entry->tid == dev->lct_data.tid) {
380 found = 1; 379 found = 1;
381 break; 380 break;
382 } 381 }
383 } 382 }
384 383
385 if (!found) 384 if (!found)
386 i2o_device_remove(dev); 385 i2o_device_add(c, entry);
386
387 table_size -= 9;
388 max++;
387 } 389 }
388 390
389 /* add new devices, which are new in the LCT */ 391 /* remove devices, which are not in the LCT anymore */
390 for (i = 0; i < max; i++) { 392 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
391 int found = 0; 393 int found = 0;
392 394
393 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 395 for (i = 0; i < max; i++) {
394 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 396 if (lct->lct_entry[i].tid == dev->lct_data.tid) {
395 found = 1; 397 found = 1;
396 break; 398 break;
@@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c)
398 } 400 }
399 401
400 if (!found) 402 if (!found)
401 i2o_device_add(c, &lct->lct_entry[i]); 403 i2o_device_remove(dev);
402 } 404 }
405
403 up(&c->lct_lock); 406 up(&c->lct_lock);
404 407
405 return 0; 408 return 0;
406} 409}
407 410
408
409/* 411/*
410 * Run time support routines 412 * Run time support routines
411 */ 413 */
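
The rewritten i2o_device_parse_lct() above stops trusting the raw buffer layout and unpacks each little-endian LCT entry field by field (nine 32-bit words per entry). The per-entry decode, factored out as a sketch:

	/* sketch: unpack one LCT entry from the le32 stream, return the next position */
	static u32 *decode_lct_entry(u32 *dlct, i2o_lct_entry *entry)
	{
		u32 buf;

		buf = le32_to_cpu(*dlct++);
		entry->entry_size = buf & 0xffff;
		entry->tid = buf >> 16 & 0xfff;

		entry->change_ind = le32_to_cpu(*dlct++);
		entry->device_flags = le32_to_cpu(*dlct++);

		buf = le32_to_cpu(*dlct++);
		entry->class_id = buf & 0xfff;
		entry->version = buf >> 12 & 0xf;
		entry->vendor_id = buf >> 16;

		entry->sub_class = le32_to_cpu(*dlct++);

		buf = le32_to_cpu(*dlct++);
		entry->user_tid = buf & 0xfff;
		entry->parent_tid = buf >> 12 & 0xfff;
		entry->bios_info = buf >> 24;

		memcpy(&entry->identity_tag, dlct, 8);	/* two words, copied raw */
		dlct += 2;

		entry->event_capabilities = le32_to_cpu(*dlct++);
		return dlct;
	}
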
@@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 421 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
420 */ 422 */
421int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, 423int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
422 int oplen, void *reslist, int reslen) 424 int oplen, void *reslist, int reslen)
423{ 425{
424 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
425 u32 m;
426 u32 *res32 = (u32 *) reslist;
427 u32 *restmp = (u32 *) reslist;
428 int len = 0;
429 int i = 0; 427 int i = 0;
430 int rc; 428 int rc;
431 struct i2o_dma res; 429 struct i2o_dma res;
@@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
437 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) 435 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
438 return -ENOMEM; 436 return -ENOMEM;
439 437
440 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 438 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
441 if (m == I2O_QUEUE_EMPTY) { 439 if (IS_ERR(msg)) {
442 i2o_dma_free(dev, &res); 440 i2o_dma_free(dev, &res);
443 return -ETIMEDOUT; 441 return PTR_ERR(msg);
444 } 442 }
445 443
446 i = 0; 444 i = 0;
447 writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, 445 msg->u.head[1] =
448 &msg->u.head[1]); 446 cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
449 writel(0, &msg->body[i++]); 447 msg->body[i++] = cpu_to_le32(0x00000000);
450 writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ 448 msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
451 memcpy_toio(&msg->body[i], oplist, oplen); 449 memcpy(&msg->body[i], oplist, oplen);
452 i += (oplen / 4 + (oplen % 4 ? 1 : 0)); 450 i += (oplen / 4 + (oplen % 4 ? 1 : 0));
453 writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ 451 msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
454 writel(res.phys, &msg->body[i++]); 452 msg->body[i++] = cpu_to_le32(res.phys);
455 453
456 writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | 454 msg->u.head[0] =
457 SGL_OFFSET_5, &msg->u.head[0]); 455 cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
456 SGL_OFFSET_5);
458 457
459 rc = i2o_msg_post_wait_mem(c, m, 10, &res); 458 rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
460 459
461 /* This only looks like a memory leak - don't "fix" it. */ 460 /* This only looks like a memory leak - don't "fix" it. */
462 if (rc == -ETIMEDOUT) 461 if (rc == -ETIMEDOUT)
@@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
465 memcpy(reslist, res.virt, res.len); 464 memcpy(reslist, res.virt, res.len);
466 i2o_dma_free(dev, &res); 465 i2o_dma_free(dev, &res);
467 466
468 /* Query failed */ 467 return rc;
469 if (rc)
470 return rc;
471 /*
472 * Calculate number of bytes of Result LIST
473 * We need to loop through each Result BLOCK and grab the length
474 */
475 restmp = res32 + 1;
476 len = 1;
477 for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
478 if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
479 printk(KERN_WARNING
480 "%s - Error:\n ErrorInfoSize = 0x%02x, "
481 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
482 (cmd ==
483 I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
484 "PARAMS_GET", res32[1] >> 24,
485 (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
486
487 /*
488 * If this is the only request,than we return an error
489 */
490 if ((res32[0] & 0x0000FFFF) == 1) {
491 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
492 }
493 }
494 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
495 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
496 }
497 return (len << 2); /* bytes used by result list */
498} 468}
499 469
500/* 470/*
@@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
503int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, 473int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
504 void *buf, int buflen) 474 void *buf, int buflen)
505{ 475{
506 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 476 u32 opblk[] = { cpu_to_le32(0x00000001),
477 cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
478 cpu_to_le32((s16) field << 16 | 0x00000001)
479 };
507 u8 *resblk; /* 8 bytes for header */ 480 u8 *resblk; /* 8 bytes for header */
508 int size; 481 int rc;
509
510 if (field == -1) /* whole group */
511 opblk[4] = -1;
512 482
513 resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC); 483 resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
514 if (!resblk) 484 if (!resblk)
515 return -ENOMEM; 485 return -ENOMEM;
516 486
517 size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, 487 rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
518 sizeof(opblk), resblk, buflen + 8); 488 sizeof(opblk), resblk, buflen + 8);
519 489
520 memcpy(buf, resblk + 8, buflen); /* cut off header */ 490 memcpy(buf, resblk + 8, buflen); /* cut off header */
521 491
522 kfree(resblk); 492 kfree(resblk);
523 493
524 if (size > buflen) 494 return rc;
525 return buflen;
526
527 return size;
528} 495}
529 496
530/* 497/*
@@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
534 * else return specific fields 501 * else return specific fields
535 * ibuf contains fieldindexes 502 * ibuf contains fieldindexes
536 * 503 *
537 * if oper == I2O_PARAMS_LIST_GET, get from specific rows 504 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
538 * if fieldcount == -1 return all fields 505 * if fieldcount == -1 return all fields
539 * ibuf contains rowcount, keyvalues 506 * ibuf contains rowcount, keyvalues
540 * else return specific fields 507 * else return specific fields
541 * fieldcount is # of fieldindexes 508 * fieldcount is # of fieldindexes
542 * ibuf contains fieldindexes, rowcount, keyvalues 509 * ibuf contains fieldindexes, rowcount, keyvalues
543 * 510 *
544 * You could also use directly function i2o_issue_params(). 511 * You could also use directly function i2o_issue_params().
545 */ 512 */
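
Note the changed contract in the hunks above: i2o_parm_issue() and i2o_parm_field_get() now return a status code (0 on success) rather than a byte count, and fetched fields stay little-endian. A typical caller after this patch might therefore look like this sketch:

	/* sketch: read one 16-bit field from parameter group 0 */
	static void show_vendor(struct i2o_device *i2o_dev)
	{
		u16 id;

		if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &id, sizeof(id)))
			printk(KERN_INFO "vendor id: 0x%04x\n", le16_to_cpu(id));
	}

This matches the sysfs hunks in exec-osm.c below, which flip their tests from if (rc) to if (!rc) for the same reason.
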
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0fb9c4e2ad4c..64130227574f 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
61}; 61};
62 62
63/* I2O bus type */ 63/* I2O bus type */
64extern struct device_attribute i2o_device_attrs[];
65
66struct bus_type i2o_bus_type = { 64struct bus_type i2o_bus_type = {
67 .name = "i2o", 65 .name = "i2o",
68 .match = i2o_bus_match, 66 .match = i2o_bus_match,
69 .dev_attrs = i2o_device_attrs, 67 .dev_attrs = i2o_device_attrs
70}; 68};
71 69
72/** 70/**
@@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
219 /* cut of header from message size (in 32-bit words) */ 217 /* cut of header from message size (in 32-bit words) */
220 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; 218 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
221 219
222 evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); 220 evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
223 if (!evt) 221 if (!evt)
224 return -ENOMEM; 222 return -ENOMEM;
225 223
226 evt->size = size; 224 evt->size = size;
227 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); 225 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
228 evt->event_indicator = le32_to_cpu(msg->body[0]); 226 evt->event_indicator = le32_to_cpu(msg->body[0]);
229 memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); 227 memcpy(&evt->data, &msg->body[1], size * 4);
230 228
231 list_for_each_entry_safe(dev, tmp, &c->devices, list) 229 list_for_each_entry_safe(dev, tmp, &c->devices, list)
232 if (dev->lct_data.tid == tid) { 230 if (dev->lct_data.tid == tid) {
@@ -349,12 +347,10 @@ int __init i2o_driver_init(void)
349 osm_info("max drivers = %d\n", i2o_max_drivers); 347 osm_info("max drivers = %d\n", i2o_max_drivers);
350 348
351 i2o_drivers = 349 i2o_drivers =
352 kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); 350 kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
353 if (!i2o_drivers) 351 if (!i2o_drivers)
354 return -ENOMEM; 352 return -ENOMEM;
355 353
356 memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
357
358 rc = bus_register(&i2o_bus_type); 354 rc = bus_register(&i2o_bus_type);
359 355
360 if (rc < 0) 356 if (rc < 0)
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b0..9bb9859f6dfe 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -33,7 +33,7 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ 36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
37#include <asm/param.h> /* HZ */ 37#include <asm/param.h> /* HZ */
38#include "core.h" 38#include "core.h"
39 39
@@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
75{ 75{
76 struct i2o_exec_wait *wait; 76 struct i2o_exec_wait *wait;
77 77
78 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 78 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
79 if (!wait) 79 if (!wait)
80 return ERR_PTR(-ENOMEM); 80 return NULL;
81
82 memset(wait, 0, sizeof(*wait));
83 81
84 INIT_LIST_HEAD(&wait->list); 82 INIT_LIST_HEAD(&wait->list);
85 83
@@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
114 * Returns 0 on success, negative error code on timeout or positive error 112 * Returns 0 on success, negative error code on timeout or positive error
115 * code from reply. 113 * code from reply.
116 */ 114 */
117int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long 115int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
118 timeout, struct i2o_dma *dma) 116 unsigned long timeout, struct i2o_dma *dma)
119{ 117{
120 DECLARE_WAIT_QUEUE_HEAD(wq); 118 DECLARE_WAIT_QUEUE_HEAD(wq);
121 struct i2o_exec_wait *wait; 119 struct i2o_exec_wait *wait;
122 static u32 tcntxt = 0x80000000; 120 static u32 tcntxt = 0x80000000;
123 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
124 int rc = 0; 121 int rc = 0;
125 122
126 wait = i2o_exec_wait_alloc(); 123 wait = i2o_exec_wait_alloc();
@@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
138 * We will only use transaction contexts >= 0x80000000 for POST WAIT, 135 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
139 * so we could find a POST WAIT reply easier in the reply handler. 136 * so we could find a POST WAIT reply easier in the reply handler.
140 */ 137 */
141 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 138 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
142 wait->tcntxt = tcntxt++; 139 wait->tcntxt = tcntxt++;
143 writel(wait->tcntxt, &msg->u.s.tcntxt); 140 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
144 141
145 /* 142 /*
146 * Post the message to the controller. At some point later it will 143 * Post the message to the controller. At some point later it will
147 * return. If we time out before it returns then complete will be zero. 144 * return. If we time out before it returns then complete will be zero.
148 */ 145 */
149 i2o_msg_post(c, m); 146 i2o_msg_post(c, msg);
150 147
151 if (!wait->complete) { 148 if (!wait->complete) {
152 wait->wq = &wq; 149 wait->wq = &wq;
@@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
266 * 263 *
267 * Returns number of bytes printed into buffer. 264 * Returns number of bytes printed into buffer.
268 */ 265 */
269static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) 266static ssize_t i2o_exec_show_vendor_id(struct device *d,
267 struct device_attribute *attr, char *buf)
270{ 268{
271 struct i2o_device *dev = to_i2o_device(d); 269 struct i2o_device *dev = to_i2o_device(d);
272 u16 id; 270 u16 id;
273 271
274 if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { 272 if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
275 sprintf(buf, "0x%04x", id); 273 sprintf(buf, "0x%04x", le16_to_cpu(id));
276 return strlen(buf) + 1; 274 return strlen(buf) + 1;
277 } 275 }
278 276
@@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
286 * 284 *
287 * Returns number of bytes printed into buffer. 285 * Returns number of bytes printed into buffer.
288 */ 286 */
289static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) 287static ssize_t i2o_exec_show_product_id(struct device *d,
288 struct device_attribute *attr,
289 char *buf)
290{ 290{
291 struct i2o_device *dev = to_i2o_device(d); 291 struct i2o_device *dev = to_i2o_device(d);
292 u16 id; 292 u16 id;
293 293
294 if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { 294 if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
295 sprintf(buf, "0x%04x", id); 295 sprintf(buf, "0x%04x", le16_to_cpu(id));
296 return strlen(buf) + 1; 296 return strlen(buf) + 1;
297 } 297 }
298 298
@@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
362 if (i2o_device_parse_lct(c) != -EAGAIN) 362 if (i2o_device_parse_lct(c) != -EAGAIN)
363 change_ind = c->lct->change_ind + 1; 363 change_ind = c->lct->change_ind + 1;
364 364
365#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
365 i2o_exec_lct_notify(c, change_ind); 366 i2o_exec_lct_notify(c, change_ind);
367#endif
366}; 368};
367 369
368/** 370/**
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
385 u32 context; 387 u32 context;
386 388
387 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { 389 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
390 struct i2o_message __iomem *pmsg;
391 u32 pm;
392
388 /* 393 /*
389 * If Fail bit is set we must take the transaction context of 394 * If Fail bit is set we must take the transaction context of
390 * the preserved message to find the right request again. 395 * the preserved message to find the right request again.
391 */ 396 */
392 struct i2o_message __iomem *pmsg;
393 u32 pm;
394 397
395 pm = le32_to_cpu(msg->body[3]); 398 pm = le32_to_cpu(msg->body[3]);
396
397 pmsg = i2o_msg_in_to_virt(c, pm); 399 pmsg = i2o_msg_in_to_virt(c, pm);
400 context = readl(&pmsg->u.s.tcntxt);
398 401
399 i2o_report_status(KERN_INFO, "i2o_core", msg); 402 i2o_report_status(KERN_INFO, "i2o_core", msg);
400 403
401 context = readl(&pmsg->u.s.tcntxt);
402
403 /* Release the preserved msg */ 404 /* Release the preserved msg */
404 i2o_msg_nop(c, pm); 405 i2o_msg_nop_mfa(c, pm);
405 } else 406 } else
406 context = le32_to_cpu(msg->u.s.tcntxt); 407 context = le32_to_cpu(msg->u.s.tcntxt);
407 408
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
462 */ 463 */
463int i2o_exec_lct_get(struct i2o_controller *c) 464int i2o_exec_lct_get(struct i2o_controller *c)
464{ 465{
465 struct i2o_message __iomem *msg; 466 struct i2o_message *msg;
466 u32 m;
467 int i = 0; 467 int i = 0;
468 int rc = -EAGAIN; 468 int rc = -EAGAIN;
469 469
470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { 470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
471 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 471 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
472 if (m == I2O_QUEUE_EMPTY) 472 if (IS_ERR(msg))
473 return -ETIMEDOUT; 473 return PTR_ERR(msg);
474 474
475 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 475 msg->u.head[0] =
476 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 476 cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
477 &msg->u.head[1]); 477 msg->u.head[1] =
478 writel(0xffffffff, &msg->body[0]); 478 cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
479 writel(0x00000000, &msg->body[1]); 479 ADAPTER_TID);
480 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 480 msg->body[0] = cpu_to_le32(0xffffffff);
481 writel(c->dlct.phys, &msg->body[3]); 481 msg->body[1] = cpu_to_le32(0x00000000);
482 482 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
483 rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); 483 msg->body[3] = cpu_to_le32(c->dlct.phys);
484
485 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
484 if (rc < 0) 486 if (rc < 0)
485 break; 487 break;
486 488
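This hunk is the pattern the rest of the patch repeats: i2o_msg_get_wait() now returns a struct i2o_message * (or an ERR_PTR on timeout) instead of an MFA handle plus an __iomem out-parameter, and message frames are filled in ordinary kernel memory with cpu_to_le32() stores rather than writel() MMIO writes. Side by side, as a sketch with placeholder values:

    #include <linux/err.h>

    /* old idiom:
     *     m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
     *     if (m == I2O_QUEUE_EMPTY)
     *             return -ETIMEDOUT;
     *     writel(value, &msg->body[0]);
     *     rc = i2o_msg_post_wait(c, m, timeout);
     */

    /* new idiom: */
    msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
    if (IS_ERR(msg))
            return PTR_ERR(msg);       /* e.g. -ETIMEDOUT */

    msg->body[0] = cpu_to_le32(value);
    rc = i2o_msg_post_wait(c, msg, timeout);

The explicit cpu_to_le32()/le32_to_cpu() pairs also make the little-endian wire format visible on big-endian hosts, where writel() used to do the swapping implicitly.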
@@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
506{ 508{
507 i2o_status_block *sb = c->status_block.virt; 509 i2o_status_block *sb = c->status_block.virt;
508 struct device *dev; 510 struct device *dev;
509 struct i2o_message __iomem *msg; 511 struct i2o_message *msg;
510 u32 m;
511 512
512 dev = &c->pdev->dev; 513 dev = &c->pdev->dev;
513 514
514 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) 515 if (i2o_dma_realloc
516 (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
515 return -ENOMEM; 517 return -ENOMEM;
516 518
517 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 519 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
518 if (m == I2O_QUEUE_EMPTY) 520 if (IS_ERR(msg))
519 return -ETIMEDOUT; 521 return PTR_ERR(msg);
520 522
521 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 523 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
522 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 524 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
523 &msg->u.head[1]); 525 ADAPTER_TID);
524 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 526 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
525 writel(0, &msg->u.s.tcntxt); /* FIXME */ 527 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
526 writel(0xffffffff, &msg->body[0]); 528 msg->body[0] = cpu_to_le32(0xffffffff);
527 writel(change_ind, &msg->body[1]); 529 msg->body[1] = cpu_to_le32(change_ind);
528 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 530 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
529 writel(c->dlct.phys, &msg->body[3]); 531 msg->body[3] = cpu_to_le32(c->dlct.phys);
530 532
531 i2o_msg_post(c, m); 533 i2o_msg_post(c, msg);
532 534
533 return 0; 535 return 0;
534}; 536};
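The added le32_to_cpu() around sb->expected_lct_size is needed because the status block is DMA-written by the IOP in little-endian wire order; on little-endian hosts it is a no-op, on big-endian hosts it fixes the reallocation size. Without the Lindent line break, the call is simply:

    if (i2o_dma_realloc(dev, &c->dlct,
                        le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
            return -ENOMEM;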
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5bafdd3..5b1febed3133 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -59,10 +59,12 @@
59#include <linux/blkdev.h> 59#include <linux/blkdev.h>
60#include <linux/hdreg.h> 60#include <linux/hdreg.h>
61 61
62#include <scsi/scsi.h>
63
62#include "i2o_block.h" 64#include "i2o_block.h"
63 65
64#define OSM_NAME "block-osm" 66#define OSM_NAME "block-osm"
65#define OSM_VERSION "1.287" 67#define OSM_VERSION "1.325"
66#define OSM_DESCRIPTION "I2O Block Device OSM" 68#define OSM_DESCRIPTION "I2O Block Device OSM"
67 69
68static struct i2o_driver i2o_block_driver; 70static struct i2o_driver i2o_block_driver;
@@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev)
130 */ 132 */
131static int i2o_block_device_flush(struct i2o_device *dev) 133static int i2o_block_device_flush(struct i2o_device *dev)
132{ 134{
133 struct i2o_message __iomem *msg; 135 struct i2o_message *msg;
134 u32 m;
135 136
136 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 137 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
137 if (m == I2O_QUEUE_EMPTY) 138 if (IS_ERR(msg))
138 return -ETIMEDOUT; 139 return PTR_ERR(msg);
139 140
140 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 141 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
141 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, 142 msg->u.head[1] =
142 &msg->u.head[1]); 143 cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
143 writel(60 << 16, &msg->body[0]); 144 lct_data.tid);
145 msg->body[0] = cpu_to_le32(60 << 16);
144 osm_debug("Flushing...\n"); 146 osm_debug("Flushing...\n");
145 147
146 return i2o_msg_post_wait(dev->iop, m, 60); 148 return i2o_msg_post_wait(dev->iop, msg, 60);
147}; 149};
148 150
149/** 151/**
@@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
181 */ 183 */
182static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) 184static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
183{ 185{
184 struct i2o_message __iomem *msg; 186 struct i2o_message *msg;
185 u32 m; 187
186 188 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
187 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 189 if (IS_ERR(msg))
188 if (m == I2O_QUEUE_EMPTY) 190 return PTR_ERR(msg);
189 return -ETIMEDOUT; 191
190 192 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
191 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 193 msg->u.head[1] =
192 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, 194 cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
193 &msg->u.head[1]); 195 lct_data.tid);
194 writel(-1, &msg->body[0]); 196 msg->body[0] = cpu_to_le32(-1);
195 writel(0, &msg->body[1]); 197 msg->body[1] = cpu_to_le32(0x00000000);
196 osm_debug("Mounting...\n"); 198 osm_debug("Mounting...\n");
197 199
198 return i2o_msg_post_wait(dev->iop, m, 2); 200 return i2o_msg_post_wait(dev->iop, msg, 2);
199}; 201};
200 202
201/** 203/**
@@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
210 */ 212 */
211static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) 213static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
212{ 214{
213 struct i2o_message __iomem *msg; 215 struct i2o_message *msg;
214 u32 m;
215 216
216 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
217	if (m == I2O_QUEUE_EMPTY)	218	if (IS_ERR(msg))
218 return -ETIMEDOUT; 219 return PTR_ERR(msg);
219 220
220 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
221 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 222 msg->u.head[1] =
222 &msg->u.head[1]); 223 cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
223 writel(-1, &msg->body[0]); 224 lct_data.tid);
225 msg->body[0] = cpu_to_le32(-1);
224 osm_debug("Locking...\n"); 226 osm_debug("Locking...\n");
225 227
226 return i2o_msg_post_wait(dev->iop, m, 2); 228 return i2o_msg_post_wait(dev->iop, msg, 2);
227}; 229};
228 230
229/** 231/**
@@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
238 */ 240 */
239static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) 241static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
240{ 242{
241 struct i2o_message __iomem *msg; 243 struct i2o_message *msg;
242 u32 m;
243 244
244 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 245 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
245 if (m == I2O_QUEUE_EMPTY) 246 if (IS_ERR(msg))
246 return -ETIMEDOUT; 247 return PTR_ERR(msg);
247 248
248 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 249 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
249 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 250 msg->u.head[1] =
250 &msg->u.head[1]); 251 cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
251 writel(media_id, &msg->body[0]); 252 lct_data.tid);
253 msg->body[0] = cpu_to_le32(media_id);
252 osm_debug("Unlocking...\n"); 254 osm_debug("Unlocking...\n");
253 255
254 return i2o_msg_post_wait(dev->iop, m, 2); 256 return i2o_msg_post_wait(dev->iop, msg, 2);
255}; 257};
256 258
257/** 259/**
@@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
267{ 269{
268 struct i2o_device *i2o_dev = dev->i2o_dev; 270 struct i2o_device *i2o_dev = dev->i2o_dev;
269 struct i2o_controller *c = i2o_dev->iop; 271 struct i2o_controller *c = i2o_dev->iop;
270 struct i2o_message __iomem *msg; 272 struct i2o_message *msg;
271 u32 m;
272 int rc; 273 int rc;
273 274
274 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 275 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
275 if (m == I2O_QUEUE_EMPTY) 276 if (IS_ERR(msg))
276 return -ETIMEDOUT; 277 return PTR_ERR(msg);
277 278
278 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 279 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
279 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. 280 msg->u.head[1] =
280 tid, &msg->u.head[1]); 281 cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
281 writel(op << 24, &msg->body[0]); 282 lct_data.tid);
283 msg->body[0] = cpu_to_le32(op << 24);
282 osm_debug("Power...\n"); 284 osm_debug("Power...\n");
283 285
284 rc = i2o_msg_post_wait(c, m, 60); 286 rc = i2o_msg_post_wait(c, msg, 60);
285 if (!rc) 287 if (!rc)
286 dev->power = op; 288 dev->power = op;
287 289
@@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
331 */ 333 */
332static inline int i2o_block_sglist_alloc(struct i2o_controller *c, 334static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
333 struct i2o_block_request *ireq, 335 struct i2o_block_request *ireq,
334 u32 __iomem ** mptr) 336 u32 ** mptr)
335{ 337{
336 int nents; 338 int nents;
337 enum dma_data_direction direction; 339 enum dma_data_direction direction;
@@ -466,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
466 468
467 spin_lock_irqsave(q->queue_lock, flags); 469 spin_lock_irqsave(q->queue_lock, flags);
468 470
469 end_that_request_last(req); 471 end_that_request_last(req, uptodate);
470 472
471 if (likely(dev)) { 473 if (likely(dev)) {
472 dev->open_queue_depth--; 474 dev->open_queue_depth--;
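end_that_request_last() gained an uptodate argument in this merge window (a block-layer change visible elsewhere in the merge), so the completion status is now passed explicitly instead of being implied. The call site, with the queue locking it requires:

    unsigned long flags;

    spin_lock_irqsave(q->queue_lock, flags);
    /* second argument is the new explicit success/failure status */
    end_that_request_last(req, uptodate);
    spin_unlock_irqrestore(q->queue_lock, flags);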
@@ -745,10 +747,9 @@ static int i2o_block_transfer(struct request *req)
745 struct i2o_block_device *dev = req->rq_disk->private_data; 747 struct i2o_block_device *dev = req->rq_disk->private_data;
746 struct i2o_controller *c; 748 struct i2o_controller *c;
747 int tid = dev->i2o_dev->lct_data.tid; 749 int tid = dev->i2o_dev->lct_data.tid;
748 struct i2o_message __iomem *msg; 750 struct i2o_message *msg;
749 u32 __iomem *mptr; 751 u32 *mptr;
750 struct i2o_block_request *ireq = req->special; 752 struct i2o_block_request *ireq = req->special;
751 u32 m;
752 u32 tcntxt; 753 u32 tcntxt;
753 u32 sgl_offset = SGL_OFFSET_8; 754 u32 sgl_offset = SGL_OFFSET_8;
754 u32 ctl_flags = 0x00000000; 755 u32 ctl_flags = 0x00000000;
@@ -763,9 +764,9 @@ static int i2o_block_transfer(struct request *req)
763 764
764 c = dev->i2o_dev->iop; 765 c = dev->i2o_dev->iop;
765 766
766 m = i2o_msg_get(c, &msg); 767 msg = i2o_msg_get(c);
767 if (m == I2O_QUEUE_EMPTY) { 768 if (IS_ERR(msg)) {
768 rc = -EBUSY; 769 rc = PTR_ERR(msg);
769 goto exit; 770 goto exit;
770 } 771 }
771 772
@@ -775,8 +776,8 @@ static int i2o_block_transfer(struct request *req)
775 goto nop_msg; 776 goto nop_msg;
776 } 777 }
777 778
778 writel(i2o_block_driver.context, &msg->u.s.icntxt); 779 msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
779 writel(tcntxt, &msg->u.s.tcntxt); 780 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
780 781
781 mptr = &msg->body[0]; 782 mptr = &msg->body[0];
782 783
@@ -834,11 +835,11 @@ static int i2o_block_transfer(struct request *req)
834 835
835 sgl_offset = SGL_OFFSET_12; 836 sgl_offset = SGL_OFFSET_12;
836 837
837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 838 msg->u.head[1] =
838 &msg->u.head[1]); 839 cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
839 840
840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 841 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
841 writel(tid, mptr++); 842 *mptr++ = cpu_to_le32(tid);
842 843
843 /* 844 /*
844 * ENABLE_DISCONNECT 845 * ENABLE_DISCONNECT
@@ -846,29 +847,31 @@ static int i2o_block_transfer(struct request *req)
846 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 847 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
847 */ 848 */
848 if (rq_data_dir(req) == READ) { 849 if (rq_data_dir(req) == READ) {
849 cmd[0] = 0x28; 850 cmd[0] = READ_10;
850 scsi_flags = 0x60a0000a; 851 scsi_flags = 0x60a0000a;
851 } else { 852 } else {
852 cmd[0] = 0x2A; 853 cmd[0] = WRITE_10;
853 scsi_flags = 0xa0a0000a; 854 scsi_flags = 0xa0a0000a;
854 } 855 }
855 856
856 writel(scsi_flags, mptr++); 857 *mptr++ = cpu_to_le32(scsi_flags);
857 858
858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 859 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 860 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
860 861
861 memcpy_toio(mptr, cmd, 10); 862 memcpy(mptr, cmd, 10);
862 mptr += 4; 863 mptr += 4;
863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 864 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
864 } else 865 } else
865#endif 866#endif
866 { 867 {
867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 868 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
868 writel(ctl_flags, mptr++); 869 *mptr++ = cpu_to_le32(ctl_flags);
869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 870 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 871 *mptr++ =
871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 872 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
873 *mptr++ =
874 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
872 } 875 }
873 876
874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 877 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
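Replacing the bare 0x28/0x2A opcodes with READ_10/WRITE_10 from <scsi/scsi.h> (the include added at the top of this file) makes the Adaptec pass-through branch read as a standard 10-byte CDB build: big-endian 32-bit LBA in bytes 2-5, big-endian 16-bit block count in bytes 7-8. A sketch of that layout with the casts spelled out:

    #include <scsi/scsi.h>      /* READ_10, WRITE_10 */

    u8 cmd[16];

    cmd[0] = (rq_data_dir(req) == READ) ? READ_10 : WRITE_10;
    /* bytes 2..5: logical block address, big-endian on the wire */
    *((u32 *) &cmd[2]) = cpu_to_be32(req->sector * hwsec);
    /* bytes 7..8: transfer length in blocks, big-endian */
    *((u16 *) &cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);

    /* the frame is plain memory now, so memcpy() replaces memcpy_toio() */
    memcpy(mptr, cmd, 10);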
@@ -876,13 +879,13 @@ static int i2o_block_transfer(struct request *req)
876 goto context_remove; 879 goto context_remove;
877 } 880 }
878 881
879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 882 msg->u.head[0] =
880 sgl_offset, &msg->u.head[0]); 883 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
881 884
882 list_add_tail(&ireq->queue, &dev->open_queue); 885 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++; 886 dev->open_queue_depth++;
884 887
885 i2o_msg_post(c, m); 888 i2o_msg_post(c, msg);
886 889
887 return 0; 890 return 0;
888 891
@@ -890,7 +893,7 @@ static int i2o_block_transfer(struct request *req)
890 i2o_cntxt_list_remove(c, req); 893 i2o_cntxt_list_remove(c, req);
891 894
892 nop_msg: 895 nop_msg:
893 i2o_msg_nop(c, m); 896 i2o_msg_nop(c, msg);
894 897
895 exit: 898 exit:
896 return rc; 899 return rc;
@@ -978,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
978 struct request_queue *queue; 981 struct request_queue *queue;
979 int rc; 982 int rc;
980 983
981 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 984 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
982 if (!dev) { 985 if (!dev) {
983 osm_err("Insufficient memory to allocate I2O Block disk.\n"); 986 osm_err("Insufficient memory to allocate I2O Block disk.\n");
984 rc = -ENOMEM; 987 rc = -ENOMEM;
985 goto exit; 988 goto exit;
986 } 989 }
987 memset(dev, 0, sizeof(*dev));
988 990
989 INIT_LIST_HEAD(&dev->open_queue); 991 INIT_LIST_HEAD(&dev->open_queue);
990 spin_lock_init(&dev->lock); 992 spin_lock_init(&dev->lock);
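kzalloc(), merged shortly before this window, folds the kmalloc()+memset() pair into a single call; the ioctl reply buffers in i2o_config.c below get the same treatment. The idiom:

    /* before:
     *     dev = kmalloc(sizeof(*dev), GFP_KERNEL);
     *     ...
     *     memset(dev, 0, sizeof(*dev));
     * after, with no window where the fields are uninitialized:
     */
    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    if (!dev)
            return -ENOMEM;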
@@ -1049,8 +1051,8 @@ static int i2o_block_probe(struct device *dev)
1049 int rc; 1051 int rc;
1050 u64 size; 1052 u64 size;
1051 u32 blocksize; 1053 u32 blocksize;
1052 u32 flags, status;
1053 u16 body_size = 4; 1054 u16 body_size = 4;
1055 u16 power;
1054 unsigned short max_sectors; 1056 unsigned short max_sectors;
1055 1057
1056#ifdef CONFIG_I2O_EXT_ADAPTEC 1058#ifdef CONFIG_I2O_EXT_ADAPTEC
@@ -1108,22 +1110,20 @@ static int i2o_block_probe(struct device *dev)
1108 * Ask for the current media data. If that isn't supported 1110 * Ask for the current media data. If that isn't supported
1109 * then we ask for the device capacity data 1111 * then we ask for the device capacity data
1110 */ 1112 */
1111 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1113 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1112 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1114 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1113 blk_queue_hardsect_size(queue, blocksize); 1115 blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
1114 } else 1116 } else
1115 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1117 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1116 1118
1117 if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || 1119 if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
1118 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1120 !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
1119 set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); 1121 set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
1120 } else 1122 } else
1121 osm_warn("could not get size of %s\n", gd->disk_name); 1123 osm_warn("could not get size of %s\n", gd->disk_name);
1122 1124
1123 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) 1125 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1124 i2o_blk_dev->power = 0; 1126 i2o_blk_dev->power = power;
1125 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1126 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
1127 1127
1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); 1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
1129 1129
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1b..89daf67b764d 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -36,12 +36,12 @@
36 36
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#include "core.h"
40
41#define SG_TABLESIZE 30 39#define SG_TABLESIZE 30
42 40
43static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, 41extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
44 unsigned long arg); 42
43static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
44 unsigned long);
45 45
46static spinlock_t i2o_config_lock; 46static spinlock_t i2o_config_lock;
47 47
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
231 unsigned char maxfrag = 0, curfrag = 1; 231 unsigned char maxfrag = 0, curfrag = 1;
232 struct i2o_dma buffer; 232 struct i2o_dma buffer;
233 struct i2o_message __iomem *msg; 233 struct i2o_message *msg;
234 u32 m;
235 unsigned int status = 0, swlen = 0, fragsize = 8192; 234 unsigned int status = 0, swlen = 0, fragsize = 8192;
236 struct i2o_controller *c; 235 struct i2o_controller *c;
237 236
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
257 if (!c) 256 if (!c)
258 return -ENXIO; 257 return -ENXIO;
259 258
260 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 259 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
261 if (m == I2O_QUEUE_EMPTY) 260 if (IS_ERR(msg))
262 return -EBUSY; 261 return PTR_ERR(msg);
263 262
264 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 263 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
265 i2o_msg_nop(c, m); 264 i2o_msg_nop(c, msg);
266 return -ENOMEM; 265 return -ENOMEM;
267 } 266 }
268 267
269 __copy_from_user(buffer.virt, kxfer.buf, fragsize); 268 __copy_from_user(buffer.virt, kxfer.buf, fragsize);
270 269
271 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 270 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
272 writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 271 msg->u.head[1] =
273 &msg->u.head[1]); 272 cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
274 writel(i2o_config_driver.context, &msg->u.head[2]); 273 ADAPTER_TID);
275 writel(0, &msg->u.head[3]); 274 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
276 writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | 275 msg->u.head[3] = cpu_to_le32(0);
277 (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); 276 msg->body[0] =
278 writel(swlen, &msg->body[1]); 277 cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
279 writel(kxfer.sw_id, &msg->body[2]); 278 sw_type) << 16) |
280 writel(0xD0000000 | fragsize, &msg->body[3]); 279 (((u32) maxfrag) << 8) | (((u32) curfrag)));
281 writel(buffer.phys, &msg->body[4]); 280 msg->body[1] = cpu_to_le32(swlen);
281 msg->body[2] = cpu_to_le32(kxfer.sw_id);
282 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
283 msg->body[4] = cpu_to_le32(buffer.phys);
282 284
283 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 285 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
284 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 286 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
285 287
286 if (status != -ETIMEDOUT) 288 if (status != -ETIMEDOUT)
287 i2o_dma_free(&c->pdev->dev, &buffer); 289 i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
302 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 304 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
303 unsigned char maxfrag = 0, curfrag = 1; 305 unsigned char maxfrag = 0, curfrag = 1;
304 struct i2o_dma buffer; 306 struct i2o_dma buffer;
305 struct i2o_message __iomem *msg; 307 struct i2o_message *msg;
306 u32 m;
307 unsigned int status = 0, swlen = 0, fragsize = 8192; 308 unsigned int status = 0, swlen = 0, fragsize = 8192;
308 struct i2o_controller *c; 309 struct i2o_controller *c;
309 int ret = 0; 310 int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
330 if (!c) 331 if (!c)
331 return -ENXIO; 332 return -ENXIO;
332 333
333 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 334 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
334 if (m == I2O_QUEUE_EMPTY) 335 if (IS_ERR(msg))
335 return -EBUSY; 336 return PTR_ERR(msg);
336 337
337 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 338 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
338 i2o_msg_nop(c, m); 339 i2o_msg_nop(c, msg);
339 return -ENOMEM; 340 return -ENOMEM;
340 } 341 }
341 342
342 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 343 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
343 writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 344 msg->u.head[1] =
344 &msg->u.head[1]); 345 cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
345 writel(i2o_config_driver.context, &msg->u.head[2]); 346 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
346 writel(0, &msg->u.head[3]); 347 msg->u.head[3] = cpu_to_le32(0);
347 writel((u32) kxfer.flags << 24 | (u32) kxfer. 348 msg->body[0] =
348 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, 349 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
349 &msg->body[0]); 350 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
350 writel(swlen, &msg->body[1]); 351 msg->body[1] = cpu_to_le32(swlen);
351 writel(kxfer.sw_id, &msg->body[2]); 352 msg->body[2] = cpu_to_le32(kxfer.sw_id);
352 writel(0xD0000000 | fragsize, &msg->body[3]); 353 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
353 writel(buffer.phys, &msg->body[4]); 354 msg->body[4] = cpu_to_le32(buffer.phys);
354 355
355 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 356 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
356 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 357 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
357 358
358 if (status != I2O_POST_WAIT_OK) { 359 if (status != I2O_POST_WAIT_OK) {
359 if (status != -ETIMEDOUT) 360 if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
380 struct i2o_controller *c; 381 struct i2o_controller *c;
381 struct i2o_sw_xfer kxfer; 382 struct i2o_sw_xfer kxfer;
382 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 383 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
383 struct i2o_message __iomem *msg; 384 struct i2o_message *msg;
384 u32 m;
385 unsigned int swlen; 385 unsigned int swlen;
386 int token; 386 int token;
387 387
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
395 if (!c) 395 if (!c)
396 return -ENXIO; 396 return -ENXIO;
397 397
398 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 398 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
399 if (m == I2O_QUEUE_EMPTY) 399 if (IS_ERR(msg))
400 return -EBUSY; 400 return PTR_ERR(msg);
401 401
402 writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 402 msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
403 writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, 403 msg->u.head[1] =
404 &msg->u.head[1]); 404 cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
405 writel(i2o_config_driver.context, &msg->u.head[2]); 405 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
406 writel(0, &msg->u.head[3]); 406 msg->u.head[3] = cpu_to_le32(0);
407 writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, 407 msg->body[0] =
408 &msg->body[0]); 408 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
409 writel(swlen, &msg->body[1]); 409 msg->body[1] = cpu_to_le32(swlen);
410 writel(kxfer.sw_id, &msg->body[2]); 410 msg->body[2] = cpu_to_le32(kxfer.sw_id);
411 411
412 token = i2o_msg_post_wait(c, m, 10); 412 token = i2o_msg_post_wait(c, msg, 10);
413 413
414 if (token != I2O_POST_WAIT_OK) { 414 if (token != I2O_POST_WAIT_OK) {
415 osm_info("swdel failed, DetailedStatus = %d\n", token); 415 osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
423{ 423{
424 int token; 424 int token;
425 int iop = (int)arg; 425 int iop = (int)arg;
426 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
427 u32 m;
428 struct i2o_controller *c; 427 struct i2o_controller *c;
429 428
430 c = i2o_find_iop(iop); 429 c = i2o_find_iop(iop);
431 if (!c) 430 if (!c)
432 return -ENXIO; 431 return -ENXIO;
433 432
434 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 433 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
435 if (m == I2O_QUEUE_EMPTY) 434 if (IS_ERR(msg))
436 return -EBUSY; 435 return PTR_ERR(msg);
437 436
438 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 437 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
439 writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, 438 msg->u.head[1] =
440 &msg->u.head[1]); 439 cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
441 writel(i2o_config_driver.context, &msg->u.head[2]); 440 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
442 writel(0, &msg->u.head[3]); 441 msg->u.head[3] = cpu_to_le32(0);
443 442
444 token = i2o_msg_post_wait(c, m, 10); 443 token = i2o_msg_post_wait(c, msg, 10);
445 444
446 if (token != I2O_POST_WAIT_OK) { 445 if (token != I2O_POST_WAIT_OK) {
447 osm_info("Can't validate configuration, ErrorStatus = %d\n", 446 osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
454 453
455static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) 454static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
456{ 455{
457 struct i2o_message __iomem *msg; 456 struct i2o_message *msg;
458 u32 m;
459 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; 457 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
460 struct i2o_evt_id kdesc; 458 struct i2o_evt_id kdesc;
461 struct i2o_controller *c; 459 struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
474 if (!d) 472 if (!d)
475 return -ENODEV; 473 return -ENODEV;
476 474
477 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 475 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
478 if (m == I2O_QUEUE_EMPTY) 476 if (IS_ERR(msg))
479 return -EBUSY; 477 return PTR_ERR(msg);
480 478
481 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 479 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
482 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, 480 msg->u.head[1] =
483 &msg->u.head[1]); 481 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
484 writel(i2o_config_driver.context, &msg->u.head[2]); 482 kdesc.tid);
485 writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); 483 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
486 writel(kdesc.evt_mask, &msg->body[0]); 484 msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
485 msg->body[0] = cpu_to_le32(kdesc.evt_mask);
487 486
488 i2o_msg_post(c, m); 487 i2o_msg_post(c, msg);
489 488
490 return 0; 489 return 0;
491} 490}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
537 u32 sg_index = 0; 536 u32 sg_index = 0;
538 i2o_status_block *sb; 537 i2o_status_block *sb;
539 struct i2o_message *msg; 538 struct i2o_message *msg;
540 u32 m;
541 unsigned int iop; 539 unsigned int iop;
542 540
543 cmd = (struct i2o_cmd_passthru32 __user *)arg; 541 cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
553 return -ENXIO; 551 return -ENXIO;
554 } 552 }
555 553
556 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 554 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
557 555
558 sb = c->status_block.virt; 556 sb = c->status_block.virt;
559 557
@@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
585 reply_size >>= 16; 583 reply_size >>= 16;
586 reply_size <<= 2; 584 reply_size <<= 2;
587 585
588 reply = kmalloc(reply_size, GFP_KERNEL); 586 reply = kzalloc(reply_size, GFP_KERNEL);
589 if (!reply) { 587 if (!reply) {
590 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 588 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
591 c->name); 589 c->name);
592 return -ENOMEM; 590 return -ENOMEM;
593 } 591 }
594 memset(reply, 0, reply_size);
595 592
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 593 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
597 594
598 writel(i2o_config_driver.context, &msg->u.s.icntxt);
599 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
600
601 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 595 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
602 if (sg_offset) { 596 if (sg_offset) {
603 struct sg_simple_element *sg; 597 struct sg_simple_element *sg;
@@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
631 goto cleanup; 625 goto cleanup;
632 } 626 }
633 sg_size = sg[i].flag_count & 0xffffff; 627 sg_size = sg[i].flag_count & 0xffffff;
634 p = &(sg_list[sg_index++]); 628 p = &(sg_list[sg_index]);
635 /* Allocate memory for the transfer */ 629 /* Allocate memory for the transfer */
636 if (i2o_dma_alloc 630 if (i2o_dma_alloc
637 (&c->pdev->dev, p, sg_size, 631 (&c->pdev->dev, p, sg_size,
@@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
642 rcode = -ENOMEM; 636 rcode = -ENOMEM;
643 goto sg_list_cleanup; 637 goto sg_list_cleanup;
644 } 638 }
639 sg_index++;
645 /* Copy in the user's SG buffer if necessary */ 640 /* Copy in the user's SG buffer if necessary */
646 if (sg[i]. 641 if (sg[i].
647 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { 642 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
662 } 657 }
663 } 658 }
664 659
665 rcode = i2o_msg_post_wait(c, m, 60); 660 rcode = i2o_msg_post_wait(c, msg, 60);
666 if (rcode) 661 if (rcode) {
662 reply[4] = ((u32) rcode) << 24;
667 goto sg_list_cleanup; 663 goto sg_list_cleanup;
664 }
668 665
669 if (sg_offset) { 666 if (sg_offset) {
670 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 667 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
@@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
714 } 711 }
715 } 712 }
716 713
714 sg_list_cleanup:
717 /* Copy back the reply to user space */ 715 /* Copy back the reply to user space */
718 if (reply_size) { 716 if (reply_size) {
719 // we wrote our own values for context - now restore the user supplied ones 717 // we wrote our own values for context - now restore the user supplied ones
@@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
731 } 729 }
732 } 730 }
733 731
734 sg_list_cleanup:
735 for (i = 0; i < sg_index; i++) 732 for (i = 0; i < sg_index; i++)
736 i2o_dma_free(&c->pdev->dev, &sg_list[i]); 733 i2o_dma_free(&c->pdev->dev, &sg_list[i]);
737 734
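Two error-path fixes land together here. Moving the sg_list_cleanup label above the reply copy-back means a failed i2o_msg_post_wait() now records its status in reply[4] and still copies the reply out to user space before freeing; previously the goto skipped the copy-back entirely. And postponing sg_index++ until after a successful i2o_dma_alloc() keeps the cleanup loop from freeing a buffer that was never allocated. The reordered flow, condensed:

    rcode = i2o_msg_post_wait(c, msg, 60);
    if (rcode) {
            reply[4] = ((u32) rcode) << 24;  /* surface the status ... */
            goto sg_list_cleanup;            /* ... and still copy back */
    }

    /* success-path reply handling elided */

    sg_list_cleanup:
            /* the reply is copied back to user space here, on both
             * paths, before the DMA buffers are released */
            for (i = 0; i < sg_index; i++)   /* only fully allocated entries */
                    i2o_dma_free(&c->pdev->dev, &sg_list[i]);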
@@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg)
780 u32 i = 0; 777 u32 i = 0;
781 void *p = NULL; 778 void *p = NULL;
782 i2o_status_block *sb; 779 i2o_status_block *sb;
783 struct i2o_message __iomem *msg; 780 struct i2o_message *msg;
784 u32 m;
785 unsigned int iop; 781 unsigned int iop;
786 782
787 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) 783 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 789 return -ENXIO;
794 } 790 }
795 791
796 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 792 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797 793
798 sb = c->status_block.virt; 794 sb = c->status_block.virt;
799 795
@@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg)
820 reply_size >>= 16; 816 reply_size >>= 16;
821 reply_size <<= 2; 817 reply_size <<= 2;
822 818
823 reply = kmalloc(reply_size, GFP_KERNEL); 819 reply = kzalloc(reply_size, GFP_KERNEL);
824 if (!reply) { 820 if (!reply) {
825 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 821 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
826 c->name); 822 c->name);
827 return -ENOMEM; 823 return -ENOMEM;
828 } 824 }
829 memset(reply, 0, reply_size);
830 825
831 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 826 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
832 827
833 writel(i2o_config_driver.context, &msg->u.s.icntxt);
834 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
835
836 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 828 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
837 if (sg_offset) { 829 if (sg_offset) {
838 struct sg_simple_element *sg; 830 struct sg_simple_element *sg;
@@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg)
894 } 886 }
895 } 887 }
896 888
897 rcode = i2o_msg_post_wait(c, m, 60); 889 rcode = i2o_msg_post_wait(c, msg, 60);
898 if (rcode) 890 if (rcode) {
891 reply[4] = ((u32) rcode) << 24;
899 goto sg_list_cleanup; 892 goto sg_list_cleanup;
893 }
900 894
901 if (sg_offset) { 895 if (sg_offset) {
902 u32 msg[128]; 896 u32 msg[128];
@@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg)
946 } 940 }
947 } 941 }
948 942
943 sg_list_cleanup:
949 /* Copy back the reply to user space */ 944 /* Copy back the reply to user space */
950 if (reply_size) { 945 if (reply_size) {
951 // we wrote our own values for context - now restore the user supplied ones 946 // we wrote our own values for context - now restore the user supplied ones
@@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg)
962 } 957 }
963 } 958 }
964 959
965 sg_list_cleanup:
966 for (i = 0; i < sg_index; i++) 960 for (i = 0; i < sg_index; i++)
967 kfree(sg_list[i]); 961 kfree(sg_list[i]);
968 962
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
index 561d63304d7e..6502b817df58 100644
--- a/drivers/message/i2o/i2o_lan.h
+++ b/drivers/message/i2o/i2o_lan.h
@@ -103,14 +103,14 @@
103#define I2O_LAN_DSC_SUSPENDED 0x11 103#define I2O_LAN_DSC_SUSPENDED 0x11
104 104
105struct i2o_packet_info { 105struct i2o_packet_info {
106 u32 offset : 24; 106 u32 offset:24;
107 u32 flags : 8; 107 u32 flags:8;
108 u32 len : 24; 108 u32 len:24;
109 u32 status : 8; 109 u32 status:8;
110}; 110};
111 111
112struct i2o_bucket_descriptor { 112struct i2o_bucket_descriptor {
113 u32 context; /* FIXME: 64bit support */ 113 u32 context; /* FIXME: 64bit support */
114 struct i2o_packet_info packet_info[1]; 114 struct i2o_packet_info packet_info[1];
115}; 115};
116 116
@@ -127,14 +127,14 @@ struct i2o_lan_local {
127 u8 unit; 127 u8 unit;
128 struct i2o_device *i2o_dev; 128 struct i2o_device *i2o_dev;
129 129
130 struct fddi_statistics stats; /* see also struct net_device_stats */ 130 struct fddi_statistics stats; /* see also struct net_device_stats */
131 unsigned short (*type_trans)(struct sk_buff *, struct net_device *); 131 unsigned short (*type_trans) (struct sk_buff *, struct net_device *);
132 atomic_t buckets_out; /* nbr of unused buckets on DDM */ 132 atomic_t buckets_out; /* nbr of unused buckets on DDM */
133 atomic_t tx_out; /* outstanding TXes */ 133 atomic_t tx_out; /* outstanding TXes */
134 u8 tx_count; /* packets in one TX message frame */ 134 u8 tx_count; /* packets in one TX message frame */
135 u16 tx_max_out; /* DDM's Tx queue len */ 135 u16 tx_max_out; /* DDM's Tx queue len */
136 u8 sgl_max; /* max SGLs in one message frame */ 136 u8 sgl_max; /* max SGLs in one message frame */
137 u32 m; /* IOP address of the batch msg frame */ 137 u32 m; /* IOP address of the batch msg frame */
138 138
139 struct work_struct i2o_batch_send_task; 139 struct work_struct i2o_batch_send_task;
140 int send_active; 140 int send_active;
@@ -144,16 +144,16 @@ struct i2o_lan_local {
144 144
145 spinlock_t tx_lock; 145 spinlock_t tx_lock;
146 146
147 u32 max_size_mc_table; /* max number of multicast addresses */ 147 u32 max_size_mc_table; /* max number of multicast addresses */
148 148
149 /* LAN OSM configurable parameters are here: */ 149 /* LAN OSM configurable parameters are here: */
150 150
151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */ 151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */
152 u16 bucket_thresh; /* send more when this many used */ 152 u16 bucket_thresh; /* send more when this many used */
153 u16 rx_copybreak; 153 u16 rx_copybreak;
154 154
155 u8 tx_batch_mode; /* Set when using batch mode sends */ 155 u8 tx_batch_mode; /* Set when using batch mode sends */
156 u32 i2o_event_mask; /* To turn on interesting event flags */ 156 u32 i2o_event_mask; /* To turn on interesting event flags */
157}; 157};
158 158
159#endif /* _I2O_LAN_H */ 159#endif /* _I2O_LAN_H */
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index d559a1758363..2a0c42b8cda5 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -28,7 +28,7 @@
28 */ 28 */
29 29
30#define OSM_NAME "proc-osm" 30#define OSM_NAME "proc-osm"
31#define OSM_VERSION "1.145" 31#define OSM_VERSION "1.316"
32#define OSM_DESCRIPTION "I2O ProcFS OSM" 32#define OSM_DESCRIPTION "I2O ProcFS OSM"
33 33
34#define I2O_MAX_MODULES 4 34#define I2O_MAX_MODULES 4
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933b..f9e5a23697a1 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -70,7 +70,7 @@
70#include <scsi/sg_request.h> 70#include <scsi/sg_request.h>
71 71
72#define OSM_NAME "scsi-osm" 72#define OSM_NAME "scsi-osm"
73#define OSM_VERSION "1.282" 73#define OSM_VERSION "1.316"
74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" 74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
75 75
76static struct i2o_driver i2o_scsi_driver; 76static struct i2o_driver i2o_scsi_driver;
@@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
113 113
114 list_for_each_entry(i2o_dev, &c->devices, list) 114 list_for_each_entry(i2o_dev, &c->devices, list)
115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
116 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 116 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
117 && (type == 0x01)) /* SCSI bus */ 117 && (type == 0x01)) /* SCSI bus */
118 max_channel++; 118 max_channel++;
119 } 119 }
@@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
146 i = 0; 146 i = 0;
147 list_for_each_entry(i2o_dev, &c->devices, list) 147 list_for_each_entry(i2o_dev, &c->devices, list)
148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
149 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 149 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
150 && (type == 0x01)) /* only SCSI bus */ 150 && (type == 0x01)) /* only SCSI bus */
151 i2o_shost->channel[i++] = i2o_dev; 151 i2o_shost->channel[i++] = i2o_dev;
152 152
@@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev)
238 u8 type; 238 u8 type;
239 struct i2o_device *d = i2o_shost->channel[0]; 239 struct i2o_device *d = i2o_shost->channel[0];
240 240
241 if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) 241 if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
242 && (type == 0x01)) /* SCSI bus */ 242 && (type == 0x01)) /* SCSI bus */
243 if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { 243 if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
244 channel = 0; 244 channel = 0;
245 if (i2o_dev->lct_data.class_id == 245 if (i2o_dev->lct_data.class_id ==
246 I2O_CLASS_RANDOM_BLOCK_STORAGE) 246 I2O_CLASS_RANDOM_BLOCK_STORAGE)
247 lun = i2o_shost->lun++; 247 lun =
248 cpu_to_le64(i2o_shost->
249 lun++);
248 else 250 else
249 lun = 0; 251 lun = 0;
250 } 252 }
@@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev)
253 break; 255 break;
254 256
255 case I2O_CLASS_SCSI_PERIPHERAL: 257 case I2O_CLASS_SCSI_PERIPHERAL:
256 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) 258 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
257 return -EFAULT; 259 return -EFAULT;
258 260
259 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) 261 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
260 return -EFAULT; 262 return -EFAULT;
261 263
262 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); 264 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
@@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev)
281 return -EFAULT; 283 return -EFAULT;
282 } 284 }
283 285
284 if (id >= scsi_host->max_id) { 286 if (le32_to_cpu(id) >= scsi_host->max_id) {
285 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, 287 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
286 scsi_host->max_id); 288 le32_to_cpu(id), scsi_host->max_id);
287 return -EFAULT; 289 return -EFAULT;
288 } 290 }
289 291
290 if (lun >= scsi_host->max_lun) { 292 if (le64_to_cpu(lun) >= scsi_host->max_lun) {
291 osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", 293 osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
292 (unsigned int)lun, scsi_host->max_lun); 294 (long unsigned int)le64_to_cpu(lun),
295 scsi_host->max_lun);
293 return -EFAULT; 296 return -EFAULT;
294 } 297 }
295 298
296 scsi_dev = 299 scsi_dev =
297 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); 300 __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
301 le64_to_cpu(lun), i2o_dev);
298 302
299 if (IS_ERR(scsi_dev)) { 303 if (IS_ERR(scsi_dev)) {
300 osm_warn("can not add SCSI device %03x\n", 304 osm_warn("can not add SCSI device %03x\n",
@@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev)
305 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, 309 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
306 "scsi"); 310 "scsi");
307 311
308	osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n",	312	osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %lu\n",
309 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); 313 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
314 (long unsigned int)le64_to_cpu(lun));
310 315
311 return 0; 316 return 0;
312}; 317};
@@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
510 struct i2o_controller *c; 515 struct i2o_controller *c;
511 struct i2o_device *i2o_dev; 516 struct i2o_device *i2o_dev;
512 int tid; 517 int tid;
513 struct i2o_message __iomem *msg; 518 struct i2o_message *msg;
514 u32 m;
515 /* 519 /*
516 * ENABLE_DISCONNECT 520 * ENABLE_DISCONNECT
517 * SIMPLE_TAG 521 * SIMPLE_TAG
@@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
519 */ 523 */
520 u32 scsi_flags = 0x20a00000; 524 u32 scsi_flags = 0x20a00000;
521 u32 sgl_offset; 525 u32 sgl_offset;
522 u32 __iomem *mptr; 526 u32 *mptr;
523 u32 cmd = I2O_CMD_SCSI_EXEC << 24; 527 u32 cmd = I2O_CMD_SCSI_EXEC << 24;
524 int rc = 0; 528 int rc = 0;
525 529
@@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
576 * throw it back to the scsi layer 580 * throw it back to the scsi layer
577 */ 581 */
578 582
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 583 msg = i2o_msg_get(c);
580 if (m == I2O_QUEUE_EMPTY) { 584 if (IS_ERR(msg)) {
581 rc = SCSI_MLQUEUE_HOST_BUSY; 585 rc = SCSI_MLQUEUE_HOST_BUSY;
582 goto exit; 586 goto exit;
583 } 587 }
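queuecommand() is called in a context that must not sleep, so it now uses the non-waiting i2o_msg_get() and pushes back with SCSI_MLQUEUE_HOST_BUSY when no inbound frame is free; the SCSI mid-layer will simply retry the command later. As everywhere else in this patch, the returned pointer must be screened with IS_ERR() before the frame is touched:

    msg = i2o_msg_get(c);                 /* non-blocking variant */
    if (IS_ERR(msg)) {
            rc = SCSI_MLQUEUE_HOST_BUSY;  /* mid-layer requeues for us */
            goto exit;
    }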
@@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
617 if (sgl_offset == SGL_OFFSET_10) 621 if (sgl_offset == SGL_OFFSET_10)
618 sgl_offset = SGL_OFFSET_12; 622 sgl_offset = SGL_OFFSET_12;
619 cmd = I2O_CMD_PRIVATE << 24; 623 cmd = I2O_CMD_PRIVATE << 24;
620 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 624 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
621 writel(adpt_flags | tid, mptr++); 625 *mptr++ = cpu_to_le32(adpt_flags | tid);
622 } 626 }
623#endif 627#endif
624 628
625 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 629 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
626 writel(i2o_scsi_driver.context, &msg->u.s.icntxt); 630 msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
627 631
628 /* We want the SCSI control block back */ 632 /* We want the SCSI control block back */
629 writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); 633 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
630 634
631 /* LSI_920_PCI_QUIRK 635 /* LSI_920_PCI_QUIRK
632 * 636 *
@@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
649 } 653 }
650 */ 654 */
651 655
652 writel(scsi_flags | SCpnt->cmd_len, mptr++); 656 *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
653 657
654 /* Write SCSI command into the message - always 16 byte block */ 658 /* Write SCSI command into the message - always 16 byte block */
655 memcpy_toio(mptr, SCpnt->cmnd, 16); 659 memcpy(mptr, SCpnt->cmnd, 16);
656 mptr += 4; 660 mptr += 4;
657 661
658 if (sgl_offset != SGL_OFFSET_0) { 662 if (sgl_offset != SGL_OFFSET_0) {
659 /* write size of data addressed by SGL */ 663 /* write size of data addressed by SGL */
660 writel(SCpnt->request_bufflen, mptr++); 664 *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
661 665
662 /* Now fill in the SGList and command */ 666 /* Now fill in the SGList and command */
663 if (SCpnt->use_sg) { 667 if (SCpnt->use_sg) {
@@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
676 } 680 }
677 681
678 /* Stick the headers on */ 682 /* Stick the headers on */
679 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, 683 msg->u.head[0] =
680 &msg->u.head[0]); 684 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
681 685
682 /* Queue the message */ 686 /* Queue the message */
683 i2o_msg_post(c, m); 687 i2o_msg_post(c, msg);
684 688
685 osm_debug("Issued %ld\n", SCpnt->serial_number); 689 osm_debug("Issued %ld\n", SCpnt->serial_number);
686 690
@@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
688 692
689 nomem: 693 nomem:
690 rc = -ENOMEM; 694 rc = -ENOMEM;
691 i2o_msg_nop(c, m); 695 i2o_msg_nop(c, msg);
692 696
693 exit: 697 exit:
694 return rc; 698 return rc;
@@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
709{ 713{
710 struct i2o_device *i2o_dev; 714 struct i2o_device *i2o_dev;
711 struct i2o_controller *c; 715 struct i2o_controller *c;
712 struct i2o_message __iomem *msg; 716 struct i2o_message *msg;
713 u32 m;
714 int tid; 717 int tid;
715 int status = FAILED; 718 int status = FAILED;
716 719
@@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
720 c = i2o_dev->iop; 723 c = i2o_dev->iop;
721 tid = i2o_dev->lct_data.tid; 724 tid = i2o_dev->lct_data.tid;
722 725
723 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 726 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
724 if (m == I2O_QUEUE_EMPTY) 727 if (IS_ERR(msg))
725 return SCSI_MLQUEUE_HOST_BUSY; 728 return SCSI_MLQUEUE_HOST_BUSY;
726 729
727 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 730 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
728 writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, 731 msg->u.head[1] =
729 &msg->u.head[1]); 732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
730 writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); 733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
731 734
732 if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) 735 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
733 status = SUCCESS; 736 status = SUCCESS;
734 737
735 return status; 738 return status;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842e..492167446936 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -32,7 +32,7 @@
32#include "core.h" 32#include "core.h"
33 33
34#define OSM_NAME "i2o" 34#define OSM_NAME "i2o"
35#define OSM_VERSION "1.288" 35#define OSM_VERSION "1.325"
36#define OSM_DESCRIPTION "I2O subsystem" 36#define OSM_DESCRIPTION "I2O subsystem"
37 37
38/* global I2O controller list */ 38/* global I2O controller list */
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
47static int i2o_hrt_get(struct i2o_controller *c); 47static int i2o_hrt_get(struct i2o_controller *c);
48 48
49/** 49/**
50 * i2o_msg_nop - Returns a message which is not used
51 * @c: I2O controller from which the message was created
52 * @m: message which should be returned
53 *
54 * If you fetch a message via i2o_msg_get, and can't use it, you must
55 * return the message with this function. Otherwise the message frame
56 * is lost.
57 */
58void i2o_msg_nop(struct i2o_controller *c, u32 m)
59{
60 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
61
62 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
63 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
64 &msg->u.head[1]);
65 writel(0, &msg->u.head[2]);
66 writel(0, &msg->u.head[3]);
67 i2o_msg_post(c, m);
68};
69
70/**
71 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
72 * @c: I2O controller 51 * @c: I2O controller
 73 * @msg: pointer to an I2O message pointer 52 * @msg: pointer to an I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
81 * address from the read port (see the i2o spec). If no message is 60 * address from the read port (see the i2o spec). If no message is
 82 * available returns I2O_QUEUE_EMPTY and msg is left untouched. 61 * available returns I2O_QUEUE_EMPTY and msg is left untouched.
83 */ 62 */
84u32 i2o_msg_get_wait(struct i2o_controller *c, 63struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
85 struct i2o_message __iomem ** msg, int wait)
86{ 64{
87 unsigned long timeout = jiffies + wait * HZ; 65 unsigned long timeout = jiffies + wait * HZ;
88 u32 m; 66 struct i2o_message *msg;
89 67
90 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { 68 while (IS_ERR(msg = i2o_msg_get(c))) {
91 if (time_after(jiffies, timeout)) { 69 if (time_after(jiffies, timeout)) {
92 osm_debug("%s: Timeout waiting for message frame.\n", 70 osm_debug("%s: Timeout waiting for message frame.\n",
93 c->name); 71 c->name);
94 return I2O_QUEUE_EMPTY; 72 return ERR_PTR(-ETIMEDOUT);
95 } 73 }
96 schedule_timeout_uninterruptible(1); 74 schedule_timeout_uninterruptible(1);
97 } 75 }
98 76
99 return m; 77 return msg;
100}; 78};
101 79
102#if BITS_PER_LONG == 64 80#if BITS_PER_LONG == 64
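The rewritten i2o_msg_get_wait() polls the non-blocking allocator and converts a timeout into ERR_PTR(-ETIMEDOUT), retiring the old I2O_QUEUE_EMPTY/out-parameter protocol. Folding the two diff columns back together, the new body is:

    struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
    {
            unsigned long timeout = jiffies + wait * HZ;
            struct i2o_message *msg;

            while (IS_ERR(msg = i2o_msg_get(c))) {
                    if (time_after(jiffies, timeout)) {
                            osm_debug("%s: Timeout waiting for message frame.\n",
                                      c->name);
                            return ERR_PTR(-ETIMEDOUT);
                    }
                    schedule_timeout_uninterruptible(1);  /* back off one tick */
            }

            return msg;
    };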
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
301 */ 279 */
302static int i2o_iop_quiesce(struct i2o_controller *c) 280static int i2o_iop_quiesce(struct i2o_controller *c)
303{ 281{
304 struct i2o_message __iomem *msg; 282 struct i2o_message *msg;
305 u32 m;
306 i2o_status_block *sb = c->status_block.virt; 283 i2o_status_block *sb = c->status_block.virt;
307 int rc; 284 int rc;
308 285
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
313 (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) 290 (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
314 return 0; 291 return 0;
315 292
316 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 293 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
317 if (m == I2O_QUEUE_EMPTY) 294 if (IS_ERR(msg))
318 return -ETIMEDOUT; 295 return PTR_ERR(msg);
319 296
320 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 297 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
321 writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, 298 msg->u.head[1] =
322 &msg->u.head[1]); 299 cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
300 ADAPTER_TID);
323 301
324 /* Long timeout needed for quiesce if lots of devices */ 302 /* Long timeout needed for quiesce if lots of devices */
325 if ((rc = i2o_msg_post_wait(c, m, 240))) 303 if ((rc = i2o_msg_post_wait(c, msg, 240)))
326 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); 304 osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
327 else 305 else
328 osm_debug("%s: Quiesced.\n", c->name); 306 osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
342 */ 320 */
343static int i2o_iop_enable(struct i2o_controller *c) 321static int i2o_iop_enable(struct i2o_controller *c)
344{ 322{
345 struct i2o_message __iomem *msg; 323 struct i2o_message *msg;
346 u32 m;
347 i2o_status_block *sb = c->status_block.virt; 324 i2o_status_block *sb = c->status_block.virt;
348 int rc; 325 int rc;
349 326
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
353 if (sb->iop_state != ADAPTER_STATE_READY) 330 if (sb->iop_state != ADAPTER_STATE_READY)
354 return -EINVAL; 331 return -EINVAL;
355 332
356 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 333 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
357 if (m == I2O_QUEUE_EMPTY) 334 if (IS_ERR(msg))
358 return -ETIMEDOUT; 335 return PTR_ERR(msg);
359 336
360 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 337 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
361 writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, 338 msg->u.head[1] =
362 &msg->u.head[1]); 339 cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
340 ADAPTER_TID);
363 341
364 /* How long of a timeout do we need? */ 342 /* How long of a timeout do we need? */
365 if ((rc = i2o_msg_post_wait(c, m, 240))) 343 if ((rc = i2o_msg_post_wait(c, msg, 240)))
366 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); 344 osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
367 else 345 else
368 osm_debug("%s: Enabled.\n", c->name); 346 osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
413 */ 391 */
414static int i2o_iop_clear(struct i2o_controller *c) 392static int i2o_iop_clear(struct i2o_controller *c)
415{ 393{
416 struct i2o_message __iomem *msg; 394 struct i2o_message *msg;
417 u32 m;
418 int rc; 395 int rc;
419 396
420 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
421 if (m == I2O_QUEUE_EMPTY) 398 if (IS_ERR(msg))
422 return -ETIMEDOUT; 399 return PTR_ERR(msg);
423 400
424 /* Quiesce all IOPs first */ 401 /* Quiesce all IOPs first */
425 i2o_iop_quiesce_all(); 402 i2o_iop_quiesce_all();
426 403
427 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 404 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
428 writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, 405 msg->u.head[1] =
429 &msg->u.head[1]); 406 cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
407 ADAPTER_TID);
430 408
431 if ((rc = i2o_msg_post_wait(c, m, 30))) 409 if ((rc = i2o_msg_post_wait(c, msg, 30)))
432 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); 410 osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
433 else 411 else
434 osm_debug("%s: Cleared.\n", c->name); 412 osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
446 * Clear and (re)initialize IOP's outbound queue and post the message 424 * Clear and (re)initialize IOP's outbound queue and post the message
447 * frames to the IOP. 425 * frames to the IOP.
448 * 426 *
449 * Returns 0 on success or a negative errno code on failure. 427 * Returns 0 on success or negative error code on failure.
450 */ 428 */
451static int i2o_iop_init_outbound_queue(struct i2o_controller *c) 429static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
452{ 430{
453 volatile u8 *status = c->status.virt;
454 u32 m; 431 u32 m;
455 struct i2o_message __iomem *msg; 432 volatile u8 *status = c->status.virt;
433 struct i2o_message *msg;
456 ulong timeout; 434 ulong timeout;
457 int i; 435 int i;
458 436
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
460 438
461 memset(c->status.virt, 0, 4); 439 memset(c->status.virt, 0, 4);
462 440
463 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 441 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
464 if (m == I2O_QUEUE_EMPTY) 442 if (IS_ERR(msg))
465 return -ETIMEDOUT; 443 return PTR_ERR(msg);
466 444
467 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 445 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
468 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, 446 msg->u.head[1] =
469 &msg->u.head[1]); 447 cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
470 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 448 ADAPTER_TID);
471 writel(0x00000000, &msg->u.s.tcntxt); 449 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
472 writel(PAGE_SIZE, &msg->body[0]); 450 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
451 msg->body[0] = cpu_to_le32(PAGE_SIZE);
473 /* Outbound msg frame size in words and Initcode */ 452 /* Outbound msg frame size in words and Initcode */
474 writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); 453 msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
475 writel(0xd0000004, &msg->body[2]); 454 msg->body[2] = cpu_to_le32(0xd0000004);
476 writel(i2o_dma_low(c->status.phys), &msg->body[3]); 455 msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
477 writel(i2o_dma_high(c->status.phys), &msg->body[4]); 456 msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
478 457
479 i2o_msg_post(c, m); 458 i2o_msg_post(c, msg);
480 459
481 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; 460 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
482 while (*status <= I2O_CMD_IN_PROGRESS) { 461 while (*status <= I2O_CMD_IN_PROGRESS) {
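
The while loop above polls a status byte that the IOP updates by DMA, which is why the pointer is volatile and why the wait is bounded with jiffies rather than a retry counter. The skeleton of that pattern, with illustrative parameters:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static int wait_for_status(volatile u8 *status, u8 done, unsigned long secs)
{
	unsigned long timeout = jiffies + secs * HZ;

	while (*status < done) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		schedule_timeout_uninterruptible(1);	/* sleep one tick */
	}
	return 0;
}
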
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
511static int i2o_iop_reset(struct i2o_controller *c) 490static int i2o_iop_reset(struct i2o_controller *c)
512{ 491{
513 volatile u8 *status = c->status.virt; 492 volatile u8 *status = c->status.virt;
514 struct i2o_message __iomem *msg; 493 struct i2o_message *msg;
515 u32 m;
516 unsigned long timeout; 494 unsigned long timeout;
517 i2o_status_block *sb = c->status_block.virt; 495 i2o_status_block *sb = c->status_block.virt;
518 int rc = 0; 496 int rc = 0;
519 497
520 osm_debug("%s: Resetting controller\n", c->name); 498 osm_debug("%s: Resetting controller\n", c->name);
521 499
522 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 500 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
523 if (m == I2O_QUEUE_EMPTY) 501 if (IS_ERR(msg))
524 return -ETIMEDOUT; 502 return PTR_ERR(msg);
525 503
526 memset(c->status_block.virt, 0, 8); 504 memset(c->status_block.virt, 0, 8);
527 505
528 /* Quiesce all IOPs first */ 506 /* Quiesce all IOPs first */
529 i2o_iop_quiesce_all(); 507 i2o_iop_quiesce_all();
530 508
531 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 509 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
532 writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, 510 msg->u.head[1] =
533 &msg->u.head[1]); 511 cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
534 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 512 ADAPTER_TID);
535 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context 513 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
536 writel(0, &msg->body[0]); 514 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
537 writel(0, &msg->body[1]); 515 msg->body[0] = cpu_to_le32(0x00000000);
538 writel(i2o_dma_low(c->status.phys), &msg->body[2]); 516 msg->body[1] = cpu_to_le32(0x00000000);
539 writel(i2o_dma_high(c->status.phys), &msg->body[3]); 517 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
518 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
540 519
541 i2o_msg_post(c, m); 520 i2o_msg_post(c, msg);
542 521
543 /* Wait for a reply */ 522 /* Wait for a reply */
544 timeout = jiffies + I2O_TIMEOUT_RESET * HZ; 523 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
567 osm_debug("%s: Reset in progress, waiting for reboot...\n", 546 osm_debug("%s: Reset in progress, waiting for reboot...\n",
568 c->name); 547 c->name);
569 548
570 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); 549 while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
571 while (m == I2O_QUEUE_EMPTY) {
572 if (time_after(jiffies, timeout)) { 550 if (time_after(jiffies, timeout)) {
573 osm_err("%s: IOP reset timeout.\n", c->name); 551 osm_err("%s: IOP reset timeout.\n", c->name);
574 rc = -ETIMEDOUT; 552 rc = PTR_ERR(msg);
575 goto exit; 553 goto exit;
576 } 554 }
577 schedule_timeout_uninterruptible(1); 555 schedule_timeout_uninterruptible(1);
578
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
580 } 556 }
581 i2o_msg_nop(c, m); 557 i2o_msg_nop(c, msg);
582 558
583 /* from here all quiesce commands are safe */ 559 /* from here all quiesce commands are safe */
584 c->no_quiesce = 0; 560 c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
686 */ 662 */
687static int i2o_iop_systab_set(struct i2o_controller *c) 663static int i2o_iop_systab_set(struct i2o_controller *c)
688{ 664{
689 struct i2o_message __iomem *msg; 665 struct i2o_message *msg;
690 u32 m;
691 i2o_status_block *sb = c->status_block.virt; 666 i2o_status_block *sb = c->status_block.virt;
692 struct device *dev = &c->pdev->dev; 667 struct device *dev = &c->pdev->dev;
693 struct resource *root; 668 struct resource *root;
@@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
735 } 710 }
736 } 711 }
737 712
738 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 713 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
739 if (m == I2O_QUEUE_EMPTY) 714 if (IS_ERR(msg))
740 return -ETIMEDOUT; 715 return PTR_ERR(msg);
741 716
742 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, 717 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
743 PCI_DMA_TODEVICE); 718 PCI_DMA_TODEVICE);
744 if (!i2o_systab.phys) { 719 if (!i2o_systab.phys) {
745 i2o_msg_nop(c, m); 720 i2o_msg_nop(c, msg);
746 return -ENOMEM; 721 return -ENOMEM;
747 } 722 }
748 723
749 writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); 724 msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
750 writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, 725 msg->u.head[1] =
751 &msg->u.head[1]); 726 cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
727 ADAPTER_TID);
752 728
753 /* 729 /*
754 * Provide three SGL-elements: 730 * Provide three SGL-elements:
755 * System table (SysTab), Private memory space declaration and 731 * System table (SysTab), Private memory space declaration and
756 * Private i/o space declaration 732 * Private i/o space declaration
757 *
758 * FIXME: is this still true?
759 * Nasty one here. We can't use dma_alloc_coherent to send the
760 * same table to everyone. We have to go remap it for them all
761 */ 733 */
762 734
763 writel(c->unit + 2, &msg->body[0]); 735 msg->body[0] = cpu_to_le32(c->unit + 2);
764 writel(0, &msg->body[1]); 736 msg->body[1] = cpu_to_le32(0x00000000);
765 writel(0x54000000 | i2o_systab.len, &msg->body[2]); 737 msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
766 writel(i2o_systab.phys, &msg->body[3]); 738 msg->body[3] = cpu_to_le32(i2o_systab.phys);
767 writel(0x54000000 | sb->current_mem_size, &msg->body[4]); 739 msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
768 writel(sb->current_mem_base, &msg->body[5]); 740 msg->body[5] = cpu_to_le32(sb->current_mem_base);
769 writel(0xd4000000 | sb->current_io_size, &msg->body[6]); 741 msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
770 writel(sb->current_io_base, &msg->body[6]); 742 msg->body[6] = cpu_to_le32(sb->current_io_base);
771 743
772 rc = i2o_msg_post_wait(c, m, 120); 744 rc = i2o_msg_post_wait(c, msg, 120);
773 745
774 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, 746 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
775 PCI_DMA_TODEVICE); 747 PCI_DMA_TODEVICE);
@@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
780 else 752 else
781 osm_debug("%s: SysTab set.\n", c->name); 753 osm_debug("%s: SysTab set.\n", c->name);
782 754
783 i2o_status_get(c); // Entered READY state
784
785 return rc; 755 return rc;
786} 756}
787 757
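
i2o_iop_systab_set() uses the streaming-DMA pattern: the system table is mapped for the duration of one transaction and unmapped once the reply is in. Reduced to a sketch — this 2.6-era code treats a zero handle as failure (current kernels would use dma_mapping_error()), and DMA_TO_DEVICE is the generic spelling of PCI_DMA_TODEVICE:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int send_table(struct device *dev, void *tbl, size_t len)
{
	dma_addr_t phys = dma_map_single(dev, tbl, len, DMA_TO_DEVICE);

	if (!phys)
		return -ENOMEM;	/* mapping failed; caller frees the frame */

	/* ... post a message carrying "phys" and wait for the reply ... */

	dma_unmap_single(dev, phys, len, DMA_TO_DEVICE);
	return 0;
}
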
@@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
791 * 761 *
792 * Send the system table and enable the I2O controller. 762 * Send the system table and enable the I2O controller.
793 * 763 *
794 * Returns 0 on success or negativer error code on failure. 764 * Returns 0 on success or negative error code on failure.
795 */ 765 */
796static int i2o_iop_online(struct i2o_controller *c) 766static int i2o_iop_online(struct i2o_controller *c)
797{ 767{
@@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c)
830 list_for_each_entry_safe(dev, tmp, &c->devices, list) 800 list_for_each_entry_safe(dev, tmp, &c->devices, list)
831 i2o_device_remove(dev); 801 i2o_device_remove(dev);
832 802
833 class_device_unregister(c->classdev);
834 device_del(&c->device); 803 device_del(&c->device);
835 804
836 /* Ask the IOP to switch to RESET state */ 805 /* Ask the IOP to switch to RESET state */
@@ -869,12 +838,11 @@ static int i2o_systab_build(void)
869 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * 838 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
870 sizeof(struct i2o_sys_tbl_entry); 839 sizeof(struct i2o_sys_tbl_entry);
871 840
872 systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); 841 systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
873 if (!systab) { 842 if (!systab) {
874 osm_err("unable to allocate memory for System Table\n"); 843 osm_err("unable to allocate memory for System Table\n");
875 return -ENOMEM; 844 return -ENOMEM;
876 } 845 }
877 memset(systab, 0, i2o_systab.len);
878 846
879 systab->version = I2OVERSION; 847 systab->version = I2OVERSION;
880 systab->change_ind = change_ind + 1; 848 systab->change_ind = change_ind + 1;
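
The kmalloc()+memset() → kzalloc() change above is a pure tightening: one call both allocates and zeroes, leaving no window in which the table holds stale heap contents. Equivalent sketch:

#include <linux/slab.h>

static void *alloc_table(size_t len)
{
	/* Same as kmalloc(len, GFP_KERNEL) followed by memset(p, 0, len). */
	return kzalloc(len, GFP_KERNEL);
}
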
@@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
952 */ 920 */
953int i2o_status_get(struct i2o_controller *c) 921int i2o_status_get(struct i2o_controller *c)
954{ 922{
955 struct i2o_message __iomem *msg; 923 struct i2o_message *msg;
956 u32 m;
957 volatile u8 *status_block; 924 volatile u8 *status_block;
958 unsigned long timeout; 925 unsigned long timeout;
959 926
960 status_block = (u8 *) c->status_block.virt; 927 status_block = (u8 *) c->status_block.virt;
961 memset(c->status_block.virt, 0, sizeof(i2o_status_block)); 928 memset(c->status_block.virt, 0, sizeof(i2o_status_block));
962 929
963 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 930 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
964 if (m == I2O_QUEUE_EMPTY) 931 if (IS_ERR(msg))
965 return -ETIMEDOUT; 932 return PTR_ERR(msg);
966 933
967 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 934 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
968 writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 935 msg->u.head[1] =
969 &msg->u.head[1]); 936 cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
970 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 937 ADAPTER_TID);
 971 writel(0, &msg->u.s.tcntxt); // FIXME: use reasonable transaction context 938 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
972 writel(0, &msg->body[0]); 939 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
973 writel(0, &msg->body[1]); 940 msg->body[0] = cpu_to_le32(0x00000000);
974 writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); 941 msg->body[1] = cpu_to_le32(0x00000000);
975 writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); 942 msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
976 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ 943 msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
944 msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
977 945
978 i2o_msg_post(c, m); 946 i2o_msg_post(c, msg);
979 947
980 /* Wait for a reply */ 948 /* Wait for a reply */
981 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; 949 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c)
1002 * The HRT contains information about possible hidden devices but is 970 * The HRT contains information about possible hidden devices but is
1003 * mostly useless to us. 971 * mostly useless to us.
1004 * 972 *
1005 * Returns 0 on success or negativer error code on failure. 973 * Returns 0 on success or negative error code on failure.
1006 */ 974 */
1007static int i2o_hrt_get(struct i2o_controller *c) 975static int i2o_hrt_get(struct i2o_controller *c)
1008{ 976{
@@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
1013 struct device *dev = &c->pdev->dev; 981 struct device *dev = &c->pdev->dev;
1014 982
1015 for (i = 0; i < I2O_HRT_GET_TRIES; i++) { 983 for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
1016 struct i2o_message __iomem *msg; 984 struct i2o_message *msg;
1017 u32 m;
1018 985
1019 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 986 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1020 if (m == I2O_QUEUE_EMPTY) 987 if (IS_ERR(msg))
1021 return -ETIMEDOUT; 988 return PTR_ERR(msg);
1022 989
1023 writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); 990 msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
1024 writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, 991 msg->u.head[1] =
1025 &msg->u.head[1]); 992 cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
1026 writel(0xd0000000 | c->hrt.len, &msg->body[0]); 993 ADAPTER_TID);
1027 writel(c->hrt.phys, &msg->body[1]); 994 msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
995 msg->body[1] = cpu_to_le32(c->hrt.phys);
1028 996
1029 rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); 997 rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
1030 998
1031 if (rc < 0) { 999 if (rc < 0) {
1032 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, 1000 osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c)
1051} 1019}
1052 1020
1053/** 1021/**
1054 * i2o_iop_free - Free the i2o_controller struct
1055 * @c: I2O controller to free
1056 */
1057void i2o_iop_free(struct i2o_controller *c)
1058{
1059 kfree(c);
1060};
1061
1062/**
1063 * i2o_iop_release - release the memory for a I2O controller 1022 * i2o_iop_release - release the memory for a I2O controller
1064 * @dev: I2O controller which should be released 1023 * @dev: I2O controller which should be released
1065 * 1024 *
@@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev)
1073 i2o_iop_free(c); 1032 i2o_iop_free(c);
1074}; 1033};
1075 1034
1076/* I2O controller class */
1077static struct class *i2o_controller_class;
1078
1079/** 1035/**
1080 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct 1036 * i2o_iop_alloc - Allocate and initialize a i2o_controller struct
1081 * 1037 *
1082 * Allocate the necessary memory for a i2o_controller struct and 1038 * Allocate the necessary memory for a i2o_controller struct and
1083 * initialize the lists. 1039 * initialize the lists and message mempool.
1084 * 1040 *
1085 * Returns a pointer to the I2O controller or a negative error code on 1041 * Returns a pointer to the I2O controller or a negative error code on
1086 * failure. 1042 * failure.
@@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void)
1089{ 1045{
1090 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ 1046 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
1091 struct i2o_controller *c; 1047 struct i2o_controller *c;
1048 char poolname[32];
1092 1049
1093 c = kmalloc(sizeof(*c), GFP_KERNEL); 1050 c = kzalloc(sizeof(*c), GFP_KERNEL);
1094 if (!c) { 1051 if (!c) {
1095 osm_err("i2o: Insufficient memory to allocate a I2O controller." 1052 osm_err("i2o: Insufficient memory to allocate a I2O controller."
1096 "\n"); 1053 "\n");
1097 return ERR_PTR(-ENOMEM); 1054 return ERR_PTR(-ENOMEM);
1098 } 1055 }
1099 memset(c, 0, sizeof(*c)); 1056
1057 c->unit = unit++;
1058 sprintf(c->name, "iop%d", c->unit);
1059
1060 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
1061 if (i2o_pool_alloc
1062 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
1063 I2O_MSG_INPOOL_MIN)) {
1064 kfree(c);
1065 return ERR_PTR(-ENOMEM);
1066 };
1100 1067
1101 INIT_LIST_HEAD(&c->devices); 1068 INIT_LIST_HEAD(&c->devices);
1102 spin_lock_init(&c->lock); 1069 spin_lock_init(&c->lock);
1103 init_MUTEX(&c->lct_lock); 1070 init_MUTEX(&c->lct_lock);
1104 c->unit = unit++;
1105 sprintf(c->name, "iop%d", c->unit);
1106 1071
1107 device_initialize(&c->device); 1072 device_initialize(&c->device);
1108 1073
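
i2o_iop_alloc() now also builds the controller's inbound-frame pool; i2o_pool_alloc() is this driver's wrapper around the kernel mempool API, which preallocates a guaranteed minimum of frames so message submission cannot fail outright under memory pressure. A generic sketch of the idea — the names are hypothetical and the kmem_cache_create() signature is the current five-argument one:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *frame_cache;

static mempool_t *make_frame_pool(size_t frame_size, int min_nr)
{
	frame_cache = kmem_cache_create("frame_pool", frame_size, 0, 0, NULL);
	if (!frame_cache)
		return NULL;

	/* min_nr elements are reserved up front and kept replenished. */
	return mempool_create(min_nr, mempool_alloc_slab,
			      mempool_free_slab, frame_cache);
}
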
@@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c)
1137 goto iop_reset; 1102 goto iop_reset;
1138 } 1103 }
1139 1104
1140 c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0),
1141 &c->device, "iop%d", c->unit);
1142 if (IS_ERR(c->classdev)) {
1143 osm_err("%s: could not add controller class\n", c->name);
1144 goto device_del;
1145 }
1146
1147 osm_info("%s: Activating I2O controller...\n", c->name); 1105 osm_info("%s: Activating I2O controller...\n", c->name);
1148 osm_info("%s: This may take a few minutes if there are many devices\n", 1106 osm_info("%s: This may take a few minutes if there are many devices\n",
1149 c->name); 1107 c->name);
1150 1108
1151 if ((rc = i2o_iop_activate(c))) { 1109 if ((rc = i2o_iop_activate(c))) {
1152 osm_err("%s: could not activate controller\n", c->name); 1110 osm_err("%s: could not activate controller\n", c->name);
1153 goto class_del; 1111 goto device_del;
1154 } 1112 }
1155 1113
1156 osm_debug("%s: building sys table...\n", c->name); 1114 osm_debug("%s: building sys table...\n", c->name);
1157 1115
1158 if ((rc = i2o_systab_build())) 1116 if ((rc = i2o_systab_build()))
1159 goto class_del; 1117 goto device_del;
1160 1118
1161 osm_debug("%s: online controller...\n", c->name); 1119 osm_debug("%s: online controller...\n", c->name);
1162 1120
1163 if ((rc = i2o_iop_online(c))) 1121 if ((rc = i2o_iop_online(c)))
1164 goto class_del; 1122 goto device_del;
1165 1123
1166 osm_debug("%s: getting LCT...\n", c->name); 1124 osm_debug("%s: getting LCT...\n", c->name);
1167 1125
1168 if ((rc = i2o_exec_lct_get(c))) 1126 if ((rc = i2o_exec_lct_get(c)))
1169 goto class_del; 1127 goto device_del;
1170 1128
1171 list_add(&c->list, &i2o_controllers); 1129 list_add(&c->list, &i2o_controllers);
1172 1130
@@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c)
1176 1134
1177 return 0; 1135 return 0;
1178 1136
1179 class_del:
1180 class_device_unregister(c->classdev);
1181
1182 device_del: 1137 device_del:
1183 device_del(&c->device); 1138 device_del(&c->device);
1184 1139
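
Dropping the class device removes one rung from i2o_iop_add()'s unwind ladder, so every later failure now jumps straight to device_del. The ladder itself is the standard kernel error-handling shape; the step functions below are stubs for illustration:

static int step_a(void)  { return 0; }	/* e.g. device_add() */
static void undo_a(void) { }		/* e.g. device_del() */
static int step_b(void)  { return 0; }	/* e.g. activation */

static int setup(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;

	rc = step_b();
	if (rc)
		goto undo_step_a;	/* unwind only what succeeded */

	return 0;

      undo_step_a:
	undo_a();
      out:
	return rc;
}
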
@@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c)
1199 * is waited for, or expected. If you do not want further notifications, 1154 * is waited for, or expected. If you do not want further notifications,
1200 * call the i2o_event_register again with a evt_mask of 0. 1155 * call the i2o_event_register again with a evt_mask of 0.
1201 * 1156 *
1202 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for 1157 * Returns 0 on success or negative error code on failure.
1203 * sending the request.
1204 */ 1158 */
1205int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, 1159int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
1206 int tcntxt, u32 evt_mask) 1160 int tcntxt, u32 evt_mask)
1207{ 1161{
1208 struct i2o_controller *c = dev->iop; 1162 struct i2o_controller *c = dev->iop;
1209 struct i2o_message __iomem *msg; 1163 struct i2o_message *msg;
1210 u32 m;
1211 1164
1212 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 1165 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
1213 if (m == I2O_QUEUE_EMPTY) 1166 if (IS_ERR(msg))
1214 return -ETIMEDOUT; 1167 return PTR_ERR(msg);
1215 1168
1216 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 1169 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
1217 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. 1170 msg->u.head[1] =
1218 tid, &msg->u.head[1]); 1171 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
1219 writel(drv->context, &msg->u.s.icntxt); 1172 lct_data.tid);
1220 writel(tcntxt, &msg->u.s.tcntxt); 1173 msg->u.s.icntxt = cpu_to_le32(drv->context);
1221 writel(evt_mask, &msg->body[0]); 1174 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
1175 msg->body[0] = cpu_to_le32(evt_mask);
1222 1176
1223 i2o_msg_post(c, m); 1177 i2o_msg_post(c, msg);
1224 1178
1225 return 0; 1179 return 0;
1226}; 1180};
@@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void)
1239 1193
1240 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 1194 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1241 1195
1242 i2o_controller_class = class_create(THIS_MODULE, "i2o_controller");
1243 if (IS_ERR(i2o_controller_class)) {
1244 osm_err("can't register class i2o_controller\n");
1245 goto exit;
1246 }
1247
1248 if ((rc = i2o_driver_init())) 1196 if ((rc = i2o_driver_init()))
1249 goto class_exit; 1197 goto exit;
1250 1198
1251 if ((rc = i2o_exec_init())) 1199 if ((rc = i2o_exec_init()))
1252 goto driver_exit; 1200 goto driver_exit;
@@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void)
1262 driver_exit: 1210 driver_exit:
1263 i2o_driver_exit(); 1211 i2o_driver_exit();
1264 1212
1265 class_exit:
1266 class_destroy(i2o_controller_class);
1267
1268 exit: 1213 exit:
1269 return rc; 1214 return rc;
1270} 1215}
@@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void)
1279 i2o_pci_exit(); 1224 i2o_pci_exit();
1280 i2o_exec_exit(); 1225 i2o_exec_exit();
1281 i2o_driver_exit(); 1226 i2o_driver_exit();
1282 class_destroy(i2o_controller_class);
1283}; 1227};
1284 1228
1285module_init(i2o_iop_init); 1229module_init(i2o_iop_init);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec3..c5b656cdea7c 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
339 pci_name(pdev)); 339 pci_name(pdev));
340 340
341 c->pdev = pdev; 341 c->pdev = pdev;
342 c->device.parent = get_device(&pdev->dev); 342 c->device.parent = &pdev->dev;
343 343
344 /* Cards that fall apart if you hit them with large I/O loads... */ 344 /* Cards that fall apart if you hit them with large I/O loads... */
345 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { 345 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
@@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
410 if ((rc = i2o_iop_add(c))) 410 if ((rc = i2o_iop_add(c)))
411 goto uninstall; 411 goto uninstall;
412 412
413 get_device(&c->device);
414
415 if (i960) 413 if (i960)
416 pci_write_config_word(i960, 0x42, 0x03ff); 414 pci_write_config_word(i960, 0x42, 0x03ff);
417 415
@@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
424 i2o_pci_free(c); 422 i2o_pci_free(c);
425 423
426 free_controller: 424 free_controller:
427 put_device(c->device.parent);
428 i2o_iop_free(c); 425 i2o_iop_free(c);
429 426
430 disable: 427 disable:
@@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev)
454 451
455 printk(KERN_INFO "%s: Controller removed.\n", c->name); 452 printk(KERN_INFO "%s: Controller removed.\n", c->name);
456 453
457 put_device(c->device.parent);
458 put_device(&c->device); 454 put_device(&c->device);
459}; 455};
460 456
@@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void)
483{ 479{
484 pci_unregister_driver(&i2o_pci_driver); 480 pci_unregister_driver(&i2o_pci_driver);
485}; 481};
482
486MODULE_DEVICE_TABLE(pci, i2o_pci_ids); 483MODULE_DEVICE_TABLE(pci, i2o_pci_ids);
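
The pci.c hunks delete a manual get_device()/put_device() pair on the parent: device_add() already pins the parent device for as long as the child is registered, so the extra reference was redundant and easy to leak on error paths. The basic invariant, sketched:

#include <linux/device.h>

/* Every get_device() must pair with exactly one put_device(); an
 * unmatched reference keeps the device (and through it, its parent
 * chain) from ever being released. */
static void borrow(struct device *dev)
{
	get_device(dev);	/* take a reference ... */
	/* ... dev may be used safely here ... */
	put_device(dev);	/* ... and give it back */
}
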
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index abcf19116d70..8e380c14bf65 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -263,7 +263,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
263 */ 263 */
264 add_disk_randomness(req->rq_disk); 264 add_disk_randomness(req->rq_disk);
265 blkdev_dequeue_request(req); 265 blkdev_dequeue_request(req);
266 end_that_request_last(req); 266 end_that_request_last(req, 1);
267 } 267 }
268 spin_unlock_irq(&md->lock); 268 spin_unlock_irq(&md->lock);
269 } while (ret); 269 } while (ret);
@@ -289,7 +289,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
289 289
290 add_disk_randomness(req->rq_disk); 290 add_disk_randomness(req->rq_disk);
291 blkdev_dequeue_request(req); 291 blkdev_dequeue_request(req);
292 end_that_request_last(req); 292 end_that_request_last(req, 0);
293 spin_unlock_irq(&md->lock); 293 spin_unlock_irq(&md->lock);
294 294
295 return 0; 295 return 0;
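
The mmc_block hunks follow a block-layer API change in this merge: end_that_request_last() grew an uptodate argument — 1 for success, 0 (or a negative errno) for failure — so the driver reports the outcome instead of the core assuming success. The completion sequence of that era, as a period sketch (these calls have long since been replaced):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void finish_req(struct request *req, int uptodate, spinlock_t *lock)
{
	spin_lock_irq(lock);
	add_disk_randomness(req->rq_disk);	/* feed the entropy pool */
	blkdev_dequeue_request(req);		/* off the queue ... */
	end_that_request_last(req, uptodate);	/* ... and completed */
	spin_unlock_irq(lock);
}
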
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c782a6329805..fa39b944bc46 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -6,7 +6,7 @@ menu "PHY device support"
6 6
7config PHYLIB 7config PHYLIB
8 tristate "PHY Device support and infrastructure" 8 tristate "PHY Device support and infrastructure"
9 depends on NET_ETHERNET && (BROKEN || !ARCH_S390) 9 depends on NET_ETHERNET && (BROKEN || !S390)
10 help 10 help
11 Ethernet controllers are usually attached to PHY 11 Ethernet controllers are usually attached to PHY
12 devices. This option provides infrastructure for 12 devices. This option provides infrastructure for
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 1bd22cd40c75..87ee3271b17d 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -98,7 +98,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
98#include <linux/in.h> 98#include <linux/in.h>
99#include <linux/errno.h> 99#include <linux/errno.h>
100#include <linux/delay.h> 100#include <linux/delay.h>
101#include <linux/lp.h>
102#include <linux/init.h> 101#include <linux/init.h>
103#include <linux/netdevice.h> 102#include <linux/netdevice.h>
104#include <linux/etherdevice.h> 103#include <linux/etherdevice.h>
@@ -106,7 +105,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
106#include <linux/skbuff.h> 105#include <linux/skbuff.h>
107#include <linux/if_plip.h> 106#include <linux/if_plip.h>
108#include <linux/workqueue.h> 107#include <linux/workqueue.h>
109#include <linux/ioport.h>
110#include <linux/spinlock.h> 108#include <linux/spinlock.h>
111#include <linux/parport.h> 109#include <linux/parport.h>
112#include <linux/bitops.h> 110#include <linux/bitops.h>
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 725a14119f2a..b8241561da45 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -77,7 +77,7 @@ config PARPORT_PC_SUPERIO
77 77
78config PARPORT_PC_PCMCIA 78config PARPORT_PC_PCMCIA
79 tristate "Support for PCMCIA management for PC-style ports" 79 tristate "Support for PCMCIA management for PC-style ports"
80 depends on PARPORT!=n && (PCMCIA!=n && PARPORT_PC=m && PARPORT_PC || PARPORT_PC=y && PCMCIA) 80 depends on PCMCIA && PARPORT_PC
81 help 81 help
82 Say Y here if you need PCMCIA support for your PC-style parallel 82 Say Y here if you need PCMCIA support for your PC-style parallel
83 ports. If unsure, say N. 83 ports. If unsure, say N.
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 075c7eb5c85d..9ee67321b630 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -144,9 +144,9 @@ again:
144 add_dev (numdevs++, port, -1); 144 add_dev (numdevs++, port, -1);
145 145
146 /* Find out the legacy device's IEEE 1284 device ID. */ 146 /* Find out the legacy device's IEEE 1284 device ID. */
147 deviceid = kmalloc (1000, GFP_KERNEL); 147 deviceid = kmalloc (1024, GFP_KERNEL);
148 if (deviceid) { 148 if (deviceid) {
149 if (parport_device_id (numdevs - 1, deviceid, 1000) > 2) 149 if (parport_device_id (numdevs - 1, deviceid, 1024) > 2)
150 detected++; 150 detected++;
151 151
152 kfree (deviceid); 152 kfree (deviceid);
@@ -252,7 +252,7 @@ struct pardevice *parport_open (int devnum, const char *name,
252 selected = port->daisy; 252 selected = port->daisy;
253 parport_release (dev); 253 parport_release (dev);
254 254
255 if (selected != port->daisy) { 255 if (selected != daisy) {
256 /* No corresponding device. */ 256 /* No corresponding device. */
257 parport_unregister_device (dev); 257 parport_unregister_device (dev);
258 return NULL; 258 return NULL;
@@ -344,9 +344,9 @@ static int cpp_daisy (struct parport *port, int cmd)
344 PARPORT_CONTROL_STROBE, 344 PARPORT_CONTROL_STROBE,
345 PARPORT_CONTROL_STROBE); 345 PARPORT_CONTROL_STROBE);
346 udelay (1); 346 udelay (1);
347 s = parport_read_status (port);
347 parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); 348 parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
348 udelay (1); 349 udelay (1);
349 s = parport_read_status (port);
350 parport_write_data (port, 0xff); udelay (2); 350 parport_write_data (port, 0xff); udelay (2);
351 351
352 return s; 352 return s;
@@ -395,15 +395,15 @@ int parport_daisy_select (struct parport *port, int daisy, int mode)
395 case IEEE1284_MODE_EPP: 395 case IEEE1284_MODE_EPP:
396 case IEEE1284_MODE_EPPSL: 396 case IEEE1284_MODE_EPPSL:
397 case IEEE1284_MODE_EPPSWE: 397 case IEEE1284_MODE_EPPSWE:
398 return (cpp_daisy (port, 0x20 + daisy) & 398 return !(cpp_daisy (port, 0x20 + daisy) &
399 PARPORT_STATUS_ERROR); 399 PARPORT_STATUS_ERROR);
400 400
401 // For these modes we should switch to ECP mode: 401 // For these modes we should switch to ECP mode:
402 case IEEE1284_MODE_ECP: 402 case IEEE1284_MODE_ECP:
403 case IEEE1284_MODE_ECPRLE: 403 case IEEE1284_MODE_ECPRLE:
404 case IEEE1284_MODE_ECPSWE: 404 case IEEE1284_MODE_ECPSWE:
405 return (cpp_daisy (port, 0xd0 + daisy) & 405 return !(cpp_daisy (port, 0xd0 + daisy) &
406 PARPORT_STATUS_ERROR); 406 PARPORT_STATUS_ERROR);
407 407
408 // Nothing was told for BECP in Daisy chain specification. 408 // Nothing was told for BECP in Daisy chain specification.
409 // May be it's wise to use ECP? 409 // May be it's wise to use ECP?
@@ -413,8 +413,8 @@ int parport_daisy_select (struct parport *port, int daisy, int mode)
413 case IEEE1284_MODE_BYTE: 413 case IEEE1284_MODE_BYTE:
414 case IEEE1284_MODE_COMPAT: 414 case IEEE1284_MODE_COMPAT:
415 default: 415 default:
416 return (cpp_daisy (port, 0xe0 + daisy) & 416 return !(cpp_daisy (port, 0xe0 + daisy) &
417 PARPORT_STATUS_ERROR); 417 PARPORT_STATUS_ERROR);
418 } 418 }
419} 419}
420 420
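
The flipped return statements matter because cpp_daisy() hands back the raw status byte: a set PARPORT_STATUS_ERROR bit (the nFault line) means the addressed device did not acknowledge, so success is the negation. In isolation, with the mask value mirrored from linux/parport.h:

#define MY_STATUS_ERROR 0x08	/* mirrors PARPORT_STATUS_ERROR (nFault) */

/* Nonzero when the addressed daisy-chain device acknowledged. */
static int device_acked(unsigned char status)
{
	return !(status & MY_STATUS_ERROR);
}
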
@@ -436,7 +436,7 @@ static int select_port (struct parport *port)
436 436
437static int assign_addrs (struct parport *port) 437static int assign_addrs (struct parport *port)
438{ 438{
439 unsigned char s, last_dev; 439 unsigned char s;
440 unsigned char daisy; 440 unsigned char daisy;
441 int thisdev = numdevs; 441 int thisdev = numdevs;
442 int detected; 442 int detected;
@@ -472,10 +472,13 @@ static int assign_addrs (struct parport *port)
472 } 472 }
473 473
474 parport_write_data (port, 0x78); udelay (2); 474 parport_write_data (port, 0x78); udelay (2);
475 last_dev = 0; /* We've just been speaking to a device, so we 475 s = parport_read_status (port);
476 know there must be at least _one_ out there. */
477 476
478 for (daisy = 0; daisy < 4; daisy++) { 477 for (daisy = 0;
478 (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
479 == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
480 && daisy < 4;
481 ++daisy) {
479 parport_write_data (port, daisy); 482 parport_write_data (port, daisy);
480 udelay (2); 483 udelay (2);
481 parport_frob_control (port, 484 parport_frob_control (port,
@@ -485,14 +488,18 @@ static int assign_addrs (struct parport *port)
485 parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); 488 parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
486 udelay (1); 489 udelay (1);
487 490
488 if (last_dev) 491 add_dev (numdevs++, port, daisy);
489 /* No more devices. */
490 break;
491 492
492 last_dev = !(parport_read_status (port) 493 /* See if this device thought it was the last in the
493 & PARPORT_STATUS_BUSY); 494 * chain. */
495 if (!(s & PARPORT_STATUS_BUSY))
496 break;
494 497
495 add_dev (numdevs++, port, daisy); 498 /* We are seeing pass through status now. We see
 499 last_dev from the next device, or, if last_dev does
 500 not work, status lines from some non-daisy-chain
501 device. */
502 s = parport_read_status (port);
496 } 503 }
497 504
498 parport_write_data (port, 0xff); udelay (2); 505 parport_write_data (port, 0xff); udelay (2);
@@ -501,11 +508,11 @@ static int assign_addrs (struct parport *port)
501 detected); 508 detected);
502 509
503 /* Ask the new devices to introduce themselves. */ 510 /* Ask the new devices to introduce themselves. */
504 deviceid = kmalloc (1000, GFP_KERNEL); 511 deviceid = kmalloc (1024, GFP_KERNEL);
505 if (!deviceid) return 0; 512 if (!deviceid) return 0;
506 513
507 for (daisy = 0; thisdev < numdevs; thisdev++, daisy++) 514 for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
508 parport_device_id (thisdev, deviceid, 1000); 515 parport_device_id (thisdev, deviceid, 1024);
509 516
510 kfree (deviceid); 517 kfree (deviceid);
511 return detected; 518 return detected;
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index ce1e2aad8b10..d6c77658231e 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -165,17 +165,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
165 /* Does the error line indicate end of data? */ 165 /* Does the error line indicate end of data? */
166 if (((i & 1) == 0) && 166 if (((i & 1) == 0) &&
167 (parport_read_status(port) & PARPORT_STATUS_ERROR)) { 167 (parport_read_status(port) & PARPORT_STATUS_ERROR)) {
168 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 168 goto end_of_data;
169 DPRINTK (KERN_DEBUG
170 "%s: No more nibble data (%d bytes)\n",
171 port->name, i/2);
172
173 /* Go to reverse idle phase. */
174 parport_frob_control (port,
175 PARPORT_CONTROL_AUTOFD,
176 PARPORT_CONTROL_AUTOFD);
177 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
178 break;
179 } 169 }
180 170
181 /* Event 7: Set nAutoFd low. */ 171 /* Event 7: Set nAutoFd low. */
@@ -225,18 +215,25 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
225 byte = nibble; 215 byte = nibble;
226 } 216 }
227 217
228 i /= 2; /* i is now in bytes */
229
230 if (i == len) { 218 if (i == len) {
231 /* Read the last nibble without checking data avail. */ 219 /* Read the last nibble without checking data avail. */
232 port = port->physport; 220 if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
233 if (parport_read_status (port) & PARPORT_STATUS_ERROR) 221 end_of_data:
234 port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 222 DPRINTK (KERN_DEBUG
223 "%s: No more nibble data (%d bytes)\n",
224 port->name, i/2);
225
226 /* Go to reverse idle phase. */
227 parport_frob_control (port,
228 PARPORT_CONTROL_AUTOFD,
229 PARPORT_CONTROL_AUTOFD);
230 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
231 }
235 else 232 else
236 port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; 233 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
237 } 234 }
238 235
239 return i; 236 return i/2;
240#endif /* IEEE1284 support */ 237#endif /* IEEE1284 support */
241} 238}
242 239
@@ -256,17 +253,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
256 253
257 /* Data available? */ 254 /* Data available? */
258 if (parport_read_status (port) & PARPORT_STATUS_ERROR) { 255 if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
259 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 256 goto end_of_data;
260 DPRINTK (KERN_DEBUG
261 "%s: No more byte data (%Zd bytes)\n",
262 port->name, count);
263
264 /* Go to reverse idle phase. */
265 parport_frob_control (port,
266 PARPORT_CONTROL_AUTOFD,
267 PARPORT_CONTROL_AUTOFD);
268 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
269 break;
270 } 257 }
271 258
272 /* Event 14: Place data bus in high impedance state. */ 259 /* Event 14: Place data bus in high impedance state. */
@@ -318,11 +305,20 @@ size_t parport_ieee1284_read_byte (struct parport *port,
318 305
319 if (count == len) { 306 if (count == len) {
320 /* Read the last byte without checking data avail. */ 307 /* Read the last byte without checking data avail. */
321 port = port->physport; 308 if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
322 if (parport_read_status (port) & PARPORT_STATUS_ERROR) 309 end_of_data:
323 port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 310 DPRINTK (KERN_DEBUG
311 "%s: No more byte data (%Zd bytes)\n",
312 port->name, count);
313
314 /* Go to reverse idle phase. */
315 parport_frob_control (port,
316 PARPORT_CONTROL_AUTOFD,
317 PARPORT_CONTROL_AUTOFD);
318 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
319 }
324 else 320 else
325 port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; 321 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
326 } 322 }
327 323
328 return count; 324 return count;
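
Besides folding the duplicated end-of-data handling behind one label, the nibble-mode rework repairs a unit mix-up: the loop counts nibbles in i, and len appears to be a nibble count at this point, yet the old code halved i into bytes (the deleted "i is now in bytes" line) before the i == len comparison. The fix keeps both counters in like units and converts exactly once, at return — roughly:

#include <linux/types.h>

/* Both counters in nibbles; bytes exist only at the boundary. */
static size_t finish_read(size_t nibbles_read, size_t nibbles_wanted)
{
	if (nibbles_read == nibbles_wanted) {
		/* ... probe the final nibble's data-available status ... */
	}
	return nibbles_read / 2;	/* report bytes to the caller */
}
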
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index c6493ad7c0c8..18e85ccdae67 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1169,7 +1169,7 @@ dump_parport_state ("fwd idle", port);
1169 1169
1170/* GCC is not inlining extern inline function later overwritten to non-inline, 1170/* GCC is not inlining extern inline function later overwritten to non-inline,
1171 so we use outlined_ variants here. */ 1171 so we use outlined_ variants here. */
1172static struct parport_operations parport_pc_ops = 1172static const struct parport_operations parport_pc_ops =
1173{ 1173{
1174 .write_data = parport_pc_write_data, 1174 .write_data = parport_pc_write_data,
1175 .read_data = parport_pc_read_data, 1175 .read_data = parport_pc_read_data,
@@ -1211,10 +1211,11 @@ static struct parport_operations parport_pc_ops =
1211static void __devinit show_parconfig_smsc37c669(int io, int key) 1211static void __devinit show_parconfig_smsc37c669(int io, int key)
1212{ 1212{
1213 int cr1,cr4,cra,cr23,cr26,cr27,i=0; 1213 int cr1,cr4,cra,cr23,cr26,cr27,i=0;
1214 static const char *modes[]={ "SPP and Bidirectional (PS/2)", 1214 static const char *const modes[]={
1215 "EPP and SPP", 1215 "SPP and Bidirectional (PS/2)",
1216 "ECP", 1216 "EPP and SPP",
1217 "ECP and EPP" }; 1217 "ECP",
1218 "ECP and EPP" };
1218 1219
1219 outb(key,io); 1220 outb(key,io);
1220 outb(key,io); 1221 outb(key,io);
@@ -1288,7 +1289,7 @@ static void __devinit show_parconfig_smsc37c669(int io, int key)
1288static void __devinit show_parconfig_winbond(int io, int key) 1289static void __devinit show_parconfig_winbond(int io, int key)
1289{ 1290{
1290 int cr30,cr60,cr61,cr70,cr74,crf0,i=0; 1291 int cr30,cr60,cr61,cr70,cr74,crf0,i=0;
1291 static const char *modes[] = { 1292 static const char *const modes[] = {
1292 "Standard (SPP) and Bidirectional(PS/2)", /* 0 */ 1293 "Standard (SPP) and Bidirectional(PS/2)", /* 0 */
1293 "EPP-1.9 and SPP", 1294 "EPP-1.9 and SPP",
1294 "ECP", 1295 "ECP",
@@ -1297,7 +1298,9 @@ static void __devinit show_parconfig_winbond(int io, int key)
1297 "EPP-1.7 and SPP", /* 5 */ 1298 "EPP-1.7 and SPP", /* 5 */
1298 "undefined!", 1299 "undefined!",
1299 "ECP and EPP-1.7" }; 1300 "ECP and EPP-1.7" };
1300 static char *irqtypes[] = { "pulsed low, high-Z", "follows nACK" }; 1301 static char *const irqtypes[] = {
1302 "pulsed low, high-Z",
1303 "follows nACK" };
1301 1304
1302 /* The registers are called compatible-PnP because the 1305 /* The registers are called compatible-PnP because the
1303 register layout is modelled after ISA-PnP, the access 1306 register layout is modelled after ISA-PnP, the access
@@ -2396,7 +2399,8 @@ EXPORT_SYMBOL (parport_pc_unregister_port);
2396 2399
2397/* ITE support maintained by Rich Liu <richliu@poorman.org> */ 2400/* ITE support maintained by Rich Liu <richliu@poorman.org> */
2398static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq, 2401static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
2399 int autodma, struct parport_pc_via_data *via) 2402 int autodma,
2403 const struct parport_pc_via_data *via)
2400{ 2404{
2401 short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 }; 2405 short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
2402 struct resource *base_res; 2406 struct resource *base_res;
@@ -2524,7 +2528,8 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
2524}; 2528};
2525 2529
2526static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq, 2530static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
2527 int autodma, struct parport_pc_via_data *via) 2531 int autodma,
2532 const struct parport_pc_via_data *via)
2528{ 2533{
2529 u8 tmp, tmp2, siofunc; 2534 u8 tmp, tmp2, siofunc;
2530 u8 ppcontrol = 0; 2535 u8 ppcontrol = 0;
@@ -2694,8 +2699,9 @@ enum parport_pc_sio_types {
2694 2699
2695/* each element directly indexed from enum list, above */ 2700/* each element directly indexed from enum list, above */
2696static struct parport_pc_superio { 2701static struct parport_pc_superio {
2697 int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, struct parport_pc_via_data *via); 2702 int (*probe) (struct pci_dev *pdev, int autoirq, int autodma,
2698 struct parport_pc_via_data *via; 2703 const struct parport_pc_via_data *via);
2704 const struct parport_pc_via_data *via;
2699} parport_pc_superio_info[] __devinitdata = { 2705} parport_pc_superio_info[] __devinitdata = {
2700 { sio_via_probe, &via_686a_data, }, 2706 { sio_via_probe, &via_686a_data, },
2701 { sio_via_probe, &via_8231_data, }, 2707 { sio_via_probe, &via_8231_data, },
@@ -2828,7 +2834,7 @@ static struct parport_pc_pci {
2828 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */ 2834 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
2829}; 2835};
2830 2836
2831static struct pci_device_id parport_pc_pci_tbl[] = { 2837static const struct pci_device_id parport_pc_pci_tbl[] = {
2832 /* Super-IO onboard chips */ 2838 /* Super-IO onboard chips */
2833 { 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a }, 2839 { 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a },
2834 { 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 }, 2840 { 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 },
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index 4b48b31ec235..b62aee8de3cb 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -11,9 +11,9 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13 13
14static struct { 14static const struct {
15 char *token; 15 const char *token;
16 char *descr; 16 const char *descr;
17} classes[] = { 17} classes[] = {
18 { "", "Legacy device" }, 18 { "", "Legacy device" },
19 { "PRINTER", "Printer" }, 19 { "PRINTER", "Printer" },
@@ -128,8 +128,131 @@ static void parse_data(struct parport *port, int device, char *str)
128 kfree(txt); 128 kfree(txt);
129} 129}
130 130
131/* Read up to count-1 bytes of device id. Terminate buffer with
132 * '\0'. Buffer begins with two Device ID length bytes as given by
133 * device. */
134static ssize_t parport_read_device_id (struct parport *port, char *buffer,
135 size_t count)
136{
137 unsigned char length[2];
138 unsigned lelen, belen;
139 size_t idlens[4];
140 unsigned numidlens;
141 unsigned current_idlen;
142 ssize_t retval;
143 size_t len;
144
145 /* First two bytes are MSB,LSB of inclusive length. */
146 retval = parport_read (port, length, 2);
147
148 if (retval < 0)
149 return retval;
150 if (retval != 2)
151 return -EIO;
152
153 if (count < 2)
154 return 0;
155 memcpy(buffer, length, 2);
156 len = 2;
157
158 /* Some devices wrongly send LE length, and some send it two
159 * bytes short. Construct a sorted array of lengths to try. */
160 belen = (length[0] << 8) + length[1];
161 lelen = (length[1] << 8) + length[0];
162 idlens[0] = min(belen, lelen);
163 idlens[1] = idlens[0]+2;
164 if (belen != lelen) {
165 int off = 2;
166 /* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
167 if (idlens[0] <= 2)
168 off = 0;
169 idlens[off] = max(belen, lelen);
170 idlens[off+1] = idlens[off]+2;
171 numidlens = off+2;
172 }
173 else {
174 /* Some devices don't truly implement Device ID, but
175 * just return constant nibble forever. This catches
176 * also those cases. */
177 if (idlens[0] == 0 || idlens[0] > 0xFFF) {
178 printk (KERN_DEBUG "%s: reported broken Device ID"
179 " length of %#zX bytes\n",
180 port->name, idlens[0]);
181 return -EIO;
182 }
183 numidlens = 2;
184 }
185
186 /* Try to respect the given ID length despite all the bugs in
187 * the ID length. Read according to shortest possible ID
188 * first. */
189 for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
190 size_t idlen = idlens[current_idlen];
191 if (idlen+1 >= count)
192 break;
193
194 retval = parport_read (port, buffer+len, idlen-len);
195
196 if (retval < 0)
197 return retval;
198 len += retval;
199
200 if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
201 if (belen != len) {
202 printk (KERN_DEBUG "%s: Device ID was %d bytes"
203 " while device told it would be %d"
204 " bytes\n",
205 port->name, len, belen);
206 }
207 goto done;
208 }
209
210 /* This might end reading the Device ID too
211 * soon. Hopefully the needed fields were already in
212 * the first 256 bytes or so that we must have read so
213 * far. */
214 if (buffer[len-1] == ';') {
215 printk (KERN_DEBUG "%s: Device ID reading stopped"
216 " before device told data not available. "
217 "Current idlen %d of %d, len bytes %02X %02X\n",
218 port->name, current_idlen, numidlens,
219 length[0], length[1]);
220 goto done;
221 }
222 }
223 if (current_idlen < numidlens) {
224 /* Buffer not large enough, read to end of buffer. */
225 size_t idlen, len2;
226 if (len+1 < count) {
227 retval = parport_read (port, buffer+len, count-len-1);
228 if (retval < 0)
229 return retval;
230 len += retval;
231 }
232 /* Read the whole ID since some devices would not
233 * otherwise give back the Device ID from beginning
234 * next time when asked. */
235 idlen = idlens[current_idlen];
236 len2 = len;
237 while(len2 < idlen && retval > 0) {
238 char tmp[4];
239 retval = parport_read (port, tmp,
240 min(sizeof tmp, idlen-len2));
241 if (retval < 0)
242 return retval;
243 len2 += retval;
244 }
245 }
246 /* In addition, there are broken devices out there that don't
247 even finish off with a semi-colon. We do not need to care
248 about those at this time. */
249 done:
250 buffer[len] = '\0';
251 return len;
252}
253
131/* Get Std 1284 Device ID. */ 254/* Get Std 1284 Device ID. */
132ssize_t parport_device_id (int devnum, char *buffer, size_t len) 255ssize_t parport_device_id (int devnum, char *buffer, size_t count)
133{ 256{
134 ssize_t retval = -ENXIO; 257 ssize_t retval = -ENXIO;
135 struct pardevice *dev = parport_open (devnum, "Device ID probe", 258 struct pardevice *dev = parport_open (devnum, "Device ID probe",
@@ -139,76 +262,20 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len)
139 262
140 parport_claim_or_block (dev); 263 parport_claim_or_block (dev);
141 264
142 /* Negotiate to compatibility mode, and then to device ID mode. 265 /* Negotiate to compatibility mode, and then to device ID
143 * (This is in case we are already in device ID mode.) */ 266 * mode. (This is so that we start from beginning of device ID if
267 * already in device ID mode.) */
144 parport_negotiate (dev->port, IEEE1284_MODE_COMPAT); 268 parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
145 retval = parport_negotiate (dev->port, 269 retval = parport_negotiate (dev->port,
146 IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID); 270 IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
147 271
148 if (!retval) { 272 if (!retval) {
149 int idlen; 273 retval = parport_read_device_id (dev->port, buffer, count);
150 unsigned char length[2];
151
152 /* First two bytes are MSB,LSB of inclusive length. */
153 retval = parport_read (dev->port, length, 2);
154
155 if (retval != 2) goto end_id;
156
157 idlen = (length[0] << 8) + length[1] - 2;
158 /*
159 * Check if the caller-allocated buffer is large enough
160 * otherwise bail out or there will be an at least off by one.
161 */
162 if (idlen + 1 < len)
163 len = idlen;
164 else {
165 retval = -EINVAL;
166 goto out;
167 }
168 retval = parport_read (dev->port, buffer, len);
169
170 if (retval != len)
171 printk (KERN_DEBUG "%s: only read %Zd of %Zd ID bytes\n",
172 dev->port->name, retval,
173 len);
174
175 /* Some printer manufacturers mistakenly believe that
176 the length field is supposed to be _exclusive_.
177 In addition, there are broken devices out there
178 that don't even finish off with a semi-colon. */
179 if (buffer[len - 1] != ';') {
180 ssize_t diff;
181 diff = parport_read (dev->port, buffer + len, 2);
182 retval += diff;
183
184 if (diff)
185 printk (KERN_DEBUG
186 "%s: device reported incorrect "
187 "length field (%d, should be %Zd)\n",
188 dev->port->name, idlen, retval);
189 else {
190 /* One semi-colon short of a device ID. */
191 buffer[len++] = ';';
192 printk (KERN_DEBUG "%s: faking semi-colon\n",
193 dev->port->name);
194
195 /* If we get here, I don't think we
196 need to worry about the possible
197 standard violation of having read
198 more than we were told to. The
199 device is non-compliant anyhow. */
200 }
201 }
202
203 end_id:
204 buffer[len] = '\0';
205 parport_negotiate (dev->port, IEEE1284_MODE_COMPAT); 274 parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
275 if (retval > 2)
276 parse_data (dev->port, dev->daisy, buffer+2);
206 } 277 }
207 278
208 if (retval > 2)
209 parse_data (dev->port, dev->daisy, buffer);
210
211out:
212 parport_release (dev); 279 parport_release (dev);
213 parport_close (dev); 280 parport_close (dev);
214 return retval; 281 return retval;
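
What parse_data() (unchanged here) receives from parport_read_device_id() is, after the two length bytes, a flat IEEE 1284 Device ID string of "KEY:value;" pairs, e.g. "MFG:Acme;MDL:LJ4;CLS:PRINTER;". A deliberately naive scanner over such a buffer (it does not guard against a key occurring inside another field's value):

#include <linux/string.h>
#include <linux/types.h>

/* Return a pointer to the value of "key" (runs up to the next ';'),
 * or NULL if the key is absent. */
static const char *id_find_field(const char *id, const char *key)
{
	const char *p = strstr(id, key);
	size_t klen = strlen(key);

	if (!p || p[klen] != ':')
		return NULL;
	return p + klen + 1;
}
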
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 9cb3ab156b09..ea62bed6bc83 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -1002,6 +1002,7 @@ EXPORT_SYMBOL(parport_register_driver);
1002EXPORT_SYMBOL(parport_unregister_driver); 1002EXPORT_SYMBOL(parport_unregister_driver);
1003EXPORT_SYMBOL(parport_register_device); 1003EXPORT_SYMBOL(parport_register_device);
1004EXPORT_SYMBOL(parport_unregister_device); 1004EXPORT_SYMBOL(parport_unregister_device);
1005EXPORT_SYMBOL(parport_get_port);
1005EXPORT_SYMBOL(parport_put_port); 1006EXPORT_SYMBOL(parport_put_port);
1006EXPORT_SYMBOL(parport_find_number); 1007EXPORT_SYMBOL(parport_find_number);
1007EXPORT_SYMBOL(parport_find_base); 1008EXPORT_SYMBOL(parport_find_base);
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 6b7583f497d0..a1f0b0ba2bfe 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -31,15 +31,6 @@ static struct {
31} pnp_bios_callpoint; 31} pnp_bios_callpoint;
32 32
33 33
34/* The PnP BIOS entries in the GDT */
35#define PNP_GDT (GDT_ENTRY_PNPBIOS_BASE * 8)
36
37#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */
38#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */
39#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */
40#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */
41#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */
42
43/* 34/*
44 * These are some opcodes for a "static asmlinkage" 35 * These are some opcodes for a "static asmlinkage"
45 * As this code is *not* executed inside the linux kernel segment, but in a 36 * As this code is *not* executed inside the linux kernel segment, but in a
@@ -67,16 +58,11 @@ __asm__(
67 ".previous \n" 58 ".previous \n"
68); 59);
69 60
70#define Q_SET_SEL(cpu, selname, address, size) \
71do { \
72set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], __va((u32)(address))); \
73set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
74} while(0)
75
76#define Q2_SET_SEL(cpu, selname, address, size) \ 61#define Q2_SET_SEL(cpu, selname, address, size) \
77do { \ 62do { \
78set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], (u32)(address)); \ 63struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
79set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \ 64set_base(gdt[(selname) >> 3], (u32)(address)); \
65set_limit(gdt[(selname) >> 3], size); \
80} while(0) 66} while(0)
81 67
82static struct desc_struct bad_bios_desc = { 0, 0x00409200 }; 68static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
@@ -115,8 +101,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
115 return PNP_FUNCTION_NOT_SUPPORTED; 101 return PNP_FUNCTION_NOT_SUPPORTED;
116 102
117 cpu = get_cpu(); 103 cpu = get_cpu();
118 save_desc_40 = per_cpu(cpu_gdt_table,cpu)[0x40 / 8]; 104 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
119 per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = bad_bios_desc; 105 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
120 106
121 /* On some boxes IRQ's during PnP BIOS calls are deadly. */ 107 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
122 spin_lock_irqsave(&pnp_bios_lock, flags); 108 spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -158,7 +144,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
158 ); 144 );
159 spin_unlock_irqrestore(&pnp_bios_lock, flags); 145 spin_unlock_irqrestore(&pnp_bios_lock, flags);
160 146
161 per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = save_desc_40; 147 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
162 put_cpu(); 148 put_cpu();
163 149
164 /* If we get here and this is set then the PnP BIOS faulted on us. */ 150 /* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -290,12 +276,15 @@ int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
290static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) 276static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
291{ 277{
292 u16 status; 278 u16 status;
279 u16 tmp_nodenum;
293 if (!pnp_bios_present()) 280 if (!pnp_bios_present())
294 return PNP_FUNCTION_NOT_SUPPORTED; 281 return PNP_FUNCTION_NOT_SUPPORTED;
295 if ( !boot && pnpbios_dont_use_current_config ) 282 if ( !boot && pnpbios_dont_use_current_config )
296 return PNP_FUNCTION_NOT_SUPPORTED; 283 return PNP_FUNCTION_NOT_SUPPORTED;
284 tmp_nodenum = *nodenum;
297 status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0, 285 status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0,
298 nodenum, sizeof(char), data, 65536); 286 &tmp_nodenum, sizeof(tmp_nodenum), data, 65536);
287 *nodenum = tmp_nodenum;
299 return status; 288 return status;
300} 289}
301 290
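
The tmp_nodenum bounce buffer fixes a one-byte overflow: the PnP BIOS stores a 16-bit value into the node-number argument, while callers pass a u8. Widening into a properly sized temporary and narrowing on the way out keeps the second byte confined. The shape of the fix, with bios_fill() standing in for the BIOS call:

#include <linux/types.h>

/* Stand-in for the BIOS call, which always writes sizeof(u16) bytes. */
static void bios_fill(u16 *slot)
{
	*slot = 0x0042;		/* illustrative value */
}

static void get_node(u8 *nodenum)
{
	u16 tmp = *nodenum;	/* widen: give the BIOS a 16-bit slot */

	bios_fill(&tmp);
	*nodenum = (u8)tmp;	/* narrow back to the caller's byte */
}
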
@@ -535,10 +524,12 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
535 524
536 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 525 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
537 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 526 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
538 for(i=0; i < NR_CPUS; i++) 527 for (i = 0; i < NR_CPUS; i++) {
539 { 528 struct desc_struct *gdt = get_cpu_gdt_table(i);
540 Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024); 529 if (!gdt)
541 Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024); 530 continue;
542 Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024); 531 set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
543 } 532 set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg));
533 set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg));
534 }
544} 535}
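With Q_SET_SEL gone, every GDT access in this file goes through the get_cpu_gdt_table() helper instead of open-coding per_cpu(cpu_gdt_table, cpu), and the init loop can now skip CPUs whose GDT page has not been allocated. A sketch of the resulting idiom; set_base(), get_cpu_gdt_table() and GDT_ENTRY_PNPBIOS_CS32 are the symbols used in the hunks above, the function name is illustrative:

static void pnpbios_patch_all_gdts(void *entry32)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct desc_struct *gdt = get_cpu_gdt_table(cpu);

		if (!gdt)		/* no GDT allocated for this CPU */
			continue;
		set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], entry32);
	}
}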
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index c99a2fe92fb0..9803c9352d78 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5obj-y += s390mach.o sysinfo.o 5obj-y += s390mach.o sysinfo.o s390_rdev.o
6obj-y += cio/ block/ char/ crypto/ net/ scsi/ 6obj-y += cio/ block/ char/ crypto/ net/ scsi/
7 7
8drivers-y += drivers/s390/built-in.o 8drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6e7d7b06421d..6f50cc9323d9 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,11 @@
1if ARCH_S390 1if S390
2 2
3comment "S/390 block device drivers" 3comment "S/390 block device drivers"
4 depends on ARCH_S390 4 depends on S390
5 5
6config BLK_DEV_XPRAM 6config BLK_DEV_XPRAM
7 tristate "XPRAM disk support" 7 tristate "XPRAM disk support"
8 depends on ARCH_S390 8 depends on S390
9 help 9 help
10 Select this option if you want to use your expanded storage on S/390 10 Select this option if you want to use your expanded storage on S/390
11 or zSeries as a disk. This is useful as a _fast_ swap device if you 11 or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -49,7 +49,7 @@ config DASD_FBA
49 49
50config DASD_DIAG 50config DASD_DIAG
51 tristate "Support for DIAG access to Disks" 51 tristate "Support for DIAG access to Disks"
52 depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL) 52 depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL)
53 help 53 help
54 Select this option if you want to use Diagnose250 command to access 54 Select this option if you want to use Diagnose250 command to access
55 Disks under VM. If you are not running under VM or unsure what it is, 55 Disks under VM. If you are not running under VM or unsure what it is,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 7008d32433bf..f779f674dfa0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.167 $ 10 * $Revision: 1.172 $
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
@@ -604,7 +604,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
604void 604void
605dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) 605dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
606{ 606{
607#ifdef CONFIG_ARCH_S390X 607#ifdef CONFIG_64BIT
608 struct ccw1 *ccw; 608 struct ccw1 *ccw;
609 609
610 /* Clear any idals used for the request. */ 610 /* Clear any idals used for the request. */
@@ -1035,7 +1035,7 @@ dasd_end_request(struct request *req, int uptodate)
1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1036 BUG(); 1036 BUG();
1037 add_disk_randomness(req->rq_disk); 1037 add_disk_randomness(req->rq_disk);
1038 end_that_request_last(req); 1038 end_that_request_last(req, uptodate);
1039} 1039}
1040 1040
1041/* 1041/*
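This merge also changes the block-layer completion API: end_that_request_last() now takes the uptodate flag as a second argument, and every driver-side completion helper (dasd_end_request() here, tapeblock_end_request() further down) is updated to pass it through. The canonical two-step completion then looks like this sketch, which just restates the patched helper with comments:

static void my_end_request(struct request *req, int uptodate)
{
	/* Complete all sectors first; a non-zero return means bios are
	 * left over, which would be a driver bug for a finished request. */
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);	/* new second argument */
}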
@@ -1224,6 +1224,12 @@ __dasd_start_head(struct dasd_device * device)
1224 if (list_empty(&device->ccw_queue)) 1224 if (list_empty(&device->ccw_queue))
1225 return; 1225 return;
1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1227 /* check FAILFAST */
1228 if (device->stopped & ~DASD_STOPPED_PENDING &&
1229 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
1230 cqr->status = DASD_CQR_FAILED;
1231 dasd_schedule_bh(device);
1232 }
1227 if ((cqr->status == DASD_CQR_QUEUED) && 1233 if ((cqr->status == DASD_CQR_QUEUED) &&
1228 (!device->stopped)) { 1234 (!device->stopped)) {
1229 /* try to start the first I/O that can be started */ 1235 /* try to start the first I/O that can be started */
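The new check lets a queued request fail immediately instead of waiting for the device to resume. An annotated form of the logic added above: a stopped device normally just leaves requests queued, but a FAILFAST request is failed on the spot (unless the stop is only DASD_STOPPED_PENDING) and the tasklet is scheduled to complete it back to the block layer.

cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
if ((device->stopped & ~DASD_STOPPED_PENDING) &&
    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
	cqr->status = DASD_CQR_FAILED;	/* don't wait for the path */
	dasd_schedule_bh(device);	/* complete it via the bottom half */
}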
@@ -1323,7 +1329,7 @@ void
1323dasd_schedule_bh(struct dasd_device * device) 1329dasd_schedule_bh(struct dasd_device * device)
1324{ 1330{
1325 /* Protect against rescheduling. */ 1331 /* Protect against rescheduling. */
1326 if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled)) 1332 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1327 return; 1333 return;
1328 dasd_get_device(device); 1334 dasd_get_device(device);
1329 tasklet_hi_schedule(&device->tasklet); 1335 tasklet_hi_schedule(&device->tasklet);
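atomic_compare_and_swap() is retired in favor of the generic atomic_cmpxchg(), which swaps the argument order and the return convention: it returns the *old* value rather than a success/failure code, so "did someone beat me to it?" becomes a comparison against the expected old value. A self-contained sketch of the schedule-once guard used here and again in tape_block.c and ccwgroup.c below:

#include <asm/atomic.h>
#include <linux/interrupt.h>

static atomic_t scheduled = ATOMIC_INIT(0);

static void schedule_once(struct tasklet_struct *t)
{
	/* atomic_cmpxchg(v, old, new) returns the previous value of *v;
	 * only the caller that saw 0 may schedule. */
	if (atomic_cmpxchg(&scheduled, 0, 1) != 0)
		return;			/* already scheduled */
	tasklet_hi_schedule(t);
}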
@@ -1750,8 +1756,10 @@ dasd_exit(void)
1750 * SECTION: common functions for ccw_driver use 1756 * SECTION: common functions for ccw_driver use
1751 */ 1757 */
1752 1758
1753/* initial attempt at a probe function. this can be simplified once 1759/*
1754 * the other detection code is gone */ 1760 * Initial attempt at a probe function. this can be simplified once
1761 * the other detection code is gone.
1762 */
1755int 1763int
1756dasd_generic_probe (struct ccw_device *cdev, 1764dasd_generic_probe (struct ccw_device *cdev,
1757 struct dasd_discipline *discipline) 1765 struct dasd_discipline *discipline)
@@ -1770,8 +1778,10 @@ dasd_generic_probe (struct ccw_device *cdev,
1770 return ret; 1778 return ret;
1771} 1779}
1772 1780
1773/* this will one day be called from a global not_oper handler. 1781/*
1774 * It is also used by driver_unregister during module unload */ 1782 * This will one day be called from a global not_oper handler.
1783 * It is also used by driver_unregister during module unload.
1784 */
1775void 1785void
1776dasd_generic_remove (struct ccw_device *cdev) 1786dasd_generic_remove (struct ccw_device *cdev)
1777{ 1787{
@@ -1798,9 +1808,11 @@ dasd_generic_remove (struct ccw_device *cdev)
1798 dasd_delete_device(device); 1808 dasd_delete_device(device);
1799} 1809}
1800 1810
1801/* activate a device. This is called from dasd_{eckd,fba}_probe() when either 1811/*
1812 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
1802 * the device is detected for the first time and is supposed to be used 1813 * the device is detected for the first time and is supposed to be used
1803 * or the user has started activation through sysfs */ 1814 * or the user has started activation through sysfs.
1815 */
1804int 1816int
1805dasd_generic_set_online (struct ccw_device *cdev, 1817dasd_generic_set_online (struct ccw_device *cdev,
1806 struct dasd_discipline *discipline) 1818 struct dasd_discipline *discipline)
@@ -1917,7 +1929,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
1917 if (cqr->status == DASD_CQR_IN_IO) 1929 if (cqr->status == DASD_CQR_IN_IO)
1918 cqr->status = DASD_CQR_FAILED; 1930 cqr->status = DASD_CQR_FAILED;
1919 device->stopped |= DASD_STOPPED_DC_EIO; 1931 device->stopped |= DASD_STOPPED_DC_EIO;
1920 dasd_schedule_bh(device);
1921 } else { 1932 } else {
1922 list_for_each_entry(cqr, &device->ccw_queue, list) 1933 list_for_each_entry(cqr, &device->ccw_queue, list)
1923 if (cqr->status == DASD_CQR_IN_IO) { 1934 if (cqr->status == DASD_CQR_IN_IO) {
@@ -1927,6 +1938,7 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
1927 device->stopped |= DASD_STOPPED_DC_WAIT; 1938 device->stopped |= DASD_STOPPED_DC_WAIT;
1928 dasd_set_timer(device, 0); 1939 dasd_set_timer(device, 0);
1929 } 1940 }
1941 dasd_schedule_bh(device);
1930 ret = 1; 1942 ret = 1;
1931 break; 1943 break;
1932 case CIO_OPER: 1944 case CIO_OPER:
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ab8754e566bc..ba80fdea7ebf 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.51 $ 9 * $Revision: 1.53 $
10 */ 10 */
11 11
12#include <linux/config.h> 12#include <linux/config.h>
@@ -25,6 +25,7 @@
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/s390_ext.h> 26#include <asm/s390_ext.h>
27#include <asm/todclk.h> 27#include <asm/todclk.h>
28#include <asm/vtoc.h>
28 29
29#include "dasd_int.h" 30#include "dasd_int.h"
30#include "dasd_diag.h" 31#include "dasd_diag.h"
@@ -74,7 +75,7 @@ dia250(void *iob, int cmd)
74 int rc; 75 int rc;
75 76
76 __asm__ __volatile__( 77 __asm__ __volatile__(
77#ifdef CONFIG_ARCH_S390X 78#ifdef CONFIG_64BIT
78 " lghi %0,3\n" 79 " lghi %0,3\n"
79 " lgr 0,%3\n" 80 " lgr 0,%3\n"
80 " diag 0,%2,0x250\n" 81 " diag 0,%2,0x250\n"
@@ -329,7 +330,7 @@ dasd_diag_check_device(struct dasd_device *device)
329 struct dasd_diag_private *private; 330 struct dasd_diag_private *private;
330 struct dasd_diag_characteristics *rdc_data; 331 struct dasd_diag_characteristics *rdc_data;
331 struct dasd_diag_bio bio; 332 struct dasd_diag_bio bio;
332 struct dasd_diag_cms_label *label; 333 struct vtoc_cms_label *label;
333 blocknum_t end_block; 334 blocknum_t end_block;
334 unsigned int sb, bsize; 335 unsigned int sb, bsize;
335 int rc; 336 int rc;
@@ -380,7 +381,7 @@ dasd_diag_check_device(struct dasd_device *device)
380 mdsk_term_io(device); 381 mdsk_term_io(device);
381 382
382 /* figure out blocksize of device */ 383 /* figure out blocksize of device */
383 label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL); 384 label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
384 if (label == NULL) { 385 if (label == NULL) {
385 DEV_MESSAGE(KERN_WARNING, device, "%s", 386 DEV_MESSAGE(KERN_WARNING, device, "%s",
386 "No memory to allocate initialization request"); 387 "No memory to allocate initialization request");
@@ -548,6 +549,8 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
548 } 549 }
549 cqr->retries = DIAG_MAX_RETRIES; 550 cqr->retries = DIAG_MAX_RETRIES;
550 cqr->buildclk = get_clock(); 551 cqr->buildclk = get_clock();
552 if (req->flags & REQ_FAILFAST)
553 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
551 cqr->device = device; 554 cqr->device = device;
552 cqr->expires = DIAG_TIMEOUT; 555 cqr->expires = DIAG_TIMEOUT;
553 cqr->status = DASD_CQR_FILLED; 556 cqr->status = DASD_CQR_FILLED;
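The same two lines are added to each of the build_cp routines (DIAG here, ECKD and FBA below): when the block layer marks a request REQ_FAILFAST, the hint is propagated into the dasd_ccw_req so the fail-fast check in __dasd_start_head() can act on it.

/* Translate the block-layer hint into the per-CQR flag (see dasd_int.h). */
if (req->flags & REQ_FAILFAST)
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);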
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index df31484d73a7..a4f80bd735f1 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.8 $ 9 * $Revision: 1.9 $
10 */ 10 */
11 11
12#define MDSK_WRITE_REQ 0x01 12#define MDSK_WRITE_REQ 0x01
@@ -44,29 +44,8 @@ struct dasd_diag_characteristics {
44 u8 rdev_features; 44 u8 rdev_features;
45} __attribute__ ((packed, aligned(4))); 45} __attribute__ ((packed, aligned(4)));
46 46
47struct dasd_diag_cms_label { 47
48 u8 label_id[4]; 48#ifdef CONFIG_64BIT
49 u8 vol_id[6];
50 u16 version_id;
51 u32 block_size;
52 u32 origin_ptr;
53 u32 usable_count;
54 u32 formatted_count;
55 u32 block_count;
56 u32 used_count;
57 u32 fst_size;
58 u32 fst_count;
59 u8 format_date[6];
60 u8 reserved1[2];
61 u32 disk_offset;
62 u32 map_block;
63 u32 hblk_disp;
64 u32 user_disp;
65 u8 reserved2[4];
66 u8 segment_name[8];
67} __attribute__ ((packed));
68
69#ifdef CONFIG_ARCH_S390X
70#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT 49#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
71 50
72typedef u64 blocknum_t; 51typedef u64 blocknum_t;
@@ -107,7 +86,7 @@ struct dasd_diag_rw_io {
107 struct dasd_diag_bio *bio_list; 86 struct dasd_diag_bio *bio_list;
108 u8 spare4[8]; 87 u8 spare4[8];
109} __attribute__ ((packed, aligned(8))); 88} __attribute__ ((packed, aligned(8)));
110#else /* CONFIG_ARCH_S390X */ 89#else /* CONFIG_64BIT */
111#define DASD_DIAG_FLAGA_DEFAULT 0x0 90#define DASD_DIAG_FLAGA_DEFAULT 0x0
112 91
113typedef u32 blocknum_t; 92typedef u32 blocknum_t;
@@ -146,4 +125,4 @@ struct dasd_diag_rw_io {
146 u32 interrupt_params; 125 u32 interrupt_params;
147 u8 spare3[20]; 126 u8 spare3[20];
148} __attribute__ ((packed, aligned(8))); 127} __attribute__ ((packed, aligned(8)));
149#endif /* CONFIG_ARCH_S390X */ 128#endif /* CONFIG_64BIT */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 811060e10c00..96eb48258580 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9 * 9 *
10 * $Revision: 1.71 $ 10 * $Revision: 1.74 $
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
@@ -1041,7 +1041,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1041 /* Eckd can only do full blocks. */ 1041 /* Eckd can only do full blocks. */
1042 return ERR_PTR(-EINVAL); 1042 return ERR_PTR(-EINVAL);
1043 count += bv->bv_len >> (device->s2b_shift + 9); 1043 count += bv->bv_len >> (device->s2b_shift + 9);
1044#if defined(CONFIG_ARCH_S390X) 1044#if defined(CONFIG_64BIT)
1045 if (idal_is_needed (page_address(bv->bv_page), 1045 if (idal_is_needed (page_address(bv->bv_page),
1046 bv->bv_len)) 1046 bv->bv_len))
1047 cidaw += bv->bv_len >> (device->s2b_shift + 9); 1047 cidaw += bv->bv_len >> (device->s2b_shift + 9);
@@ -1136,6 +1136,8 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1136 recid++; 1136 recid++;
1137 } 1137 }
1138 } 1138 }
1139 if (req->flags & REQ_FAILFAST)
1140 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1139 cqr->device = device; 1141 cqr->device = device;
1140 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 1142 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1141 cqr->lpm = private->path_data.ppm; 1143 cqr->lpm = private->path_data.ppm;
@@ -1252,6 +1254,7 @@ dasd_eckd_release(struct block_device *bdev, int no, long args)
1252 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1254 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1253 cqr->device = device; 1255 cqr->device = device;
1254 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1256 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1257 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1255 cqr->retries = 0; 1258 cqr->retries = 0;
1256 cqr->expires = 2 * HZ; 1259 cqr->expires = 2 * HZ;
1257 cqr->buildclk = get_clock(); 1260 cqr->buildclk = get_clock();
@@ -1296,6 +1299,7 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args)
1296 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1299 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1297 cqr->device = device; 1300 cqr->device = device;
1298 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1301 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1302 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1299 cqr->retries = 0; 1303 cqr->retries = 0;
1300 cqr->expires = 2 * HZ; 1304 cqr->expires = 2 * HZ;
1301 cqr->buildclk = get_clock(); 1305 cqr->buildclk = get_clock();
@@ -1339,6 +1343,7 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
1339 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1343 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1340 cqr->device = device; 1344 cqr->device = device;
1341 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1345 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1346 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1342 cqr->retries = 0; 1347 cqr->retries = 0;
1343 cqr->expires = 2 * HZ; 1348 cqr->expires = 2 * HZ;
1344 cqr->buildclk = get_clock(); 1349 cqr->buildclk = get_clock();
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 28cb4613b7f5..8ec75dc08e2c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -4,7 +4,7 @@
4 * Bugreports.to..: <Linux390@de.ibm.com> 4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 * 6 *
7 * $Revision: 1.40 $ 7 * $Revision: 1.41 $
8 */ 8 */
9 9
10#include <linux/config.h> 10#include <linux/config.h>
@@ -271,7 +271,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
271 /* Fba can only do full blocks. */ 271 /* Fba can only do full blocks. */
272 return ERR_PTR(-EINVAL); 272 return ERR_PTR(-EINVAL);
273 count += bv->bv_len >> (device->s2b_shift + 9); 273 count += bv->bv_len >> (device->s2b_shift + 9);
274#if defined(CONFIG_ARCH_S390X) 274#if defined(CONFIG_64BIT)
275 if (idal_is_needed (page_address(bv->bv_page), 275 if (idal_is_needed (page_address(bv->bv_page),
276 bv->bv_len)) 276 bv->bv_len))
277 cidaw += bv->bv_len / blksize; 277 cidaw += bv->bv_len / blksize;
@@ -352,6 +352,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
352 recid++; 352 recid++;
353 } 353 }
354 } 354 }
355 if (req->flags & REQ_FAILFAST)
356 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
355 cqr->device = device; 357 cqr->device = device;
356 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 358 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
357 cqr->retries = 32; 359 cqr->retries = 32;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9fab04f3056d..2fb05c4a528c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.65 $ 9 * $Revision: 1.68 $
10 */ 10 */
11 11
12#ifndef DASD_INT_H 12#ifndef DASD_INT_H
@@ -208,6 +208,7 @@ struct dasd_ccw_req {
208 208
209/* per dasd_ccw_req flags */ 209/* per dasd_ccw_req flags */
210#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 210#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
211#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
211 212
212/* Signature for error recovery functions. */ 213/* Signature for error recovery functions. */
213typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); 214typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 789595b3fa09..044b75371990 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.47 $ 10 * $Revision: 1.50 $
11 * 11 *
12 * i/o controls for the dasd driver. 12 * i/o controls for the dasd driver.
13 */ 13 */
@@ -352,6 +352,9 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
352 if (device == NULL) 352 if (device == NULL)
353 return -ENODEV; 353 return -ENODEV;
354 354
355 if (dasd_profile_level == DASD_PROFILE_OFF)
356 return -EIO;
357
355 if (copy_to_user((long __user *) args, (long *) &device->profile, 358 if (copy_to_user((long __user *) args, (long *) &device->profile,
356 sizeof (struct dasd_profile_info_t))) 359 sizeof (struct dasd_profile_info_t)))
357 return -EFAULT; 360 return -EFAULT;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4fde41188996..2e727f49ad19 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -15,7 +15,7 @@
15#include <asm/io.h> 15#include <asm/io.h>
16#include <linux/completion.h> 16#include <linux/completion.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <asm/ccwdev.h> // for s390_root_dev_(un)register() 18#include <asm/s390_rdev.h>
19 19
20//#define DCSSBLK_DEBUG /* Debug messages on/off */ 20//#define DCSSBLK_DEBUG /* Debug messages on/off */
21#define DCSSBLK_NAME "dcssblk" 21#define DCSSBLK_NAME "dcssblk"
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index d428c909b8a0..bf3a67c3cc5e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -160,7 +160,7 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
160 "0: ipm %0\n" 160 "0: ipm %0\n"
161 " srl %0,28\n" 161 " srl %0,28\n"
162 "1:\n" 162 "1:\n"
163#ifndef CONFIG_ARCH_S390X 163#ifndef CONFIG_64BIT
164 ".section __ex_table,\"a\"\n" 164 ".section __ex_table,\"a\"\n"
165 " .align 4\n" 165 " .align 4\n"
166 " .long 0b,1b\n" 166 " .long 0b,1b\n"
@@ -208,7 +208,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
208 "0: ipm %0\n" 208 "0: ipm %0\n"
209 " srl %0,28\n" 209 " srl %0,28\n"
210 "1:\n" 210 "1:\n"
211#ifndef CONFIG_ARCH_S390X 211#ifndef CONFIG_64BIT
212 ".section __ex_table,\"a\"\n" 212 ".section __ex_table,\"a\"\n"
213 " .align 4\n" 213 " .align 4\n"
214 " .long 0b,1b\n" 214 " .long 0b,1b\n"
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 5a6cef2dfa13..80f7f31310e6 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -204,7 +204,7 @@ cpi_module_init(void)
204 printk(KERN_WARNING "cpi: no control program identification " 204 printk(KERN_WARNING "cpi: no control program identification "
205 "support\n"); 205 "support\n");
206 sclp_unregister(&sclp_cpi_event); 206 sclp_unregister(&sclp_cpi_event);
207 return -ENOTSUPP; 207 return -EOPNOTSUPP;
208 } 208 }
209 209
210 req = cpi_prepare_req(); 210 req = cpi_prepare_req();
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 83f75774df60..56fa69168898 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
32 psw_t quiesce_psw; 32 psw_t quiesce_psw;
33 int cpu; 33 int cpu;
34 34
35 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) 35 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
36 signal_processor(smp_processor_id(), sigp_stop); 36 signal_processor(smp_processor_id(), sigp_stop);
37 /* Wait for all other cpus to enter stopped state */ 37 /* Wait for all other cpus to enter stopped state */
38 for_each_online_cpu(cpu) { 38 for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 1efc9f21229e..5ced2725d6c7 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -65,7 +65,7 @@ static void
65tapeblock_trigger_requeue(struct tape_device *device) 65tapeblock_trigger_requeue(struct tape_device *device)
66{ 66{
67 /* Protect against rescheduling. */ 67 /* Protect against rescheduling. */
68 if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled)) 68 if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
69 return; 69 return;
70 schedule_work(&device->blk_data.requeue_task); 70 schedule_work(&device->blk_data.requeue_task);
71} 71}
@@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
80 BUG(); 80 BUG();
81 end_that_request_last(req); 81 end_that_request_last(req, uptodate);
82} 82}
83 83
84static void 84static void
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 5473c23fcb52..5acc0ace3d7d 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,7 +66,7 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
66 __cmdl = len; 66 __cmdl = len;
67 err = 0; 67 err = 0;
68 asm volatile ( 68 asm volatile (
69#ifdef __s390x__ 69#ifdef CONFIG_64BIT
70 "diag %2,%4,0x288\n" 70 "diag %2,%4,0x288\n"
71 "1: \n" 71 "1: \n"
72 ".section .fixup,\"ax\"\n" 72 ".section .fixup,\"ax\"\n"
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a1c52a682191..daf21e03b21d 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/blacklist.c 2 * drivers/s390/cio/blacklist.c
3 * S/390 common I/O routines -- blacklisting of specific devices 3 * S/390 common I/O routines -- blacklisting of specific devices
4 * $Revision: 1.35 $ 4 * $Revision: 1.39 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -15,6 +15,7 @@
15#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
18#include <linux/ctype.h> 19#include <linux/ctype.h>
19#include <linux/device.h> 20#include <linux/device.h>
20 21
@@ -34,10 +35,10 @@
34 * These can be single devices or ranges of devices 35 * These can be single devices or ranges of devices
35 */ 36 */
36 37
37/* 65536 bits to indicate if a devno is blacklisted or not */ 38/* 65536 bits for each set to indicate if a devno is blacklisted or not */
38#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \ 39#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
39 (8*sizeof(long))) 40 (8*sizeof(long)))
40static unsigned long bl_dev[__BL_DEV_WORDS]; 41static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
41typedef enum {add, free} range_action; 42typedef enum {add, free} range_action;
42 43
43/* 44/*
@@ -45,21 +46,23 @@ typedef enum {add, free} range_action;
45 * (Un-)blacklist the devices from-to 46 * (Un-)blacklist the devices from-to
46 */ 47 */
47static inline void 48static inline void
48blacklist_range (range_action action, unsigned int from, unsigned int to) 49blacklist_range (range_action action, unsigned int from, unsigned int to,
50 unsigned int ssid)
49{ 51{
50 if (!to) 52 if (!to)
51 to = from; 53 to = from;
52 54
53 if (from > to || to > __MAX_SUBCHANNELS) { 55 if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
54 printk (KERN_WARNING "Invalid blacklist range " 56 printk (KERN_WARNING "Invalid blacklist range "
55 "0x%04x to 0x%04x, skipping\n", from, to); 57 "0.%x.%04x to 0.%x.%04x, skipping\n",
58 ssid, from, ssid, to);
56 return; 59 return;
57 } 60 }
58 for (; from <= to; from++) { 61 for (; from <= to; from++) {
59 if (action == add) 62 if (action == add)
60 set_bit (from, bl_dev); 63 set_bit (from, bl_dev[ssid]);
61 else 64 else
62 clear_bit (from, bl_dev); 65 clear_bit (from, bl_dev[ssid]);
63 } 66 }
64} 67}
65 68
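The blacklist bitmap grows a second dimension: one 64Ki-bit set per subchannel-set ID (ssid), so a device number can be ignored in one subchannel set but not another. A self-contained sketch of the data structure and its accessors; the MAX_* constants here are illustrative stand-ins for the kernel's __MAX_SSID and __MAX_SUBCHANNEL:

#include <linux/bitops.h>

#define MAX_SSID	3
#define MAX_DEVNO	0xffff
#define DEV_WORDS	((MAX_DEVNO + 1 + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bl_dev[MAX_SSID + 1][DEV_WORDS];

static void blacklist_one(unsigned int ssid, unsigned int devno, int add)
{
	if (ssid > MAX_SSID || devno > MAX_DEVNO)
		return;
	if (add)
		set_bit(devno, bl_dev[ssid]);
	else
		clear_bit(devno, bl_dev[ssid]);
}

static int is_ignored(unsigned int ssid, unsigned int devno)
{
	return test_bit(devno, bl_dev[ssid]);
}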
@@ -69,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to)
69 * Shamelessly grabbed from dasd_devmap.c. 72 * Shamelessly grabbed from dasd_devmap.c.
70 */ 73 */
71static inline int 74static inline int
72blacklist_busid(char **str, int *id0, int *id1, int *devno) 75blacklist_busid(char **str, int *id0, int *ssid, int *devno)
73{ 76{
74 int val, old_style; 77 int val, old_style;
75 char *sav; 78 char *sav;
@@ -86,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
86 goto confused; 89 goto confused;
87 val = simple_strtoul(*str, str, 16); 90 val = simple_strtoul(*str, str, 16);
88 if (old_style || (*str)[0] != '.') { 91 if (old_style || (*str)[0] != '.') {
89 *id0 = *id1 = 0; 92 *id0 = *ssid = 0;
90 if (val < 0 || val > 0xffff) 93 if (val < 0 || val > 0xffff)
91 goto confused; 94 goto confused;
92 *devno = val; 95 *devno = val;
@@ -105,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
105 val = simple_strtoul(*str, str, 16); 108 val = simple_strtoul(*str, str, 16);
106 if (val < 0 || val > 0xff || (*str)++[0] != '.') 109 if (val < 0 || val > 0xff || (*str)++[0] != '.')
107 goto confused; 110 goto confused;
108 *id1 = val; 111 *ssid = val;
109 if (!isxdigit((*str)[0])) /* We require at least one hex digit */ 112 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
110 goto confused; 113 goto confused;
111 val = simple_strtoul(*str, str, 16); 114 val = simple_strtoul(*str, str, 16);
@@ -125,7 +128,7 @@ confused:
125static inline int 128static inline int
126blacklist_parse_parameters (char *str, range_action action) 129blacklist_parse_parameters (char *str, range_action action)
127{ 130{
128 unsigned int from, to, from_id0, to_id0, from_id1, to_id1; 131 unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid;
129 132
130 while (*str != 0 && *str != '\n') { 133 while (*str != 0 && *str != '\n') {
131 range_action ra = action; 134 range_action ra = action;
@@ -142,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action)
142 */ 145 */
143 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || 146 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
144 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { 147 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
145 from = 0; 148 int j;
146 to = __MAX_SUBCHANNELS; 149
147 str += 3; 150 str += 3;
151 for (j=0; j <= __MAX_SSID; j++)
152 blacklist_range(ra, 0, __MAX_SUBCHANNEL, j);
148 } else { 153 } else {
149 int rc; 154 int rc;
150 155
151 rc = blacklist_busid(&str, &from_id0, 156 rc = blacklist_busid(&str, &from_id0,
152 &from_id1, &from); 157 &from_ssid, &from);
153 if (rc) 158 if (rc)
154 continue; 159 continue;
155 to = from; 160 to = from;
156 to_id0 = from_id0; 161 to_id0 = from_id0;
157 to_id1 = from_id1; 162 to_ssid = from_ssid;
158 if (*str == '-') { 163 if (*str == '-') {
159 str++; 164 str++;
160 rc = blacklist_busid(&str, &to_id0, 165 rc = blacklist_busid(&str, &to_id0,
161 &to_id1, &to); 166 &to_ssid, &to);
162 if (rc) 167 if (rc)
163 continue; 168 continue;
164 } 169 }
@@ -168,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action)
168 strsep(&str, ",\n")); 173 strsep(&str, ",\n"));
169 continue; 174 continue;
170 } 175 }
171 if ((from_id0 != to_id0) || (from_id1 != to_id1)) { 176 if ((from_id0 != to_id0) ||
177 (from_ssid != to_ssid)) {
172 printk(KERN_WARNING "invalid cio_ignore range " 178 printk(KERN_WARNING "invalid cio_ignore range "
173 "%x.%x.%04x-%x.%x.%04x\n", 179 "%x.%x.%04x-%x.%x.%04x\n",
174 from_id0, from_id1, from, 180 from_id0, from_ssid, from,
175 to_id0, to_id1, to); 181 to_id0, to_ssid, to);
176 continue; 182 continue;
177 } 183 }
184 pr_debug("blacklist_setup: adding range "
185 "from %x.%x.%04x to %x.%x.%04x\n",
186 from_id0, from_ssid, from, to_id0, to_ssid, to);
187 blacklist_range (ra, from, to, to_ssid);
178 } 188 }
179 /* FIXME: ignoring id0 and id1 here. */
180 pr_debug("blacklist_setup: adding range "
181 "from 0.0.%04x to 0.0.%04x\n", from, to);
182 blacklist_range (ra, from, to);
183 } 189 }
184 return 1; 190 return 1;
185} 191}
@@ -213,12 +219,33 @@ __setup ("cio_ignore=", blacklist_setup);
213 * Used by validate_subchannel() 219 * Used by validate_subchannel()
214 */ 220 */
215int 221int
216is_blacklisted (int devno) 222is_blacklisted (int ssid, int devno)
217{ 223{
218 return test_bit (devno, bl_dev); 224 return test_bit (devno, bl_dev[ssid]);
219} 225}
220 226
221#ifdef CONFIG_PROC_FS 227#ifdef CONFIG_PROC_FS
228static int
229__s390_redo_validation(struct subchannel_id schid, void *data)
230{
231 int ret;
232 struct subchannel *sch;
233
234 sch = get_subchannel_by_schid(schid);
235 if (sch) {
236 /* Already known. */
237 put_device(&sch->dev);
238 return 0;
239 }
240 ret = css_probe_device(schid);
241 if (ret == -ENXIO)
242 return ret; /* We're through. */
243 if (ret == -ENOMEM)
244 /* Stop validation for now. Bad, but no need for a panic. */
245 return ret;
246 return 0;
247}
248
222/* 249/*
223 * Function: s390_redo_validation 250 * Function: s390_redo_validation
224 * Look for no longer blacklisted devices 251 * Look for no longer blacklisted devices
@@ -226,29 +253,9 @@ is_blacklisted (int devno)
226static inline void 253static inline void
227s390_redo_validation (void) 254s390_redo_validation (void)
228{ 255{
229 unsigned int irq;
230
231 CIO_TRACE_EVENT (0, "redoval"); 256 CIO_TRACE_EVENT (0, "redoval");
232 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 257
233 int ret; 258 for_each_subchannel(__s390_redo_validation, NULL);
234 struct subchannel *sch;
235
236 sch = get_subchannel_by_schid(irq);
237 if (sch) {
238 /* Already known. */
239 put_device(&sch->dev);
240 continue;
241 }
242 ret = css_probe_device(irq);
243 if (ret == -ENXIO)
244 break; /* We're through. */
245 if (ret == -ENOMEM)
246 /*
247 * Stop validation for now. Bad, but no need for a
248 * panic.
249 */
250 break;
251 }
252} 259}
253 260
254/* 261/*
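This is the first of several conversions in this merge from open-coded "for (irq = 0; irq < __MAX_SUBCHANNELS; irq++)" loops to the new for_each_subchannel() iterator, which walks every subchannel_id across all subchannel sets and invokes a callback until the callback returns non-zero; that return value doubles as the loop's result, which is how __s390_redo_validation() above stops the scan on -ENXIO or -ENOMEM. A sketch of the shape of such an iterator (the real one lives in the css code; this version is illustrative):

typedef int (*sch_cb_t)(struct subchannel_id schid, void *data);

static int for_each_subchannel_sketch(sch_cb_t fn, void *data)
{
	struct subchannel_id schid = {};	/* start at 0.0.0000 */
	int ret;

	do {
		do {
			ret = fn(schid, data);
			if (ret)		/* callback ends the walk */
				return ret;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < __MAX_SSID);
	return 0;
}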
@@ -278,41 +285,90 @@ blacklist_parse_proc_parameters (char *buf)
278 s390_redo_validation (); 285 s390_redo_validation ();
279} 286}
280 287
281/* FIXME: These should be real bus ids and not home-grown ones! */ 288/* Iterator struct for all devices. */
282static int cio_ignore_read (char *page, char **start, off_t off, 289struct ccwdev_iter {
283 int count, int *eof, void *data) 290 int devno;
291 int ssid;
292 int in_range;
293};
294
295static void *
296cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
284{ 297{
285 const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */ 298 struct ccwdev_iter *iter;
286 long devno; 299
287 int len; 300 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
288 301 return NULL;
289 len = 0; 302 iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL);
290 for (devno = off; /* abuse the page variable 303 if (!iter)
291 * as counter, see fs/proc/generic.c */ 304 return ERR_PTR(-ENOMEM);
292 devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) { 305 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
293 if (!test_bit(devno, bl_dev)) 306 iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
294 continue; 307 return iter;
295 len += sprintf(page + len, "0.0.%04lx", devno); 308}
296 if (test_bit(devno + 1, bl_dev)) { /* print range */ 309
297 while (++devno < __MAX_SUBCHANNELS) 310static void
298 if (!test_bit(devno, bl_dev)) 311cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
299 break; 312{
300 len += sprintf(page + len, "-0.0.%04lx", --devno); 313 if (!IS_ERR(it))
301 } 314 kfree(it);
302 len += sprintf(page + len, "\n"); 315}
303 } 316
317static void *
318cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
319{
320 struct ccwdev_iter *iter;
321
322 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
323 return NULL;
324 iter = it;
325 if (iter->devno == __MAX_SUBCHANNEL) {
326 iter->devno = 0;
327 iter->ssid++;
328 if (iter->ssid > __MAX_SSID)
329 return NULL;
330 } else
331 iter->devno++;
332 (*offset)++;
333 return iter;
334}
304 335
305 if (devno < __MAX_SUBCHANNELS) 336static int
306 *eof = 1; 337cio_ignore_proc_seq_show(struct seq_file *s, void *it)
307 *start = (char *) (devno - off); /* number of checked entries */ 338{
308 return len; 339 struct ccwdev_iter *iter;
340
341 iter = it;
342 if (!is_blacklisted(iter->ssid, iter->devno))
343 /* Not blacklisted, nothing to output. */
344 return 0;
345 if (!iter->in_range) {
346 /* First device in range. */
347 if ((iter->devno == __MAX_SUBCHANNEL) ||
348 !is_blacklisted(iter->ssid, iter->devno + 1))
349 /* Singular device. */
350 return seq_printf(s, "0.%x.%04x\n",
351 iter->ssid, iter->devno);
352 iter->in_range = 1;
353 return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
354 }
355 if ((iter->devno == __MAX_SUBCHANNEL) ||
356 !is_blacklisted(iter->ssid, iter->devno + 1)) {
357 /* Last device in range. */
358 iter->in_range = 0;
359 return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
360 }
361 return 0;
309} 362}
310 363
311static int cio_ignore_write(struct file *file, const char __user *user_buf, 364static ssize_t
312 unsigned long user_len, void *data) 365cio_ignore_write(struct file *file, const char __user *user_buf,
366 size_t user_len, loff_t *offset)
313{ 367{
314 char *buf; 368 char *buf;
315 369
370 if (*offset)
371 return -EINVAL;
316 if (user_len > 65536) 372 if (user_len > 65536)
317 user_len = 65536; 373 user_len = 65536;
318 buf = vmalloc (user_len + 1); /* maybe better use the stack? */ 374 buf = vmalloc (user_len + 1); /* maybe better use the stack? */
@@ -330,6 +386,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf,
330 return user_len; 386 return user_len;
331} 387}
332 388
389static struct seq_operations cio_ignore_proc_seq_ops = {
390 .start = cio_ignore_proc_seq_start,
391 .stop = cio_ignore_proc_seq_stop,
392 .next = cio_ignore_proc_seq_next,
393 .show = cio_ignore_proc_seq_show,
394};
395
396static int
397cio_ignore_proc_open(struct inode *inode, struct file *file)
398{
399 return seq_open(file, &cio_ignore_proc_seq_ops);
400}
401
402static struct file_operations cio_ignore_proc_fops = {
403 .open = cio_ignore_proc_open,
404 .read = seq_read,
405 .llseek = seq_lseek,
406 .release = seq_release,
407 .write = cio_ignore_write,
408};
409
333static int 410static int
334cio_ignore_proc_init (void) 411cio_ignore_proc_init (void)
335{ 412{
@@ -340,8 +417,7 @@ cio_ignore_proc_init (void)
340 if (!entry) 417 if (!entry)
341 return 0; 418 return 0;
342 419
343 entry->read_proc = cio_ignore_read; 420 entry->proc_fops = &cio_ignore_proc_fops;
344 entry->write_proc = cio_ignore_write;
345 421
346 return 1; 422 return 1;
347} 423}
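The /proc/cio_ignore read side moves from the old read_proc byte-counting interface to the seq_file iterator model: start() positions an iterator from *pos, next() advances it, show() emits one record, stop() cleans up, and seq_read()/seq_lseek() handle all the buffer bookkeeping that cio_ignore_read() used to do by hand. A minimal self-contained skeleton of the same wiring, with made-up "demo" names:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void *demo_start(struct seq_file *s, loff_t *pos)
{
	return (*pos < 16) ? pos : NULL;	/* iterate 16 records */
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < 16) ? pos : NULL;
}

static void demo_stop(struct seq_file *s, void *v) { }

static int demo_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "record %lld\n", *(loff_t *)v);
}

static struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static struct file_operations demo_proc_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};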
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index fb42cafbe57c..95e25c1df922 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,6 +1,6 @@
1#ifndef S390_BLACKLIST_H 1#ifndef S390_BLACKLIST_H
2#define S390_BLACKLIST_H 2#define S390_BLACKLIST_H
3 3
4extern int is_blacklisted (int devno); 4extern int is_blacklisted (int ssid, int devno);
5 5
6#endif 6#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index be9d2d65c22f..e849289d4f3c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/ccwgroup.c 2 * drivers/s390/cio/ccwgroup.c
3 * bus driver for ccwgroup 3 * bus driver for ccwgroup
4 * $Revision: 1.32 $ 4 * $Revision: 1.33 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
263 struct ccwgroup_driver *gdrv; 263 struct ccwgroup_driver *gdrv;
264 int ret; 264 int ret;
265 265
266 if (atomic_compare_and_swap(0, 1, &gdev->onoff)) 266 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
267 return -EAGAIN; 267 return -EAGAIN;
268 if (gdev->state == CCWGROUP_ONLINE) { 268 if (gdev->state == CCWGROUP_ONLINE) {
269 ret = 0; 269 ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
289 struct ccwgroup_driver *gdrv; 289 struct ccwgroup_driver *gdrv;
290 int ret; 290 int ret;
291 291
292 if (atomic_compare_and_swap(0, 1, &gdev->onoff)) 292 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
293 return -EAGAIN; 293 return -EAGAIN;
294 if (gdev->state == CCWGROUP_OFFLINE) { 294 if (gdev->state == CCWGROUP_OFFLINE) {
295 ret = 0; 295 ret = 0;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3a..7270808c02d1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/chsc.c 2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
4 * $Revision: 1.120 $ 4 * $Revision: 1.126 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -24,8 +24,6 @@
24#include "ioasm.h" 24#include "ioasm.h"
25#include "chsc.h" 25#include "chsc.h"
26 26
27static struct channel_path *chps[NR_CHPIDS];
28
29static void *sei_page; 27static void *sei_page;
30 28
31static int new_channel_path(int chpid); 29static int new_channel_path(int chpid);
@@ -33,13 +31,13 @@ static int new_channel_path(int chpid);
33static inline void 31static inline void
34set_chp_logically_online(int chp, int onoff) 32set_chp_logically_online(int chp, int onoff)
35{ 33{
36 chps[chp]->state = onoff; 34 css[0]->chps[chp]->state = onoff;
37} 35}
38 36
39static int 37static int
40get_chp_status(int chp) 38get_chp_status(int chp)
41{ 39{
42 return (chps[chp] ? chps[chp]->state : -ENODEV); 40 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
43} 41}
44 42
45void 43void
@@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
77 75
78 struct { 76 struct {
79 struct chsc_header request; 77 struct chsc_header request;
80 u16 reserved1; 78 u16 reserved1a:10;
79 u16 ssid:2;
80 u16 reserved1b:4;
81 u16 f_sch; /* first subchannel */ 81 u16 f_sch; /* first subchannel */
82 u16 reserved2; 82 u16 reserved2;
83 u16 l_sch; /* last subchannel */ 83 u16 l_sch; /* last subchannel */
@@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
104 .code = 0x0004, 104 .code = 0x0004,
105 }; 105 };
106 106
107 ssd_area->f_sch = sch->irq; 107 ssd_area->ssid = sch->schid.ssid;
108 ssd_area->l_sch = sch->irq; 108 ssd_area->f_sch = sch->schid.sch_no;
109 ssd_area->l_sch = sch->schid.sch_no;
109 110
110 ccode = chsc(ssd_area); 111 ccode = chsc(ssd_area);
111 if (ccode > 0) { 112 if (ccode > 0) {
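Underlying most of the chsc.c churn is the switch from addressing subchannels by a bare "irq" number to a structured subchannel_id carrying both the subchannel-set ID and the subchannel number; stsch() and friends now take the struct, and the log messages print the 0.ssid.sch_no triple. Its shape is roughly the following (a sketch; the real definition and exact field layout live in the cio headers):

/* Sketch of the structured subchannel address used throughout the hunks. */
struct subchannel_id {
	__u32 reserved:13;
	__u32 ssid:2;		/* subchannel set ID, 0..__MAX_SSID */
	__u32 one:1;
	__u32 sch_no:16;	/* subchannel number, 0..__MAX_SUBCHANNEL */
} __attribute__ ((packed));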
@@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
147 */ 148 */
148 if (ssd_area->st > 3) { /* uhm, that looks strange... */ 149 if (ssd_area->st > 3) { /* uhm, that looks strange... */
149 CIO_CRW_EVENT(0, "Strange subchannel type %d" 150 CIO_CRW_EVENT(0, "Strange subchannel type %d"
150 " for sch %04x\n", ssd_area->st, sch->irq); 151 " for sch 0.%x.%04x\n", ssd_area->st,
152 sch->schid.ssid, sch->schid.sch_no);
151 /* 153 /*
152 * There may have been a new subchannel type defined in the 154 * There may have been a new subchannel type defined in the
153 * time since this code was written; since we don't know which 155 * time since this code was written; since we don't know which
@@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
156 return 0; 158 return 0;
157 } else { 159 } else {
158 const char *type[4] = {"I/O", "chsc", "message", "ADM"}; 160 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
159 CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", 161 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
160 sch->irq, type[ssd_area->st]); 162 sch->schid.ssid, sch->schid.sch_no,
163 type[ssd_area->st]);
161 164
162 sch->ssd_info.valid = 1; 165 sch->ssd_info.valid = 1;
163 sch->ssd_info.type = ssd_area->st; 166 sch->ssd_info.type = ssd_area->st;
@@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
218 int j; 221 int j;
219 int mask; 222 int mask;
220 struct subchannel *sch; 223 struct subchannel *sch;
221 __u8 *chpid; 224 struct channel_path *chpid;
222 struct schib schib; 225 struct schib schib;
223 226
224 sch = to_subchannel(dev); 227 sch = to_subchannel(dev);
225 chpid = data; 228 chpid = data;
226 for (j = 0; j < 8; j++) 229 for (j = 0; j < 8; j++)
227 if (sch->schib.pmcw.chpid[j] == *chpid) 230 if (sch->schib.pmcw.chpid[j] == chpid->id)
228 break; 231 break;
229 if (j >= 8) 232 if (j >= 8)
230 return 0; 233 return 0;
@@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
232 mask = 0x80 >> j; 235 mask = 0x80 >> j;
233 spin_lock(&sch->lock); 236 spin_lock(&sch->lock);
234 237
235 stsch(sch->irq, &schib); 238 stsch(sch->schid, &schib);
236 if (!schib.pmcw.dnv) 239 if (!schib.pmcw.dnv)
237 goto out_unreg; 240 goto out_unreg;
238 memcpy(&sch->schib, &schib, sizeof(struct schib)); 241 memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +287,7 @@ out_unlock:
284out_unreg: 287out_unreg:
285 spin_unlock(&sch->lock); 288 spin_unlock(&sch->lock);
286 sch->lpm = 0; 289 sch->lpm = 0;
287 if (css_enqueue_subchannel_slow(sch->irq)) { 290 if (css_enqueue_subchannel_slow(sch->schid)) {
288 css_clear_subchannel_slow_list(); 291 css_clear_subchannel_slow_list();
289 need_rescan = 1; 292 need_rescan = 1;
290 } 293 }
@@ -295,23 +298,30 @@ static inline void
295s390_set_chpid_offline( __u8 chpid) 298s390_set_chpid_offline( __u8 chpid)
296{ 299{
297 char dbf_txt[15]; 300 char dbf_txt[15];
301 struct device *dev;
298 302
299 sprintf(dbf_txt, "chpr%x", chpid); 303 sprintf(dbf_txt, "chpr%x", chpid);
300 CIO_TRACE_EVENT(2, dbf_txt); 304 CIO_TRACE_EVENT(2, dbf_txt);
301 305
302 if (get_chp_status(chpid) <= 0) 306 if (get_chp_status(chpid) <= 0)
303 return; 307 return;
304 308 dev = get_device(&css[0]->chps[chpid]->dev);
305 bus_for_each_dev(&css_bus_type, NULL, &chpid, 309 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
306 s390_subchannel_remove_chpid); 310 s390_subchannel_remove_chpid);
307 311
308 if (need_rescan || css_slow_subchannels_exist()) 312 if (need_rescan || css_slow_subchannels_exist())
309 queue_work(slow_path_wq, &slow_path_work); 313 queue_work(slow_path_wq, &slow_path_work);
314 put_device(dev);
310} 315}
311 316
317struct res_acc_data {
318 struct channel_path *chp;
319 u32 fla_mask;
320 u16 fla;
321};
322
312static int 323static int
313s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, 324s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
314 struct subchannel *sch)
315{ 325{
316 int found; 326 int found;
317 int chp; 327 int chp;
@@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
323 * check if chpid is in information updated by ssd 333 * check if chpid is in information updated by ssd
324 */ 334 */
325 if (sch->ssd_info.valid && 335 if (sch->ssd_info.valid &&
326 sch->ssd_info.chpid[chp] == chpid && 336 sch->ssd_info.chpid[chp] == res_data->chp->id &&
327 (sch->ssd_info.fla[chp] & fla_mask) == fla) { 337 (sch->ssd_info.fla[chp] & res_data->fla_mask)
338 == res_data->fla) {
328 found = 1; 339 found = 1;
329 break; 340 break;
330 } 341 }
@@ -337,24 +348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
337 * new path information and eventually check for logically 348 * new path information and eventually check for logically
338 * offline chpids. 349 * offline chpids.
339 */ 350 */
340 ccode = stsch(sch->irq, &sch->schib); 351 ccode = stsch(sch->schid, &sch->schib);
341 if (ccode > 0) 352 if (ccode > 0)
342 return 0; 353 return 0;
343 354
344 return 0x80 >> chp; 355 return 0x80 >> chp;
345} 356}
346 357
358static inline int
359s390_process_res_acc_new_sch(struct subchannel_id schid)
360{
361 struct schib schib;
362 int ret;
363 /*
364 * We don't know the device yet, but since a path
365 * may be available now to the device we'll have
366 * to do recognition again.
367 * Since we don't have any idea about which chpid
368 * that beast may be on we'll have to do a stsch
369 * on all devices, grr...
370 */
371 if (stsch_err(schid, &schib))
372 /* We're through */
373 return need_rescan ? -EAGAIN : -ENXIO;
374
375 /* Put it on the slow path. */
376 ret = css_enqueue_subchannel_slow(schid);
377 if (ret) {
378 css_clear_subchannel_slow_list();
379 need_rescan = 1;
380 return -EAGAIN;
381 }
382 return 0;
383}
384
347static int 385static int
348s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) 386__s390_process_res_acc(struct subchannel_id schid, void *data)
349{ 387{
388 int chp_mask, old_lpm;
389 struct res_acc_data *res_data;
350 struct subchannel *sch; 390 struct subchannel *sch;
351 int irq, rc; 391
392 res_data = (struct res_acc_data *)data;
393 sch = get_subchannel_by_schid(schid);
394 if (!sch)
395 /* Check if a subchannel is newly available. */
396 return s390_process_res_acc_new_sch(schid);
397
398 spin_lock_irq(&sch->lock);
399
400 chp_mask = s390_process_res_acc_sch(res_data, sch);
401
402 if (chp_mask == 0) {
403 spin_unlock_irq(&sch->lock);
404 return 0;
405 }
406 old_lpm = sch->lpm;
407 sch->lpm = ((sch->schib.pmcw.pim &
408 sch->schib.pmcw.pam &
409 sch->schib.pmcw.pom)
410 | chp_mask) & sch->opm;
411 if (!old_lpm && sch->lpm)
412 device_trigger_reprobe(sch);
413 else if (sch->driver && sch->driver->verify)
414 sch->driver->verify(&sch->dev);
415
416 spin_unlock_irq(&sch->lock);
417 put_device(&sch->dev);
418 return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
419}
420
421
422static int
423s390_process_res_acc (struct res_acc_data *res_data)
424{
425 int rc;
352 char dbf_txt[15]; 426 char dbf_txt[15];
353 427
354 sprintf(dbf_txt, "accpr%x", chpid); 428 sprintf(dbf_txt, "accpr%x", res_data->chp->id);
355 CIO_TRACE_EVENT( 2, dbf_txt); 429 CIO_TRACE_EVENT( 2, dbf_txt);
356 if (fla != 0) { 430 if (res_data->fla != 0) {
357 sprintf(dbf_txt, "fla%x", fla); 431 sprintf(dbf_txt, "fla%x", res_data->fla);
358 CIO_TRACE_EVENT( 2, dbf_txt); 432 CIO_TRACE_EVENT( 2, dbf_txt);
359 } 433 }
360 434
@@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
365 * The more information we have (info), the less scanning 439 * The more information we have (info), the less scanning
366 * will we have to do. 440 * will we have to do.
367 */ 441 */
368 442 rc = for_each_subchannel(__s390_process_res_acc, res_data);
369 if (!get_chp_status(chpid)) 443 if (css_slow_subchannels_exist())
370 return 0; /* no need to do the rest */ 444 rc = -EAGAIN;
371 445 else if (rc != -EAGAIN)
372 rc = 0; 446 rc = 0;
373 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
374 int chp_mask, old_lpm;
375
376 sch = get_subchannel_by_schid(irq);
377 if (!sch) {
378 struct schib schib;
379 int ret;
380 /*
381 * We don't know the device yet, but since a path
382 * may be available now to the device we'll have
383 * to do recognition again.
384 * Since we don't have any idea about which chpid
385 * that beast may be on we'll have to do a stsch
386 * on all devices, grr...
387 */
388 if (stsch(irq, &schib)) {
389 /* We're through */
390 if (need_rescan)
391 rc = -EAGAIN;
392 break;
393 }
394 if (need_rescan) {
395 rc = -EAGAIN;
396 continue;
397 }
398 /* Put it on the slow path. */
399 ret = css_enqueue_subchannel_slow(irq);
400 if (ret) {
401 css_clear_subchannel_slow_list();
402 need_rescan = 1;
403 }
404 rc = -EAGAIN;
405 continue;
406 }
407
408 spin_lock_irq(&sch->lock);
409
410 chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
411
412 if (chp_mask == 0) {
413
414 spin_unlock_irq(&sch->lock);
415 continue;
416 }
417 old_lpm = sch->lpm;
418 sch->lpm = ((sch->schib.pmcw.pim &
419 sch->schib.pmcw.pam &
420 sch->schib.pmcw.pom)
421 | chp_mask) & sch->opm;
422 if (!old_lpm && sch->lpm)
423 device_trigger_reprobe(sch);
424 else if (sch->driver && sch->driver->verify)
425 sch->driver->verify(&sch->dev);
426
427 spin_unlock_irq(&sch->lock);
428 put_device(&sch->dev);
429 if (fla_mask == 0xffff)
430 break;
431 }
432 return rc; 447 return rc;
433} 448}
434 449
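Because the iterator takes a single void *, the three parameters of the old s390_process_res_acc(chpid, fla, fla_mask) are bundled into the new res_acc_data struct and threaded through for_each_subchannel(). The calling pattern, in sketch form (the real code memsets and assigns the fields; designated initializers are used here only for brevity):

/* Bundle the parameters, then hand them to the per-subchannel callback. */
struct res_acc_data res_data = {
	.chp		= chp,		/* channel path that became available */
	.fla		= fla,		/* link address, if reported */
	.fla_mask	= fla_mask,	/* 0xffff, 0xff00 or 0 */
};

rc = for_each_subchannel(__s390_process_res_acc, &res_data);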
@@ -466,6 +481,7 @@ int
466chsc_process_crw(void) 481chsc_process_crw(void)
467{ 482{
468 int chpid, ret; 483 int chpid, ret;
484 struct res_acc_data res_data;
469 struct { 485 struct {
470 struct chsc_header request; 486 struct chsc_header request;
471 u32 reserved1; 487 u32 reserved1;
@@ -499,8 +515,9 @@ chsc_process_crw(void)
499 ret = 0; 515 ret = 0;
500 do { 516 do {
501 int ccode, status; 517 int ccode, status;
518 struct device *dev;
502 memset(sei_area, 0, sizeof(*sei_area)); 519 memset(sei_area, 0, sizeof(*sei_area));
503 520 memset(&res_data, 0, sizeof(struct res_acc_data));
504 sei_area->request = (struct chsc_header) { 521 sei_area->request = (struct chsc_header) {
505 .length = 0x0010, 522 .length = 0x0010,
506 .code = 0x000e, 523 .code = 0x000e,
@@ -573,26 +590,25 @@ chsc_process_crw(void)
573 if (status < 0) 590 if (status < 0)
574 new_channel_path(sei_area->rsid); 591 new_channel_path(sei_area->rsid);
575 else if (!status) 592 else if (!status)
576 return 0; 593 break;
577 if ((sei_area->vf & 0x80) == 0) { 594 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
578 pr_debug("chpid: %x\n", sei_area->rsid); 595 res_data.chp = to_channelpath(dev);
579 ret = s390_process_res_acc(sei_area->rsid, 596 pr_debug("chpid: %x", sei_area->rsid);
580 0, 0); 597 if ((sei_area->vf & 0xc0) != 0) {
581 } else if ((sei_area->vf & 0xc0) == 0x80) { 598 res_data.fla = sei_area->fla;
582 pr_debug("chpid: %x link addr: %x\n", 599 if ((sei_area->vf & 0xc0) == 0xc0) {
583 sei_area->rsid, sei_area->fla); 600 pr_debug(" full link addr: %x",
584 ret = s390_process_res_acc(sei_area->rsid, 601 sei_area->fla);
585 sei_area->fla, 602 res_data.fla_mask = 0xffff;
586 0xff00); 603 } else {
587 } else if ((sei_area->vf & 0xc0) == 0xc0) { 604 pr_debug(" link addr: %x",
588 pr_debug("chpid: %x full link addr: %x\n", 605 sei_area->fla);
589 sei_area->rsid, sei_area->fla); 606 res_data.fla_mask = 0xff00;
590 ret = s390_process_res_acc(sei_area->rsid, 607 }
591 sei_area->fla,
592 0xffff);
593 } 608 }
594 pr_debug("\n"); 609 ret = s390_process_res_acc(&res_data);
595 610 pr_debug("\n\n");
611 put_device(dev);
596 break; 612 break;
597 613
598 default: /* other stuff */ 614 default: /* other stuff */
@@ -604,12 +620,72 @@ chsc_process_crw(void)
604 return ret; 620 return ret;
605} 621}
606 622
623static inline int
624__chp_add_new_sch(struct subchannel_id schid)
625{
626 struct schib schib;
627 int ret;
628
629 if (stsch(schid, &schib))
630 /* We're through */
631 return need_rescan ? -EAGAIN : -ENXIO;
632
633 /* Put it on the slow path. */
634 ret = css_enqueue_subchannel_slow(schid);
635 if (ret) {
636 css_clear_subchannel_slow_list();
637 need_rescan = 1;
638 return -EAGAIN;
639 }
640 return 0;
641}
642
643
607static int 644static int
608chp_add(int chpid) 645__chp_add(struct subchannel_id schid, void *data)
609{ 646{
647 int i;
648 struct channel_path *chp;
610 struct subchannel *sch; 649 struct subchannel *sch;
611 int irq, ret, rc; 650
651 chp = (struct channel_path *)data;
652 sch = get_subchannel_by_schid(schid);
653 if (!sch)
654 /* Check if the subchannel is now available. */
655 return __chp_add_new_sch(schid);
656 spin_lock(&sch->lock);
657 for (i=0; i<8; i++)
658 if (sch->schib.pmcw.chpid[i] == chp->id) {
659 if (stsch(sch->schid, &sch->schib) != 0) {
660 /* Endgame. */
661 spin_unlock(&sch->lock);
662 return -ENXIO;
663 }
664 break;
665 }
666 if (i==8) {
667 spin_unlock(&sch->lock);
668 return 0;
669 }
670 sch->lpm = ((sch->schib.pmcw.pim &
671 sch->schib.pmcw.pam &
672 sch->schib.pmcw.pom)
673 | 0x80 >> i) & sch->opm;
674
675 if (sch->driver && sch->driver->verify)
676 sch->driver->verify(&sch->dev);
677
678 spin_unlock(&sch->lock);
679 put_device(&sch->dev);
680 return 0;
681}
682
683static int
684chp_add(int chpid)
685{
686 int rc;
612 char dbf_txt[15]; 687 char dbf_txt[15];
688 struct device *dev;
613 689
614 if (!get_chp_status(chpid)) 690 if (!get_chp_status(chpid))
615 return 0; /* no need to do the rest */ 691 return 0; /* no need to do the rest */
@@ -617,59 +693,13 @@ chp_add(int chpid)
617 sprintf(dbf_txt, "cadd%x", chpid); 693 sprintf(dbf_txt, "cadd%x", chpid);
618 CIO_TRACE_EVENT(2, dbf_txt); 694 CIO_TRACE_EVENT(2, dbf_txt);
619 695
620 rc = 0; 696 dev = get_device(&css[0]->chps[chpid]->dev);
621 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 697 rc = for_each_subchannel(__chp_add, to_channelpath(dev));
622 int i; 698 if (css_slow_subchannels_exist())
623 699 rc = -EAGAIN;
624 sch = get_subchannel_by_schid(irq); 700 if (rc != -EAGAIN)
625 if (!sch) { 701 rc = 0;
626 struct schib schib; 702 put_device(dev);
627
628 if (stsch(irq, &schib)) {
629 /* We're through */
630 if (need_rescan)
631 rc = -EAGAIN;
632 break;
633 }
634 if (need_rescan) {
635 rc = -EAGAIN;
636 continue;
637 }
638 /* Put it on the slow path. */
639 ret = css_enqueue_subchannel_slow(irq);
640 if (ret) {
641 css_clear_subchannel_slow_list();
642 need_rescan = 1;
643 }
644 rc = -EAGAIN;
645 continue;
646 }
647
648 spin_lock(&sch->lock);
649 for (i=0; i<8; i++)
650 if (sch->schib.pmcw.chpid[i] == chpid) {
651 if (stsch(sch->irq, &sch->schib) != 0) {
652 /* Endgame. */
653 spin_unlock(&sch->lock);
654 return rc;
655 }
656 break;
657 }
658 if (i==8) {
659 spin_unlock(&sch->lock);
660 return rc;
661 }
662 sch->lpm = ((sch->schib.pmcw.pim &
663 sch->schib.pmcw.pam &
664 sch->schib.pmcw.pom)
665 | 0x80 >> i) & sch->opm;
666
667 if (sch->driver && sch->driver->verify)
668 sch->driver->verify(&sch->dev);
669
670 spin_unlock(&sch->lock);
671 put_device(&sch->dev);
672 }
673 return rc; 703 return rc;
674} 704}
675 705
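The conversion above splits the old chp_add() loop body into the __chp_add() callback but keeps its path-mask update: pim & pam & pom selects the configured, available and operational paths, 0x80 >> i adds the bit for the slot where the re-appearing chpid was found, and opm filters out non-operational paths. A minimal userspace sketch of just that expression; all mask values and the slot index are invented for illustration:

#include <stdio.h>

/* Model of the lpm update in __chp_add(). Hypothetical field values. */
int main(void)
{
	unsigned char pim = 0x80, pam = 0x80, pom = 0xff;
	unsigned char opm = 0xff;	/* operational path mask */
	int i = 1;			/* matching chpid found in slot 1 */
	unsigned char lpm;

	lpm = ((pim & pam & pom) | (0x80 >> i)) & opm;
	printf("lpm = %02x\n", lpm);	/* c0: the slot-1 path bit is ORed in */
	return 0;
}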
@@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
702 if (!device_is_online(sch)) 732 if (!device_is_online(sch))
703 /* cio could be doing I/O. */ 733 /* cio could be doing I/O. */
704 return 0; 734 return 0;
705 cc = stsch(sch->irq, &sch->schib); 735 cc = stsch(sch->schid, &sch->schib);
706 if (cc) 736 if (cc)
707 return 0; 737 return 0;
708 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { 738 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
743 * just varied off path. Then kill it. 773 * just varied off path. Then kill it.
744 */ 774 */
745 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { 775 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
746 if (css_enqueue_subchannel_slow(sch->irq)) { 776 if (css_enqueue_subchannel_slow(sch->schid)) {
747 css_clear_subchannel_slow_list(); 777 css_clear_subchannel_slow_list();
748 need_rescan = 1; 778 need_rescan = 1;
749 } 779 }
@@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
781 return 0; 811 return 0;
782} 812}
783 813
814static int
815__s390_vary_chpid_on(struct subchannel_id schid, void *data)
816{
817 struct schib schib;
818 struct subchannel *sch;
819
820 sch = get_subchannel_by_schid(schid);
821 if (sch) {
822 put_device(&sch->dev);
823 return 0;
824 }
825 if (stsch_err(schid, &schib))
826 /* We're through */
827 return -ENXIO;
828 /* Put it on the slow path. */
829 if (css_enqueue_subchannel_slow(schid)) {
830 css_clear_subchannel_slow_list();
831 need_rescan = 1;
832 return -EAGAIN;
833 }
834 return 0;
835}
836
784/* 837/*
785 * Function: s390_vary_chpid 838 * Function: s390_vary_chpid
786 * Varies the specified chpid online or offline 839 * Varies the specified chpid online or offline
@@ -789,8 +842,7 @@ static int
789s390_vary_chpid( __u8 chpid, int on) 842s390_vary_chpid( __u8 chpid, int on)
790{ 843{
791 char dbf_text[15]; 844 char dbf_text[15];
792 int status, irq, ret; 845 int status;
793 struct subchannel *sch;
794 846
795 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); 847 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
796 CIO_TRACE_EVENT( 2, dbf_text); 848 CIO_TRACE_EVENT( 2, dbf_text);
@@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on)
815 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? 867 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
816 s390_subchannel_vary_chpid_on : 868 s390_subchannel_vary_chpid_on :
817 s390_subchannel_vary_chpid_off); 869 s390_subchannel_vary_chpid_off);
818 if (!on) 870 if (on)
819 goto out; 871 /* Scan for new devices on varied on path. */
820 /* Scan for new devices on varied on path. */ 872 for_each_subchannel(__s390_vary_chpid_on, NULL);
821 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
822 struct schib schib;
823
824 if (need_rescan)
825 break;
826 sch = get_subchannel_by_schid(irq);
827 if (sch) {
828 put_device(&sch->dev);
829 continue;
830 }
831 if (stsch(irq, &schib))
832 /* We're through */
833 break;
834 /* Put it on the slow path. */
835 ret = css_enqueue_subchannel_slow(irq);
836 if (ret) {
837 css_clear_subchannel_slow_list();
838 need_rescan = 1;
839 }
840 }
841out:
842 if (need_rescan || css_slow_subchannels_exist()) 873 if (need_rescan || css_slow_subchannels_exist())
843 queue_work(slow_path_wq, &slow_path_work); 874 queue_work(slow_path_wq, &slow_path_work);
844 return 0; 875 return 0;
@@ -995,7 +1026,7 @@ new_channel_path(int chpid)
995 chp->id = chpid; 1026 chp->id = chpid;
996 chp->state = 1; 1027 chp->state = 1;
997 chp->dev = (struct device) { 1028 chp->dev = (struct device) {
998 .parent = &css_bus_device, 1029 .parent = &css[0]->device,
999 .release = chp_release, 1030 .release = chp_release,
1000 }; 1031 };
1001 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); 1032 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
@@ -1017,7 +1048,7 @@ new_channel_path(int chpid)
1017 device_unregister(&chp->dev); 1048 device_unregister(&chp->dev);
1018 goto out_free; 1049 goto out_free;
1019 } else 1050 } else
1020 chps[chpid] = chp; 1051 css[0]->chps[chpid] = chp;
1021 return ret; 1052 return ret;
1022out_free: 1053out_free:
1023 kfree(chp); 1054 kfree(chp);
@@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1030 struct channel_path *chp; 1061 struct channel_path *chp;
1031 struct channel_path_desc *desc; 1062 struct channel_path_desc *desc;
1032 1063
1033 chp = chps[sch->schib.pmcw.chpid[chp_no]]; 1064 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1034 if (!chp) 1065 if (!chp)
1035 return NULL; 1066 return NULL;
1036 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); 1067 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
@@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void)
1051 return (sei_page ? 0 : -ENOMEM); 1082 return (sei_page ? 0 : -ENOMEM);
1052} 1083}
1053 1084
1085int __init
1086chsc_enable_facility(int operation_code)
1087{
1088 int ret;
1089 struct {
1090 struct chsc_header request;
1091 u8 reserved1:4;
1092 u8 format:4;
1093 u8 reserved2;
1094 u16 operation_code;
1095 u32 reserved3;
1096 u32 reserved4;
1097 u32 operation_data_area[252];
1098 struct chsc_header response;
1099 u32 reserved5:4;
1100 u32 format2:4;
1101 u32 reserved6:24;
1102 } *sda_area;
1103
1104 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1105 if (!sda_area)
1106 return -ENOMEM;
1107 sda_area->request = (struct chsc_header) {
1108 .length = 0x0400,
1109 .code = 0x0031,
1110 };
1111 sda_area->operation_code = operation_code;
1112
1113 ret = chsc(sda_area);
1114 if (ret > 0) {
1115 ret = (ret == 3) ? -ENODEV : -EBUSY;
1116 goto out;
1117 }
1118 switch (sda_area->response.code) {
1119 case 0x0003: /* invalid request block */
1120 case 0x0007:
1121 ret = -EINVAL;
1122 break;
1123 case 0x0004: /* command not provided */
1124 case 0x0101: /* facility not provided */
1125 ret = -EOPNOTSUPP;
1126 break;
1127 }
1128 out:
1129 free_page((unsigned long)sda_area);
1130 return ret;
1131}
1132
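chsc_enable_facility() above issues a set-domain-attributes CHSC whose request block is always 0x0400 bytes long. The layout below is copied from the function with stdint stand-ins so it compiles in userspace; the offset check merely illustrates why .length is 0x0400 (4-byte request header, 12 bytes of fixed fields, 1008 bytes of operation data):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct chsc_header { uint16_t length; uint16_t code; };

/* Field-for-field copy of the sda_area in chsc_enable_facility(). */
struct sda_area {
	struct chsc_header request;
	uint8_t reserved1:4;
	uint8_t format:4;
	uint8_t reserved2;
	uint16_t operation_code;
	uint32_t reserved3;
	uint32_t reserved4;
	uint32_t operation_data_area[252];
	struct chsc_header response;
	uint32_t reserved5:4;
	uint32_t format2:4;
	uint32_t reserved6:24;
} __attribute__ ((packed));

int main(void)
{
	/* 4 + 1 + 1 + 2 + 4 + 4 + 252 * 4 = 1024, so the response header
	 * starts exactly at the 0x0400 bytes the request advertises. */
	printf("response offset: 0x%zx\n", offsetof(struct sda_area, response));
	return 0;
}

The only caller added in this commit is init_channel_subsystem() further down, which passes CHSC_SDA_OC_MSS and treats anything but 0 or -ENOMEM as "run with a single subchannel set".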
1054subsys_initcall(chsc_alloc_sei_area); 1133subsys_initcall(chsc_alloc_sei_area);
1055 1134
1056struct css_general_char css_general_characteristics; 1135struct css_general_char css_general_characteristics;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index be20da49d147..44e4b4bb1c5a 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,12 +1,12 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define NR_CHPIDS 256
5
6#define CHSC_SEI_ACC_CHPID 1 4#define CHSC_SEI_ACC_CHPID 1
7#define CHSC_SEI_ACC_LINKADDR 2 5#define CHSC_SEI_ACC_LINKADDR 2
8#define CHSC_SEI_ACC_FULLLINKADDR 3 6#define CHSC_SEI_ACC_FULLLINKADDR 3
9 7
8#define CHSC_SDA_OC_MSS 0x2
9
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
@@ -43,7 +43,9 @@ struct css_general_char {
43 u32 ext_mb : 1; /* bit 48 */ 43 u32 ext_mb : 1; /* bit 48 */
44 u32 : 7; 44 u32 : 7;
45 u32 aif_tdd : 1; /* bit 56 */ 45 u32 aif_tdd : 1; /* bit 56 */
46 u32 : 10; 46 u32 : 1;
47 u32 qebsm : 1; /* bit 58 */
48 u32 : 8;
47 u32 aif_osa : 1; /* bit 67 */ 49 u32 aif_osa : 1; /* bit 67 */
48 u32 : 28; 50 u32 : 28;
49}__attribute__((packed)); 51}__attribute__((packed));
@@ -63,4 +65,9 @@ extern int chsc_determine_css_characteristics(void);
63extern int css_characteristics_avail; 65extern int css_characteristics_avail;
64 66
65extern void *chsc_get_chp_desc(struct subchannel*, int); 67extern void *chsc_get_chp_desc(struct subchannel*, int);
68
69extern int chsc_enable_facility(int);
70
71#define to_channelpath(dev) container_of(dev, struct channel_path, dev)
72
66#endif 73#endif
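to_channelpath(), newly added here, is the standard container_of() pattern: chp_add() holds only the struct device returned by get_device() and needs the enclosing channel_path back. A self-contained model with stand-in structs (not the real cio definitions):

#include <stddef.h>
#include <stdio.h>

struct device { const char *bus_id; };

/* Stand-in for the cio channel_path: a payload plus an embedded device. */
struct channel_path {
	int id;
	struct device dev;
};

/* container_of(): subtract the member offset to recover the container. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define to_channelpath(d) container_of(d, struct channel_path, dev)

int main(void)
{
	struct channel_path chp = { .id = 0x4f, .dev = { "chp0.4f" } };
	struct device *dev = &chp.dev;	/* what the driver core hands back */

	printf("chpid %x from %s\n", to_channelpath(dev)->id, dev->bus_id);
	return 0;
}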
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 185bc73c3ecd..7376bc87206d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * $Revision: 1.135 $ 4 * $Revision: 1.138 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -135,7 +135,7 @@ cio_tpi(void)
135 return 0; 135 return 0;
136 irb = (struct irb *) __LC_IRB; 136 irb = (struct irb *) __LC_IRB;
137 /* Store interrupt response block to lowcore. */ 137 /* Store interrupt response block to lowcore. */
138 if (tsch (tpi_info->irq, irb) != 0) 138 if (tsch (tpi_info->schid, irb) != 0)
139 /* Not status pending or not operational. */ 139 /* Not status pending or not operational. */
140 return 1; 140 return 1;
141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -163,10 +163,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
163 else 163 else
164 sch->lpm = 0; 164 sch->lpm = 0;
165 165
166 stsch (sch->irq, &sch->schib); 166 stsch (sch->schid, &sch->schib);
167 167
168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " 168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
169 "subchannel %04x!\n", sch->irq); 169 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
170 sch->schid.sch_no);
170 sprintf(dbf_text, "no%s", sch->dev.bus_id); 171 sprintf(dbf_text, "no%s", sch->dev.bus_id);
171 CIO_TRACE_EVENT(0, dbf_text); 172 CIO_TRACE_EVENT(0, dbf_text);
172 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 173 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -194,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
194 sch->orb.spnd = sch->options.suspend; 195 sch->orb.spnd = sch->options.suspend;
195 sch->orb.ssic = sch->options.suspend && sch->options.inter; 196 sch->orb.ssic = sch->options.suspend && sch->options.inter;
196 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; 197 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
197#ifdef CONFIG_ARCH_S390X 198#ifdef CONFIG_64BIT
198 /* 199 /*
199 * for 64 bit we always support 64 bit IDAWs with 4k page size only 200 * for 64 bit we always support 64 bit IDAWs with 4k page size only
200 */ 201 */
@@ -204,7 +205,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
204 sch->orb.key = key >> 4; 205 sch->orb.key = key >> 4;
205 /* issue "Start Subchannel" */ 206 /* issue "Start Subchannel" */
206 sch->orb.cpa = (__u32) __pa (cpa); 207 sch->orb.cpa = (__u32) __pa (cpa);
207 ccode = ssch (sch->irq, &sch->orb); 208 ccode = ssch (sch->schid, &sch->orb);
208 209
209 /* process condition code */ 210 /* process condition code */
210 sprintf (dbf_txt, "ccode:%d", ccode); 211 sprintf (dbf_txt, "ccode:%d", ccode);
@@ -243,7 +244,7 @@ cio_resume (struct subchannel *sch)
243 CIO_TRACE_EVENT (4, "resIO"); 244 CIO_TRACE_EVENT (4, "resIO");
244 CIO_TRACE_EVENT (4, sch->dev.bus_id); 245 CIO_TRACE_EVENT (4, sch->dev.bus_id);
245 246
246 ccode = rsch (sch->irq); 247 ccode = rsch (sch->schid);
247 248
248 sprintf (dbf_txt, "ccode:%d", ccode); 249 sprintf (dbf_txt, "ccode:%d", ccode);
249 CIO_TRACE_EVENT (4, dbf_txt); 250 CIO_TRACE_EVENT (4, dbf_txt);
@@ -283,7 +284,7 @@ cio_halt(struct subchannel *sch)
283 /* 284 /*
284 * Issue "Halt subchannel" and process condition code 285 * Issue "Halt subchannel" and process condition code
285 */ 286 */
286 ccode = hsch (sch->irq); 287 ccode = hsch (sch->schid);
287 288
288 sprintf (dbf_txt, "ccode:%d", ccode); 289 sprintf (dbf_txt, "ccode:%d", ccode);
289 CIO_TRACE_EVENT (2, dbf_txt); 290 CIO_TRACE_EVENT (2, dbf_txt);
@@ -318,7 +319,7 @@ cio_clear(struct subchannel *sch)
318 /* 319 /*
319 * Issue "Clear subchannel" and process condition code 320 * Issue "Clear subchannel" and process condition code
320 */ 321 */
321 ccode = csch (sch->irq); 322 ccode = csch (sch->schid);
322 323
323 sprintf (dbf_txt, "ccode:%d", ccode); 324 sprintf (dbf_txt, "ccode:%d", ccode);
324 CIO_TRACE_EVENT (2, dbf_txt); 325 CIO_TRACE_EVENT (2, dbf_txt);
@@ -351,7 +352,7 @@ cio_cancel (struct subchannel *sch)
351 CIO_TRACE_EVENT (2, "cancelIO"); 352 CIO_TRACE_EVENT (2, "cancelIO");
352 CIO_TRACE_EVENT (2, sch->dev.bus_id); 353 CIO_TRACE_EVENT (2, sch->dev.bus_id);
353 354
354 ccode = xsch (sch->irq); 355 ccode = xsch (sch->schid);
355 356
356 sprintf (dbf_txt, "ccode:%d", ccode); 357 sprintf (dbf_txt, "ccode:%d", ccode);
357 CIO_TRACE_EVENT (2, dbf_txt); 358 CIO_TRACE_EVENT (2, dbf_txt);
@@ -359,7 +360,7 @@ cio_cancel (struct subchannel *sch)
359 switch (ccode) { 360 switch (ccode) {
360 case 0: /* success */ 361 case 0: /* success */
361 /* Update information in scsw. */ 362 /* Update information in scsw. */
362 stsch (sch->irq, &sch->schib); 363 stsch (sch->schid, &sch->schib);
363 return 0; 364 return 0;
364 case 1: /* status pending */ 365 case 1: /* status pending */
365 return -EBUSY; 366 return -EBUSY;
@@ -381,7 +382,7 @@ cio_modify (struct subchannel *sch)
381 382
382 ret = 0; 383 ret = 0;
383 for (retry = 0; retry < 5; retry++) { 384 for (retry = 0; retry < 5; retry++) {
384 ccode = msch_err (sch->irq, &sch->schib); 385 ccode = msch_err (sch->schid, &sch->schib);
385 if (ccode < 0) /* -EIO if msch gets a program check. */ 386 if (ccode < 0) /* -EIO if msch gets a program check. */
386 return ccode; 387 return ccode;
387 switch (ccode) { 388 switch (ccode) {
@@ -414,7 +415,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
414 CIO_TRACE_EVENT (2, "ensch"); 415 CIO_TRACE_EVENT (2, "ensch");
415 CIO_TRACE_EVENT (2, sch->dev.bus_id); 416 CIO_TRACE_EVENT (2, sch->dev.bus_id);
416 417
417 ccode = stsch (sch->irq, &sch->schib); 418 ccode = stsch (sch->schid, &sch->schib);
418 if (ccode) 419 if (ccode)
419 return -ENODEV; 420 return -ENODEV;
420 421
@@ -432,13 +433,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
432 */ 433 */
433 sch->schib.pmcw.csense = 0; 434 sch->schib.pmcw.csense = 0;
434 if (ret == 0) { 435 if (ret == 0) {
435 stsch (sch->irq, &sch->schib); 436 stsch (sch->schid, &sch->schib);
436 if (sch->schib.pmcw.ena) 437 if (sch->schib.pmcw.ena)
437 break; 438 break;
438 } 439 }
439 if (ret == -EBUSY) { 440 if (ret == -EBUSY) {
440 struct irb irb; 441 struct irb irb;
441 if (tsch(sch->irq, &irb) != 0) 442 if (tsch(sch->schid, &irb) != 0)
442 break; 443 break;
443 } 444 }
444 } 445 }
@@ -461,7 +462,7 @@ cio_disable_subchannel (struct subchannel *sch)
461 CIO_TRACE_EVENT (2, "dissch"); 462 CIO_TRACE_EVENT (2, "dissch");
462 CIO_TRACE_EVENT (2, sch->dev.bus_id); 463 CIO_TRACE_EVENT (2, sch->dev.bus_id);
463 464
464 ccode = stsch (sch->irq, &sch->schib); 465 ccode = stsch (sch->schid, &sch->schib);
465 if (ccode == 3) /* Not operational. */ 466 if (ccode == 3) /* Not operational. */
466 return -ENODEV; 467 return -ENODEV;
467 468
@@ -485,7 +486,7 @@ cio_disable_subchannel (struct subchannel *sch)
485 */ 486 */
486 break; 487 break;
487 if (ret == 0) { 488 if (ret == 0) {
488 stsch (sch->irq, &sch->schib); 489 stsch (sch->schid, &sch->schib);
489 if (!sch->schib.pmcw.ena) 490 if (!sch->schib.pmcw.ena)
490 break; 491 break;
491 } 492 }
@@ -508,12 +509,12 @@ cio_disable_subchannel (struct subchannel *sch)
508 * -ENODEV for subchannels with invalid device number or blacklisted devices 509 * -ENODEV for subchannels with invalid device number or blacklisted devices
509 */ 510 */
510int 511int
511cio_validate_subchannel (struct subchannel *sch, unsigned int irq) 512cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
512{ 513{
513 char dbf_txt[15]; 514 char dbf_txt[15];
514 int ccode; 515 int ccode;
515 516
516 sprintf (dbf_txt, "valsch%x", irq); 517 sprintf (dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 518 CIO_TRACE_EVENT (4, dbf_txt);
518 519
519 /* Nuke all fields. */ 520 /* Nuke all fields. */
@@ -522,17 +523,20 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
522 spin_lock_init(&sch->lock); 523 spin_lock_init(&sch->lock);
523 524
524 /* Set a name for the subchannel */ 525 /* Set a name for the subchannel */
525 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); 526 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
527 schid.sch_no);
526 528
527 /* 529 /*
528 * The first subchannel that is not-operational (ccode==3) 530 * The first subchannel that is not-operational (ccode==3)
529 * indicates that there aren't any more devices available. 531 * indicates that there aren't any more devices available.
532 * If stsch gets an exception, it means the current subchannel set
533 * is not valid.
530 */ 534 */
531 sch->irq = irq; 535 ccode = stsch_err (schid, &sch->schib);
532 ccode = stsch (irq, &sch->schib);
533 if (ccode) 536 if (ccode)
534 return -ENXIO; 537 return (ccode == 3) ? -ENXIO : ccode;
535 538
539 sch->schid = schid;
536 /* Copy subchannel type from path management control word. */ 540 /* Copy subchannel type from path management control word. */
537 sch->st = sch->schib.pmcw.st; 541 sch->st = sch->schib.pmcw.st;
538 542
@@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
541 */ 545 */
542 if (sch->st != 0) { 546 if (sch->st != 0) {
543 CIO_DEBUG(KERN_INFO, 0, 547 CIO_DEBUG(KERN_INFO, 0,
544 "Subchannel %04X reports " 548 "Subchannel 0.%x.%04x reports "
545 "non-I/O subchannel type %04X\n", 549 "non-I/O subchannel type %04X\n",
546 sch->irq, sch->st); 550 sch->schid.ssid, sch->schid.sch_no, sch->st);
547 /* We stop here for non-io subchannels. */ 551 /* We stop here for non-io subchannels. */
548 return sch->st; 552 return sch->st;
549 } 553 }
@@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
554 return -ENODEV; 558 return -ENODEV;
555 559
556 /* Devno is valid. */ 560 /* Devno is valid. */
557 if (is_blacklisted (sch->schib.pmcw.dev)) { 561 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
558 /* 562 /*
559 * This device must not be known to Linux. So we simply 563 * This device must not be known to Linux. So we simply
560 * say that there is no device and return ENODEV. 564 * say that there is no device and return ENODEV.
561 */ 565 */
562 CIO_MSG_EVENT(0, "Blacklisted device detected " 566 CIO_MSG_EVENT(0, "Blacklisted device detected "
563 "at devno %04X\n", sch->schib.pmcw.dev); 567 "at devno %04X, subchannel set %x\n",
568 sch->schib.pmcw.dev, sch->schid.ssid);
564 return -ENODEV; 569 return -ENODEV;
565 } 570 }
566 sch->opm = 0xff; 571 sch->opm = 0xff;
567 chsc_validate_chpids(sch); 572 if (!cio_is_console(sch->schid))
573 chsc_validate_chpids(sch);
568 sch->lpm = sch->schib.pmcw.pim & 574 sch->lpm = sch->schib.pmcw.pim &
569 sch->schib.pmcw.pam & 575 sch->schib.pmcw.pam &
570 sch->schib.pmcw.pom & 576 sch->schib.pmcw.pom &
571 sch->opm; 577 sch->opm;
572 578
573 CIO_DEBUG(KERN_INFO, 0, 579 CIO_DEBUG(KERN_INFO, 0,
574 "Detected device %04X on subchannel %04X" 580 "Detected device %04x on subchannel 0.%x.%04X"
575 " - PIM = %02X, PAM = %02X, POM = %02X\n", 581 " - PIM = %02X, PAM = %02X, POM = %02X\n",
576 sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, 582 sch->schib.pmcw.dev, sch->schid.ssid,
583 sch->schid.sch_no, sch->schib.pmcw.pim,
577 sch->schib.pmcw.pam, sch->schib.pmcw.pom); 584 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
578 585
579 /* 586 /*
@@ -632,7 +639,7 @@ do_IRQ (struct pt_regs *regs)
632 if (sch) 639 if (sch)
633 spin_lock(&sch->lock); 640 spin_lock(&sch->lock);
634 /* Store interrupt response block to lowcore. */ 641 /* Store interrupt response block to lowcore. */
635 if (tsch (tpi_info->irq, irb) == 0 && sch) { 642 if (tsch (tpi_info->schid, irb) == 0 && sch) {
636 /* Keep subchannel information word up to date. */ 643 /* Keep subchannel information word up to date. */
637 memcpy (&sch->schib.scsw, &irb->scsw, 644 memcpy (&sch->schib.scsw, &irb->scsw,
638 sizeof (irb->scsw)); 645 sizeof (irb->scsw));
@@ -691,28 +698,36 @@ wait_cons_dev (void)
691} 698}
692 699
693static int 700static int
694cio_console_irq(void) 701cio_test_for_console(struct subchannel_id schid, void *data)
695{ 702{
696 int irq; 703 if (stsch_err(schid, &console_subchannel.schib) != 0)
704 return -ENXIO;
705 if (console_subchannel.schib.pmcw.dnv &&
706 console_subchannel.schib.pmcw.dev ==
707 console_devno) {
708 console_irq = schid.sch_no;
709 return 1; /* found */
710 }
711 return 0;
712}
713
714
715static int
716cio_get_console_sch_no(void)
717{
718 struct subchannel_id schid;
697 719
720 init_subchannel_id(&schid);
698 if (console_irq != -1) { 721 if (console_irq != -1) {
699 /* VM provided us with the irq number of the console. */ 722 /* VM provided us with the irq number of the console. */
700 if (stsch(console_irq, &console_subchannel.schib) != 0 || 723 schid.sch_no = console_irq;
724 if (stsch(schid, &console_subchannel.schib) != 0 ||
701 !console_subchannel.schib.pmcw.dnv) 725 !console_subchannel.schib.pmcw.dnv)
702 return -1; 726 return -1;
703 console_devno = console_subchannel.schib.pmcw.dev; 727 console_devno = console_subchannel.schib.pmcw.dev;
704 } else if (console_devno != -1) { 728 } else if (console_devno != -1) {
705 /* At least the console device number is known. */ 729 /* At least the console device number is known. */
706 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 730 for_each_subchannel(cio_test_for_console, NULL);
707 if (stsch(irq, &console_subchannel.schib) != 0)
708 break;
709 if (console_subchannel.schib.pmcw.dnv &&
710 console_subchannel.schib.pmcw.dev ==
711 console_devno) {
712 console_irq = irq;
713 break;
714 }
715 }
716 if (console_irq == -1) 731 if (console_irq == -1)
717 return -1; 732 return -1;
718 } else { 733 } else {
@@ -728,17 +743,20 @@ cio_console_irq(void)
728struct subchannel * 743struct subchannel *
729cio_probe_console(void) 744cio_probe_console(void)
730{ 745{
731 int irq, ret; 746 int sch_no, ret;
747 struct subchannel_id schid;
732 748
733 if (xchg(&console_subchannel_in_use, 1) != 0) 749 if (xchg(&console_subchannel_in_use, 1) != 0)
734 return ERR_PTR(-EBUSY); 750 return ERR_PTR(-EBUSY);
735 irq = cio_console_irq(); 751 sch_no = cio_get_console_sch_no();
736 if (irq == -1) { 752 if (sch_no == -1) {
737 console_subchannel_in_use = 0; 753 console_subchannel_in_use = 0;
738 return ERR_PTR(-ENODEV); 754 return ERR_PTR(-ENODEV);
739 } 755 }
740 memset(&console_subchannel, 0, sizeof(struct subchannel)); 756 memset(&console_subchannel, 0, sizeof(struct subchannel));
741 ret = cio_validate_subchannel(&console_subchannel, irq); 757 init_subchannel_id(&schid);
758 schid.sch_no = sch_no;
759 ret = cio_validate_subchannel(&console_subchannel, schid);
742 if (ret) { 760 if (ret) {
743 console_subchannel_in_use = 0; 761 console_subchannel_in_use = 0;
744 return ERR_PTR(-ENODEV); 762 return ERR_PTR(-ENODEV);
@@ -770,11 +788,11 @@ cio_release_console(void)
770 788
771/* Bah... hack to catch console special sausages. */ 789/* Bah... hack to catch console special sausages. */
772int 790int
773cio_is_console(int irq) 791cio_is_console(struct subchannel_id schid)
774{ 792{
775 if (!console_subchannel_in_use) 793 if (!console_subchannel_in_use)
776 return 0; 794 return 0;
777 return (irq == console_subchannel.irq); 795 return schid_equal(&schid, &console_subchannel.schid);
778} 796}
779 797
780struct subchannel * 798struct subchannel *
@@ -787,7 +805,7 @@ cio_get_console_subchannel(void)
787 805
788#endif 806#endif
789static inline int 807static inline int
790__disable_subchannel_easy(unsigned int schid, struct schib *schib) 808__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
791{ 809{
792 int retry, cc; 810 int retry, cc;
793 811
@@ -805,7 +823,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib)
805} 823}
806 824
807static inline int 825static inline int
808__clear_subchannel_easy(unsigned int schid) 826__clear_subchannel_easy(struct subchannel_id schid)
809{ 827{
810 int retry; 828 int retry;
811 829
@@ -815,8 +833,8 @@ __clear_subchannel_easy(unsigned int schid)
815 struct tpi_info ti; 833 struct tpi_info ti;
816 834
817 if (tpi(&ti)) { 835 if (tpi(&ti)) {
818 tsch(ti.irq, (struct irb *)__LC_IRB); 836 tsch(ti.schid, (struct irb *)__LC_IRB);
819 if (ti.irq == schid) 837 if (schid_equal(&ti.schid, &schid))
820 return 0; 838 return 0;
821 } 839 }
822 udelay(100); 840 udelay(100);
@@ -825,31 +843,33 @@ __clear_subchannel_easy(unsigned int schid)
825} 843}
826 844
827extern void do_reipl(unsigned long devno); 845extern void do_reipl(unsigned long devno);
846static int
847__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
848{
849 struct schib schib;
850
851 if (stsch_err(schid, &schib))
852 return -ENXIO;
853 if (!schib.pmcw.ena)
854 return 0;
855 switch(__disable_subchannel_easy(schid, &schib)) {
856 case 0:
857 case -ENODEV:
858 break;
859 default: /* -EBUSY */
860 if (__clear_subchannel_easy(schid))
861 break; /* give up... */
862 stsch(schid, &schib);
863 __disable_subchannel_easy(schid, &schib);
864 }
865 return 0;
866}
828 867
829/* Clear all subchannels. */
830void 868void
831clear_all_subchannels(void) 869clear_all_subchannels(void)
832{ 870{
833 unsigned int schid;
834
835 local_irq_disable(); 871 local_irq_disable();
836 for (schid=0;schid<=highest_subchannel;schid++) { 872 for_each_subchannel(__shutdown_subchannel_easy, NULL);
837 struct schib schib;
838 if (stsch(schid, &schib))
839 break; /* break out of the loop */
840 if (!schib.pmcw.ena)
841 continue;
842 switch(__disable_subchannel_easy(schid, &schib)) {
843 case 0:
844 case -ENODEV:
845 break;
846 default: /* -EBUSY */
847 if (__clear_subchannel_easy(schid))
848 break; /* give up... jump out of switch */
849 stsch(schid, &schib);
850 __disable_subchannel_easy(schid, &schib);
851 }
852 }
853} 873}
854 874
855 /* Make sure all subchannels are quiet before we re-ipl an lpar. */ 875 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
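The shutdown path above, now driven through for_each_subchannel(), encodes a fallback chain per subchannel: disable it, and only if that reports busy, clear outstanding I/O and disable again. A compilable model of just that control flow, with stub functions faking a subchannel that is busy on the first attempt:

#include <errno.h>
#include <stdio.h>

static int disable_attempts;

/* Stubs standing in for __disable_subchannel_easy() and
 * __clear_subchannel_easy(); the first disable pretends to hit pending I/O. */
static int disable_subchannel(void)
{
	return ++disable_attempts == 1 ? -EBUSY : 0;
}

static int clear_subchannel(void) { return 0; }

int main(void)
{
	switch (disable_subchannel()) {
	case 0:
	case -ENODEV:
		break;
	default:	/* -EBUSY */
		if (clear_subchannel())
			break;	/* give up, as the kernel code does */
		disable_subchannel();
	}
	printf("disable attempts: %d\n", disable_attempts);	/* 2 */
	return 0;
}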
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index c50a9da420a9..0ca987344e07 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,6 +1,8 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5
4/* 6/*
5 * where we put the ssd info 7 * where we put the ssd info
6 */ 8 */
@@ -83,7 +85,7 @@ struct orb {
83 85
84/* subchannel data structure used by I/O subroutines */ 86/* subchannel data structure used by I/O subroutines */
85struct subchannel { 87struct subchannel {
86 unsigned int irq; /* aka. subchannel number */ 88 struct subchannel_id schid;
87 spinlock_t lock; /* subchannel lock */ 89 spinlock_t lock; /* subchannel lock */
88 90
89 enum { 91 enum {
@@ -114,7 +116,7 @@ struct subchannel {
114 116
115#define to_subchannel(n) container_of(n, struct subchannel, dev) 117#define to_subchannel(n) container_of(n, struct subchannel, dev)
116 118
117extern int cio_validate_subchannel (struct subchannel *, unsigned int); 119extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
118extern int cio_enable_subchannel (struct subchannel *, unsigned int); 120extern int cio_enable_subchannel (struct subchannel *, unsigned int);
119extern int cio_disable_subchannel (struct subchannel *); 121extern int cio_disable_subchannel (struct subchannel *);
120extern int cio_cancel (struct subchannel *); 122extern int cio_cancel (struct subchannel *);
@@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *);
127extern int cio_set_options (struct subchannel *, int); 129extern int cio_set_options (struct subchannel *, int);
128extern int cio_get_options (struct subchannel *); 130extern int cio_get_options (struct subchannel *);
129extern int cio_modify (struct subchannel *); 131extern int cio_modify (struct subchannel *);
132
130/* Use with care. */ 133/* Use with care. */
131#ifdef CONFIG_CCW_CONSOLE 134#ifdef CONFIG_CCW_CONSOLE
132extern struct subchannel *cio_probe_console(void); 135extern struct subchannel *cio_probe_console(void);
133extern void cio_release_console(void); 136extern void cio_release_console(void);
134extern int cio_is_console(int irq); 137extern int cio_is_console(struct subchannel_id);
135extern struct subchannel *cio_get_console_subchannel(void); 138extern struct subchannel *cio_get_console_subchannel(void);
136#else 139#else
137#define cio_is_console(irq) 0 140#define cio_is_console(schid) 0
138#define cio_get_console_subchannel() NULL 141#define cio_get_console_subchannel() NULL
139#endif 142#endif
140 143
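cio.h now includes schid.h, whose hunk is not part of this excerpt. The sketch below is a hypothetical reconstruction inferred purely from the usage visible here: init_subchannel_id() zero-fills the id, schid_equal() compares two ids, ssid is at most two bits wide (css_process_crw() masks it with 3) and sch_no ranges up to __MAX_SUBCHANNEL. The real header may lay the fields out differently:

#include <stdio.h>
#include <string.h>

typedef unsigned int __u32;

/* Hypothetical stand-in for struct subchannel_id from schid.h. */
struct subchannel_id {
	__u32 reserved : 14;
	__u32 ssid : 2;		/* subchannel set id */
	__u32 sch_no : 16;	/* subchannel number */
} __attribute__ ((packed));

static inline void init_subchannel_id(struct subchannel_id *schid)
{
	memset(schid, 0, sizeof(*schid));
}

static inline int schid_equal(struct subchannel_id *a, struct subchannel_id *b)
{
	return !memcmp(a, b, sizeof(*a));
}

int main(void)
{
	struct subchannel_id a, b;

	init_subchannel_id(&a);
	init_subchannel_id(&b);
	b.ssid = 1;
	printf("same: %d, different set: %d\n",
	       schid_equal(&a, &a), schid_equal(&a, &b));
	return 0;
}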
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b978f7fe8327..0b03714e696a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $) 2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $)
3 * 3 *
4 * Linux on zSeries Channel Measurement Facility support 4 * Linux on zSeries Channel Measurement Facility support
5 * 5 *
@@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
178 /* msch can silently fail, so do it again if necessary */ 178 /* msch can silently fail, so do it again if necessary */
179 for (retry = 0; retry < 3; retry++) { 179 for (retry = 0; retry < 3; retry++) {
180 /* prepare schib */ 180 /* prepare schib */
181 stsch(sch->irq, schib); 181 stsch(sch->schid, schib);
182 schib->pmcw.mme = mme; 182 schib->pmcw.mme = mme;
183 schib->pmcw.mbfc = mbfc; 183 schib->pmcw.mbfc = mbfc;
184 /* address can be either a block address or a block index */ 184 /* address can be either a block address or a block index */
@@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
188 schib->pmcw.mbi = address; 188 schib->pmcw.mbi = address;
189 189
190 /* try to submit it */ 190 /* try to submit it */
191 switch(ret = msch_err(sch->irq, schib)) { 191 switch(ret = msch_err(sch->schid, schib)) {
192 case 0: 192 case 0:
193 break; 193 break;
194 case 1: 194 case 1:
@@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
202 ret = -EINVAL; 202 ret = -EINVAL;
203 break; 203 break;
204 } 204 }
205 stsch(sch->irq, schib); /* restore the schib */ 205 stsch(sch->schid, schib); /* restore the schib */
206 206
207 if (ret) 207 if (ret)
208 break; 208 break;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119cacc27..e565193650c7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * $Revision: 1.85 $ 4 * $Revision: 1.93 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -21,19 +21,35 @@
21#include "ioasm.h" 21#include "ioasm.h"
22#include "chsc.h" 22#include "chsc.h"
23 23
24unsigned int highest_subchannel;
25int need_rescan = 0; 24int need_rescan = 0;
26int css_init_done = 0; 25int css_init_done = 0;
26static int max_ssid = 0;
27
28struct channel_subsystem *css[__MAX_CSSID + 1];
27 29
28struct pgid global_pgid;
29int css_characteristics_avail = 0; 30int css_characteristics_avail = 0;
30 31
31struct device css_bus_device = { 32inline int
32 .bus_id = "css0", 33for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
33}; 34{
35 struct subchannel_id schid;
36 int ret;
37
38 init_subchannel_id(&schid);
39 ret = -ENODEV;
40 do {
41 do {
42 ret = fn(schid, data);
43 if (ret)
44 break;
45 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
46 schid.sch_no = 0;
47 } while (schid.ssid++ < max_ssid);
48 return ret;
49}
34 50
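for_each_subchannel(), defined above, replaces every open-coded 0..__MAX_SUBCHANNELS loop in this commit: it walks all subchannel numbers within a set, advances to the next set when MSS is active (max_ssid > 0), stops scanning the current set as soon as the callback returns non-zero, and hands that value back to the caller. A scaled-down, runnable model of the same contract; the ranges are shrunk so the output stays short:

#include <stdio.h>

#define MAX_SUBCHANNEL 4	/* __MAX_SUBCHANNEL is 65535 in the diff */
static int max_ssid;		/* stays 0 unless MSS was enabled */

struct subchannel_id { int ssid, sch_no; };

static int for_each_subchannel(int (*fn)(struct subchannel_id, void *),
			       void *data)
{
	struct subchannel_id schid = { 0, 0 };
	int ret = -1;	/* -ENODEV in the kernel version */

	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

/* Callback in the style of cio_test_for_console(): 1 means "found, stop". */
static int find_sch(struct subchannel_id schid, void *data)
{
	printf("visit 0.%x.%04x\n", schid.ssid, schid.sch_no);
	return schid.sch_no == *(int *)data;
}

int main(void)
{
	int wanted = 2;

	return for_each_subchannel(find_sch, &wanted) == 1 ? 0 : 1;
}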
35static struct subchannel * 51static struct subchannel *
36css_alloc_subchannel(int irq) 52css_alloc_subchannel(struct subchannel_id schid)
37{ 53{
38 struct subchannel *sch; 54 struct subchannel *sch;
39 int ret; 55 int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
41 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); 57 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
42 if (sch == NULL) 58 if (sch == NULL)
43 return ERR_PTR(-ENOMEM); 59 return ERR_PTR(-ENOMEM);
44 ret = cio_validate_subchannel (sch, irq); 60 ret = cio_validate_subchannel (sch, schid);
45 if (ret < 0) { 61 if (ret < 0) {
46 kfree(sch); 62 kfree(sch);
47 return ERR_PTR(ret); 63 return ERR_PTR(ret);
48 } 64 }
49 if (irq > highest_subchannel)
50 highest_subchannel = irq;
51 65
52 if (sch->st != SUBCHANNEL_TYPE_IO) { 66 if (sch->st != SUBCHANNEL_TYPE_IO) {
53 /* For now we ignore all non-io subchannels. */ 67 /* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
87 struct subchannel *sch; 101 struct subchannel *sch;
88 102
89 sch = to_subchannel(dev); 103 sch = to_subchannel(dev);
90 if (!cio_is_console(sch->irq)) 104 if (!cio_is_console(sch->schid))
91 kfree(sch); 105 kfree(sch);
92} 106}
93 107
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
99 int ret; 113 int ret;
100 114
101 /* Initialize the subchannel structure */ 115 /* Initialize the subchannel structure */
102 sch->dev.parent = &css_bus_device; 116 sch->dev.parent = &css[0]->device;
103 sch->dev.bus = &css_bus_type; 117 sch->dev.bus = &css_bus_type;
104 sch->dev.release = &css_subchannel_release; 118 sch->dev.release = &css_subchannel_release;
105 119
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
114} 128}
115 129
116int 130int
117css_probe_device(int irq) 131css_probe_device(struct subchannel_id schid)
118{ 132{
119 int ret; 133 int ret;
120 struct subchannel *sch; 134 struct subchannel *sch;
121 135
122 sch = css_alloc_subchannel(irq); 136 sch = css_alloc_subchannel(schid);
123 if (IS_ERR(sch)) 137 if (IS_ERR(sch))
124 return PTR_ERR(sch); 138 return PTR_ERR(sch);
125 ret = css_register_subchannel(sch); 139 ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
132check_subchannel(struct device * dev, void * data) 146check_subchannel(struct device * dev, void * data)
133{ 147{
134 struct subchannel *sch; 148 struct subchannel *sch;
135 int irq = (unsigned long)data; 149 struct subchannel_id *schid = data;
136 150
137 sch = to_subchannel(dev); 151 sch = to_subchannel(dev);
138 return (sch->irq == irq); 152 return schid_equal(&sch->schid, schid);
139} 153}
140 154
141struct subchannel * 155struct subchannel *
142get_subchannel_by_schid(int irq) 156get_subchannel_by_schid(struct subchannel_id schid)
143{ 157{
144 struct device *dev; 158 struct device *dev;
145 159
146 dev = bus_find_device(&css_bus_type, NULL, 160 dev = bus_find_device(&css_bus_type, NULL,
147 (void *)(unsigned long)irq, check_subchannel); 161 (void *)&schid, check_subchannel);
148 162
149 return dev ? to_subchannel(dev) : NULL; 163 return dev ? to_subchannel(dev) : NULL;
150} 164}
151 165
152 166
153static inline int 167static inline int
154css_get_subchannel_status(struct subchannel *sch, int schid) 168css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
155{ 169{
156 struct schib schib; 170 struct schib schib;
157 int cc; 171 int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
170} 184}
171 185
172static int 186static int
173css_evaluate_subchannel(int irq, int slow) 187css_evaluate_subchannel(struct subchannel_id schid, int slow)
174{ 188{
175 int event, ret, disc; 189 int event, ret, disc;
176 struct subchannel *sch; 190 struct subchannel *sch;
177 unsigned long flags; 191 unsigned long flags;
178 192
179 sch = get_subchannel_by_schid(irq); 193 sch = get_subchannel_by_schid(schid);
180 disc = sch ? device_is_disconnected(sch) : 0; 194 disc = sch ? device_is_disconnected(sch) : 0;
181 if (disc && slow) { 195 if (disc && slow) {
182 if (sch) 196 if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
194 put_device(&sch->dev); 208 put_device(&sch->dev);
195 return -EAGAIN; /* Will be done on the slow path. */ 209 return -EAGAIN; /* Will be done on the slow path. */
196 } 210 }
197 event = css_get_subchannel_status(sch, irq); 211 event = css_get_subchannel_status(sch, schid);
198 CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", 212 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
199 irq, event, sch?(disc?"disconnected":"normal"):"unknown", 213 schid.ssid, schid.sch_no, event,
214 sch?(disc?"disconnected":"normal"):"unknown",
200 slow?"slow":"fast"); 215 slow?"slow":"fast");
201 switch (event) { 216 switch (event) {
202 case CIO_NO_PATH: 217 case CIO_NO_PATH:
@@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow)
253 sch->schib.pmcw.intparm = 0; 268 sch->schib.pmcw.intparm = 0;
254 cio_modify(sch); 269 cio_modify(sch);
255 put_device(&sch->dev); 270 put_device(&sch->dev);
256 ret = css_probe_device(irq); 271 ret = css_probe_device(schid);
257 } else { 272 } else {
258 /* 273 /*
259 * We can't immediately deregister the disconnected 274 * We can't immediately deregister the disconnected
@@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow)
272 device_trigger_reprobe(sch); 287 device_trigger_reprobe(sch);
273 spin_unlock_irqrestore(&sch->lock, flags); 288 spin_unlock_irqrestore(&sch->lock, flags);
274 } 289 }
275 ret = sch ? 0 : css_probe_device(irq); 290 ret = sch ? 0 : css_probe_device(schid);
276 break; 291 break;
277 default: 292 default:
278 BUG(); 293 BUG();
@@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow)
281 return ret; 296 return ret;
282} 297}
283 298
284static void 299static int
285css_rescan_devices(void) 300css_rescan_devices(struct subchannel_id schid, void *data)
286{ 301{
287 int irq, ret; 302 return css_evaluate_subchannel(schid, 1);
288
289 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
290 ret = css_evaluate_subchannel(irq, 1);
291 /* No more memory. It doesn't make sense to continue. No
292 * panic because this can happen in midflight and just
293 * because we can't use a new device is no reason to crash
294 * the system. */
295 if (ret == -ENOMEM)
296 break;
297 /* -ENXIO indicates that there are no more subchannels. */
298 if (ret == -ENXIO)
299 break;
300 }
301} 303}
302 304
303struct slow_subchannel { 305struct slow_subchannel {
304 struct list_head slow_list; 306 struct list_head slow_list;
305 unsigned long schid; 307 struct subchannel_id schid;
306}; 308};
307 309
308static LIST_HEAD(slow_subchannels_head); 310static LIST_HEAD(slow_subchannels_head);
@@ -315,7 +317,7 @@ css_trigger_slow_path(void)
315 317
316 if (need_rescan) { 318 if (need_rescan) {
317 need_rescan = 0; 319 need_rescan = 0;
318 css_rescan_devices(); 320 for_each_subchannel(css_rescan_devices, NULL);
319 return; 321 return;
320 } 322 }
321 323
@@ -354,23 +356,31 @@ css_reiterate_subchannels(void)
354 * Called from the machine check handler for subchannel report words. 356 * Called from the machine check handler for subchannel report words.
355 */ 357 */
356int 358int
357css_process_crw(int irq) 359css_process_crw(int rsid1, int rsid2)
358{ 360{
359 int ret; 361 int ret;
362 struct subchannel_id mchk_schid;
360 363
361 CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); 364 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
365 rsid1, rsid2);
362 366
363 if (need_rescan) 367 if (need_rescan)
364 /* We need to iterate all subchannels anyway. */ 368 /* We need to iterate all subchannels anyway. */
365 return -EAGAIN; 369 return -EAGAIN;
370
371 init_subchannel_id(&mchk_schid);
372 mchk_schid.sch_no = rsid1;
373 if (rsid2 != 0)
374 mchk_schid.ssid = (rsid2 >> 8) & 3;
375
366 /* 376 /*
367 * Since we are always presented with IPI in the CRW, we have to 377 * Since we are always presented with IPI in the CRW, we have to
368 * use stsch() to find out if the subchannel in question has come 378 * use stsch() to find out if the subchannel in question has come
369 * or gone. 379 * or gone.
370 */ 380 */
371 ret = css_evaluate_subchannel(irq, 0); 381 ret = css_evaluate_subchannel(mchk_schid, 0);
372 if (ret == -EAGAIN) { 382 if (ret == -EAGAIN) {
373 if (css_enqueue_subchannel_slow(irq)) { 383 if (css_enqueue_subchannel_slow(mchk_schid)) {
374 css_clear_subchannel_slow_list(); 384 css_clear_subchannel_slow_list();
375 need_rescan = 1; 385 need_rescan = 1;
376 } 386 }
@@ -378,22 +388,83 @@ css_process_crw(int irq)
378 return ret; 388 return ret;
379} 389}
380 390
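css_process_crw() now receives both report-source ids from the machine check handler; when the second one is valid, bits 8 and 9 carry the subchannel set of the reporting subchannel. A toy decode with invented rsid values:

#include <stdio.h>

int main(void)
{
	int rsid1 = 0x4711;	/* hypothetical subchannel number */
	int rsid2 = 0x0100;	/* hypothetical second source id: ssid 1 */
	struct { int ssid, sch_no; } mchk_schid = { 0, rsid1 };

	if (rsid2 != 0)
		mchk_schid.ssid = (rsid2 >> 8) & 3;
	printf("evaluating 0.%x.%04x\n", mchk_schid.ssid, mchk_schid.sch_no);
	return 0;
}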
381static void __init 391static int __init
382css_generate_pgid(void) 392__init_channel_subsystem(struct subchannel_id schid, void *data)
383{ 393{
384 /* Let's build our path group ID here. */ 394 struct subchannel *sch;
385 if (css_characteristics_avail && css_general_characteristics.mcss) 395 int ret;
386 global_pgid.cpu_addr = 0x8000; 396
397 if (cio_is_console(schid))
398 sch = cio_get_console_subchannel();
387 else { 399 else {
400 sch = css_alloc_subchannel(schid);
401 if (IS_ERR(sch))
402 ret = PTR_ERR(sch);
403 else
404 ret = 0;
405 switch (ret) {
406 case 0:
407 break;
408 case -ENOMEM:
409 panic("Out of memory in init_channel_subsystem\n");
410 /* -ENXIO: no more subchannels. */
411 case -ENXIO:
412 return ret;
413 default:
414 return 0;
415 }
416 }
417 /*
418 * We register ALL valid subchannels in ioinfo, even those
419 * that have been present before init_channel_subsystem.
420 * These subchannels can't have been registered yet (kmalloc
421 * not working) so we do it now. This is true e.g. for the
422 * console subchannel.
423 */
424 css_register_subchannel(sch);
425 return 0;
426}
427
428static void __init
429css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
430{
431 if (css_characteristics_avail && css_general_characteristics.mcss) {
432 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
433 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
434 } else {
388#ifdef CONFIG_SMP 435#ifdef CONFIG_SMP
389 global_pgid.cpu_addr = hard_smp_processor_id(); 436 css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
390#else 437#else
391 global_pgid.cpu_addr = 0; 438 css->global_pgid.pgid_high.cpu_addr = 0;
392#endif 439#endif
393 } 440 }
394 global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; 441 css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
395 global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; 442 css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
396 global_pgid.tod_high = (__u32) (get_clock() >> 32); 443 css->global_pgid.tod_high = tod_high;
444
445}
446
447static void
448channel_subsystem_release(struct device *dev)
449{
450 struct channel_subsystem *css;
451
452 css = to_css(dev);
453 kfree(css);
454}
455
456static inline void __init
457setup_css(int nr)
458{
459 u32 tod_high;
460
461 memset(css[nr], 0, sizeof(struct channel_subsystem));
462 css[nr]->valid = 1;
463 css[nr]->cssid = nr;
464 sprintf(css[nr]->device.bus_id, "css%x", nr);
465 css[nr]->device.release = channel_subsystem_release;
466 tod_high = (u32) (get_clock() >> 32);
467 css_generate_pgid(css[nr], tod_high);
397} 468}
398 469
399/* 470/*
@@ -404,53 +475,50 @@ css_generate_pgid(void)
404static int __init 475static int __init
405init_channel_subsystem (void) 476init_channel_subsystem (void)
406{ 477{
407 int ret, irq; 478 int ret, i;
408 479
409 if (chsc_determine_css_characteristics() == 0) 480 if (chsc_determine_css_characteristics() == 0)
410 css_characteristics_avail = 1; 481 css_characteristics_avail = 1;
411 482
412 css_generate_pgid();
413
414 if ((ret = bus_register(&css_bus_type))) 483 if ((ret = bus_register(&css_bus_type)))
415 goto out; 484 goto out;
416 if ((ret = device_register (&css_bus_device)))
417 goto out_bus;
418 485
486 /* Try to enable MSS. */
487 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
488 switch (ret) {
489 case 0: /* Success. */
490 max_ssid = __MAX_SSID;
491 break;
492 case -ENOMEM:
493 goto out_bus;
494 default:
495 max_ssid = 0;
496 }
497 /* Setup css structure. */
498 for (i = 0; i <= __MAX_CSSID; i++) {
499 css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
500 if (!css[i]) {
501 ret = -ENOMEM;
502 goto out_unregister;
503 }
504 setup_css(i);
505 ret = device_register(&css[i]->device);
506 if (ret)
507 goto out_free;
508 }
419 css_init_done = 1; 509 css_init_done = 1;
420 510
421 ctl_set_bit(6, 28); 511 ctl_set_bit(6, 28);
422 512
423 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 513 for_each_subchannel(__init_channel_subsystem, NULL);
424 struct subchannel *sch;
425
426 if (cio_is_console(irq))
427 sch = cio_get_console_subchannel();
428 else {
429 sch = css_alloc_subchannel(irq);
430 if (IS_ERR(sch))
431 ret = PTR_ERR(sch);
432 else
433 ret = 0;
434 if (ret == -ENOMEM)
435 panic("Out of memory in "
436 "init_channel_subsystem\n");
437 /* -ENXIO: no more subchannels. */
438 if (ret == -ENXIO)
439 break;
440 if (ret)
441 continue;
442 }
443 /*
444 * We register ALL valid subchannels in ioinfo, even those
445 * that have been present before init_channel_subsystem.
446 * These subchannels can't have been registered yet (kmalloc
447 * not working) so we do it now. This is true e.g. for the
448 * console subchannel.
449 */
450 css_register_subchannel(sch);
451 }
452 return 0; 514 return 0;
453 515out_free:
516 kfree(css[i]);
517out_unregister:
518 while (i > 0) {
519 i--;
520 device_unregister(&css[i]->device);
521 }
454out_bus: 522out_bus:
455 bus_unregister(&css_bus_type); 523 bus_unregister(&css_bus_type);
456out: 524out:
@@ -481,47 +549,8 @@ struct bus_type css_bus_type = {
481 549
482subsys_initcall(init_channel_subsystem); 550subsys_initcall(init_channel_subsystem);
483 551
484/*
485 * Register root devices for some drivers. The release function must not be
486 * in the device drivers, so we do it here.
487 */
488static void
489s390_root_dev_release(struct device *dev)
490{
491 kfree(dev);
492}
493
494struct device *
495s390_root_dev_register(const char *name)
496{
497 struct device *dev;
498 int ret;
499
500 if (!strlen(name))
501 return ERR_PTR(-EINVAL);
502 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
503 if (!dev)
504 return ERR_PTR(-ENOMEM);
505 memset(dev, 0, sizeof(struct device));
506 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
507 dev->release = s390_root_dev_release;
508 ret = device_register(dev);
509 if (ret) {
510 kfree(dev);
511 return ERR_PTR(ret);
512 }
513 return dev;
514}
515
516void
517s390_root_dev_unregister(struct device *dev)
518{
519 if (dev)
520 device_unregister(dev);
521}
522
523int 552int
524css_enqueue_subchannel_slow(unsigned long schid) 553css_enqueue_subchannel_slow(struct subchannel_id schid)
525{ 554{
526 struct slow_subchannel *new_slow_sch; 555 struct slow_subchannel *new_slow_sch;
527 unsigned long flags; 556 unsigned long flags;
@@ -564,6 +593,4 @@ css_slow_subchannels_exist(void)
564 593
565MODULE_LICENSE("GPL"); 594MODULE_LICENSE("GPL");
566EXPORT_SYMBOL(css_bus_type); 595EXPORT_SYMBOL(css_bus_type);
567EXPORT_SYMBOL(s390_root_dev_register);
568EXPORT_SYMBOL(s390_root_dev_unregister);
569EXPORT_SYMBOL_GPL(css_characteristics_avail); 596EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2004a6c49388..251ebd7a7d3a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -6,6 +6,8 @@
6 6
7#include <asm/cio.h> 7#include <asm/cio.h>
8 8
9#include "schid.h"
10
9/* 11/*
10 * path grouping stuff 12 * path grouping stuff
11 */ 13 */
@@ -33,19 +35,25 @@ struct path_state {
33 __u8 resvd : 3; /* reserved */ 35 __u8 resvd : 3; /* reserved */
34} __attribute__ ((packed)); 36} __attribute__ ((packed));
35 37
38struct extended_cssid {
39 u8 version;
40 u8 cssid;
41} __attribute__ ((packed));
42
36struct pgid { 43struct pgid {
37 union { 44 union {
38 __u8 fc; /* SPID function code */ 45 __u8 fc; /* SPID function code */
39 struct path_state ps; /* SNID path state */ 46 struct path_state ps; /* SNID path state */
40 } inf; 47 } inf;
41 __u32 cpu_addr : 16; /* CPU address */ 48 union {
49 __u32 cpu_addr : 16; /* CPU address */
50 struct extended_cssid ext_cssid;
51 } pgid_high;
42 __u32 cpu_id : 24; /* CPU identification */ 52 __u32 cpu_id : 24; /* CPU identification */
43 __u32 cpu_model : 16; /* CPU model */ 53 __u32 cpu_model : 16; /* CPU model */
44 __u32 tod_high; /* high word TOD clock */ 54 __u32 tod_high; /* high word TOD clock */
45} __attribute__ ((packed)); 55} __attribute__ ((packed));
46 56
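The removed css_generate_pgid() marked multiple-css capability with the magic cpu_addr value 0x8000; the new union gives those 16 bits structure instead, with version 0x80 in the first byte and the cssid in the second, which is the same bit pattern. A userspace demo of the overlay (the bitfield is replaced by a plain halfword and the bytes are dumped directly, since bitfield order off big-endian s390 differs):

#include <stdio.h>

struct extended_cssid {
	unsigned char version;
	unsigned char cssid;
} __attribute__ ((packed));

/* Model of the restructured high halfword of the path group id. */
union pgid_high {
	unsigned short cpu_addr;	/* __u32 cpu_addr : 16 in css.h */
	struct extended_cssid ext_cssid;
};

int main(void)
{
	union pgid_high high = { .ext_cssid = { .version = 0x80, .cssid = 0 } };
	unsigned char *raw = (unsigned char *)&high;

	/* Prints "80 00"; read back as a big-endian halfword on s390 this
	 * is cpu_addr 0x8000, exactly the old magic value. */
	printf("%02x %02x\n", raw[0], raw[1]);
	return 0;
}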
47extern struct pgid global_pgid;
48
49#define MAX_CIWS 8 57#define MAX_CIWS 8
50 58
51/* 59/*
@@ -68,7 +76,8 @@ struct ccw_device_private {
68 atomic_t onoff; 76 atomic_t onoff;
69 unsigned long registered; 77 unsigned long registered;
70 __u16 devno; /* device number */ 78 __u16 devno; /* device number */
71 __u16 irq; /* subchannel number */ 79 __u16 sch_no; /* subchannel number */
80 __u8 ssid; /* subchannel set id */
72 __u8 imask; /* lpm mask for SNID/SID/SPGID */ 81 __u8 imask; /* lpm mask for SNID/SID/SPGID */
73 int iretry; /* retry counter SNID/SID/SPGID */ 82 int iretry; /* retry counter SNID/SID/SPGID */
74 struct { 83 struct {
@@ -121,15 +130,27 @@ struct css_driver {
121extern struct bus_type css_bus_type; 130extern struct bus_type css_bus_type;
122extern struct css_driver io_subchannel_driver; 131extern struct css_driver io_subchannel_driver;
123 132
124int css_probe_device(int irq); 133extern int css_probe_device(struct subchannel_id);
125extern struct subchannel * get_subchannel_by_schid(int irq); 134extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
126extern unsigned int highest_subchannel;
127extern int css_init_done; 135extern int css_init_done;
128 136extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
129#define __MAX_SUBCHANNELS 65536 137
138#define __MAX_SUBCHANNEL 65535
139#define __MAX_SSID 3
140#define __MAX_CHPID 255
141#define __MAX_CSSID 0
142
143struct channel_subsystem {
144 u8 cssid;
145 int valid;
146 struct channel_path *chps[__MAX_CHPID];
147 struct device device;
148 struct pgid global_pgid;
149};
150#define to_css(dev) container_of(dev, struct channel_subsystem, device)
130 151
131extern struct bus_type css_bus_type; 152extern struct bus_type css_bus_type;
132extern struct device css_bus_device; 153extern struct channel_subsystem *css[];
133 154
134/* Some helper functions for disconnected state. */ 155/* Some helper functions for disconnected state. */
135int device_is_disconnected(struct subchannel *); 156int device_is_disconnected(struct subchannel *);
@@ -144,7 +165,7 @@ void device_set_waiting(struct subchannel *);
144void device_kill_pending_timer(struct subchannel *); 165void device_kill_pending_timer(struct subchannel *);
145 166
146/* Helper functions to build lists for the slow path. */ 167/* Helper functions to build lists for the slow path. */
147int css_enqueue_subchannel_slow(unsigned long schid); 168extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
148void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 169void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
149void css_clear_subchannel_slow_list(void); 170void css_clear_subchannel_slow_list(void);
150int css_slow_subchannels_exist(void); 171int css_slow_subchannels_exist(void);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 85908cacc3b8..fa3e4c0a2536 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * $Revision: 1.131 $ 4 * $Revision: 1.137 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
374 int i, force, ret; 374 int i, force, ret;
375 char *tmp; 375 char *tmp;
376 376
377 if (atomic_compare_and_swap(0, 1, &cdev->private->onoff)) 377 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
378 return -EAGAIN; 378 return -EAGAIN;
379 379
380 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 380 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
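The gate at the top of online_store() moved from the old s390-private atomic_compare_and_swap() to the generic atomic_cmpxchg(), which returns the previous value instead of a success flag, hence the new "!= 0" test. A C11 model of the same only-the-first-caller-wins idiom (userspace atomics, not the kernel primitives):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int onoff;

static int try_enter(void)
{
	int expected = 0;

	/* Succeeds only for the caller that swaps 0 -> 1 first. */
	if (!atomic_compare_exchange_strong(&onoff, &expected, 1))
		return -1;	/* the driver returns -EAGAIN here */
	return 0;
}

int main(void)
{
	printf("first: %d, second: %d\n", try_enter(), try_enter());
	return 0;	/* prints "first: 0, second: -1" */
}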
@@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev)
535} 535}
536 536
537struct match_data { 537struct match_data {
538 unsigned int devno; 538 unsigned int devno;
539 unsigned int ssid;
539 struct ccw_device * sibling; 540 struct ccw_device * sibling;
540}; 541};
541 542
@@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data)
548 cdev = to_ccwdev(dev); 549 cdev = to_ccwdev(dev);
549 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 550 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
550 (cdev->private->devno == d->devno) && 551 (cdev->private->devno == d->devno) &&
552 (cdev->private->ssid == d->ssid) &&
551 (cdev != d->sibling)) { 553 (cdev != d->sibling)) {
552 cdev->private->state = DEV_STATE_NOT_OPER; 554 cdev->private->state = DEV_STATE_NOT_OPER;
553 return 1; 555 return 1;
@@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data)
556} 558}
557 559
558static struct ccw_device * 560static struct ccw_device *
559get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) 561get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
562 struct ccw_device *sibling)
560{ 563{
561 struct device *dev; 564 struct device *dev;
562 struct match_data data = { 565 struct match_data data = {
563 .devno = devno, 566 .devno = devno,
567 .ssid = ssid,
564 .sibling = sibling, 568 .sibling = sibling,
565 }; 569 };
566 570
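get_disc_ccwdev_by_devno() above gains an ssid parameter because a device number is no longer unique once several subchannel sets exist; bus_find_device() runs the match_devno() predicate over every device and returns the first hit. A toy model with a flat array instead of the driver-core bus and without the sibling exclusion:

#include <stdio.h>

struct ccw_dev { unsigned int devno, ssid; int disconnected; };

struct match_data { unsigned int devno, ssid; };

/* Predicate in the style of match_devno(): the set id now matters too. */
static int match_devno(struct ccw_dev *d, void *data)
{
	struct match_data *m = data;

	return d->disconnected && d->devno == m->devno && d->ssid == m->ssid;
}

static struct ccw_dev *find_dev(struct ccw_dev *devs, int n,
				int (*fn)(struct ccw_dev *, void *),
				void *data)
{
	for (int i = 0; i < n; i++)
		if (fn(&devs[i], data))
			return &devs[i];
	return NULL;
}

int main(void)
{
	struct ccw_dev devs[] = {
		{ 0x1234, 0, 1 },	/* same devno, set 0 */
		{ 0x1234, 1, 1 },	/* same devno, set 1 */
	};
	struct match_data m = { 0x1234, 1 };
	struct ccw_dev *d = find_dev(devs, 2, match_devno, &m);

	if (d)
		printf("found devno %04x in set %u\n", d->devno, d->ssid);
	return 0;
}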
@@ -616,13 +620,13 @@ ccw_device_do_unreg_rereg(void *data)
616 620
617 need_rename = 1; 621 need_rename = 1;
618 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, 622 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
619 cdev); 623 sch->schid.ssid, cdev);
620 if (other_cdev) { 624 if (other_cdev) {
621 struct subchannel *other_sch; 625 struct subchannel *other_sch;
622 626
623 other_sch = to_subchannel(other_cdev->dev.parent); 627 other_sch = to_subchannel(other_cdev->dev.parent);
624 if (get_device(&other_sch->dev)) { 628 if (get_device(&other_sch->dev)) {
625 stsch(other_sch->irq, &other_sch->schib); 629 stsch(other_sch->schid, &other_sch->schib);
626 if (other_sch->schib.pmcw.dnv) { 630 if (other_sch->schib.pmcw.dnv) {
627 other_sch->schib.pmcw.intparm = 0; 631 other_sch->schib.pmcw.intparm = 0;
628 cio_modify(other_sch); 632 cio_modify(other_sch);
@@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data)
639 if (test_and_clear_bit(1, &cdev->private->registered)) 643 if (test_and_clear_bit(1, &cdev->private->registered))
640 device_del(&cdev->dev); 644 device_del(&cdev->dev);
641 if (need_rename) 645 if (need_rename)
642 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", 646 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
643 sch->schib.pmcw.dev); 647 sch->schid.ssid, sch->schib.pmcw.dev);
644 PREPARE_WORK(&cdev->private->kick_work, 648 PREPARE_WORK(&cdev->private->kick_work,
645 ccw_device_add_changed, (void *)cdev); 649 ccw_device_add_changed, (void *)cdev);
646 queue_work(ccw_device_work, &cdev->private->kick_work); 650 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -769,18 +773,20 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
769 sch->dev.driver_data = cdev; 773 sch->dev.driver_data = cdev;
770 sch->driver = &io_subchannel_driver; 774 sch->driver = &io_subchannel_driver;
771 cdev->ccwlock = &sch->lock; 775 cdev->ccwlock = &sch->lock;
776
772 /* Init private data. */ 777 /* Init private data. */
773 priv = cdev->private; 778 priv = cdev->private;
774 priv->devno = sch->schib.pmcw.dev; 779 priv->devno = sch->schib.pmcw.dev;
775 priv->irq = sch->irq; 780 priv->ssid = sch->schid.ssid;
781 priv->sch_no = sch->schid.sch_no;
776 priv->state = DEV_STATE_NOT_OPER; 782 priv->state = DEV_STATE_NOT_OPER;
777 INIT_LIST_HEAD(&priv->cmb_list); 783 INIT_LIST_HEAD(&priv->cmb_list);
778 init_waitqueue_head(&priv->wait_q); 784 init_waitqueue_head(&priv->wait_q);
779 init_timer(&priv->timer); 785 init_timer(&priv->timer);
780 786
781 /* Set an initial name for the device. */ 787 /* Set an initial name for the device. */
782 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", 788 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
783 sch->schib.pmcw.dev); 789 sch->schid.ssid, sch->schib.pmcw.dev);
784 790
785 /* Increase counter of devices currently in recognition. */ 791 /* Increase counter of devices currently in recognition. */
786 atomic_inc(&ccw_device_init_count); 792 atomic_inc(&ccw_device_init_count);
@@ -951,7 +957,7 @@ io_subchannel_shutdown(struct device *dev)
951 sch = to_subchannel(dev); 957 sch = to_subchannel(dev);
952 cdev = dev->driver_data; 958 cdev = dev->driver_data;
953 959
954 if (cio_is_console(sch->irq)) 960 if (cio_is_console(sch->schid))
955 return; 961 return;
956 if (!sch->schib.pmcw.ena) 962 if (!sch->schib.pmcw.ena)
957 /* Nothing to do. */ 963 /* Nothing to do. */
@@ -986,10 +992,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
986 cdev->dev = (struct device) { 992 cdev->dev = (struct device) {
987 .parent = &sch->dev, 993 .parent = &sch->dev,
988 }; 994 };
989 /* Initialize the subchannel structure */
990 sch->dev.parent = &css_bus_device;
991 sch->dev.bus = &css_bus_type;
992
993 rc = io_subchannel_recog(cdev, sch); 995 rc = io_subchannel_recog(cdev, sch);
994 if (rc) 996 if (rc)
995 return rc; 997 return rc;
@@ -1146,6 +1148,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver)
1146 driver_unregister(&cdriver->driver); 1148 driver_unregister(&cdriver->driver);
1147} 1149}
1148 1150
1151/* Helper func for qdio. */
1152struct subchannel_id
1153ccw_device_get_subchannel_id(struct ccw_device *cdev)
1154{
1155 struct subchannel *sch;
1156
1157 sch = to_subchannel(cdev->dev.parent);
1158 return sch->schid;
1159}
1160
1149MODULE_LICENSE("GPL"); 1161MODULE_LICENSE("GPL");
1150EXPORT_SYMBOL(ccw_device_set_online); 1162EXPORT_SYMBOL(ccw_device_set_online);
1151EXPORT_SYMBOL(ccw_device_set_offline); 1163EXPORT_SYMBOL(ccw_device_set_offline);
@@ -1155,3 +1167,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid);
1155EXPORT_SYMBOL(ccw_bus_type); 1167EXPORT_SYMBOL(ccw_bus_type);
1156EXPORT_SYMBOL(ccw_device_work); 1168EXPORT_SYMBOL(ccw_device_work);
1157EXPORT_SYMBOL(ccw_device_notify_work); 1169EXPORT_SYMBOL(ccw_device_notify_work);
1170EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
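
The exported helper above replaces qdio's habit of caching a bare subchannel number: a ccw device can now be mapped to its full subchannel ID on demand. A minimal caller sketch in the same C style (the function name and the printk are illustrative, not part of the patch; only ccw_device_get_subchannel_id() itself is defined above):

	/* Illustrative only: obtain and print the full subchannel ID. */
	static void example_report_schid(struct ccw_device *cdev)
	{
		struct subchannel_id schid = ccw_device_get_subchannel_id(cdev);

		/* bus IDs now read "0.<ssid>.<devno>" instead of "0.0.<devno>" */
		printk(KERN_INFO "device sits on subchannel 0.%x.%04x\n",
		       schid.ssid, schid.sch_no);
	}
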
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index a3aa056d7245..11587ebb7289 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *);
 
 /* qdio needs this. */
 void ccw_device_set_timeout(struct ccw_device *, int);
+extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
 
 void retry_set_schib(struct ccw_device *cdev);
 #endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c1c89f4fd4e3..23d12b65e5fa 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
 	int ret;
 
 	sch = to_subchannel(cdev->dev.parent);
-	ret = stsch(sch->irq, &sch->schib);
+	ret = stsch(sch->schid, &sch->schib);
 	if (ret || !sch->schib.pmcw.dnv)
 		return -ENODEV;
 	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
@@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	 * through ssch() and the path information is up to date.
 	 */
 	old_lpm = sch->lpm;
-	stsch(sch->irq, &sch->schib);
+	stsch(sch->schid, &sch->schib);
 	sch->lpm = sch->schib.pmcw.pim &
 		sch->schib.pmcw.pam &
 		sch->schib.pmcw.pom &
@@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	switch (state) {
 	case DEV_STATE_NOT_OPER:
 		CIO_DEBUG(KERN_WARNING, 2,
-			  "SenseID : unknown device %04x on subchannel %04x\n",
-			  cdev->private->devno, sch->irq);
+			  "SenseID : unknown device %04x on subchannel "
+			  "0.%x.%04x\n", cdev->private->devno,
+			  sch->schid.ssid, sch->schid.sch_no);
 		break;
 	case DEV_STATE_OFFLINE:
 		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 			return;
 		}
 		/* Issue device info message. */
-		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
+		CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
 			  "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
-			  "%04X/%02X\n", cdev->private->devno,
+			  "%04X/%02X\n",
+			  cdev->private->ssid, cdev->private->devno,
 			  cdev->id.cu_type, cdev->id.cu_model,
 			  cdev->id.dev_type, cdev->id.dev_model);
 		break;
 	case DEV_STATE_BOXED:
 		CIO_DEBUG(KERN_WARNING, 2,
-			  "SenseID : boxed device %04x on subchannel %04x\n",
-			  cdev->private->devno, sch->irq);
+			  "SenseID : boxed device %04x on subchannel "
+			  "0.%x.%04x\n", cdev->private->devno,
+			  sch->schid.ssid, sch->schid.sch_no);
 		break;
 	}
 	cdev->private->state = state;
@@ -359,7 +362,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	if (state == DEV_STATE_BOXED)
 		CIO_DEBUG(KERN_WARNING, 2,
 			  "Boxed device %04x on subchannel %04x\n",
-			  cdev->private->devno, sch->irq);
+			  cdev->private->devno, sch->schid.sch_no);
 
 	if (cdev->private->flags.donotify) {
 		cdev->private->flags.donotify = 0;
@@ -592,7 +595,7 @@ ccw_device_offline(struct ccw_device *cdev)
 	struct subchannel *sch;
 
 	sch = to_subchannel(cdev->dev.parent);
-	if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
+	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
 		return -ENODEV;
 	if (cdev->private->state != DEV_STATE_ONLINE) {
 		if (sch->schib.scsw.actl != 0)
@@ -711,7 +714,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	 * Since we might not just be coming from an interrupt from the
 	 * subchannel we have to update the schib.
 	 */
-	stsch(sch->irq, &sch->schib);
+	stsch(sch->schid, &sch->schib);
 
 	if (sch->schib.scsw.actl != 0 ||
 	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
@@ -923,7 +926,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
 
 	/* Iff device is idle, reset timeout. */
 	sch = to_subchannel(cdev->dev.parent);
-	if (!stsch(sch->irq, &sch->schib))
+	if (!stsch(sch->schid, &sch->schib))
 		if (sch->schib.scsw.actl == 0)
 			ccw_device_set_timeout(cdev, 0);
 	/* Call the handler. */
@@ -1035,7 +1038,7 @@ device_trigger_reprobe(struct subchannel *sch)
 		return;
 
 	/* Update some values. */
-	if (stsch(sch->irq, &sch->schib))
+	if (stsch(sch->schid, &sch->schib))
 		return;
 
 	/*
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 0e68fb511dc9..04ceba343db8 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -27,7 +27,7 @@
 /*
  * diag210 is used under VM to get information about a virtual device
  */
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 int
 diag210(struct diag210 * addr)
 {
@@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
 		 * sense id information. So, for intervention required,
 		 * we use the "whack it until it talks" strategy...
 		 */
-		CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
-			      "reports cmd reject\n",
-			      cdev->private->devno, sch->irq);
+		CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
+			      "0.%x.%04x reports cmd reject\n",
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no);
 		return -EOPNOTSUPP;
 	}
 	if (irb->esw.esw0.erw.cons) {
-		CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
+		CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
 			      "lpum %02X, cnt %02d, sns :"
 			      " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->devno,
+			      cdev->private->ssid, cdev->private->devno,
 			      irb->esw.esw0.sublog.lpum,
 			      irb->esw.esw0.erw.scnt,
 			      irb->ecw[0], irb->ecw[1],
@@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
 	if (irb->scsw.cc == 3) {
 		if ((sch->orb.lpm &
 		     sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
-			CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
-				      " subchannel %04x is 'not operational'\n",
-				      sch->orb.lpm, cdev->private->devno,
-				      sch->irq);
+			CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
+				      "on subchannel 0.%x.%04x is "
+				      "'not operational'\n", sch->orb.lpm,
+				      cdev->private->devno, sch->schid.ssid,
+				      sch->schid.sch_no);
 		return -EACCES;
 	}
 	/* Hmm, whatever happened, try again. */
 	CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
-		      "subchannel %04x returns status %02X%02X\n",
-		      cdev->private->devno, sch->irq,
+		      "subchannel 0.%x.%04x returns status %02X%02X\n",
+		      cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
 		      irb->scsw.dstat, irb->scsw.cstat);
 	return -EAGAIN;
 }
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 85a3026e6900..143b6c25a4e6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/device_ops.c
  *
- *   $Revision: 1.57 $
+ *   $Revision: 1.58 $
  *
  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *			 IBM Corporation
@@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
 int
 _ccw_device_get_subchannel_number(struct ccw_device *cdev)
 {
-	return cdev->private->irq;
+	return cdev->private->sch_no;
 }
 
 int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 0adac8a67331..052832d03d38 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
 #include "cio_debug.h"
 #include "css.h"
 #include "device.h"
+#include "ioasm.h"
 
 /*
  * Start Sense Path Group ID helper function. Used in ccw_device_recog
@@ -56,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
 			if (ret != -EACCES)
 				return ret;
 			CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
-				      "%04x, lpm %02X, became 'not "
+				      "0.%x.%04x, lpm %02X, became 'not "
 				      "operational'\n",
-				      cdev->private->devno, sch->irq,
-				      cdev->private->imask);
+				      cdev->private->devno, sch->schid.ssid,
+				      sch->schid.sch_no, cdev->private->imask);
 
 		}
 		cdev->private->imask >>= 1;
@@ -105,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
 		return -EOPNOTSUPP;
 	}
 	if (irb->esw.esw0.erw.cons) {
-		CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
+		CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
 			      "lpum %02X, cnt %02d, sns : "
 			      "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-			      cdev->private->devno,
+			      cdev->private->ssid, cdev->private->devno,
 			      irb->esw.esw0.sublog.lpum,
 			      irb->esw.esw0.erw.scnt,
 			      irb->ecw[0], irb->ecw[1],
@@ -118,15 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
 		return -EAGAIN;
 	}
 	if (irb->scsw.cc == 3) {
-		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
-			      "%04x, lpm %02X, became 'not operational'\n",
-			      cdev->private->devno, sch->irq, sch->orb.lpm);
+		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
+			      " lpm %02X, became 'not operational'\n",
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no, sch->orb.lpm);
 		return -EACCES;
 	}
 	if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
-		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
+		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
 			      "is reserved by someone else\n",
-			      cdev->private->devno, sch->irq);
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no);
 		return -EUSERS;
 	}
 	return 0;
@@ -162,7 +165,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
 	case 0:			/* Sense Path Group ID successful. */
 		if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
-			memcpy(&cdev->private->pgid, &global_pgid,
+			memcpy(&cdev->private->pgid, &css[0]->global_pgid,
 			       sizeof(struct pgid));
 		ccw_device_sense_pgid_done(cdev, 0);
 		break;
@@ -235,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
 	sch->lpm &= ~cdev->private->imask;
 	sch->vpm &= ~cdev->private->imask;
 	CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
-		      "%04x, lpm %02X, became 'not operational'\n",
-		      cdev->private->devno, sch->irq, cdev->private->imask);
+		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
+		      cdev->private->devno, sch->schid.ssid,
+		      sch->schid.sch_no, cdev->private->imask);
 	return ret;
 }
 
@@ -258,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
 		if (irb->ecw[0] & SNS0_CMD_REJECT)
 			return -EOPNOTSUPP;
 		/* Hmm, whatever happened, try again. */
-		CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, "
+		CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
+			      "cnt %02d, "
 			      "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
+			      cdev->private->ssid,
 			      cdev->private->devno, irb->esw.esw0.erw.scnt,
 			      irb->ecw[0], irb->ecw[1],
 			      irb->ecw[2], irb->ecw[3],
@@ -268,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
 		return -EAGAIN;
 	}
 	if (irb->scsw.cc == 3) {
-		CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
-			      "%04x, lpm %02X, became 'not operational'\n",
-			      cdev->private->devno, sch->irq,
-			      cdev->private->imask);
+		CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
+			      " lpm %02X, became 'not operational'\n",
+			      cdev->private->devno, sch->schid.ssid,
+			      sch->schid.sch_no, cdev->private->imask);
 		return -EACCES;
 	}
 	return 0;
@@ -364,8 +370,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 void
 ccw_device_verify_start(struct ccw_device *cdev)
 {
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
 	cdev->private->flags.pgid_single = 0;
 	cdev->private->iretry = 5;
+	/*
+	 * Update sch->lpm with current values to catch paths becoming
+	 * available again.
+	 */
+	if (stsch(sch->schid, &sch->schib)) {
+		ccw_device_verify_done(cdev, -ENODEV);
+		return;
+	}
+	sch->lpm = sch->schib.pmcw.pim &
+		sch->schib.pmcw.pam &
+		sch->schib.pmcw.pom &
+		sch->opm;
 	__ccw_device_verify_start(cdev);
 }
 
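
The new code in ccw_device_verify_start() re-reads the schib and recomputes the usable path mask before path verification, so paths that became operational again since the last store-subchannel are noticed. The mask is a plain intersection of the PMCW path masks with the subchannel's own opm; restated in isolation as a hedged sketch (the helper name is hypothetical, the field names are those used in the hunk above):

	/* Hypothetical helper restating the path-mask computation above. */
	static u8 example_usable_paths(struct subchannel *sch)
	{
		return sch->schib.pmcw.pim &	/* paths installed */
		       sch->schib.pmcw.pam &	/* paths available */
		       sch->schib.pmcw.pom &	/* paths operational */
		       sch->opm;		/* paths we may use */
	}
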
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 12a24d4331a2..db09c209098b 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -36,15 +36,16 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
 
 	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
 		      "received"
-		      " ... device %04X on subchannel %04X, dev_stat "
+		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
 		      ": %02X sch_stat : %02X\n",
-		      cdev->private->devno, cdev->private->irq,
+		      cdev->private->devno, cdev->private->ssid,
+		      cdev->private->sch_no,
 		      irb->scsw.dstat, irb->scsw.cstat);
 
 	if (irb->scsw.cc != 3) {
 		char dbf_text[15];
 
-		sprintf(dbf_text, "chk%x", cdev->private->irq);
+		sprintf(dbf_text, "chk%x", cdev->private->sch_no);
 		CIO_TRACE_EVENT(0, dbf_text);
 		CIO_HEX_EVENT(0, irb, sizeof (struct irb));
 	}
@@ -59,10 +60,11 @@ ccw_device_path_notoper(struct ccw_device *cdev)
 	struct subchannel *sch;
 
 	sch = to_subchannel(cdev->dev.parent);
-	stsch (sch->irq, &sch->schib);
+	stsch (sch->schid, &sch->schib);
 
-	CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are "
-		      "not operational \n", __FUNCTION__, sch->irq,
+	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+		      "not operational \n", __FUNCTION__,
+		      sch->schid.ssid, sch->schid.sch_no,
 		      sch->schib.pmcw.pnom);
 
 	sch->lpm &= ~sch->schib.pmcw.pnom;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 45480a2bc4c0..95a9462f9a91 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,12 +1,13 @@
 #ifndef S390_CIO_IOASM_H
 #define S390_CIO_IOASM_H
 
+#include "schid.h"
+
 /*
  * TPI info structure
  */
 struct tpi_info {
-	__u32 reserved1	 : 16;	 /* reserved 0x00000001 */
-	__u32 irq	 : 16;	 /* aka. subchannel number */
+	struct subchannel_id schid;
 	__u32 intparm;		 /* interruption parameter */
 	__u32 adapter_IO : 1;
 	__u32 reserved2	 : 1;
@@ -21,7 +22,8 @@ struct tpi_info {
  * Some S390 specific IO instructions as inline
  */
 
-static inline int stsch(int irq, volatile struct schib *addr)
+static inline int stsch(struct subchannel_id schid,
+			volatile struct schib *addr)
 {
 	int ccode;
 
@@ -31,12 +33,42 @@ static inline int stsch(int irq, volatile struct schib *addr)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000), "a" (addr)
+		: "d" (schid), "a" (addr), "m" (*addr)
+		: "cc", "1" );
+	return ccode;
+}
+
+static inline int stsch_err(struct subchannel_id schid,
+			    volatile struct schib *addr)
+{
+	int ccode;
+
+	__asm__ __volatile__(
+		"    lhi  %0,%3\n"
+		"    lr	  1,%1\n"
+		"    stsch 0(%2)\n"
+		"0:  ipm  %0\n"
+		"    srl  %0,28\n"
+		"1:\n"
+#ifdef CONFIG_64BIT
+		".section __ex_table,\"a\"\n"
+		"   .align 8\n"
+		"   .quad 0b,1b\n"
+		".previous"
+#else
+		".section __ex_table,\"a\"\n"
+		"   .align 4\n"
+		"   .long 0b,1b\n"
+		".previous"
+#endif
+		: "=&d" (ccode)
+		: "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int msch(int irq, volatile struct schib *addr)
+static inline int msch(struct subchannel_id schid,
+		       volatile struct schib *addr)
 {
 	int ccode;
 
@@ -46,12 +78,13 @@ static inline int msch(int irq, volatile struct schib *addr)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L), "a" (addr)
+		: "d" (schid), "a" (addr), "m" (*addr)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int msch_err(int irq, volatile struct schib *addr)
+static inline int msch_err(struct subchannel_id schid,
+			   volatile struct schib *addr)
 {
 	int ccode;
 
@@ -62,7 +95,7 @@ static inline int msch_err(int irq, volatile struct schib *addr)
 		"0:  ipm  %0\n"
 		"    srl  %0,28\n"
 		"1:\n"
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 		".section __ex_table,\"a\"\n"
 		"   .align 8\n"
 		"   .quad 0b,1b\n"
@@ -74,12 +107,13 @@ static inline int msch_err(int irq, volatile struct schib *addr)
 		".previous"
 #endif
 		: "=&d" (ccode)
-		: "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
+		: "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int tsch(int irq, volatile struct irb *addr)
+static inline int tsch(struct subchannel_id schid,
+		       volatile struct irb *addr)
 {
 	int ccode;
 
@@ -89,7 +123,7 @@ static inline int tsch(int irq, volatile struct irb *addr)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L), "a" (addr)
+		: "d" (schid), "a" (addr), "m" (*addr)
 		: "cc", "1" );
 	return ccode;
 }
@@ -103,12 +137,13 @@ static inline int tpi( volatile struct tpi_info *addr)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "a" (addr)
+		: "a" (addr), "m" (*addr)
 		: "cc", "1" );
 	return ccode;
}
 
-static inline int ssch(int irq, volatile struct orb *addr)
+static inline int ssch(struct subchannel_id schid,
+		       volatile struct orb *addr)
 {
 	int ccode;
 
@@ -118,12 +153,12 @@ static inline int ssch(int irq, volatile struct orb *addr)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L), "a" (addr)
+		: "d" (schid), "a" (addr), "m" (*addr)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int rsch(int irq)
+static inline int rsch(struct subchannel_id schid)
 {
 	int ccode;
 
@@ -133,12 +168,12 @@ static inline int rsch(int irq)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L)
+		: "d" (schid)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int csch(int irq)
+static inline int csch(struct subchannel_id schid)
 {
 	int ccode;
 
@@ -148,12 +183,12 @@ static inline int csch(int irq)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L)
+		: "d" (schid)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int hsch(int irq)
+static inline int hsch(struct subchannel_id schid)
 {
 	int ccode;
 
@@ -163,12 +198,12 @@ static inline int hsch(int irq)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L)
+		: "d" (schid)
 		: "cc", "1" );
 	return ccode;
 }
 
-static inline int xsch(int irq)
+static inline int xsch(struct subchannel_id schid)
 {
 	int ccode;
 
@@ -178,21 +213,22 @@ static inline int xsch(int irq)
 		"   ipm	  %0\n"
 		"   srl	  %0,28"
 		: "=d" (ccode)
-		: "d" (irq | 0x10000L)
+		: "d" (schid)
 		: "cc", "1" );
 	return ccode;
 }
 
 static inline int chsc(void *chsc_area)
 {
+	typedef struct { char _[4096]; } addr_type;
 	int cc;
 
 	__asm__ __volatile__ (
-		".insn	rre,0xb25f0000,%1,0	\n\t"
+		".insn	rre,0xb25f0000,%2,0	\n\t"
 		"ipm	%0	\n\t"
 		"srl	%0,28	\n\t"
-		: "=d" (cc)
-		: "d" (chsc_area)
+		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
+		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
 		: "cc" );
 
 	return cc;
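
All I/O mnemonics in ioasm.h now take a struct subchannel_id by value instead of an int subchannel number; the ID structure already carries the bit that the old code had to OR in as 0x10000 before loading register 1. A migration sketch for a call site (both forms shown for contrast; only the new one exists after this patch):

	struct schib schib;

	/* old: ret = stsch(sch->irq, &schib);   -- number | 0x10000 */
	/* new: the complete ID (ssid and sch_no) travels in the register */
	if (stsch(sch->schid, &schib))
		return -ENODEV;

The added "m" (*addr) operands serve a second purpose: they make the memory touched by stsch/msch/tsch/ssch visible to gcc, so the compiler can no longer cache the schib/irb/orb contents in registers across the instruction.
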
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index eb39218b925e..30a836ffc31f 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -56,7 +56,7 @@
 #include "ioasm.h"
 #include "chsc.h"
 
-#define VERSION_QDIO_C "$Revision: 1.108 $"
+#define VERSION_QDIO_C "$Revision: 1.114 $"
 
 /****************** MODULE PARAMETER VARIABLES ********************/
 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
@@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats;
 #endif /* QDIO_PERFORMANCE_STATS */
 
 static int hydra_thinints;
+static int is_passthrough = 0;
 static int omit_svs;
 
 static int indicator_used[INDICATORS_PER_CACHELINE];
@@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q)
 	atomic_dec(&q->use_count);
 }
 
-static volatile inline void
-qdio_set_slsb(volatile char *slsb, unsigned char value)
+/*check ccq  */
+static inline int
+qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+	char dbf_text[15];
+
+	if (ccq == 0 || ccq == 32 || ccq == 96)
+		return 0;
+	if (ccq == 97)
+		return 1;
+	/*notify devices immediately*/
+	sprintf(dbf_text,"%d", ccq);
+	QDIO_DBF_TEXT2(1,trace,dbf_text);
+	return -EIO;
+}
+/* EQBS: extract buffer states */
+static inline int
+qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+	     unsigned int *start, unsigned int *cnt)
+{
+	struct qdio_irq *irq;
+	unsigned int tmp_cnt, q_no, ccq;
+	int rc ;
+	char dbf_text[15];
+
+	ccq = 0;
+	tmp_cnt = *cnt;
+	irq = (struct qdio_irq*)q->irq_ptr;
+	q_no = q->q_no;
+	if(!q->is_input_q)
+		q_no += irq->no_input_qs;
+	ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
+	rc = qdio_check_ccq(q, ccq);
+	if (rc < 0) {
+		QDIO_DBF_TEXT2(1,trace,"eqberr");
+		sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
+		QDIO_DBF_TEXT2(1,trace,dbf_text);
+		q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+				QDIO_STATUS_LOOK_FOR_ERROR,
+				0, 0, 0, -1, -1, q->int_parm);
+		return 0;
+	}
+	return (tmp_cnt - *cnt);
+}
+
+/* SQBS: set buffer states */
+static inline int
+qdio_do_sqbs(struct qdio_q *q, unsigned char state,
+	     unsigned int *start, unsigned int *cnt)
 {
-	xchg((char*)slsb,value);
+	struct qdio_irq *irq;
+	unsigned int tmp_cnt, q_no, ccq;
+	int rc;
+	char dbf_text[15];
+
+	ccq = 0;
+	tmp_cnt = *cnt;
+	irq = (struct qdio_irq*)q->irq_ptr;
+	q_no = q->q_no;
+	if(!q->is_input_q)
+		q_no += irq->no_input_qs;
+	ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
+	rc = qdio_check_ccq(q, ccq);
+	if (rc < 0) {
+		QDIO_DBF_TEXT3(1,trace,"sqberr");
+		sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+		q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+				QDIO_STATUS_LOOK_FOR_ERROR,
+				0, 0, 0, -1, -1, q->int_parm);
+		return 0;
+	}
+	return (tmp_cnt - *cnt);
 }
 
+static inline int
+qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
+	      unsigned char state, unsigned int *count)
+{
+	volatile char *slsb;
+	struct qdio_irq *irq;
+
+	irq = (struct qdio_irq*)q->irq_ptr;
+	if (!irq->is_qebsm) {
+		slsb = (char *)&q->slsb.acc.val[(*bufno)];
+		xchg(slsb, state);
+		return 1;
+	}
+	return qdio_do_sqbs(q, state, bufno, count);
+}
+
+#ifdef CONFIG_QDIO_DEBUG
+static inline void
+qdio_trace_slsb(struct qdio_q *q)
+{
+	if (q->queue_type==QDIO_TRACE_QTYPE) {
+		if (q->is_input_q)
+			QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
+				      QDIO_MAX_BUFFERS_PER_Q);
+		else
+			QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
+				      QDIO_MAX_BUFFERS_PER_Q);
+	}
+}
+#endif
+
+static inline int
+set_slsb(struct qdio_q *q, unsigned int *bufno,
+	 unsigned char state, unsigned int *count)
+{
+	int rc;
+#ifdef CONFIG_QDIO_DEBUG
+	qdio_trace_slsb(q);
+#endif
+	rc = qdio_set_slsb(q, bufno, state, count);
+#ifdef CONFIG_QDIO_DEBUG
+	qdio_trace_slsb(q);
+#endif
+	return rc;
+}
 static inline int
 qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
 	       unsigned int gpr3)
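
Under QEBSM (QDIO Enhanced Buffer State Management) the buffer-state list is no longer ordinary memory that can be flipped with xchg(); states are read and written with the EQBS/SQBS instructions against the subchannel token. The reworked set_slsb() above hides that split from all callers. A hedged usage sketch (assumes a live struct qdio_q *q as in the hunk above; the local variable names are illustrative):

	unsigned int bufno = 5, count = 1;
	int done;

	/* Plain xchg() on the SLSB byte without QEBSM; SQBS via
	 * qdio_do_sqbs() with it. Returns how many buffer states
	 * were actually set. */
	done = set_slsb(q, &bufno, SLSB_P_OUTPUT_NOT_INIT, &count);
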
@@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
 	perf_stats.siga_syncs++;
 #endif /* QDIO_PERFORMANCE_STATS */
 
-	cc = do_siga_sync(q->irq, gpr2, gpr3);
+	cc = do_siga_sync(q->schid, gpr2, gpr3);
 	if (cc)
 		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
 
@@ -170,6 +285,23 @@ qdio_siga_sync_q(struct qdio_q *q)
 	return qdio_siga_sync(q, q->mask, 0);
 }
 
+static int
+__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+{
+	struct qdio_irq *irq;
+	unsigned int fc = 0;
+	unsigned long schid;
+
+	irq = (struct qdio_irq *) q->irq_ptr;
+	if (!irq->is_qebsm)
+		schid = *((u32 *)&q->schid);
+	else {
+		schid = irq->sch_token;
+		fc |= 0x80;
+	}
+	return do_siga_output(schid, q->mask, busy_bit, fc);
+}
+
 /*
  * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
  * an access exception
@@ -189,7 +321,7 @@ qdio_siga_output(struct qdio_q *q)
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
 	for (;;) {
-		cc = do_siga_output(q->irq, q->mask, &busy_bit);
+		cc = __do_siga_output(q, &busy_bit);
 //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
 		if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
 			if (!start_time)
@@ -221,7 +353,7 @@ qdio_siga_input(struct qdio_q *q)
 	perf_stats.siga_ins++;
 #endif /* QDIO_PERFORMANCE_STATS */
 
-	cc = do_siga_input(q->irq, q->mask);
+	cc = do_siga_input(q->schid, q->mask);
 
 	if (cc)
 		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -230,7 +362,7 @@ qdio_siga_input(struct qdio_q *q)
 }
 
 /* locked by the locks in qdio_activate and qdio_cleanup */
-static __u32 volatile *
+static __u32 *
 qdio_get_indicator(void)
 {
 	int i;
@@ -258,7 +390,7 @@ qdio_put_indicator(__u32 *addr)
 		atomic_dec(&spare_indicator_usecount);
 }
 
-static inline volatile void
+static inline void
 tiqdio_clear_summary_bit(__u32 *location)
 {
 	QDIO_DBF_TEXT5(0,trace,"clrsummb");
@@ -267,7 +399,7 @@ tiqdio_clear_summary_bit(__u32 *location)
 	xchg(location,0);
 }
 
-static inline volatile void
+static inline void
 tiqdio_set_summary_bit(__u32 *location)
 {
 	QDIO_DBF_TEXT5(0,trace,"setsummb");
@@ -336,7 +468,9 @@ static inline int
 qdio_stop_polling(struct qdio_q *q)
 {
 #ifdef QDIO_USE_PROCESSING_STATE
-	int gsf;
+	unsigned int tmp, gsf, count = 1;
+	unsigned char state = 0;
+	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
 	if (!atomic_swap(&q->polling,0))
 		return 1;
@@ -348,17 +482,22 @@ qdio_stop_polling(struct qdio_q *q)
 	if (!q->is_input_q)
 		return 1;
 
-	gsf=GET_SAVED_FRONTIER(q);
-	set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
-				  (QDIO_MAX_BUFFERS_PER_Q-1)],
-		 SLSB_P_INPUT_NOT_INIT);
+	tmp = gsf = GET_SAVED_FRONTIER(q);
+	tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
+	set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
+
 	/*
 	 * we don't issue this SYNC_MEMORY, as we trust Rick T and
 	 * moreover will not use the PROCESSING state under VM, so
 	 * q->polling was 0 anyway
 	 */
 	/*SYNC_MEMORY;*/
-	if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
+	if (irq->is_qebsm) {
+		count = 1;
+		qdio_do_eqbs(q, &state, &gsf, &count);
+	} else
+		state = q->slsb.acc.val[gsf];
+	if (state != SLSB_P_INPUT_PRIMED)
 		return 1;
 	/*
 	 * set our summary bit again, as otherwise there is a
@@ -431,18 +570,136 @@ tiqdio_clear_global_summary(void)
 
 
 /************************* OUTBOUND ROUTINES *******************************/
+static int
+qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
+{
+	struct qdio_irq *irq;
+	unsigned char state;
+	unsigned int cnt, count, ftc;
+
+	irq = (struct qdio_irq *) q->irq_ptr;
+	if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
+		SYNC_MEMORY;
+
+	ftc = q->first_to_check;
+	count = qdio_min(atomic_read(&q->number_of_buffers_used),
+			(QDIO_MAX_BUFFERS_PER_Q-1));
+	if (count == 0)
+		return q->first_to_check;
+	cnt = qdio_do_eqbs(q, &state, &ftc, &count);
+	if (cnt == 0)
+		return q->first_to_check;
+	switch (state) {
+	case SLSB_P_OUTPUT_ERROR:
+		QDIO_DBF_TEXT3(0,trace,"outperr");
+		atomic_sub(cnt , &q->number_of_buffers_used);
+		if (q->qdio_error)
+			q->error_status_flags |=
+				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+		q->qdio_error = SLSB_P_OUTPUT_ERROR;
+		q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
+		q->first_to_check = ftc;
+		break;
+	case SLSB_P_OUTPUT_EMPTY:
+		QDIO_DBF_TEXT5(0,trace,"outpempt");
+		atomic_sub(cnt, &q->number_of_buffers_used);
+		q->first_to_check = ftc;
+		break;
+	case SLSB_CU_OUTPUT_PRIMED:
+		/* all buffers primed */
+		QDIO_DBF_TEXT5(0,trace,"outpprim");
+		break;
+	default:
+		break;
+	}
+	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+	return q->first_to_check;
+}
+
+static int
+qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
+{
+	struct qdio_irq *irq;
+	unsigned char state;
+	int tmp, ftc, count, cnt;
+	char dbf_text[15];
+
+
+	irq = (struct qdio_irq *) q->irq_ptr;
+	ftc = q->first_to_check;
+	count = qdio_min(atomic_read(&q->number_of_buffers_used),
+			(QDIO_MAX_BUFFERS_PER_Q-1));
+	if (count == 0)
+		return q->first_to_check;
+	cnt = qdio_do_eqbs(q, &state, &ftc, &count);
+	if (cnt == 0)
+		return q->first_to_check;
+	switch (state) {
+	case SLSB_P_INPUT_ERROR :
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT3(1,trace,"inperr");
+		sprintf(dbf_text,"%2x,%2x",ftc,count);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		if (q->qdio_error)
+			q->error_status_flags |=
+				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+		q->qdio_error = SLSB_P_INPUT_ERROR;
+		q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
+		atomic_sub(cnt, &q->number_of_buffers_used);
+		q->first_to_check = ftc;
+		break;
+	case SLSB_P_INPUT_PRIMED :
+		QDIO_DBF_TEXT3(0,trace,"inptprim");
+		sprintf(dbf_text,"%2x,%2x",ftc,count);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+		tmp = 0;
+		ftc = q->first_to_check;
+#ifdef QDIO_USE_PROCESSING_STATE
+		if (cnt > 1) {
+			cnt -= 1;
+			tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
+			if (!tmp)
+				break;
+		}
+		cnt = 1;
+		tmp += set_slsb(q, &ftc,
+			       SLSB_P_INPUT_PROCESSING, &cnt);
+		atomic_set(&q->polling, 1);
+#else
+		tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
+#endif
+		atomic_sub(tmp, &q->number_of_buffers_used);
+		q->first_to_check = ftc;
+		break;
+	case SLSB_CU_INPUT_EMPTY:
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_PROCESSING:
+		QDIO_DBF_TEXT5(0,trace,"inpnipro");
+		break;
+	default:
+		break;
+	}
+	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+	return q->first_to_check;
+}
 
 static inline int
 qdio_get_outbound_buffer_frontier(struct qdio_q *q)
 {
-	int f,f_mod_no;
+	struct qdio_irq *irq;
 	volatile char *slsb;
-	int first_not_to_check;
+	unsigned int count = 1;
+	int first_not_to_check, f, f_mod_no;
 	char dbf_text[15];
 
 	QDIO_DBF_TEXT4(0,trace,"getobfro");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
+	irq = (struct qdio_irq *) q->irq_ptr;
+	if (irq->is_qebsm)
+		return qdio_qebsm_get_outbound_buffer_frontier(q);
+
 	slsb=&q->slsb.acc.val[0];
 	f_mod_no=f=q->first_to_check;
 	/*
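
A note on the qdio_do_eqbs() calling convention used by both frontier functions above: *cnt goes in as the number of buffers to inspect and comes back holding the buffers left unprocessed, while *start is advanced past the processed ones, so the helper's return value (tmp_cnt - *cnt) is the length of the run of buffers sharing the reported state. A consuming sketch under those assumptions (variable names illustrative):

	unsigned char state;
	unsigned int start = q->first_to_check;
	unsigned int count = 16;	/* inspect at most 16 buffers */
	int n = qdio_do_eqbs(q, &state, &start, &count);

	/* n consecutive buffers from q->first_to_check share 'state';
	 * n == 0 means the extraction failed and was already reported
	 * through q->handler(). */
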
@@ -484,7 +741,7 @@ check_next:
 		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
 
 		/* kind of process the buffer */
-		set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);
+		set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
 
 		/*
 		 * we increment the frontier, as this buffer
@@ -597,48 +854,48 @@ qdio_kick_outbound_q(struct qdio_q *q)
 
 	result=qdio_siga_output(q);
 
 	switch (result) {
 	case 0:
 		/* went smooth this time, reset timestamp */
 #ifdef CONFIG_QDIO_DEBUG
 		QDIO_DBF_TEXT3(0,trace,"cc2reslv");
-		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
+		sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
 			atomic_read(&q->busy_siga_counter));
 		QDIO_DBF_TEXT3(0,trace,dbf_text);
 #endif /* CONFIG_QDIO_DEBUG */
 		q->timing.busy_start=0;
+		break;
+	case (2|QDIO_SIGA_ERROR_B_BIT_SET):
+		/* cc=2 and busy bit: */
+		atomic_inc(&q->busy_siga_counter);
+
+		/* if the last siga was successful, save
+		 * timestamp here */
+		if (!q->timing.busy_start)
+			q->timing.busy_start=NOW;
+
+		/* if we're in time, don't touch error_status_flags
+		 * and siga_error */
+		if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
+			qdio_mark_q(q);
+			break;
+		}
+		QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
-			break;
-		case (2|QDIO_SIGA_ERROR_B_BIT_SET):
-			/* cc=2 and busy bit: */
-			atomic_inc(&q->busy_siga_counter);
-
-			/* if the last siga was successful, save
-			 * timestamp here */
-			if (!q->timing.busy_start)
-				q->timing.busy_start=NOW;
-
-			/* if we're in time, don't touch error_status_flags
-			 * and siga_error */
-			if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
-				qdio_mark_q(q);
-				break;
-			}
-			QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
 #ifdef CONFIG_QDIO_DEBUG
-		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
+		sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
 			atomic_read(&q->busy_siga_counter));
 		QDIO_DBF_TEXT3(0,trace,dbf_text);
 #endif /* CONFIG_QDIO_DEBUG */
-		/* else fallthrough and report error */
-		default:
-			/* for plain cc=1, 2 or 3: */
-			if (q->siga_error)
-				q->error_status_flags|=
-					QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
-			q->error_status_flags|=
-				QDIO_STATUS_LOOK_FOR_ERROR;
-			q->siga_error=result;
-	}
+		/* else fallthrough and report error */
+	default:
+		/* for plain cc=1, 2 or 3: */
+		if (q->siga_error)
+			q->error_status_flags|=
+				QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+		q->error_status_flags|=
+			QDIO_STATUS_LOOK_FOR_ERROR;
+		q->siga_error=result;
+	}
 }
 
 static inline void
@@ -743,8 +1000,10 @@ qdio_outbound_processing(struct qdio_q *q)
 static inline int
 qdio_get_inbound_buffer_frontier(struct qdio_q *q)
 {
+	struct qdio_irq *irq;
 	int f,f_mod_no;
 	volatile char *slsb;
+	unsigned int count = 1;
 	int first_not_to_check;
 #ifdef CONFIG_QDIO_DEBUG
 	char dbf_text[15];
@@ -756,6 +1015,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q)
 	QDIO_DBF_TEXT4(0,trace,"getibfro");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
+	irq = (struct qdio_irq *) q->irq_ptr;
+	if (irq->is_qebsm)
+		return qdio_qebsm_get_inbound_buffer_frontier(q);
+
 	slsb=&q->slsb.acc.val[0];
 	f_mod_no=f=q->first_to_check;
 	/*
@@ -792,19 +1055,19 @@ check_next:
 		 * kill VM in terms of CP overhead
 		 */
 		if (q->siga_sync) {
-			set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+			set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
 		} else {
 			/* set the previous buffer to NOT_INIT. The current
 			 * buffer will be set to PROCESSING at the end of
 			 * this function to avoid further interrupts. */
 			if (last_position>=0)
-				set_slsb(&slsb[last_position],
-					 SLSB_P_INPUT_NOT_INIT);
+				set_slsb(q, &last_position,
+					 SLSB_P_INPUT_NOT_INIT, &count);
 			atomic_set(&q->polling,1);
 			last_position=f_mod_no;
 		}
 #else /* QDIO_USE_PROCESSING_STATE */
-		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+		set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
 #endif /* QDIO_USE_PROCESSING_STATE */
 		/*
 		 * not needed, as the inbound queue will be synced on the next
@@ -829,7 +1092,7 @@ check_next:
 		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
 
 		/* kind of process the buffer */
-		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+		set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
 
 		if (q->qdio_error)
 			q->error_status_flags|=
@@ -857,7 +1120,7 @@ out:
 
 #ifdef QDIO_USE_PROCESSING_STATE
 	if (last_position>=0)
-		set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
+		set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
 #endif /* QDIO_USE_PROCESSING_STATE */
 
 	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
@@ -902,6 +1165,10 @@ static inline int
 tiqdio_is_inbound_q_done(struct qdio_q *q)
 {
 	int no_used;
+	unsigned int start_buf, count;
+	unsigned char state = 0;
+	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
 #ifdef CONFIG_QDIO_DEBUG
 	char dbf_text[15];
 #endif
@@ -927,8 +1194,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
 	if (!q->siga_sync)
 		/* we'll check for more primed buffers in qeth_stop_polling */
 		return 0;
-
-	if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
+	if (irq->is_qebsm) {
+		count = 1;
+		start_buf = q->first_to_check;
+		qdio_do_eqbs(q, &state, &start_buf, &count);
+	} else
+		state = q->slsb.acc.val[q->first_to_check];
+	if (state != SLSB_P_INPUT_PRIMED)
 		/*
 		 * nothing more to do, if next buffer is not PRIMED.
 		 * note that we did a SYNC_MEMORY before, that there
@@ -955,6 +1227,10 @@ static inline int
 qdio_is_inbound_q_done(struct qdio_q *q)
 {
 	int no_used;
+	unsigned int start_buf, count;
+	unsigned char state = 0;
+	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
 #ifdef CONFIG_QDIO_DEBUG
 	char dbf_text[15];
 #endif
@@ -973,8 +1249,13 @@ qdio_is_inbound_q_done(struct qdio_q *q)
 		QDIO_DBF_TEXT4(0,trace,dbf_text);
 		return 1;
 	}
-
-	if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
+	if (irq->is_qebsm) {
+		count = 1;
+		start_buf = q->first_to_check;
+		qdio_do_eqbs(q, &state, &start_buf, &count);
+	} else
+		state = q->slsb.acc.val[q->first_to_check];
+	if (state == SLSB_P_INPUT_PRIMED) {
 		/* we got something to do */
 		QDIO_DBF_TEXT4(0,trace,"inqisntA");
 		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -1456,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
 	void *ptr;
 	int available;
 
-	sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
+	sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
 	QDIO_DBF_TEXT0(0,setup,dbf_text);
 	for (i=0;i<no_input_qs;i++) {
 		q=irq_ptr->input_qs[i];
@@ -1476,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
 
 		q->queue_type=q_format;
 		q->int_parm=int_parm;
-		q->irq=irq_ptr->irq;
+		q->schid = irq_ptr->schid;
 		q->irq_ptr = irq_ptr;
 		q->cdev = cdev;
 		q->mask=1<<(31-i);
@@ -1523,11 +1804,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
 		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
 		/* fill in slsb */
-		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
-			set_slsb(&q->slsb.acc.val[j],
-				 SLSB_P_INPUT_NOT_INIT);
-/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
-		}
+		if (!irq_ptr->is_qebsm) {
+			unsigned int count = 1;
+			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+				set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
+		}
 	}
 
 	for (i=0;i<no_output_qs;i++) {
@@ -1549,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
 		q->queue_type=q_format;
 		q->int_parm=int_parm;
 		q->is_input_q=0;
-		q->irq=irq_ptr->irq;
+		q->schid = irq_ptr->schid;
 		q->cdev = cdev;
 		q->irq_ptr = irq_ptr;
 		q->mask=1<<(31-i);
@@ -1584,11 +1865,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
 		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
 
 		/* fill in slsb */
-		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
-			set_slsb(&q->slsb.acc.val[j],
-				 SLSB_P_OUTPUT_NOT_INIT);
-/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
-		}
+		if (!irq_ptr->is_qebsm) {
+			unsigned int count = 1;
+			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+				set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
+		}
 	}
 }
 
@@ -1656,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
 	char dbf_text[15];
 
 	QDIO_DBF_TEXT5(0,trace,"newstate");
-	sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
+	sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
 	QDIO_DBF_TEXT5(0,trace,dbf_text);
 #endif /* CONFIG_QDIO_DEBUG */
 
@@ -1669,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
 }
 
 static inline void
-qdio_irq_check_sense(int irq, struct irb *irb)
+qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
 {
 	char dbf_text[15];
 
 	if (irb->esw.esw0.erw.cons) {
-		sprintf(dbf_text,"sens%4x",irq);
+		sprintf(dbf_text,"sens%4x",schid.sch_no);
 		QDIO_DBF_TEXT2(1,trace,dbf_text);
 		QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
 
@@ -1785,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev)
 
 	switch (irq_ptr->state) {
 	case QDIO_IRQ_STATE_INACTIVE:
-		QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
-			       irq_ptr->irq);
+		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
+			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 		QDIO_DBF_TEXT2(1,setup,"eq:timeo");
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 		break;
 	case QDIO_IRQ_STATE_CLEANUP:
-		QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
-				irq_ptr->irq);
+		QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
+				"irq=0.%x.%x.\n",
+				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 		break;
 	case QDIO_IRQ_STATE_ESTABLISHED:
 	case QDIO_IRQ_STATE_ACTIVE:
 		/* I/O has been terminated by common I/O layer. */
-		QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
-				irq_ptr->irq);
+		QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
+				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
 		QDIO_DBF_TEXT2(1, trace, "cio:term");
 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 		if (get_device(&cdev->dev)) {
@@ -1862,7 +2144,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1862 } 2144 }
1863 } 2145 }
1864 2146
1865 qdio_irq_check_sense(irq_ptr->irq, irb); 2147 qdio_irq_check_sense(irq_ptr->schid, irb);
1866 2148
1867#ifdef CONFIG_QDIO_DEBUG 2149#ifdef CONFIG_QDIO_DEBUG
1868 sprintf(dbf_text, "state:%d", irq_ptr->state); 2150 sprintf(dbf_text, "state:%d", irq_ptr->state);
@@ -1905,7 +2187,7 @@ int
1905qdio_synchronize(struct ccw_device *cdev, unsigned int flags, 2187qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1906 unsigned int queue_number) 2188 unsigned int queue_number)
1907{ 2189{
1908 int cc; 2190 int cc = 0;
1909 struct qdio_q *q; 2191 struct qdio_q *q;
1910 struct qdio_irq *irq_ptr; 2192 struct qdio_irq *irq_ptr;
1911 void *ptr; 2193 void *ptr;
@@ -1918,7 +2200,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1918 return -ENODEV; 2200 return -ENODEV;
1919 2201
1920#ifdef CONFIG_QDIO_DEBUG 2202#ifdef CONFIG_QDIO_DEBUG
1921 *((int*)(&dbf_text[4])) = irq_ptr->irq; 2203 *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
1922 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); 2204 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
1923 *((int*)(&dbf_text[0]))=flags; 2205 *((int*)(&dbf_text[0]))=flags;
1924 *((int*)(&dbf_text[4]))=queue_number; 2206 *((int*)(&dbf_text[4]))=queue_number;
@@ -1929,12 +2211,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1929 q=irq_ptr->input_qs[queue_number]; 2211 q=irq_ptr->input_qs[queue_number];
1930 if (!q) 2212 if (!q)
1931 return -EINVAL; 2213 return -EINVAL;
1932 cc = do_siga_sync(q->irq, 0, q->mask); 2214 if (!(irq_ptr->is_qebsm))
2215 cc = do_siga_sync(q->schid, 0, q->mask);
1933 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { 2216 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
1934 q=irq_ptr->output_qs[queue_number]; 2217 q=irq_ptr->output_qs[queue_number];
1935 if (!q) 2218 if (!q)
1936 return -EINVAL; 2219 return -EINVAL;
1937 cc = do_siga_sync(q->irq, q->mask, 0); 2220 if (!(irq_ptr->is_qebsm))
2221 cc = do_siga_sync(q->schid, q->mask, 0);
1938 } else 2222 } else
1939 return -EINVAL; 2223 return -EINVAL;
1940 2224
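With QEBSM (QDIO Enhanced Buffer State Management) active, buffer states are kept coherent by the machine, so qdio_synchronize() now skips the SIGA-sync entirely; that is also why cc gains an initializer, since neither do_siga_sync() branch runs on the QEBSM path. A compilable sketch of just that control flow (illustration only; the stub stands in for the SIGA instruction):

#include <stdio.h>

static int do_siga_sync_stub(unsigned int mask1, unsigned int mask2)
{
        return 0;       /* condition code of the (stubbed) SIGA */
}

static int sync_queue(int is_qebsm, unsigned int in_mask, unsigned int out_mask)
{
        int cc = 0;     /* must default to 0: the QEBSM path skips SIGA */

        if (!is_qebsm)
                cc = do_siga_sync_stub(in_mask, out_mask);
        return cc;
}

int main(void)
{
        printf("qebsm cc=%d, classic cc=%d\n",
               sync_queue(1, 0, 0x80), sync_queue(0, 0, 0x80));
        return 0;
}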
@@ -1945,15 +2229,54 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1945 return cc; 2229 return cc;
1946} 2230}
1947 2231
1948static unsigned char 2232static inline void
1949qdio_check_siga_needs(int sch) 2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token)
2235{
2236 struct qdio_q *q;
2237 int i;
2238 unsigned int count, start_buf;
2239 char dbf_text[15];
2240
2241 /*check if QEBSM is disabled */
2242 if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
2243 irq_ptr->is_qebsm = 0;
2244 irq_ptr->sch_token = 0;
2245 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2246 QDIO_DBF_TEXT0(0,setup,"noV=V");
2247 return;
2248 }
2249 irq_ptr->sch_token = token;
2250 /*input queue*/
2251 for (i = 0; i < irq_ptr->no_input_qs;i++) {
2252 q = irq_ptr->input_qs[i];
2253 count = QDIO_MAX_BUFFERS_PER_Q;
2254 start_buf = 0;
2255 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2256 }
2257 sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2258 QDIO_DBF_TEXT0(0,setup,dbf_text);
2259 sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2260 QDIO_DBF_TEXT0(0,setup,dbf_text);
2261 /*output queue*/
2262 for (i = 0; i < irq_ptr->no_output_qs; i++) {
2263 q = irq_ptr->output_qs[i];
2264 count = QDIO_MAX_BUFFERS_PER_Q;
2265 start_buf = 0;
2266 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2267 }
2268}
2269
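qdio_check_subchannel_qebsm() above decides whether QEBSM is actually used: the machine-wide facility (is_qebsm, set from the css characteristics below) and a per-subchannel bit in the qdioac byte must both be present, and the subchannel token is only kept in that case. A small sketch of the decision (illustration only; reading bit 0x01 of qdioac as the per-subchannel QEBSM indicator is inferred from the hunk, not from documentation):

#include <stdio.h>

static int qebsm_usable(int css_has_qebsm, unsigned char qdioac)
{
        /* both the general facility and the per-subchannel bit are needed */
        return css_has_qebsm && (qdioac & 0x01);
}

int main(void)
{
        printf("%d %d %d\n",
               qebsm_usable(1, 0x01),   /* 1: usable */
               qebsm_usable(1, 0x00),   /* 0: subchannel lacks it */
               qebsm_usable(0, 0x01));  /* 0: machine lacks it */
        return 0;
}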
2270static void
2271qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
1950{ 2272{
1951 int result; 2273 int result;
1952 unsigned char qdioac; 2274 unsigned char qdioac;
1953
1954 struct { 2275 struct {
1955 struct chsc_header request; 2276 struct chsc_header request;
1956 u16 reserved1; 2277 u16 reserved1:10;
2278 u16 ssid:2;
2279 u16 fmt:4;
1957 u16 first_sch; 2280 u16 first_sch;
1958 u16 reserved2; 2281 u16 reserved2;
1959 u16 last_sch; 2282 u16 last_sch;
@@ -1964,67 +2287,83 @@ qdio_check_siga_needs(int sch)
1964 u8 reserved5; 2287 u8 reserved5;
1965 u16 sch; 2288 u16 sch;
1966 u8 qfmt; 2289 u8 qfmt;
1967 u8 reserved6; 2290 u8 parm;
1968 u8 qdioac; 2291 u8 qdioac1;
1969 u8 sch_class; 2292 u8 sch_class;
1970 u8 reserved7; 2293 u8 reserved7;
1971 u8 icnt; 2294 u8 icnt;
1972 u8 reserved8; 2295 u8 reserved8;
1973 u8 ocnt; 2296 u8 ocnt;
2297 u8 reserved9;
2298 u8 mbccnt;
2299 u16 qdioac2;
2300 u64 sch_token;
1974 } *ssqd_area; 2301 } *ssqd_area;
1975 2302
2303 QDIO_DBF_TEXT0(0,setup,"getssqd");
2304 qdioac = 0;
1976 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2305 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1977 if (!ssqd_area) { 2306 if (!ssqd_area) {
1978 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ 2307 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
1979 "SIGAs for sch x%x.\n", sch); 2308 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
1980 return CHSC_FLAG_SIGA_INPUT_NECESSARY || 2309 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
1981 CHSC_FLAG_SIGA_OUTPUT_NECESSARY || 2310 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
1982 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2311 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2312 irq_ptr->is_qebsm = 0;
2313 irq_ptr->sch_token = 0;
2314 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2315 return;
1983 } 2316 }
2317
1984 ssqd_area->request = (struct chsc_header) { 2318 ssqd_area->request = (struct chsc_header) {
1985 .length = 0x0010, 2319 .length = 0x0010,
1986 .code = 0x0024, 2320 .code = 0x0024,
1987 }; 2321 };
1988 2322 ssqd_area->first_sch = irq_ptr->schid.sch_no;
1989 ssqd_area->first_sch = sch; 2323 ssqd_area->last_sch = irq_ptr->schid.sch_no;
1990 ssqd_area->last_sch = sch; 2324 ssqd_area->ssid = irq_ptr->schid.ssid;
1991 2325 result = chsc(ssqd_area);
1992 result=chsc(ssqd_area);
1993 2326
1994 if (result) { 2327 if (result) {
1995 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ 2328 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
1996 "SIGAs for sch x%x.\n", 2329 "SIGAs for sch 0.%x.%x.\n", result,
1997 result,sch); 2330 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1998 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || 2331 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
1999 CHSC_FLAG_SIGA_OUTPUT_NECESSARY || 2332 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2000 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2333 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2334 irq_ptr->is_qebsm = 0;
2001 goto out; 2335 goto out;
2002 } 2336 }
2003 2337
2004 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { 2338 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2005 QDIO_PRINT_WARN("response upon checking SIGA needs " \ 2339 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2006 "is 0x%x. Using all SIGAs for sch x%x.\n", 2340 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
2007 ssqd_area->response.code, sch); 2341 ssqd_area->response.code,
2342 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2008 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || 2343 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
2009 CHSC_FLAG_SIGA_OUTPUT_NECESSARY || 2344 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2010 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2345 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2346 irq_ptr->is_qebsm = 0;
2011 goto out; 2347 goto out;
2012 } 2348 }
2013 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || 2349 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2014 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || 2350 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2015 (ssqd_area->sch != sch)) { 2351 (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2016 QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ 2352 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2017 "using all SIGAs.\n",sch); 2353 "using all SIGAs.\n",
2354 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2018 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2355 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2019 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2356 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2020 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ 2357 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2358 irq_ptr->is_qebsm = 0;
2021 goto out; 2359 goto out;
2022 } 2360 }
2023 2361 qdioac = ssqd_area->qdioac1;
2024 qdioac = ssqd_area->qdioac;
2025out: 2362out:
2363 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2364 ssqd_area->sch_token);
2026 free_page ((unsigned long) ssqd_area); 2365 free_page ((unsigned long) ssqd_area);
2027 return qdioac; 2366 irq_ptr->qdioac = qdioac;
2028} 2367}
2029 2368
2030static unsigned int 2369static unsigned int
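qdio_get_ssqd_information() issues a CHSC store-subchannel-QDIO-data request addressing a single subchannel (first_sch == last_sch) within one ssid; the widened response now also carries qdioac2 and the 64-bit sch_token consumed by the QEBSM instructions. A standalone mock-up of the request half (illustration only; the field widths and the 0x0010/0x0024 header come from the hunk, the values are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct chsc_header {
        uint16_t length;
        uint16_t code;
};

struct ssqd_request {
        struct chsc_header request;
        uint16_t reserved1:10;
        uint16_t ssid:2;
        uint16_t fmt:4;
        uint16_t first_sch;
        uint16_t reserved2;
        uint16_t last_sch;
};

int main(void)
{
        struct ssqd_request req;

        memset(&req, 0, sizeof(req));
        req.request.length = 0x0010;
        req.request.code = 0x0024;
        req.ssid = 1;
        req.first_sch = req.last_sch = 0x4711;  /* query one subchannel */
        printf("ssqd for sch 0.%x.%04x\n",
               (unsigned)req.ssid, (unsigned)req.first_sch);
        return 0;
}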
@@ -2055,6 +2394,13 @@ tiqdio_check_chsc_availability(void)
2055 sprintf(dbf_text,"hydrati%1x", hydra_thinints); 2394 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2056 QDIO_DBF_TEXT0(0,setup,dbf_text); 2395 QDIO_DBF_TEXT0(0,setup,dbf_text);
2057 2396
2397#ifdef CONFIG_64BIT
2398 /* Check for QEBSM support in general (bit 58). */
2399 is_passthrough = css_general_characteristics.qebsm;
2400#endif
2401 sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2402 QDIO_DBF_TEXT0(0,setup,dbf_text);
2403
2058 /* Check for aif time delay disablement fac (bit 56). If installed, 2404 /* Check for aif time delay disablement fac (bit 56). If installed,
2059 * omit svs even under lpar (good point by rick again) */ 2405 * omit svs even under lpar (good point by rick again) */
2060 omit_svs = css_general_characteristics.aif_tdd; 2406 omit_svs = css_general_characteristics.aif_tdd;
@@ -2091,7 +2437,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2091 /* set to 0x10000000 to enable 2437 /* set to 0x10000000 to enable
2092 * time delay disablement facility */ 2438 * time delay disablement facility */
2093 u32 reserved5; 2439 u32 reserved5;
2094 u32 subsystem_id; 2440 struct subchannel_id schid;
2095 u32 reserved6[1004]; 2441 u32 reserved6[1004];
2096 struct chsc_header response; 2442 struct chsc_header response;
2097 u32 reserved7; 2443 u32 reserved7;
@@ -2113,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2113 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2459 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2114 if (!scssc_area) { 2460 if (!scssc_area) {
2115 QDIO_PRINT_WARN("No memory for setting indicators on " \ 2461 QDIO_PRINT_WARN("No memory for setting indicators on " \
2116 "subchannel x%x.\n", irq_ptr->irq); 2462 "subchannel 0.%x.%x.\n",
2463 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2117 return -ENOMEM; 2464 return -ENOMEM;
2118 } 2465 }
2119 scssc_area->request = (struct chsc_header) { 2466 scssc_area->request = (struct chsc_header) {
@@ -2127,7 +2474,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2127 scssc_area->ks = QDIO_STORAGE_KEY; 2474 scssc_area->ks = QDIO_STORAGE_KEY;
2128 scssc_area->kc = QDIO_STORAGE_KEY; 2475 scssc_area->kc = QDIO_STORAGE_KEY;
2129 scssc_area->isc = TIQDIO_THININT_ISC; 2476 scssc_area->isc = TIQDIO_THININT_ISC;
2130 scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; 2477 scssc_area->schid = irq_ptr->schid;
2131 /* enables the time delay disablement facility. Don't care 2478 /* enables the time delay disablement facility. Don't care
2132 * whether it is really there (i.e. we haven't checked for 2479 * whether it is really there (i.e. we haven't checked for
2133 * it) */ 2480 * it) */
@@ -2137,12 +2484,11 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2137 QDIO_PRINT_WARN("Time delay disablement facility " \ 2484 QDIO_PRINT_WARN("Time delay disablement facility " \
2138 "not available\n"); 2485 "not available\n");
2139 2486
2140
2141
2142 result = chsc(scssc_area); 2487 result = chsc(scssc_area);
2143 if (result) { 2488 if (result) {
2144 QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ 2489 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2145 "cc=%i.\n",irq_ptr->irq,result); 2490 "cc=%i.\n",
2491 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2146 result = -EIO; 2492 result = -EIO;
2147 goto out; 2493 goto out;
2148 } 2494 }
@@ -2198,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2198 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2544 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2199 if (!scsscf_area) { 2545 if (!scsscf_area) {
2200 QDIO_PRINT_WARN("No memory for setting delay target on " \ 2546 QDIO_PRINT_WARN("No memory for setting delay target on " \
2201 "subchannel x%x.\n", irq_ptr->irq); 2547 "subchannel 0.%x.%x.\n",
2548 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2202 return -ENOMEM; 2549 return -ENOMEM;
2203 } 2550 }
2204 scsscf_area->request = (struct chsc_header) { 2551 scsscf_area->request = (struct chsc_header) {
@@ -2210,8 +2557,10 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2210 2557
2211 result=chsc(scsscf_area); 2558 result=chsc(scsscf_area);
2212 if (result) { 2559 if (result) {
2213 QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ 2560 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2214 "cc=%i. Continuing.\n",irq_ptr->irq,result); 2561 "cc=%i. Continuing.\n",
2562 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2563 result);
2215 result = -EIO; 2564 result = -EIO;
2216 goto out; 2565 goto out;
2217 } 2566 }
@@ -2245,7 +2594,7 @@ qdio_cleanup(struct ccw_device *cdev, int how)
2245 if (!irq_ptr) 2594 if (!irq_ptr)
2246 return -ENODEV; 2595 return -ENODEV;
2247 2596
2248 sprintf(dbf_text,"qcln%4x",irq_ptr->irq); 2597 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2249 QDIO_DBF_TEXT1(0,trace,dbf_text); 2598 QDIO_DBF_TEXT1(0,trace,dbf_text);
2250 QDIO_DBF_TEXT0(0,setup,dbf_text); 2599 QDIO_DBF_TEXT0(0,setup,dbf_text);
2251 2600
@@ -2272,7 +2621,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2272 2621
2273 down(&irq_ptr->setting_up_sema); 2622 down(&irq_ptr->setting_up_sema);
2274 2623
2275 sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); 2624 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2276 QDIO_DBF_TEXT1(0,trace,dbf_text); 2625 QDIO_DBF_TEXT1(0,trace,dbf_text);
2277 QDIO_DBF_TEXT0(0,setup,dbf_text); 2626 QDIO_DBF_TEXT0(0,setup,dbf_text);
2278 2627
@@ -2378,7 +2727,7 @@ qdio_free(struct ccw_device *cdev)
2378 2727
2379 down(&irq_ptr->setting_up_sema); 2728 down(&irq_ptr->setting_up_sema);
2380 2729
2381 sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); 2730 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2382 QDIO_DBF_TEXT1(0,trace,dbf_text); 2731 QDIO_DBF_TEXT1(0,trace,dbf_text);
2383 QDIO_DBF_TEXT0(0,setup,dbf_text); 2732 QDIO_DBF_TEXT0(0,setup,dbf_text);
2384 2733
@@ -2526,13 +2875,14 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2526 irq_ptr = cdev->private->qdio_data; 2875 irq_ptr = cdev->private->qdio_data;
2527 2876
2528 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { 2877 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2529 sprintf(dbf_text,"ick1%4x",irq_ptr->irq); 2878 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2530 QDIO_DBF_TEXT2(1,trace,dbf_text); 2879 QDIO_DBF_TEXT2(1,trace,dbf_text);
2531 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); 2880 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2532 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); 2881 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2533 QDIO_PRINT_ERR("received check condition on establish " \ 2882 QDIO_PRINT_ERR("received check condition on establish " \
2534 "queues on irq 0x%x (cs=x%x, ds=x%x).\n", 2883 "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2535 irq_ptr->irq,cstat,dstat); 2884 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2885 cstat,dstat);
2536 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); 2886 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2537 } 2887 }
2538 2888
@@ -2540,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2540 QDIO_DBF_TEXT2(1,setup,"eq:no de"); 2890 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2541 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); 2891 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2542 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); 2892 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2543 QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " 2893 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2544 "device end: dstat=%02x, cstat=%02x\n", 2894 "device end: dstat=%02x, cstat=%02x\n",
2545 irq_ptr->irq, dstat, cstat); 2895 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2896 dstat, cstat);
2546 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2897 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2547 return 1; 2898 return 1;
2548 } 2899 }
@@ -2551,10 +2902,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2551 QDIO_DBF_TEXT2(1,setup,"eq:badio"); 2902 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2552 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); 2903 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2553 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); 2904 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2554 QDIO_PRINT_ERR("establish queues on irq %04x: got " 2905 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2555 "the following devstat: dstat=%02x, " 2906 "the following devstat: dstat=%02x, "
2556 "cstat=%02x\n", 2907 "cstat=%02x\n", irq_ptr->schid.ssid,
2557 irq_ptr->irq, dstat, cstat); 2908 irq_ptr->schid.sch_no, dstat, cstat);
2558 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2909 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2559 return 1; 2910 return 1;
2560 } 2911 }
@@ -2569,7 +2920,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2569 2920
2570 irq_ptr = cdev->private->qdio_data; 2921 irq_ptr = cdev->private->qdio_data;
2571 2922
2572 sprintf(dbf_text,"qehi%4x",cdev->private->irq); 2923 sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
2573 QDIO_DBF_TEXT0(0,setup,dbf_text); 2924 QDIO_DBF_TEXT0(0,setup,dbf_text);
2574 QDIO_DBF_TEXT0(0,trace,dbf_text); 2925 QDIO_DBF_TEXT0(0,trace,dbf_text);
2575 2926
@@ -2588,7 +2939,7 @@ qdio_initialize(struct qdio_initialize *init_data)
2588 int rc; 2939 int rc;
2589 char dbf_text[15]; 2940 char dbf_text[15];
2590 2941
2591 sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); 2942 sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
2592 QDIO_DBF_TEXT0(0,setup,dbf_text); 2943 QDIO_DBF_TEXT0(0,setup,dbf_text);
2593 QDIO_DBF_TEXT0(0,trace,dbf_text); 2944 QDIO_DBF_TEXT0(0,trace,dbf_text);
2594 2945
@@ -2609,7 +2960,7 @@ qdio_allocate(struct qdio_initialize *init_data)
2609 struct qdio_irq *irq_ptr; 2960 struct qdio_irq *irq_ptr;
2610 char dbf_text[15]; 2961 char dbf_text[15];
2611 2962
2612 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); 2963 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
2613 QDIO_DBF_TEXT0(0,setup,dbf_text); 2964 QDIO_DBF_TEXT0(0,setup,dbf_text);
2614 QDIO_DBF_TEXT0(0,trace,dbf_text); 2965 QDIO_DBF_TEXT0(0,trace,dbf_text);
2615 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || 2966 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -2682,7 +3033,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2682 3033
2683 irq_ptr->int_parm=init_data->int_parm; 3034 irq_ptr->int_parm=init_data->int_parm;
2684 3035
2685 irq_ptr->irq = init_data->cdev->private->irq; 3036 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
2686 irq_ptr->no_input_qs=init_data->no_input_qs; 3037 irq_ptr->no_input_qs=init_data->no_input_qs;
2687 irq_ptr->no_output_qs=init_data->no_output_qs; 3038 irq_ptr->no_output_qs=init_data->no_output_qs;
2688 3039
@@ -2698,11 +3049,12 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2698 QDIO_DBF_TEXT2(0,setup,dbf_text); 3049 QDIO_DBF_TEXT2(0,setup,dbf_text);
2699 3050
2700 if (irq_ptr->is_thinint_irq) { 3051 if (irq_ptr->is_thinint_irq) {
2701 irq_ptr->dev_st_chg_ind=qdio_get_indicator(); 3052 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
2702 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); 3053 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
2703 if (!irq_ptr->dev_st_chg_ind) { 3054 if (!irq_ptr->dev_st_chg_ind) {
2704 QDIO_PRINT_WARN("no indicator location available " \ 3055 QDIO_PRINT_WARN("no indicator location available " \
2705 "for irq 0x%x\n",irq_ptr->irq); 3056 "for irq 0.%x.%x\n",
3057 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2706 qdio_release_irq_memory(irq_ptr); 3058 qdio_release_irq_memory(irq_ptr);
2707 return -ENOBUFS; 3059 return -ENOBUFS;
2708 } 3060 }
@@ -2747,6 +3099,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2747 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; 3099 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
2748 3100
2749 /* fill in qib */ 3101 /* fill in qib */
3102 irq_ptr->is_qebsm = is_passthrough;
3103 if (irq_ptr->is_qebsm)
3104 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3105
2750 irq_ptr->qib.qfmt=init_data->q_format; 3106 irq_ptr->qib.qfmt=init_data->q_format;
2751 if (init_data->no_input_qs) 3107 if (init_data->no_input_qs)
2752 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); 3108 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -2829,7 +3185,7 @@ qdio_establish(struct qdio_initialize *init_data)
2829 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); 3185 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
2830 } 3186 }
2831 3187
2832 sprintf(dbf_text,"qest%4x",cdev->private->irq); 3188 sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
2833 QDIO_DBF_TEXT0(0,setup,dbf_text); 3189 QDIO_DBF_TEXT0(0,setup,dbf_text);
2834 QDIO_DBF_TEXT0(0,trace,dbf_text); 3190 QDIO_DBF_TEXT0(0,trace,dbf_text);
2835 3191
@@ -2855,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data)
2855 sprintf(dbf_text,"eq:io%4x",result); 3211 sprintf(dbf_text,"eq:io%4x",result);
2856 QDIO_DBF_TEXT2(1,setup,dbf_text); 3212 QDIO_DBF_TEXT2(1,setup,dbf_text);
2857 } 3213 }
2858 QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ 3214 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
2859 "returned %i, next try returned %i\n", 3215 "returned %i, next try returned %i\n",
2860 irq_ptr->irq,result,result2); 3216 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3217 result, result2);
2861 result=result2; 3218 result=result2;
2862 if (result) 3219 if (result)
2863 ccw_device_set_timeout(cdev, 0); 3220 ccw_device_set_timeout(cdev, 0);
@@ -2884,7 +3241,7 @@ qdio_establish(struct qdio_initialize *init_data)
2884 return -EIO; 3241 return -EIO;
2885 } 3242 }
2886 3243
2887 irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); 3244 qdio_get_ssqd_information(irq_ptr);
2888 /* if this gets set once, we're running under VM and can omit SVSes */ 3245 /* if this gets set once, we're running under VM and can omit SVSes */
2889 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) 3246 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
2890 omit_svs=1; 3247 omit_svs=1;
@@ -2930,7 +3287,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
2930 goto out; 3287 goto out;
2931 } 3288 }
2932 3289
2933 sprintf(dbf_text,"qact%4x", irq_ptr->irq); 3290 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
2934 QDIO_DBF_TEXT2(0,setup,dbf_text); 3291 QDIO_DBF_TEXT2(0,setup,dbf_text);
2935 QDIO_DBF_TEXT2(0,trace,dbf_text); 3292 QDIO_DBF_TEXT2(0,trace,dbf_text);
2936 3293
@@ -2955,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags)
2955 sprintf(dbf_text,"aq:io%4x",result); 3312 sprintf(dbf_text,"aq:io%4x",result);
2956 QDIO_DBF_TEXT2(1,setup,dbf_text); 3313 QDIO_DBF_TEXT2(1,setup,dbf_text);
2957 } 3314 }
2958 QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ 3315 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
2959 "returned %i, next try returned %i\n", 3316 "returned %i, next try returned %i\n",
2960 irq_ptr->irq,result,result2); 3317 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3318 result, result2);
2961 result=result2; 3319 result=result2;
2962 } 3320 }
2963 3321
@@ -3015,30 +3373,40 @@ static inline void
3015qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3373qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3016 unsigned int count, struct qdio_buffer *buffers) 3374 unsigned int count, struct qdio_buffer *buffers)
3017{ 3375{
3376 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3377 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3378 if (irq->is_qebsm) {
3379 while (count)
3380 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3381 return;
3382 }
3018 for (;;) { 3383 for (;;) {
3019 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); 3384 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3020 count--; 3385 count--;
3021 if (!count) break; 3386 if (!count) break;
3022 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); 3387 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3023 } 3388 }
3024
3025 /* not necessary, as the queues are synced during the SIGA read */
3026 /*SYNC_MEMORY;*/
3027} 3389}
3028 3390
3029static inline void 3391static inline void
3030qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3392qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3031 unsigned int count, struct qdio_buffer *buffers) 3393 unsigned int count, struct qdio_buffer *buffers)
3032{ 3394{
3395 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3396
3397 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3398 if (irq->is_qebsm) {
3399 while (count)
3400 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3401 return;
3402 }
3403
3033 for (;;) { 3404 for (;;) {
3034 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); 3405 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3035 count--; 3406 count--;
3036 if (!count) break; 3407 if (!count) break;
3037 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); 3408 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3038 } 3409 }
3039
3040 /* SIGA write will sync the queues */
3041 /*SYNC_MEMORY;*/
3042} 3410}
3043 3411
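Both fill helpers above now call the range-based set_slsb(q, &qidx, state, &count): the QEBSM branch hands the whole range to the hardware at once, while the classic branch still walks buffer by buffer using the masked-add wrap (the same trick, with +127 instead of +1, fetches the previous buffer in the fast-requeue check further down). The wrap itself, runnable in isolation:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

int main(void)
{
        unsigned int qidx = 126;
        int i;

        for (i = 0; i < 4; i++) {
                printf("%u ", qidx);
                qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
        }
        printf("\n");   /* prints: 126 127 0 1 */
        return 0;
}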
3044static inline void 3412static inline void
@@ -3083,6 +3451,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3083 struct qdio_buffer *buffers) 3451 struct qdio_buffer *buffers)
3084{ 3452{
3085 int used_elements; 3453 int used_elements;
3454 unsigned int cnt, start_buf;
3455 unsigned char state = 0;
3456 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3086 3457
3087 /* This is the outbound handling of queues */ 3458 /* This is the outbound handling of queues */
3088#ifdef QDIO_PERFORMANCE_STATS 3459#ifdef QDIO_PERFORMANCE_STATS
@@ -3115,9 +3486,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3115 * SYNC_MEMORY :-/ ), we try to 3486 * SYNC_MEMORY :-/ ), we try to
3116 * fast-requeue buffers 3487 * fast-requeue buffers
3117 */ 3488 */
3118 if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) 3489 if (irq->is_qebsm) {
3119 &(QDIO_MAX_BUFFERS_PER_Q-1)]!= 3490 cnt = 1;
3120 SLSB_CU_OUTPUT_PRIMED) { 3491 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3492 (QDIO_MAX_BUFFERS_PER_Q-1));
3493 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3494 } else
3495 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3496 &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3497 if (state != SLSB_CU_OUTPUT_PRIMED) {
3121 qdio_kick_outbound_q(q); 3498 qdio_kick_outbound_q(q);
3122 } else { 3499 } else {
3123 QDIO_DBF_TEXT3(0,trace, "fast-req"); 3500 QDIO_DBF_TEXT3(0,trace, "fast-req");
@@ -3150,7 +3527,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3150#ifdef CONFIG_QDIO_DEBUG 3527#ifdef CONFIG_QDIO_DEBUG
3151 char dbf_text[20]; 3528 char dbf_text[20];
3152 3529
3153 sprintf(dbf_text,"doQD%04x",cdev->private->irq); 3530 sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
3154 QDIO_DBF_TEXT3(0,trace,dbf_text); 3531 QDIO_DBF_TEXT3(0,trace,dbf_text);
3155#endif /* CONFIG_QDIO_DEBUG */ 3532#endif /* CONFIG_QDIO_DEBUG */
3156 3533
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 328e31cc6854..fa385e761fe1 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -3,14 +3,15 @@
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
6#define VERSION_CIO_QDIO_H "$Revision: 1.33 $" 6#include "schid.h"
7
8#define VERSION_CIO_QDIO_H "$Revision: 1.40 $"
7 9
8#ifdef CONFIG_QDIO_DEBUG 10#ifdef CONFIG_QDIO_DEBUG
9#define QDIO_VERBOSE_LEVEL 9 11#define QDIO_VERBOSE_LEVEL 9
10#else /* CONFIG_QDIO_DEBUG */ 12#else /* CONFIG_QDIO_DEBUG */
11#define QDIO_VERBOSE_LEVEL 5 13#define QDIO_VERBOSE_LEVEL 5
12#endif /* CONFIG_QDIO_DEBUG */ 14#endif /* CONFIG_QDIO_DEBUG */
13
14#define QDIO_USE_PROCESSING_STATE 15#define QDIO_USE_PROCESSING_STATE
15 16
16#ifdef CONFIG_QDIO_PERF_STATS 17#ifdef CONFIG_QDIO_PERF_STATS
@@ -265,12 +266,64 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
265/* 266/*
266 * Some instructions as assembly 267 * Some instructions as assembly
267 */ 268 */
269
270static inline int
271do_sqbs(unsigned long sch, unsigned char state, int queue,
272 unsigned int *start, unsigned int *count)
273{
274#ifdef CONFIG_64BIT
275 register unsigned long _ccq asm ("0") = *count;
276 register unsigned long _sch asm ("1") = sch;
277 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
278
279 asm volatile (
280 " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t"
281 : "+d" (_ccq), "+d" (_queuestart)
282 : "d" ((unsigned long)state), "d" (_sch)
283 : "memory", "cc"
284 );
285 *count = _ccq & 0xff;
286 *start = _queuestart & 0xff;
287
288 return (_ccq >> 32) & 0xff;
289#else
290 return 0;
291#endif
292}
293
294static inline int
295do_eqbs(unsigned long sch, unsigned char *state, int queue,
296 unsigned int *start, unsigned int *count)
297{
298#ifdef CONFIG_64BIT
299 register unsigned long _ccq asm ("0") = *count;
300 register unsigned long _sch asm ("1") = sch;
301 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
302 unsigned long _state = 0;
303
304 asm volatile (
305 " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t"
306 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
307 : "d" (_sch)
308 : "memory", "cc"
309 );
310 *count = _ccq & 0xff;
311 *start = _queuestart & 0xff;
312 *state = _state & 0xff;
313
314 return (_ccq >> 32) & 0xff;
315#else
316 return 0;
317#endif
318}
319
320
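do_sqbs()/do_eqbs() above wrap the new SQBS/EQBS instructions (set/extract queue buffer states): a range is named by the subchannel token, queue number, start index and count, and the instruction reports back how far it got through *start/*count. A stub-based sketch of the caller-side loop this implies (illustration only; the partial-completion behaviour is inferred from the in/out parameters, and the stub processes at most three buffers per call):

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

static int eqbs_stub(unsigned char *state, unsigned int *start,
                     unsigned int *count)
{
        unsigned int done = *count < 3 ? *count : 3;

        *state = 0x5e;          /* made-up state value */
        *start = (*start + done) & (QDIO_MAX_BUFFERS_PER_Q - 1);
        *count -= done;
        return 0;               /* 0: no error, possibly more to do */
}

int main(void)
{
        unsigned int start = 126, count = 5;
        unsigned char state;

        while (count)
                if (eqbs_stub(&state, &start, &count))
                        break;
        printf("next start=%u, remaining=%u\n", start, count);
        return 0;
}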
268static inline int 321static inline int
269do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) 322do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
270{ 323{
271 int cc; 324 int cc;
272 325
273#ifndef CONFIG_ARCH_S390X 326#ifndef CONFIG_64BIT
274 asm volatile ( 327 asm volatile (
275 "lhi 0,2 \n\t" 328 "lhi 0,2 \n\t"
276 "lr 1,%1 \n\t" 329 "lr 1,%1 \n\t"
@@ -280,10 +333,10 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
280 "ipm %0 \n\t" 333 "ipm %0 \n\t"
281 "srl %0,28 \n\t" 334 "srl %0,28 \n\t"
282 : "=d" (cc) 335 : "=d" (cc)
283 : "d" (0x10000|irq), "d" (mask1), "d" (mask2) 336 : "d" (schid), "d" (mask1), "d" (mask2)
284 : "cc", "0", "1", "2", "3" 337 : "cc", "0", "1", "2", "3"
285 ); 338 );
286#else /* CONFIG_ARCH_S390X */ 339#else /* CONFIG_64BIT */
287 asm volatile ( 340 asm volatile (
288 "lghi 0,2 \n\t" 341 "lghi 0,2 \n\t"
289 "llgfr 1,%1 \n\t" 342 "llgfr 1,%1 \n\t"
@@ -293,19 +346,19 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
293 "ipm %0 \n\t" 346 "ipm %0 \n\t"
294 "srl %0,28 \n\t" 347 "srl %0,28 \n\t"
295 : "=d" (cc) 348 : "=d" (cc)
296 : "d" (0x10000|irq), "d" (mask1), "d" (mask2) 349 : "d" (schid), "d" (mask1), "d" (mask2)
297 : "cc", "0", "1", "2", "3" 350 : "cc", "0", "1", "2", "3"
298 ); 351 );
299#endif /* CONFIG_ARCH_S390X */ 352#endif /* CONFIG_64BIT */
300 return cc; 353 return cc;
301} 354}
302 355
303static inline int 356static inline int
304do_siga_input(unsigned int irq, unsigned int mask) 357do_siga_input(struct subchannel_id schid, unsigned int mask)
305{ 358{
306 int cc; 359 int cc;
307 360
308#ifndef CONFIG_ARCH_S390X 361#ifndef CONFIG_64BIT
309 asm volatile ( 362 asm volatile (
310 "lhi 0,1 \n\t" 363 "lhi 0,1 \n\t"
311 "lr 1,%1 \n\t" 364 "lr 1,%1 \n\t"
@@ -314,10 +367,10 @@ do_siga_input(unsigned int irq, unsigned int mask)
314 "ipm %0 \n\t" 367 "ipm %0 \n\t"
315 "srl %0,28 \n\t" 368 "srl %0,28 \n\t"
316 : "=d" (cc) 369 : "=d" (cc)
317 : "d" (0x10000|irq), "d" (mask) 370 : "d" (schid), "d" (mask)
318 : "cc", "0", "1", "2", "memory" 371 : "cc", "0", "1", "2", "memory"
319 ); 372 );
320#else /* CONFIG_ARCH_S390X */ 373#else /* CONFIG_64BIT */
321 asm volatile ( 374 asm volatile (
322 "lghi 0,1 \n\t" 375 "lghi 0,1 \n\t"
323 "llgfr 1,%1 \n\t" 376 "llgfr 1,%1 \n\t"
@@ -326,21 +379,22 @@ do_siga_input(unsigned int irq, unsigned int mask)
326 "ipm %0 \n\t" 379 "ipm %0 \n\t"
327 "srl %0,28 \n\t" 380 "srl %0,28 \n\t"
328 : "=d" (cc) 381 : "=d" (cc)
329 : "d" (0x10000|irq), "d" (mask) 382 : "d" (schid), "d" (mask)
330 : "cc", "0", "1", "2", "memory" 383 : "cc", "0", "1", "2", "memory"
331 ); 384 );
332#endif /* CONFIG_ARCH_S390X */ 385#endif /* CONFIG_64BIT */
333 386
334 return cc; 387 return cc;
335} 388}
336 389
337static inline int 390static inline int
338do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) 391do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
392 unsigned int fc)
339{ 393{
340 int cc; 394 int cc;
341 __u32 busy_bit; 395 __u32 busy_bit;
342 396
343#ifndef CONFIG_ARCH_S390X 397#ifndef CONFIG_64BIT
344 asm volatile ( 398 asm volatile (
345 "lhi 0,0 \n\t" 399 "lhi 0,0 \n\t"
346 "lr 1,%2 \n\t" 400 "lr 1,%2 \n\t"
@@ -366,14 +420,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
366 ".long 0b,2b \n\t" 420 ".long 0b,2b \n\t"
367 ".previous \n\t" 421 ".previous \n\t"
368 : "=d" (cc), "=d" (busy_bit) 422 : "=d" (cc), "=d" (busy_bit)
369 : "d" (0x10000|irq), "d" (mask), 423 : "d" (schid), "d" (mask),
370 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 424 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
371 : "cc", "0", "1", "2", "memory" 425 : "cc", "0", "1", "2", "memory"
372 ); 426 );
373#else /* CONFIG_ARCH_S390X */ 427#else /* CONFIG_64BIT */
374 asm volatile ( 428 asm volatile (
375 "lghi 0,0 \n\t" 429 "llgfr 0,%5 \n\t"
376 "llgfr 1,%2 \n\t" 430 "lgr 1,%2 \n\t"
377 "llgfr 2,%3 \n\t" 431 "llgfr 2,%3 \n\t"
378 "siga 0 \n\t" 432 "siga 0 \n\t"
379 "0:" 433 "0:"
@@ -391,11 +445,11 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
391 ".quad 0b,1b \n\t" 445 ".quad 0b,1b \n\t"
392 ".previous \n\t" 446 ".previous \n\t"
393 : "=d" (cc), "=d" (busy_bit) 447 : "=d" (cc), "=d" (busy_bit)
394 : "d" (0x10000|irq), "d" (mask), 448 : "d" (schid), "d" (mask),
395 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 449 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
396 : "cc", "0", "1", "2", "memory" 450 : "cc", "0", "1", "2", "memory"
397 ); 451 );
398#endif /* CONFIG_ARCH_S390X */ 452#endif /* CONFIG_64BIT */
399 453
400 (*bb) = busy_bit; 454 (*bb) = busy_bit;
401 return cc; 455 return cc;
@@ -407,21 +461,21 @@ do_clear_global_summary(void)
407 461
408 unsigned long time; 462 unsigned long time;
409 463
410#ifndef CONFIG_ARCH_S390X 464#ifndef CONFIG_64BIT
411 asm volatile ( 465 asm volatile (
412 "lhi 1,3 \n\t" 466 "lhi 1,3 \n\t"
413 ".insn rre,0xb2650000,2,0 \n\t" 467 ".insn rre,0xb2650000,2,0 \n\t"
414 "lr %0,3 \n\t" 468 "lr %0,3 \n\t"
415 : "=d" (time) : : "cc", "1", "2", "3" 469 : "=d" (time) : : "cc", "1", "2", "3"
416 ); 470 );
417#else /* CONFIG_ARCH_S390X */ 471#else /* CONFIG_64BIT */
418 asm volatile ( 472 asm volatile (
419 "lghi 1,3 \n\t" 473 "lghi 1,3 \n\t"
420 ".insn rre,0xb2650000,2,0 \n\t" 474 ".insn rre,0xb2650000,2,0 \n\t"
421 "lgr %0,3 \n\t" 475 "lgr %0,3 \n\t"
422 : "=d" (time) : : "cc", "1", "2", "3" 476 : "=d" (time) : : "cc", "1", "2", "3"
423 ); 477 );
424#endif /* CONFIG_ARCH_S390X */ 478#endif /* CONFIG_64BIT */
425 479
426 return time; 480 return time;
427} 481}
@@ -488,42 +542,21 @@ struct qdio_perf_stats {
488 542
489#define MY_MODULE_STRING(x) #x 543#define MY_MODULE_STRING(x) #x
490 544
491#ifdef CONFIG_ARCH_S390X 545#ifdef CONFIG_64BIT
492#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) 546#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
493#else /* CONFIG_ARCH_S390X */ 547#else /* CONFIG_64BIT */
494#define QDIO_GET_ADDR(x) ((__u32)(long)x) 548#define QDIO_GET_ADDR(x) ((__u32)(long)x)
495#endif /* CONFIG_ARCH_S390X */ 549#endif /* CONFIG_64BIT */
496
497#ifdef CONFIG_QDIO_DEBUG
498#define set_slsb(x,y) \
499 if(q->queue_type==QDIO_TRACE_QTYPE) { \
500 if(q->is_input_q) { \
501 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
502 } else { \
503 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
504 } \
505 } \
506 qdio_set_slsb(x,y); \
507 if(q->queue_type==QDIO_TRACE_QTYPE) { \
508 if(q->is_input_q) { \
509 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
510 } else { \
511 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
512 } \
513 }
514#else /* CONFIG_QDIO_DEBUG */
515#define set_slsb(x,y) qdio_set_slsb(x,y)
516#endif /* CONFIG_QDIO_DEBUG */
517 550
518struct qdio_q { 551struct qdio_q {
519 volatile struct slsb slsb; 552 volatile struct slsb slsb;
520 553
521 char unused[QDIO_MAX_BUFFERS_PER_Q]; 554 char unused[QDIO_MAX_BUFFERS_PER_Q];
522 555
523 __u32 * volatile dev_st_chg_ind; 556 __u32 * dev_st_chg_ind;
524 557
525 int is_input_q; 558 int is_input_q;
526 int irq; 559 struct subchannel_id schid;
527 struct ccw_device *cdev; 560 struct ccw_device *cdev;
528 561
529 unsigned int is_iqdio_q; 562 unsigned int is_iqdio_q;
@@ -568,6 +601,7 @@ struct qdio_q {
568 struct tasklet_struct tasklet; 601 struct tasklet_struct tasklet;
569#endif /* QDIO_USE_TIMERS_FOR_POLLING */ 602#endif /* QDIO_USE_TIMERS_FOR_POLLING */
570 603
604
571 enum qdio_irq_states state; 605 enum qdio_irq_states state;
572 606
573 /* used to store the error condition during a data transfer */ 607 /* used to store the error condition during a data transfer */
@@ -617,13 +651,17 @@ struct qdio_irq {
617 __u32 * volatile dev_st_chg_ind; 651 __u32 * volatile dev_st_chg_ind;
618 652
619 unsigned long int_parm; 653 unsigned long int_parm;
620 int irq; 654 struct subchannel_id schid;
621 655
622 unsigned int is_iqdio_irq; 656 unsigned int is_iqdio_irq;
623 unsigned int is_thinint_irq; 657 unsigned int is_thinint_irq;
624 unsigned int hydra_gives_outbound_pcis; 658 unsigned int hydra_gives_outbound_pcis;
625 unsigned int sync_done_on_outb_pcis; 659 unsigned int sync_done_on_outb_pcis;
626 660
661 /* QEBSM facility */
662 unsigned int is_qebsm;
663 unsigned long sch_token;
664
627 enum qdio_irq_states state; 665 enum qdio_irq_states state;
628 666
629 unsigned int no_input_qs; 667 unsigned int no_input_qs;
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
new file mode 100644
index 000000000000..54328fec5ade
--- /dev/null
+++ b/drivers/s390/cio/schid.h
@@ -0,0 +1,26 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
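schid_equal() may compare with memcmp() because the structure is exactly 32 bits with no padding (packed, aligned(4)) and init_subchannel_id() zeroes the reserved bits, so equal ids are bitwise equal. A self-contained replica demonstrating that (illustration only; mirrors the header above):

#include <assert.h>
#include <string.h>

struct subchannel_id {
        unsigned int reserved:13;
        unsigned int ssid:2;
        unsigned int one:1;
        unsigned int sch_no:16;
} __attribute__ ((packed,aligned(4)));

static void init_subchannel_id(struct subchannel_id *schid)
{
        memset(schid, 0, sizeof(*schid));       /* reserved bits -> 0 */
        schid->one = 1;
}

int main(void)
{
        struct subchannel_id a, b;

        init_subchannel_id(&a);
        init_subchannel_id(&b);
        a.sch_no = b.sch_no = 0x4711;
        assert(!memcmp(&a, &b, sizeof(a)));     /* bitwise equality holds */
        return 0;
}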
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
index e319e78b5ea2..f87c785f2039 100644
--- a/drivers/s390/crypto/z90common.h
+++ b/drivers/s390/crypto/z90common.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90common.h 2 * linux/drivers/s390/crypto/z90common.h
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -91,12 +91,13 @@ enum hdstat {
91#define TSQ_FATAL_ERROR 34 91#define TSQ_FATAL_ERROR 34
92#define RSQ_FATAL_ERROR 35 92#define RSQ_FATAL_ERROR 35
93 93
94#define Z90CRYPT_NUM_TYPES 5 94#define Z90CRYPT_NUM_TYPES 6
95#define PCICA 0 95#define PCICA 0
96#define PCICC 1 96#define PCICC 1
97#define PCIXCC_MCL2 2 97#define PCIXCC_MCL2 2
98#define PCIXCC_MCL3 3 98#define PCIXCC_MCL3 3
99#define CEX2C 4 99#define CEX2C 4
100#define CEX2A 5
100#define NILDEV -1 101#define NILDEV -1
101#define ANYDEV -1 102#define ANYDEV -1
102#define PCIXCC_UNK -2 103#define PCIXCC_UNK -2
@@ -105,7 +106,7 @@ enum hdevice_type {
105 PCICC_HW = 3, 106 PCICC_HW = 3,
106 PCICA_HW = 4, 107 PCICA_HW = 4,
107 PCIXCC_HW = 5, 108 PCIXCC_HW = 5,
108 OTHER_HW = 6, 109 CEX2A_HW = 6,
109 CEX2C_HW = 7 110 CEX2C_HW = 7
110}; 111};
111 112
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
index 0a3bb5a10dd4..3a18443fdfa7 100644
--- a/drivers/s390/crypto/z90crypt.h
+++ b/drivers/s390/crypto/z90crypt.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90crypt.h 2 * linux/drivers/s390/crypto/z90crypt.h
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -29,11 +29,11 @@
29 29
30#include <linux/ioctl.h> 30#include <linux/ioctl.h>
31 31
32#define VERSION_Z90CRYPT_H "$Revision: 1.11 $" 32#define VERSION_Z90CRYPT_H "$Revision: 1.2.2.4 $"
33 33
34#define z90crypt_VERSION 1 34#define z90crypt_VERSION 1
35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards 35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
36#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support 36#define z90crypt_VARIANT 3 // 3 = CEX2A support
37 37
38/** 38/**
39 * struct ica_rsa_modexpo 39 * struct ica_rsa_modexpo
@@ -122,6 +122,9 @@ struct ica_rsa_modexpo_crt {
122 * Z90STAT_CEX2CCOUNT 122 * Z90STAT_CEX2CCOUNT
123 * Return an integer count of all CEX2Cs. 123 * Return an integer count of all CEX2Cs.
124 * 124 *
125 * Z90STAT_CEX2ACOUNT
126 * Return an integer count of all CEX2As.
127 *
125 * Z90STAT_REQUESTQ_COUNT 128 * Z90STAT_REQUESTQ_COUNT
126 * Return an integer count of the number of entries waiting to be 129 * Return an integer count of the number of entries waiting to be
127 * sent to a device. 130 * sent to a device.
@@ -144,6 +147,7 @@ struct ica_rsa_modexpo_crt {
144 * 0x03: PCIXCC_MCL2 147 * 0x03: PCIXCC_MCL2
145 * 0x04: PCIXCC_MCL3 148 * 0x04: PCIXCC_MCL3
146 * 0x05: CEX2C 149 * 0x05: CEX2C
150 * 0x06: CEX2A
147 * 0x0d: device is disabled via the proc filesystem 151 * 0x0d: device is disabled via the proc filesystem
148 * 152 *
149 * Z90STAT_QDEPTH_MASK 153 * Z90STAT_QDEPTH_MASK
@@ -199,6 +203,7 @@ struct ica_rsa_modexpo_crt {
199#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int) 203#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
200#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int) 204#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
201#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int) 205#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
206#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)
202#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int) 207#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
203#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int) 208#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
204#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int) 209#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
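The new Z90STAT_CEX2ACOUNT ioctl continues the 0x4b..0x4d numbering of the neighbouring per-card counters. A hypothetical userspace query, assuming the usual /dev/z90crypt node and a magic byte of 'z' (both are assumptions of this sketch, not shown in the hunk):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define Z90_IOCTL_MAGIC 'z'     /* assumed */
#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)

int main(void)
{
        int fd = open("/dev/z90crypt", O_RDONLY);
        int count;

        if (fd < 0)
                return 1;
        if (ioctl(fd, Z90STAT_CEX2ACOUNT, &count) == 0)
                printf("CEX2A devices: %d\n", count);
        close(fd);
        return 0;
}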
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
index c215e0889736..d7f7494a0cbe 100644
--- a/drivers/s390/crypto/z90hardware.c
+++ b/drivers/s390/crypto/z90hardware.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90hardware.c 2 * linux/drivers/s390/crypto/z90hardware.c
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -648,6 +648,87 @@ static struct cca_public_sec static_cca_pub_sec = {
648#define RESPONSE_CPRB_SIZE 0x000006B8 648#define RESPONSE_CPRB_SIZE 0x000006B8
649#define RESPONSE_CPRBX_SIZE 0x00000724 649#define RESPONSE_CPRBX_SIZE 0x00000724
650 650
651struct type50_hdr {
652 u8 reserved1;
653 u8 msg_type_code;
654 u16 msg_len;
655 u8 reserved2;
656 u8 ignored;
657 u16 reserved3;
658};
659
660#define TYPE50_TYPE_CODE 0x50
661
662#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
663#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
664#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
665#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
666
667#define TYPE50_MEB1_FMT 0x0001
668#define TYPE50_MEB2_FMT 0x0002
669#define TYPE50_CRB1_FMT 0x0011
670#define TYPE50_CRB2_FMT 0x0012
671
672struct type50_meb1_msg {
673 struct type50_hdr header;
674 u16 keyblock_type;
675 u8 reserved[6];
676 u8 exponent[128];
677 u8 modulus[128];
678 u8 message[128];
679};
680
681struct type50_meb2_msg {
682 struct type50_hdr header;
683 u16 keyblock_type;
684 u8 reserved[6];
685 u8 exponent[256];
686 u8 modulus[256];
687 u8 message[256];
688};
689
690struct type50_crb1_msg {
691 struct type50_hdr header;
692 u16 keyblock_type;
693 u8 reserved[6];
694 u8 p[64];
695 u8 q[64];
696 u8 dp[64];
697 u8 dq[64];
698 u8 u[64];
699 u8 message[128];
700};
701
702struct type50_crb2_msg {
703 struct type50_hdr header;
704 u16 keyblock_type;
705 u8 reserved[6];
706 u8 p[128];
707 u8 q[128];
708 u8 dp[128];
709 u8 dq[128];
710 u8 u[128];
711 u8 message[256];
712};
713
714union type50_msg {
715 struct type50_meb1_msg meb1;
716 struct type50_meb2_msg meb2;
717 struct type50_crb1_msg crb1;
718 struct type50_crb2_msg crb2;
719};
720
721struct type80_hdr {
722 u8 reserved1;
723 u8 type;
724 u16 len;
725 u8 code;
726 u8 reserved2[3];
727 u8 reserved3[8];
728};
729
730#define TYPE80_RSP_CODE 0x80
731
651struct error_hdr { 732struct error_hdr {
652 unsigned char reserved1; 733 unsigned char reserved1;
653 unsigned char type; 734 unsigned char type;
@@ -657,6 +738,7 @@ struct error_hdr {
657}; 738};
658 739
659#define TYPE82_RSP_CODE 0x82 740#define TYPE82_RSP_CODE 0x82
741#define TYPE88_RSP_CODE 0x88
660 742
661#define REP82_ERROR_MACHINE_FAILURE 0x10 743#define REP82_ERROR_MACHINE_FAILURE 0x10
662#define REP82_ERROR_PREEMPT_FAILURE 0x12 744#define REP82_ERROR_PREEMPT_FAILURE 0x12
@@ -679,6 +761,22 @@ struct error_hdr {
679#define REP82_ERROR_PACKET_TRUNCATED 0xA0 761#define REP82_ERROR_PACKET_TRUNCATED 0xA0
680#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 762#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
681 763
764#define REP88_ERROR_MODULE_FAILURE 0x10
765#define REP88_ERROR_MODULE_TIMEOUT 0x11
766#define REP88_ERROR_MODULE_NOTINIT 0x13
767#define REP88_ERROR_MODULE_NOTAVAIL 0x14
768#define REP88_ERROR_MODULE_DISABLED 0x15
769#define REP88_ERROR_MODULE_IN_DIAGN 0x17
770#define REP88_ERROR_FASTPATH_DISABLD 0x19
771#define REP88_ERROR_MESSAGE_TYPE 0x20
772#define REP88_ERROR_MESSAGE_MALFORMD 0x22
773#define REP88_ERROR_MESSAGE_LENGTH 0x23
774#define REP88_ERROR_RESERVED_FIELD 0x24
775#define REP88_ERROR_KEY_TYPE 0x34
776#define REP88_ERROR_INVALID_KEY 0x82
777#define REP88_ERROR_OPERAND 0x84
778#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
779
682#define CALLER_HEADER 12 780#define CALLER_HEADER 12
683 781
684static inline int 782static inline int
@@ -687,7 +785,7 @@ testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
687 int ccode; 785 int ccode;
688 786
689 asm volatile 787 asm volatile
690#ifdef __s390x__ 788#ifdef CONFIG_64BIT
691 (" llgfr 0,%4 \n" 789 (" llgfr 0,%4 \n"
692 " slgr 1,1 \n" 790 " slgr 1,1 \n"
693 " lgr 2,1 \n" 791 " lgr 2,1 \n"
@@ -757,7 +855,7 @@ resetq(int q_nr, struct ap_status_word *stat_p)
757 int ccode; 855 int ccode;
758 856
759 asm volatile 857 asm volatile
760#ifdef __s390x__ 858#ifdef CONFIG_64BIT
761 (" llgfr 0,%2 \n" 859 (" llgfr 0,%2 \n"
762 " lghi 1,1 \n" 860 " lghi 1,1 \n"
763 " sll 1,24 \n" 861 " sll 1,24 \n"
@@ -823,7 +921,7 @@ sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
823 int ccode; 921 int ccode;
824 922
825 asm volatile 923 asm volatile
826#ifdef __s390x__ 924#ifdef CONFIG_64BIT
827 (" lgr 6,%3 \n" 925 (" lgr 6,%3 \n"
828 " llgfr 7,%2 \n" 926 " llgfr 7,%2 \n"
829 " llgt 0,0(6) \n" 927 " llgt 0,0(6) \n"
@@ -902,7 +1000,7 @@ rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
902 int ccode; 1000 int ccode;
903 1001
904 asm volatile 1002 asm volatile
905#ifdef __s390x__ 1003#ifdef CONFIG_64BIT
906 (" llgfr 0,%2 \n" 1004 (" llgfr 0,%2 \n"
907 " lgr 3,%4 \n" 1005 " lgr 3,%4 \n"
908 " lgr 6,%3 \n" 1006 " lgr 6,%3 \n"
@@ -1029,10 +1127,6 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1029 stat = HD_ONLINE; 1127 stat = HD_ONLINE;
1030 *q_depth = t_depth + 1; 1128 *q_depth = t_depth + 1;
1031 switch (t_dev_type) { 1129 switch (t_dev_type) {
1032 case OTHER_HW:
1033 stat = HD_NOT_THERE;
1034 *dev_type = NILDEV;
1035 break;
1036 case PCICA_HW: 1130 case PCICA_HW:
1037 *dev_type = PCICA; 1131 *dev_type = PCICA;
1038 break; 1132 break;
@@ -1045,6 +1139,9 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1045 case CEX2C_HW: 1139 case CEX2C_HW:
1046 *dev_type = CEX2C; 1140 *dev_type = CEX2C;
1047 break; 1141 break;
1142 case CEX2A_HW:
1143 *dev_type = CEX2A;
1144 break;
1048 default: 1145 default:
1049 *dev_type = NILDEV; 1146 *dev_type = NILDEV;
1050 break; 1147 break;
@@ -2029,6 +2126,177 @@ ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
2029 return 0; 2126 return 0;
2030} 2127}
2031 2128
2129static int
2130ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
2131 union type50_msg *z90cMsg_p)
2132{
2133 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
2134 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
2135 union type50_msg *tmp_type50_msg;
2136
2137 mod_len = icaMex_p->inputdatalength;
2138
2139 msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
2140 CALLER_HEADER;
2141
2142 memset(z90cMsg_p, 0, msg_size);
2143
2144 tmp_type50_msg = (union type50_msg *)
2145 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2146
2147 tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
2148
2149 if (mod_len <= 128) {
2150 tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
2151 tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
2152 mod_tgt = tmp_type50_msg->meb1.modulus;
2153 mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
2154 exp_tgt = tmp_type50_msg->meb1.exponent;
2155 exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
2156 inp_tgt = tmp_type50_msg->meb1.message;
2157 inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
2158 } else {
2159 tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
2160 tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
2161 mod_tgt = tmp_type50_msg->meb2.modulus;
2162 mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
2163 exp_tgt = tmp_type50_msg->meb2.exponent;
2164 exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
2165 inp_tgt = tmp_type50_msg->meb2.message;
2166 inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
2167 }
2168
2169 mod_tgt += (mod_tgt_len - mod_len);
2170 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
2171 return SEN_RELEASED;
2172 if (is_empty(mod_tgt, mod_len))
2173 return SEN_USER_ERROR;
2174 exp_tgt += (exp_tgt_len - mod_len);
2175 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
2176 return SEN_RELEASED;
2177 if (is_empty(exp_tgt, mod_len))
2178 return SEN_USER_ERROR;
2179 inp_tgt += (inp_tgt_len - mod_len);
2180 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
2181 return SEN_RELEASED;
2182 if (is_empty(inp_tgt, mod_len))
2183 return SEN_USER_ERROR;
2184
2185 *z90cMsg_l_p = msg_size - CALLER_HEADER;
2186
2187 return 0;
2188}
2189
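ICAMEX_msg_to_type50MEX_msg() right-justifies each operand: a mod_len-byte big-endian value is copied into the tail of its fixed-width message field, leaving leading zero bytes. The idiom in isolation (illustration only; operand value hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char field[128] = { 0 };               /* meb1-sized slot */
        unsigned char exp[3] = { 0x01, 0x00, 0x01 };    /* e = 65537 */
        size_t mod_len = sizeof(exp);

        memcpy(field + sizeof(field) - mod_len, exp, mod_len);
        printf("tail: %02x %02x %02x\n", field[125], field[126], field[127]);
        return 0;
}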
2190static int
2191ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
2192 int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
2193{
2194 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
2195 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
2196 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
2197 temp[8];
2198 union type50_msg *tmp_type50_msg;
2199
2200 mod_len = icaMsg_p->inputdatalength;
2201 short_len = mod_len / 2;
2202 long_len = mod_len / 2 + 8;
2203 long_offset = 0;
2204
2205 if (long_len > 128) {
2206 memset(temp, 0x00, sizeof(temp));
2207 if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
2208 return SEN_RELEASED;
2209 if (!is_empty(temp, 8))
2210 return SEN_NOT_AVAIL;
2211 if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
2212 return SEN_RELEASED;
2213 if (!is_empty(temp, 8))
2214 return SEN_NOT_AVAIL;
2215 if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
2216 return SEN_RELEASED;
2217 if (!is_empty(temp, 8))
2218 return SEN_NOT_AVAIL;
2219 long_offset = long_len - 128;
2220 long_len = 128;
2221 }
2222
2223 tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
2224 CALLER_HEADER;
2225
2226 memset(z90cMsg_p, 0, tmp_size);
2227
2228 tmp_type50_msg = (union type50_msg *)
2229 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2230
2231 tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
2232 if (long_len <= 64) {
2233 tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
2234 tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
2235 p_tgt = tmp_type50_msg->crb1.p;
2236 p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
2237 q_tgt = tmp_type50_msg->crb1.q;
2238 q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
2239 dp_tgt = tmp_type50_msg->crb1.dp;
2240 dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
2241 dq_tgt = tmp_type50_msg->crb1.dq;
2242 dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
2243 u_tgt = tmp_type50_msg->crb1.u;
2244 u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
2245 inp_tgt = tmp_type50_msg->crb1.message;
2246 inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
2247 } else {
2248 tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
2249 tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
2250 p_tgt = tmp_type50_msg->crb2.p;
2251 p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
2252 q_tgt = tmp_type50_msg->crb2.q;
2253 q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
2254 dp_tgt = tmp_type50_msg->crb2.dp;
2255 dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
2256 dq_tgt = tmp_type50_msg->crb2.dq;
2257 dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
2258 u_tgt = tmp_type50_msg->crb2.u;
2259 u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
2260 inp_tgt = tmp_type50_msg->crb2.message;
2261 inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
2262 }
2263
2264 p_tgt += (p_tgt_len - long_len);
2265 if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
2266 return SEN_RELEASED;
2267 if (is_empty(p_tgt, long_len))
2268 return SEN_USER_ERROR;
2269 q_tgt += (q_tgt_len - short_len);
2270 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
2271 return SEN_RELEASED;
2272 if (is_empty(q_tgt, short_len))
2273 return SEN_USER_ERROR;
2274 dp_tgt += (dp_tgt_len - long_len);
2275 if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
2276 return SEN_RELEASED;
2277 if (is_empty(dp_tgt, long_len))
2278 return SEN_USER_ERROR;
2279 dq_tgt += (dq_tgt_len - short_len);
2280 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
2281 return SEN_RELEASED;
2282 if (is_empty(dq_tgt, short_len))
2283 return SEN_USER_ERROR;
2284 u_tgt += (u_tgt_len - long_len);
2285 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
2286 return SEN_RELEASED;
2287 if (is_empty(u_tgt, long_len))
2288 return SEN_USER_ERROR;
2289 inp_tgt += (inp_tgt_len - mod_len);
2290 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
2291 return SEN_RELEASED;
2292 if (is_empty(inp_tgt, mod_len))
2293 return SEN_USER_ERROR;
2294
2295 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2296
2297 return 0;
2298}
2299
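The CRT sizing above gives the "short" operands (q, dq) mod_len/2 bytes and the "long" ones (p, dp, u) eight bytes more; for a 2048-bit modulus the long length exceeds the 128-byte crb2 fields, so the leading eight bytes are checked to be zero and skipped via long_offset. The arithmetic worked through:

#include <stdio.h>

int main(void)
{
        int mod_len = 256;                      /* 2048-bit modulus */
        int short_len = mod_len / 2;            /* 128: q, dq */
        int long_len = mod_len / 2 + 8;         /* 136: p, dp, u */
        int long_offset = 0;

        if (long_len > 128) {                   /* field holds only 128 */
                long_offset = long_len - 128;   /* 8 leading bytes, must be 0 */
                long_len = 128;
        }
        printf("short=%d long=%d skip=%d\n", short_len, long_len, long_offset);
        return 0;
}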
2032int 2300int
2033convert_request(unsigned char *buffer, int func, unsigned short function, 2301convert_request(unsigned char *buffer, int func, unsigned short function,
2034 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p) 2302 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
@@ -2071,6 +2339,16 @@ convert_request(unsigned char *buffer, int func, unsigned short function,
2071 cdx, msg_l_p, (struct type6_msg *) msg_p, 2339 cdx, msg_l_p, (struct type6_msg *) msg_p,
2072 dev_type); 2340 dev_type);
2073 } 2341 }
2342 if (dev_type == CEX2A) {
2343 if (func == ICARSACRT)
2344 return ICACRT_msg_to_type50CRT_msg(
2345 (struct ica_rsa_modexpo_crt *) buffer,
2346 msg_l_p, (union type50_msg *) msg_p);
2347 else
2348 return ICAMEX_msg_to_type50MEX_msg(
2349 (struct ica_rsa_modexpo *) buffer,
2350 msg_l_p, (union type50_msg *) msg_p);
2351 }
2074 2352
2075 return 0; 2353 return 0;
2076} 2354}
@@ -2081,8 +2359,8 @@ unset_ext_bitlens(void)
2081{ 2359{
2082 if (!ext_bitlens_msg_count) { 2360 if (!ext_bitlens_msg_count) {
2083 PRINTK("Unable to use coprocessors for extended bitlengths. " 2361 PRINTK("Unable to use coprocessors for extended bitlengths. "
2084 "Using PCICAs (if present) for extended bitlengths. " 2362 "Using PCICAs/CEX2As (if present) for extended "
2085 "This is not an error.\n"); 2363 "bitlengths. This is not an error.\n");
2086 ext_bitlens_msg_count++; 2364 ext_bitlens_msg_count++;
2087 } 2365 }
2088 ext_bitlens = 0; 2366 ext_bitlens = 0;
@@ -2094,6 +2372,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2094{ 2372{
2095 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; 2373 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2096 struct error_hdr *errh_p = (struct error_hdr *) response; 2374 struct error_hdr *errh_p = (struct error_hdr *) response;
2375 struct type80_hdr *t80h_p = (struct type80_hdr *) response;
2097 struct type84_hdr *t84h_p = (struct type84_hdr *) response; 2376 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2098 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; 2377 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2099 int reply_code, service_rc, service_rs, src_l; 2378 int reply_code, service_rc, service_rs, src_l;
@@ -2108,6 +2387,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2108 src_l = 0; 2387 src_l = 0;
2109 switch (errh_p->type) { 2388 switch (errh_p->type) {
2110 case TYPE82_RSP_CODE: 2389 case TYPE82_RSP_CODE:
2390 case TYPE88_RSP_CODE:
2111 reply_code = errh_p->reply_code; 2391 reply_code = errh_p->reply_code;
2112 src_p = (unsigned char *)errh_p; 2392 src_p = (unsigned char *)errh_p;
2113 PRINTK("Hardware error: Type %02X Message Header: " 2393 PRINTK("Hardware error: Type %02X Message Header: "
@@ -2116,6 +2396,10 @@ convert_response(unsigned char *response, unsigned char *buffer,
2116 src_p[0], src_p[1], src_p[2], src_p[3], 2396 src_p[0], src_p[1], src_p[2], src_p[3],
2117 src_p[4], src_p[5], src_p[6], src_p[7]); 2397 src_p[4], src_p[5], src_p[6], src_p[7]);
2118 break; 2398 break;
2399 case TYPE80_RSP_CODE:
2400 src_l = icaMsg_p->outputdatalength;
2401 src_p = response + (int)t80h_p->len - src_l;
2402 break;
2119 case TYPE84_RSP_CODE: 2403 case TYPE84_RSP_CODE:
2120 src_l = icaMsg_p->outputdatalength; 2404 src_l = icaMsg_p->outputdatalength;
2121 src_p = response + (int)t84h_p->len - src_l; 2405 src_p = response + (int)t84h_p->len - src_l;
@@ -2202,6 +2486,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2202 if (reply_code) 2486 if (reply_code)
2203 switch (reply_code) { 2487 switch (reply_code) {
2204 case REP82_ERROR_OPERAND_INVALID: 2488 case REP82_ERROR_OPERAND_INVALID:
2489 case REP88_ERROR_MESSAGE_MALFORMD:
2205 return REC_OPERAND_INV; 2490 return REC_OPERAND_INV;
2206 case REP82_ERROR_OPERAND_SIZE: 2491 case REP82_ERROR_OPERAND_SIZE:
2207 return REC_OPERAND_SIZE; 2492 return REC_OPERAND_SIZE;
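
The type50 builders above right-justify each CRT operand (np_prime, nq_prime, bp_key, bq_key, u_mult_inv, and the input) into a fixed-width field of the request and reject all-zero values. A minimal user-space sketch of that pattern, with hypothetical helper names (the driver itself pulls the bytes in with copy_from_user() and maps failures to SEN_RELEASED or SEN_USER_ERROR):

/*
 * Sketch only: right-justify an operand into a fixed-width, zero-padded
 * field, then reject it if it is all zeroes (mirrors is_empty()).
 */
#include <stdio.h>
#include <string.h>

static int is_all_zero(const unsigned char *p, size_t len)
{
        while (len--)
                if (*p++)
                        return 0;
        return 1;
}

static int copy_right_justified(unsigned char *tgt, size_t tgt_len,
                                const unsigned char *src, size_t src_len)
{
        if (src_len > tgt_len)
                return -1;                      /* operand too large */
        memset(tgt, 0, tgt_len - src_len);      /* leading zero pad */
        memcpy(tgt + (tgt_len - src_len), src, src_len);
        return is_all_zero(src, src_len) ? -1 : 0;
}

int main(void)
{
        unsigned char field[8];
        const unsigned char prime[3] = { 0x01, 0x02, 0x03 };
        size_t i;

        if (copy_right_justified(field, sizeof(field), prime, sizeof(prime)))
                return 1;
        for (i = 0; i < sizeof(field); i++)
                printf("%02x ", field[i]);      /* 00 00 00 00 00 01 02 03 */
        printf("\n");
        return 0;
}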
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 790fcbb74b43..135ae04e6e75 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -228,7 +228,7 @@ struct device_x {
228 */ 228 */
229struct device { 229struct device {
230 int dev_type; // PCICA, PCICC, PCIXCC_MCL2, 230 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
231 // PCIXCC_MCL3, CEX2C 231 // PCIXCC_MCL3, CEX2C, CEX2A
232 enum devstat dev_stat; // current device status 232 enum devstat dev_stat; // current device status
233 int dev_self_x; // Index in array 233 int dev_self_x; // Index in array
234 int disabled; // Set when device is in error 234 int disabled; // Set when device is in error
@@ -295,26 +295,30 @@ struct caller {
295/** 295/**
296 * Function prototypes from z90hardware.c 296 * Function prototypes from z90hardware.c
297 */ 297 */
298enum hdstat query_online(int, int, int, int *, int *); 298enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
299enum devstat reset_device(int, int, int); 299 int *dev_type);
300enum devstat send_to_AP(int, int, int, unsigned char *); 300enum devstat reset_device(int deviceNr, int cdx, int resetNr);
301enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *); 301enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
302int convert_request(unsigned char *, int, short, int, int, int *, 302enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
303 unsigned char *); 303 unsigned char *resp, unsigned char *psmid);
304int convert_response(unsigned char *, unsigned char *, int *, unsigned char *); 304int convert_request(unsigned char *buffer, int func, unsigned short function,
305 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
306int convert_response(unsigned char *response, unsigned char *buffer,
307 int *respbufflen_p, unsigned char *resp_buff);
305 308
306/** 309/**
307 * Low level function prototypes 310 * Low level function prototypes
308 */ 311 */
309static int create_z90crypt(int *); 312static int create_z90crypt(int *cdx_p);
310static int refresh_z90crypt(int *); 313static int refresh_z90crypt(int *cdx_p);
311static int find_crypto_devices(struct status *); 314static int find_crypto_devices(struct status *deviceMask);
312static int create_crypto_device(int); 315static int create_crypto_device(int index);
313static int destroy_crypto_device(int); 316static int destroy_crypto_device(int index);
314static void destroy_z90crypt(void); 317static void destroy_z90crypt(void);
315static int refresh_index_array(struct status *, struct device_x *); 318static int refresh_index_array(struct status *status_str,
316static int probe_device_type(struct device *); 319 struct device_x *index_array);
317static int probe_PCIXCC_type(struct device *); 320static int probe_device_type(struct device *devPtr);
321static int probe_PCIXCC_type(struct device *devPtr);
318 322
319/** 323/**
320 * proc fs definitions 324 * proc fs definitions
@@ -425,7 +429,7 @@ static struct miscdevice z90crypt_misc_device = {
425MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman" 429MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
426 "and Jochen Roehrig"); 430 "and Jochen Roehrig");
427MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " 431MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
428 "Copyright 2001, 2004 IBM Corporation"); 432 "Copyright 2001, 2005 IBM Corporation");
429MODULE_LICENSE("GPL"); 433MODULE_LICENSE("GPL");
430module_param(domain, int, 0); 434module_param(domain, int, 0);
431MODULE_PARM_DESC(domain, "domain index for device"); 435MODULE_PARM_DESC(domain, "domain index for device");
@@ -860,6 +864,12 @@ get_status_CEX2Ccount(void)
860} 864}
861 865
862static inline int 866static inline int
867get_status_CEX2Acount(void)
868{
869 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
870}
871
872static inline int
863get_status_requestq_count(void) 873get_status_requestq_count(void)
864{ 874{
865 return requestq_count; 875 return requestq_count;
@@ -1008,11 +1018,13 @@ static inline int
1008select_device_type(int *dev_type_p, int bytelength) 1018select_device_type(int *dev_type_p, int bytelength)
1009{ 1019{
1010 static int count = 0; 1020 static int count = 0;
1011 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use; 1021 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1022 index_to_use;
1012 struct status *stat; 1023 struct status *stat;
1013 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && 1024 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1014 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && 1025 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1015 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV)) 1026 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1027 (*dev_type_p != ANYDEV))
1016 return -1; 1028 return -1;
1017 if (*dev_type_p != ANYDEV) { 1029 if (*dev_type_p != ANYDEV) {
1018 stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; 1030 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
@@ -1022,7 +1034,13 @@ select_device_type(int *dev_type_p, int bytelength)
1022 return -1; 1034 return -1;
1023 } 1035 }
1024 1036
1025 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */ 1037 /**
1038 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1039 * speed.
1040 *
1041 * PCICA and CEX2A do NOT co-exist, so at most one of the two
1042 * will be present.
1043 */
1026 stat = &z90crypt.hdware_info->type_mask[PCICA]; 1044 stat = &z90crypt.hdware_info->type_mask[PCICA];
1027 PCICA_avail = stat->st_count - 1045 PCICA_avail = stat->st_count -
1028 (stat->disabled_count + stat->user_disabled_count); 1046 (stat->disabled_count + stat->user_disabled_count);
@@ -1032,29 +1050,38 @@ select_device_type(int *dev_type_p, int bytelength)
1032 stat = &z90crypt.hdware_info->type_mask[CEX2C]; 1050 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1033 CEX2C_avail = stat->st_count - 1051 CEX2C_avail = stat->st_count -
1034 (stat->disabled_count + stat->user_disabled_count); 1052 (stat->disabled_count + stat->user_disabled_count);
1035 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) { 1053 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1054 CEX2A_avail = stat->st_count -
1055 (stat->disabled_count + stat->user_disabled_count);
1056 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1036 /** 1057 /**
1037 * bitlength is a factor, PCICA is the most capable, even with 1058 * bitlength is a factor, PCICA or CEX2A are the most capable,
1038 * the new MCL for PCIXCC. 1059 * even with the new MCL for PCIXCC.
1039 */ 1060 */
1040 if ((bytelength < PCIXCC_MIN_MOD_SIZE) || 1061 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1041 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { 1062 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1042 if (!PCICA_avail) 1063 if (PCICA_avail) {
1043 return -1;
1044 else {
1045 *dev_type_p = PCICA; 1064 *dev_type_p = PCICA;
1046 return 0; 1065 return 0;
1047 } 1066 }
1067 if (CEX2A_avail) {
1068 *dev_type_p = CEX2A;
1069 return 0;
1070 }
1071 return -1;
1048 } 1072 }
1049 1073
1050 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + 1074 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1051 CEX2C_avail); 1075 CEX2C_avail + CEX2A_avail);
1052 if (index_to_use < PCICA_avail) 1076 if (index_to_use < PCICA_avail)
1053 *dev_type_p = PCICA; 1077 *dev_type_p = PCICA;
1054 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) 1078 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1055 *dev_type_p = PCIXCC_MCL3; 1079 *dev_type_p = PCIXCC_MCL3;
1056 else 1080 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1081 CEX2C_avail))
1057 *dev_type_p = CEX2C; 1082 *dev_type_p = CEX2C;
1083 else
1084 *dev_type_p = CEX2A;
1058 count++; 1085 count++;
1059 return 0; 1086 return 0;
1060 } 1087 }
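
The rewritten selection logic above is a cumulative-count round robin: a running counter is reduced modulo the total number of usable devices, and the remainder is walked through the per-type availability pools. A standalone sketch of the same arithmetic (illustration only, not driver code):

/*
 * pick() models select_device_type()'s round robin: counter % total,
 * then map the remainder to a type by subtracting pool sizes.
 */
#include <stdio.h>

static int pick(const int avail[], int ntypes, unsigned int *counter)
{
        int total = 0, idx, i;

        for (i = 0; i < ntypes; i++)
                total += avail[i];
        if (!total)
                return -1;                      /* nothing usable */
        idx = (*counter)++ % total;
        for (i = 0; i < ntypes; i++) {
                if (idx < avail[i])
                        return i;
                idx -= avail[i];
        }
        return -1;                              /* not reached */
}

int main(void)
{
        /* PCICA=2, PCIXCC_MCL3=1, CEX2C=1, CEX2A=0 usable devices */
        const int avail[4] = { 2, 1, 1, 0 };
        unsigned int counter = 0;
        int i;

        for (i = 0; i < 8; i++)
                printf("%d ", pick(avail, 4, &counter));
        printf("\n");                           /* 0 0 1 2 0 0 1 2 */
        return 0;
}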
@@ -1359,7 +1386,7 @@ build_caller(struct work_element *we_p, short function)
1359 1386
1360 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && 1387 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1361 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1388 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1362 (we_p->devtype != CEX2C)) 1389 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1363 return SEN_NOT_AVAIL; 1390 return SEN_NOT_AVAIL;
1364 1391
1365 memcpy(caller_p->caller_id, we_p->caller_id, 1392 memcpy(caller_p->caller_id, we_p->caller_id,
@@ -1428,7 +1455,8 @@ get_crypto_request_buffer(struct work_element *we_p)
1428 1455
1429 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && 1456 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1430 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1457 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1431 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) { 1458 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1459 (we_p->devtype != ANYDEV)) {
1432 PRINTK("invalid device type\n"); 1460 PRINTK("invalid device type\n");
1433 return SEN_USER_ERROR; 1461 return SEN_USER_ERROR;
1434 } 1462 }
@@ -1503,8 +1531,9 @@ get_crypto_request_buffer(struct work_element *we_p)
1503 1531
1504 function = PCI_FUNC_KEY_ENCRYPT; 1532 function = PCI_FUNC_KEY_ENCRYPT;
1505 switch (we_p->devtype) { 1533 switch (we_p->devtype) {
1506 /* PCICA does everything with a simple RSA mod-expo operation */ 1534 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1507 case PCICA: 1535 case PCICA:
1536 case CEX2A:
1508 function = PCI_FUNC_KEY_ENCRYPT; 1537 function = PCI_FUNC_KEY_ENCRYPT;
1509 break; 1538 break;
1510 /** 1539 /**
@@ -1662,7 +1691,8 @@ z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1662 * trigger a fallback to software. 1691 * trigger a fallback to software.
1663 */ 1692 */
1664 case -EINVAL: 1693 case -EINVAL:
1665 if (we_p->devtype != PCICA) 1694 if ((we_p->devtype != PCICA) &&
1695 (we_p->devtype != CEX2A))
1666 rv = -EGETBUFF; 1696 rv = -EGETBUFF;
1667 break; 1697 break;
1668 case -ETIMEOUT: 1698 case -ETIMEOUT:
@@ -1779,6 +1809,12 @@ z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1779 ret = -EFAULT; 1809 ret = -EFAULT;
1780 break; 1810 break;
1781 1811
1812 case Z90STAT_CEX2ACOUNT:
1813 tempstat = get_status_CEX2Acount();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1782 case Z90STAT_REQUESTQ_COUNT: 1818 case Z90STAT_REQUESTQ_COUNT:
1783 tempstat = get_status_requestq_count(); 1819 tempstat = get_status_requestq_count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
@@ -2019,6 +2055,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
2019 get_status_PCIXCCMCL3count()); 2055 get_status_PCIXCCMCL3count());
2020 len += sprintf(resp_buff+len, "CEX2C count: %d\n", 2056 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2021 get_status_CEX2Ccount()); 2057 get_status_CEX2Ccount());
2058 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2059 get_status_CEX2Acount());
2022 len += sprintf(resp_buff+len, "requestq count: %d\n", 2060 len += sprintf(resp_buff+len, "requestq count: %d\n",
2023 get_status_requestq_count()); 2061 get_status_requestq_count());
2024 len += sprintf(resp_buff+len, "pendingq count: %d\n", 2062 len += sprintf(resp_buff+len, "pendingq count: %d\n",
@@ -2026,8 +2064,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
2026 len += sprintf(resp_buff+len, "Total open handles: %d\n\n", 2064 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2027 get_status_totalopen_count()); 2065 get_status_totalopen_count());
2028 len += sprinthx( 2066 len += sprinthx(
2029 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), " 2067 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2030 "4: PCIXCC (MCL3), 5: CEX2C", 2068 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2031 resp_buff+len, 2069 resp_buff+len,
2032 get_status_status_mask(workarea), 2070 get_status_status_mask(workarea),
2033 Z90CRYPT_NUM_APS); 2071 Z90CRYPT_NUM_APS);
@@ -2140,6 +2178,7 @@ z90crypt_status_write(struct file *file, const char __user *buffer,
2140 case '3': // PCIXCC_MCL2 2178 case '3': // PCIXCC_MCL2
2141 case '4': // PCIXCC_MCL3 2179 case '4': // PCIXCC_MCL3
2142 case '5': // CEX2C 2180 case '5': // CEX2C
2181 case '6': // CEX2A
2143 j++; 2182 j++;
2144 break; 2183 break;
2145 case 'd': 2184 case 'd':
@@ -3007,7 +3046,9 @@ create_crypto_device(int index)
3007 z90crypt.hdware_info->device_type_array[index] = 4; 3046 z90crypt.hdware_info->device_type_array[index] = 4;
3008 else if (deviceType == CEX2C) 3047 else if (deviceType == CEX2C)
3009 z90crypt.hdware_info->device_type_array[index] = 5; 3048 z90crypt.hdware_info->device_type_array[index] = 5;
3010 else 3049 else if (deviceType == CEX2A)
3050 z90crypt.hdware_info->device_type_array[index] = 6;
3051 else // No idea how this would happen.
3011 z90crypt.hdware_info->device_type_array[index] = -1; 3052 z90crypt.hdware_info->device_type_array[index] = -1;
3012 } 3053 }
3013 3054
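
User space can query the new counter through the ioctl added above. A possible caller follows; the header path and device node are assumptions based on this driver's usual conventions, not confirmed by the patch:

/*
 * Assumes Z90STAT_CEX2ACOUNT comes from the driver's exported header
 * and that the misc device node is /dev/z90crypt.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/z90crypt.h>       /* assumed location of Z90STAT_CEX2ACOUNT */

int main(void)
{
        int fd, count;

        fd = open("/dev/z90crypt", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, Z90STAT_CEX2ACOUNT, &count) < 0) {
                perror("ioctl");
                close(fd);
                return 1;
        }
        printf("CEX2A devices online: %d\n", count);
        close(fd);
        return 0;
}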
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a7efc394515e..548854754921 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -1,5 +1,5 @@
1menu "S/390 network device drivers" 1menu "S/390 network device drivers"
2 depends on NETDEVICES && ARCH_S390 2 depends on NETDEVICES && S390
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 tristate "Lan Channel Station Interface"
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 6b63d21612ec..e70af7f39946 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1603,7 +1603,7 @@ dumpit(char* buf, int len)
1603 __u32 ct, sw, rm, dup; 1603 __u32 ct, sw, rm, dup;
1604 char *ptr, *rptr; 1604 char *ptr, *rptr;
1605 char tbuf[82], tdup[82]; 1605 char tbuf[82], tdup[82];
1606#if (CONFIG_ARCH_S390X) 1606#if (CONFIG_64BIT)
1607 char addr[22]; 1607 char addr[22];
1608#else 1608#else
1609 char addr[12]; 1609 char addr[12];
@@ -1619,7 +1619,7 @@ dumpit(char* buf, int len)
1619 dup = 0; 1619 dup = 0;
1620 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { 1620 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
1621 if (sw == 0) { 1621 if (sw == 0) {
1622#if (CONFIG_ARCH_S390X) 1622#if (CONFIG_64BIT)
1623 sprintf(addr, "%16.16lX",(unsigned long)rptr); 1623 sprintf(addr, "%16.16lX",(unsigned long)rptr);
1624#else 1624#else
1625 sprintf(addr, "%8.8X",(__u32)rptr); 1625 sprintf(addr, "%8.8X",(__u32)rptr);
@@ -1634,7 +1634,7 @@ dumpit(char* buf, int len)
1634 if (sw == 8) { 1634 if (sw == 8) {
1635 strcat(bhex, " "); 1635 strcat(bhex, " ");
1636 } 1636 }
1637#if (CONFIG_ARCH_S390X) 1637#if (CONFIG_64BIT)
1638 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); 1638 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
1639#else 1639#else
1640 sprintf(tbuf,"%2.2X", (__u32)*ptr); 1640 sprintf(tbuf,"%2.2X", (__u32)*ptr);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 0075894c71db..77dacb465732 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $ 2 * $Id: cu3088.c,v 1.36 2005/10/25 14:37:17 cohuck Exp $
3 * 3 *
4 * CTC / LCS ccw_device driver 4 * CTC / LCS ccw_device driver
5 * 5 *
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/err.h> 28#include <linux/err.h>
29 29
30#include <asm/s390_rdev.h>
30#include <asm/ccwdev.h> 31#include <asm/ccwdev.h>
31#include <asm/ccwgroup.h> 32#include <asm/ccwgroup.h>
32 33
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index df7647c3c100..ea8177392564 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $ 2 * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
3 * 3 *
4 * IUCV network driver 4 * IUCV network driver
5 * 5 *
@@ -29,7 +29,7 @@
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 * 31 *
32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $ 32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
33 * 33 *
34 */ 34 */
35 35
@@ -54,7 +54,7 @@
54#include <asm/s390_ext.h> 54#include <asm/s390_ext.h>
55#include <asm/ebcdic.h> 55#include <asm/ebcdic.h>
56#include <asm/smp.h> 56#include <asm/smp.h>
57#include <asm/ccwdev.h> //for root device stuff 57#include <asm/s390_rdev.h>
58 58
59/* FLAGS: 59/* FLAGS:
60 * All flags are defined in the field IPFLAGS1 of each function 60 * All flags are defined in the field IPFLAGS1 of each function
@@ -355,7 +355,7 @@ do { \
355static void 355static void
356iucv_banner(void) 356iucv_banner(void)
357{ 357{
358 char vbuf[] = "$Revision: 1.45 $"; 358 char vbuf[] = "$Revision: 1.47 $";
359 char *version = vbuf; 359 char *version = vbuf;
360 360
361 if ((version = strchr(version, ':'))) { 361 if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
477 ptr++; 477 ptr++;
478 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) 478 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
479 ptr = iucv_param_pool; 479 ptr = iucv_param_pool;
480 } while (atomic_compare_and_swap(0, 1, &ptr->in_use)); 480 } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
481 hint = ptr - iucv_param_pool; 481 hint = ptr - iucv_param_pool;
482 482
483 memset(&ptr->param, 0, sizeof(ptr->param)); 483 memset(&ptr->param, 0, sizeof(ptr->param));
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index f8f55cc468ba..97f927c01a82 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -65,6 +65,7 @@
65#include <asm/timex.h> 65#include <asm/timex.h>
66#include <asm/semaphore.h> 66#include <asm/semaphore.h>
67#include <asm/uaccess.h> 67#include <asm/uaccess.h>
68#include <asm/s390_rdev.h>
68 69
69#include "qeth.h" 70#include "qeth.h"
70#include "qeth_mpc.h" 71#include "qeth_mpc.h"
@@ -1396,7 +1397,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
1396 channel->ccw.cda = (__u32) __pa(iob->data); 1397 channel->ccw.cda = (__u32) __pa(iob->data);
1397 1398
1398 wait_event(card->wait_q, 1399 wait_event(card->wait_q,
1399 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); 1400 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1400 QETH_DBF_TEXT(setup, 6, "noirqpnd"); 1401 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1401 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1402 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1402 rc = ccw_device_start(channel->ccwdev, 1403 rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1464,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
1463 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); 1464 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1464 1465
1465 wait_event(card->wait_q, 1466 wait_event(card->wait_q,
1466 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); 1467 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1467 QETH_DBF_TEXT(setup, 6, "noirqpnd"); 1468 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1468 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1469 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1469 rc = ccw_device_start(channel->ccwdev, 1470 rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1617,7 @@ qeth_issue_next_read(struct qeth_card *card)
1616 } 1617 }
1617 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); 1618 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1618 wait_event(card->wait_q, 1619 wait_event(card->wait_q,
1619 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0); 1620 atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
1620 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1621 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1621 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, 1622 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1622 (addr_t) iob, 0, 0); 1623 (addr_t) iob, 0, 0);
@@ -1882,7 +1883,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
1882 spin_unlock_irqrestore(&card->lock, flags); 1883 spin_unlock_irqrestore(&card->lock, flags);
1883 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); 1884 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1884 wait_event(card->wait_q, 1885 wait_event(card->wait_q,
1885 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); 1886 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1886 qeth_prepare_control_data(card, len, iob); 1887 qeth_prepare_control_data(card, len, iob);
1887 if (IS_IPA(iob->data)) 1888 if (IS_IPA(iob->data))
1888 timer.expires = jiffies + QETH_IPA_TIMEOUT; 1889 timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1925,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
1924 QETH_DBF_TEXT(trace, 5, "osndctrd"); 1925 QETH_DBF_TEXT(trace, 5, "osndctrd");
1925 1926
1926 wait_event(card->wait_q, 1927 wait_event(card->wait_q,
1927 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); 1928 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1928 qeth_prepare_control_data(card, len, iob); 1929 qeth_prepare_control_data(card, len, iob);
1929 QETH_DBF_TEXT(trace, 6, "osnoirqp"); 1930 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1930 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1931 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4237,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4236 QETH_DBF_TEXT(trace, 6, "dosndpfa"); 4237 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4237 4238
4238 /* spin until we get the queue ... */ 4239 /* spin until we get the queue ... */
4239 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, 4240 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4240 QETH_OUT_Q_LOCKED, 4241 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4241 &queue->state));
4242 /* ... now we've got the queue */ 4242 /* ... now we've got the queue */
4243 index = queue->next_buf_to_fill; 4243 index = queue->next_buf_to_fill;
4244 buffer = &queue->bufs[queue->next_buf_to_fill]; 4244 buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4292,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4292 QETH_DBF_TEXT(trace, 6, "dosndpkt"); 4292 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4293 4293
4294 /* spin until we get the queue ... */ 4294 /* spin until we get the queue ... */
4295 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, 4295 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4296 QETH_OUT_Q_LOCKED, 4296 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4297 &queue->state));
4298 start_index = queue->next_buf_to_fill; 4297 start_index = queue->next_buf_to_fill;
4299 buffer = &queue->bufs[queue->next_buf_to_fill]; 4298 buffer = &queue->bufs[queue->next_buf_to_fill];
4300 /* 4299 /*
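
Every qeth conversion above follows one rule: the old s390-private atomic_compare_and_swap(old, new, v) returned 0 on success, while the generic atomic_cmpxchg(v, old, new) returns the value it found, so success is now tested as "returned value equals old". A C11 user-space model of the two idioms (not kernel code):

/*
 * compare_and_swap() returns 0 iff the swap happened (old idiom);
 * cmpxchg() returns the value observed (new idiom).
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int irq_pending;

static int compare_and_swap(int old, int new, atomic_int *v)
{
        int expected = old;
        return !atomic_compare_exchange_strong(v, &expected, new);
}

static int cmpxchg(atomic_int *v, int old, int new)
{
        int expected = old;
        atomic_compare_exchange_strong(v, &expected, new);
        return expected;        /* the value that was actually found */
}

int main(void)
{
        while (compare_and_swap(0, 1, &irq_pending))
                ;                               /* old kernel idiom */
        atomic_store(&irq_pending, 0);
        while (cmpxchg(&irq_pending, 0, 1) != 0)
                ;                               /* new kernel idiom */
        printf("flag = %d\n", atomic_load(&irq_pending));   /* flag = 1 */
        return 0;
}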
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c
new file mode 100644
index 000000000000..566cc3d185b6
--- /dev/null
+++ b/drivers/s390/s390_rdev.c
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/s390_rdev.c
3 * s390 root device
4 * $Revision: 1.2 $
5 *
6 * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Cornelia Huck (cohuck@de.ibm.com)
9 * Carsten Otte (cotte@de.ibm.com)
10 */
11
12#include <linux/slab.h>
13#include <linux/err.h>
14#include <linux/device.h>
15#include <asm/s390_rdev.h>
16
17static void
18s390_root_dev_release(struct device *dev)
19{
20 kfree(dev);
21}
22
23struct device *
24s390_root_dev_register(const char *name)
25{
26 struct device *dev;
27 int ret;
28
29 if (!strlen(name))
30 return ERR_PTR(-EINVAL);
31 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
32 if (!dev)
33 return ERR_PTR(-ENOMEM);
34 memset(dev, 0, sizeof(struct device));
35 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
36 dev->release = s390_root_dev_release;
37 ret = device_register(dev);
38 if (ret) {
39 kfree(dev);
40 return ERR_PTR(ret);
41 }
42 return dev;
43}
44
45void
46s390_root_dev_unregister(struct device *dev)
47{
48 if (dev)
49 device_unregister(dev);
50}
51
52EXPORT_SYMBOL(s390_root_dev_register);
53EXPORT_SYMBOL(s390_root_dev_unregister);
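
Callers consume the new helper through the ERR_PTR convention, which is how the cu3088, iucv, and qeth changes above use <asm/s390_rdev.h>. A kernel-style sketch of the pattern for a hypothetical "foo" driver (illustration only):

#include <linux/module.h>
#include <linux/err.h>
#include <linux/device.h>
#include <asm/s390_rdev.h>

static struct device *foo_root_dev;

static int __init foo_init(void)
{
        foo_root_dev = s390_root_dev_register("foo");
        if (IS_ERR(foo_root_dev))               /* error encoded in pointer */
                return PTR_ERR(foo_root_dev);   /* -EINVAL, -ENOMEM, ... */
        return 0;
}

static void __exit foo_exit(void)
{
        s390_root_dev_unregister(foo_root_dev);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");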
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4191fd9d4d11..3bf466603512 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -23,7 +23,7 @@
23 23
24static struct semaphore m_sem; 24static struct semaphore m_sem;
25 25
26extern int css_process_crw(int); 26extern int css_process_crw(int, int);
27extern int chsc_process_crw(void); 27extern int chsc_process_crw(void);
28extern int chp_process_crw(int, int); 28extern int chp_process_crw(int, int);
29extern void css_reiterate_subchannels(void); 29extern void css_reiterate_subchannels(void);
@@ -49,9 +49,10 @@ s390_handle_damage(char *msg)
49static int 49static int
50s390_collect_crw_info(void *param) 50s390_collect_crw_info(void *param)
51{ 51{
52 struct crw crw; 52 struct crw crw[2];
53 int ccode, ret, slow; 53 int ccode, ret, slow;
54 struct semaphore *sem; 54 struct semaphore *sem;
55 unsigned int chain;
55 56
56 sem = (struct semaphore *)param; 57 sem = (struct semaphore *)param;
57 /* Set a nice name. */ 58 /* Set a nice name. */
@@ -59,25 +60,50 @@ s390_collect_crw_info(void *param)
59repeat: 60repeat:
60 down_interruptible(sem); 61 down_interruptible(sem);
61 slow = 0; 62 slow = 0;
63 chain = 0;
62 while (1) { 64 while (1) {
63 ccode = stcrw(&crw); 65 if (unlikely(chain > 1)) {
66 struct crw tmp_crw;
67
68 printk(KERN_WARNING"%s: Code does not support more "
69 "than two chained crws; please report to "
70 "linux390@de.ibm.com!\n", __FUNCTION__);
71 ccode = stcrw(&tmp_crw);
72 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
73 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
74 __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
75 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
76 tmp_crw.erc, tmp_crw.rsid);
77 printk(KERN_WARNING"%s: This was crw number %x in the "
78 "chain\n", __FUNCTION__, chain);
79 if (ccode != 0)
80 break;
81 chain = tmp_crw.chn ? chain + 1 : 0;
82 continue;
83 }
84 ccode = stcrw(&crw[chain]);
64 if (ccode != 0) 85 if (ccode != 0)
65 break; 86 break;
66 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " 87 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
67 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 88 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
68 crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc, 89 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
69 crw.erc, crw.rsid); 90 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
91 crw[chain].rsid);
70 /* Check for overflows. */ 92 /* Check for overflows. */
71 if (crw.oflw) { 93 if (crw[chain].oflw) {
72 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 94 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
73 css_reiterate_subchannels(); 95 css_reiterate_subchannels();
96 chain = 0;
74 slow = 1; 97 slow = 1;
75 continue; 98 continue;
76 } 99 }
77 switch (crw.rsc) { 100 switch (crw[chain].rsc) {
78 case CRW_RSC_SCH: 101 case CRW_RSC_SCH:
79 pr_debug("source is subchannel %04X\n", crw.rsid); 102 if (crw[0].chn && !chain)
80 ret = css_process_crw (crw.rsid); 103 break;
104 pr_debug("source is subchannel %04X\n", crw[0].rsid);
105 ret = css_process_crw (crw[0].rsid,
106 chain ? crw[1].rsid : 0);
81 if (ret == -EAGAIN) 107 if (ret == -EAGAIN)
82 slow = 1; 108 slow = 1;
83 break; 109 break;
@@ -85,18 +111,18 @@ repeat:
85 pr_debug("source is monitoring facility\n"); 111 pr_debug("source is monitoring facility\n");
86 break; 112 break;
87 case CRW_RSC_CPATH: 113 case CRW_RSC_CPATH:
88 pr_debug("source is channel path %02X\n", crw.rsid); 114 pr_debug("source is channel path %02X\n", crw[0].rsid);
89 switch (crw.erc) { 115 switch (crw[0].erc) {
90 case CRW_ERC_IPARM: /* Path has come. */ 116 case CRW_ERC_IPARM: /* Path has come. */
91 ret = chp_process_crw(crw.rsid, 1); 117 ret = chp_process_crw(crw[0].rsid, 1);
92 break; 118 break;
93 case CRW_ERC_PERRI: /* Path has gone. */ 119 case CRW_ERC_PERRI: /* Path has gone. */
94 case CRW_ERC_PERRN: 120 case CRW_ERC_PERRN:
95 ret = chp_process_crw(crw.rsid, 0); 121 ret = chp_process_crw(crw[0].rsid, 0);
96 break; 122 break;
97 default: 123 default:
98 pr_debug("Don't know how to handle erc=%x\n", 124 pr_debug("Don't know how to handle erc=%x\n",
99 crw.erc); 125 crw[0].erc);
100 ret = 0; 126 ret = 0;
101 } 127 }
102 if (ret == -EAGAIN) 128 if (ret == -EAGAIN)
@@ -115,6 +141,8 @@ repeat:
115 pr_debug("unknown source\n"); 141 pr_debug("unknown source\n");
116 break; 142 break;
117 } 143 }
144 /* chain is always 0 or 1 here. */
145 chain = crw[chain].chn ? chain + 1 : 0;
118 } 146 }
119 if (slow) 147 if (slow)
120 queue_work(slow_path_wq, &slow_path_work); 148 queue_work(slow_path_wq, &slow_path_work);
@@ -218,7 +246,7 @@ s390_revalidate_registers(struct mci *mci)
218 */ 246 */
219 kill_task = 1; 247 kill_task = 1;
220 248
221#ifndef __s390x__ 249#ifndef CONFIG_64BIT
222 asm volatile("ld 0,0(%0)\n" 250 asm volatile("ld 0,0(%0)\n"
223 "ld 2,8(%0)\n" 251 "ld 2,8(%0)\n"
224 "ld 4,16(%0)\n" 252 "ld 4,16(%0)\n"
@@ -227,7 +255,7 @@ s390_revalidate_registers(struct mci *mci)
227#endif 255#endif
228 256
229 if (MACHINE_HAS_IEEE) { 257 if (MACHINE_HAS_IEEE) {
230#ifdef __s390x__ 258#ifdef CONFIG_64BIT
231 fpt_save_area = &S390_lowcore.floating_pt_save_area; 259 fpt_save_area = &S390_lowcore.floating_pt_save_area;
232 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 260 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
233#else 261#else
@@ -286,7 +314,7 @@ s390_revalidate_registers(struct mci *mci)
286 */ 314 */
287 s390_handle_damage("invalid control registers."); 315 s390_handle_damage("invalid control registers.");
288 else 316 else
289#ifdef __s390x__ 317#ifdef CONFIG_64BIT
290 asm volatile("lctlg 0,15,0(%0)" 318 asm volatile("lctlg 0,15,0(%0)"
291 : : "a" (&S390_lowcore.cregs_save_area)); 319 : : "a" (&S390_lowcore.cregs_save_area));
292#else 320#else
@@ -299,7 +327,7 @@ s390_revalidate_registers(struct mci *mci)
299 * can't write something sensible into that register. 327 * can't write something sensible into that register.
300 */ 328 */
301 329
302#ifdef __s390x__ 330#ifdef CONFIG_64BIT
303 /* 331 /*
304 * See if we can revalidate the TOD programmable register with its 332 * See if we can revalidate the TOD programmable register with its
305 * old contents (should be zero) otherwise set it to zero. 333 * old contents (should be zero) otherwise set it to zero.
@@ -356,7 +384,7 @@ s390_do_machine_check(struct pt_regs *regs)
356 if (mci->b) { 384 if (mci->b) {
357 /* Processing backup -> verify if we can survive this */ 385 /* Processing backup -> verify if we can survive this */
358 u64 z_mcic, o_mcic, t_mcic; 386 u64 z_mcic, o_mcic, t_mcic;
359#ifdef __s390x__ 387#ifdef CONFIG_64BIT
360 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 388 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
361 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 389 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
362 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 390 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
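
The reworked collection loop above buffers up to two CRWs: crw.chn set means another CRW belongs to the same chain, chains longer than two are drained with a warning, and subchannel events hand the chained rsid to css_process_crw(). A simplified user-space model of the chain walk (stcrw() is mocked here and the long-chain warning path is omitted):

/*
 * Model only: chn == 1 means "another CRW follows in this chain";
 * an event is dispatched once the chain closes.
 */
#include <stdio.h>

struct crw { unsigned chn; unsigned rsid; };

static int stcrw(struct crw *c)
{
        static const struct crw pending[] = {
                { .chn = 1, .rsid = 0x0010 },   /* first of a chain */
                { .chn = 0, .rsid = 0x0020 },   /* closes the chain */
                { .chn = 0, .rsid = 0x0030 },   /* standalone */
        };
        static unsigned i;

        if (i >= sizeof(pending) / sizeof(pending[0]))
                return 1;                       /* no CRW pending */
        *c = pending[i++];
        return 0;
}

int main(void)
{
        struct crw crw[2];
        unsigned chain = 0;

        while (!stcrw(&crw[chain])) {
                if (!crw[chain].chn)            /* chain complete */
                        printf("event: rsid=%04X chained-rsid=%04X\n",
                               crw[0].rsid, chain ? crw[1].rsid : 0);
                /* chain stays 0 or 1, exactly as in the driver */
                chain = crw[chain].chn ? chain + 1 : 0;
        }
        return 0;
}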
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 87c2db1bd4f5..66da840c9316 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -106,7 +106,7 @@ static inline int stsi (void *sysinfo,
106{ 106{
107 int cc, retv; 107 int cc, retv;
108 108
109#ifndef CONFIG_ARCH_S390X 109#ifndef CONFIG_64BIT
110 __asm__ __volatile__ ( "lr\t0,%2\n" 110 __asm__ __volatile__ ( "lr\t0,%2\n"
111 "\tlr\t1,%3\n" 111 "\tlr\t1,%3\n"
112 "\tstsi\t0(%4)\n" 112 "\tstsi\t0(%4)\n"
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4c42065dea88..3c606cf8c8ca 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -914,7 +914,7 @@ config SCSI_INIA100
914 914
915config SCSI_PPA 915config SCSI_PPA
916 tristate "IOMEGA parallel port (ppa - older drives)" 916 tristate "IOMEGA parallel port (ppa - older drives)"
917 depends on SCSI && PARPORT 917 depends on SCSI && PARPORT_PC
918 ---help--- 918 ---help---
919 This driver supports older versions of IOMEGA's parallel port ZIP 919 This driver supports older versions of IOMEGA's parallel port ZIP
920 drive (a 100 MB removable media device). 920 drive (a 100 MB removable media device).
@@ -941,7 +941,7 @@ config SCSI_PPA
941 941
942config SCSI_IMM 942config SCSI_IMM
943 tristate "IOMEGA parallel port (imm - newer drives)" 943 tristate "IOMEGA parallel port (imm - newer drives)"
944 depends on SCSI && PARPORT 944 depends on SCSI && PARPORT_PC
945 ---help--- 945 ---help---
946 This driver supports newer versions of IOMEGA's parallel port ZIP 946 This driver supports newer versions of IOMEGA's parallel port ZIP
947 drive (a 100 MB removable media device). 947 drive (a 100 MB removable media device).
@@ -968,7 +968,7 @@ config SCSI_IMM
968 968
969config SCSI_IZIP_EPP16 969config SCSI_IZIP_EPP16
970 bool "ppa/imm option - Use slow (but safe) EPP-16" 970 bool "ppa/imm option - Use slow (but safe) EPP-16"
971 depends on PARPORT && (SCSI_PPA || SCSI_IMM) 971 depends on SCSI_PPA || SCSI_IMM
972 ---help--- 972 ---help---
973 EPP (Enhanced Parallel Port) is a standard for parallel ports which 973 EPP (Enhanced Parallel Port) is a standard for parallel ports which
974 allows them to act as expansion buses that can handle up to 64 974 allows them to act as expansion buses that can handle up to 64
@@ -983,7 +983,7 @@ config SCSI_IZIP_EPP16
983 983
984config SCSI_IZIP_SLOW_CTR 984config SCSI_IZIP_SLOW_CTR
985 bool "ppa/imm option - Assume slow parport control register" 985 bool "ppa/imm option - Assume slow parport control register"
986 depends on PARPORT && (SCSI_PPA || SCSI_IMM) 986 depends on SCSI_PPA || SCSI_IMM
987 help 987 help
988 Some parallel ports are known to have excessive delays between 988 Some parallel ports are known to have excessive delays between
989 changing the parallel port control register and good data being 989 changing the parallel port control register and good data being
@@ -1815,7 +1815,7 @@ config SCSI_SUNESP
1815 1815
1816config ZFCP 1816config ZFCP
1817 tristate "FCP host bus adapter driver for IBM eServer zSeries" 1817 tristate "FCP host bus adapter driver for IBM eServer zSeries"
1818 depends on ARCH_S390 && QDIO && SCSI 1818 depends on S390 && QDIO && SCSI
1819 select SCSI_FC_ATTRS 1819 select SCSI_FC_ATTRS
1820 help 1820 help
1821 If you want to access SCSI devices attached to your IBM eServer 1821 If you want to access SCSI devices attached to your IBM eServer
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 887eaa2a3ebf..d113290b5fc0 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -214,7 +214,6 @@ static struct scsi_host_template ahci_sht = {
214 .dma_boundary = AHCI_DMA_BOUNDARY, 214 .dma_boundary = AHCI_DMA_BOUNDARY,
215 .slave_configure = ata_scsi_slave_config, 215 .slave_configure = ata_scsi_slave_config,
216 .bios_param = ata_std_bios_param, 216 .bios_param = ata_std_bios_param,
217 .ordered_flush = 1,
218}; 217};
219 218
220static const struct ata_port_operations ahci_ops = { 219static const struct ata_port_operations ahci_ops = {
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 0ea27873b9ff..557788ec4eec 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -166,6 +166,8 @@ static struct pci_driver piix_pci_driver = {
166 .id_table = piix_pci_tbl, 166 .id_table = piix_pci_tbl,
167 .probe = piix_init_one, 167 .probe = piix_init_one,
168 .remove = ata_pci_remove_one, 168 .remove = ata_pci_remove_one,
169 .suspend = ata_pci_device_suspend,
170 .resume = ata_pci_device_resume,
169}; 171};
170 172
171static struct scsi_host_template piix_sht = { 173static struct scsi_host_template piix_sht = {
@@ -185,7 +187,8 @@ static struct scsi_host_template piix_sht = {
185 .dma_boundary = ATA_DMA_BOUNDARY, 187 .dma_boundary = ATA_DMA_BOUNDARY,
186 .slave_configure = ata_scsi_slave_config, 188 .slave_configure = ata_scsi_slave_config,
187 .bios_param = ata_std_bios_param, 189 .bios_param = ata_std_bios_param,
188 .ordered_flush = 1, 190 .resume = ata_scsi_device_resume,
191 .suspend = ata_scsi_device_suspend,
189}; 192};
190 193
191static const struct ata_port_operations piix_pata_ops = { 194static const struct ata_port_operations piix_pata_ops = {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5b9c2c5a7f0e..66783c860a19 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
347 shost->cmd_per_lun = sht->cmd_per_lun; 347 shost->cmd_per_lun = sht->cmd_per_lun;
348 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 348 shost->unchecked_isa_dma = sht->unchecked_isa_dma;
349 shost->use_clustering = sht->use_clustering; 349 shost->use_clustering = sht->use_clustering;
350 shost->ordered_flush = sht->ordered_flush;
351 shost->ordered_tag = sht->ordered_tag; 350 shost->ordered_tag = sht->ordered_tag;
352 351
353 /*
354 * hosts/devices that do queueing must support ordered tags
355 */
356 if (shost->can_queue > 1 && shost->ordered_flush) {
357 printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
358 shost->ordered_flush = 0;
359 }
360
361 if (sht->max_host_blocked) 352 if (sht->max_host_blocked)
362 shost->max_host_blocked = sht->max_host_blocked; 353 shost->max_host_blocked = sht->max_host_blocked;
363 else 354 else
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 4cb1f3ed9100..3c688ef54660 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -1046,7 +1046,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
1046 1046
1047 /* kill current request */ 1047 /* kill current request */
1048 blkdev_dequeue_request(req); 1048 blkdev_dequeue_request(req);
1049 end_that_request_last(req); 1049 end_that_request_last(req, 0);
1050 if (req->flags & REQ_SENSE) 1050 if (req->flags & REQ_SENSE)
1051 kfree(scsi->pc->buffer); 1051 kfree(scsi->pc->buffer);
1052 kfree(scsi->pc); 1052 kfree(scsi->pc);
@@ -1056,7 +1056,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
1056 /* now nuke the drive queue */ 1056 /* now nuke the drive queue */
1057 while ((req = elv_next_request(drive->queue))) { 1057 while ((req = elv_next_request(drive->queue))) {
1058 blkdev_dequeue_request(req); 1058 blkdev_dequeue_request(req);
1059 end_that_request_last(req); 1059 end_that_request_last(req, 0);
1060 } 1060 }
1061 1061
1062 HWGROUP(drive)->rq = NULL; 1062 HWGROUP(drive)->rq = NULL;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 9ea102587914..f55b9b3f7b37 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -562,16 +562,28 @@ static const u8 ata_rw_cmds[] = {
562 ATA_CMD_WRITE_MULTI, 562 ATA_CMD_WRITE_MULTI,
563 ATA_CMD_READ_MULTI_EXT, 563 ATA_CMD_READ_MULTI_EXT,
564 ATA_CMD_WRITE_MULTI_EXT, 564 ATA_CMD_WRITE_MULTI_EXT,
565 0,
566 0,
567 0,
568 ATA_CMD_WRITE_MULTI_FUA_EXT,
565 /* pio */ 569 /* pio */
566 ATA_CMD_PIO_READ, 570 ATA_CMD_PIO_READ,
567 ATA_CMD_PIO_WRITE, 571 ATA_CMD_PIO_WRITE,
568 ATA_CMD_PIO_READ_EXT, 572 ATA_CMD_PIO_READ_EXT,
569 ATA_CMD_PIO_WRITE_EXT, 573 ATA_CMD_PIO_WRITE_EXT,
574 0,
575 0,
576 0,
577 0,
570 /* dma */ 578 /* dma */
571 ATA_CMD_READ, 579 ATA_CMD_READ,
572 ATA_CMD_WRITE, 580 ATA_CMD_WRITE,
573 ATA_CMD_READ_EXT, 581 ATA_CMD_READ_EXT,
574 ATA_CMD_WRITE_EXT 582 ATA_CMD_WRITE_EXT,
583 0,
584 0,
585 0,
586 ATA_CMD_WRITE_FUA_EXT
575}; 587};
576 588
577/** 589/**
@@ -584,25 +596,32 @@ static const u8 ata_rw_cmds[] = {
584 * LOCKING: 596 * LOCKING:
585 * caller. 597 * caller.
586 */ 598 */
587void ata_rwcmd_protocol(struct ata_queued_cmd *qc) 599int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
588{ 600{
589 struct ata_taskfile *tf = &qc->tf; 601 struct ata_taskfile *tf = &qc->tf;
590 struct ata_device *dev = qc->dev; 602 struct ata_device *dev = qc->dev;
603 u8 cmd;
591 604
592 int index, lba48, write; 605 int index, fua, lba48, write;
593 606
607 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
594 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 608 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
595 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 609 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
596 610
597 if (dev->flags & ATA_DFLAG_PIO) { 611 if (dev->flags & ATA_DFLAG_PIO) {
598 tf->protocol = ATA_PROT_PIO; 612 tf->protocol = ATA_PROT_PIO;
599 index = dev->multi_count ? 0 : 4; 613 index = dev->multi_count ? 0 : 8;
600 } else { 614 } else {
601 tf->protocol = ATA_PROT_DMA; 615 tf->protocol = ATA_PROT_DMA;
602 index = 8; 616 index = 16;
603 } 617 }
604 618
605 tf->command = ata_rw_cmds[index + lba48 + write]; 619 cmd = ata_rw_cmds[index + fua + lba48 + write];
620 if (cmd) {
621 tf->command = cmd;
622 return 0;
623 }
624 return -1;
606} 625}
607 626
608static const char * const xfer_mode_str[] = { 627static const char * const xfer_mode_str[] = {
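
The extended opcode table above is indexed as base (0 for multi, 8 for PIO, 16 for DMA) plus 4 for FUA, 2 for LBA48, and 1 for write; zero slots mark combinations with no ATA opcode, which is why ata_rwcmd_protocol() can now fail. A standalone illustration (only the two FUA opcodes are filled in here):

#include <stdio.h>

enum {
        ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
        ATA_CMD_WRITE_FUA_EXT       = 0x3D,
};

static const unsigned char ata_rw_cmds[24] = {
        [7]  = ATA_CMD_WRITE_MULTI_FUA_EXT, /* multi + FUA + LBA48 + write */
        /* PIO slots 8..15 have no FUA opcodes and stay 0 */
        [23] = ATA_CMD_WRITE_FUA_EXT,       /* DMA + FUA + LBA48 + write */
};

/* base: 0 = multi, 8 = PIO, 16 = DMA, as in ata_rwcmd_protocol() */
static int rw_cmd(int base, int fua, int lba48, int write)
{
        int index = base + (fua ? 4 : 0) + (lba48 ? 2 : 0) + write;

        return ata_rw_cmds[index] ? ata_rw_cmds[index] : -1;
}

int main(void)
{
        printf("DMA FUA write: %#x\n", rw_cmd(16, 1, 1, 1));    /* 0x3d */
        printf("PIO FUA write: %d\n",  rw_cmd(8, 1, 1, 1));     /* -1 */
        return 0;
}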
@@ -4154,6 +4173,96 @@ err_out:
4154 * Inherited from caller. 4173 * Inherited from caller.
4155 */ 4174 */
4156 4175
4176/*
4177 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4178 * without filling any other registers
4179 */
4180static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4181 u8 cmd)
4182{
4183 struct ata_taskfile tf;
4184 int err;
4185
4186 ata_tf_init(ap, &tf, dev->devno);
4187
4188 tf.command = cmd;
4189 tf.flags |= ATA_TFLAG_DEVICE;
4190 tf.protocol = ATA_PROT_NODATA;
4191
4192 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4193 if (err)
4194 printk(KERN_ERR "%s: ata command failed: %d\n",
4195 __FUNCTION__, err);
4196
4197 return err;
4198}
4199
4200static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4201{
4202 u8 cmd;
4203
4204 if (!ata_try_flush_cache(dev))
4205 return 0;
4206
4207 if (ata_id_has_flush_ext(dev->id))
4208 cmd = ATA_CMD_FLUSH_EXT;
4209 else
4210 cmd = ATA_CMD_FLUSH;
4211
4212 return ata_do_simple_cmd(ap, dev, cmd);
4213}
4214
4215static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4216{
4217 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4218}
4219
4220static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4221{
4222 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4223}
4224
4225/**
4226 * ata_device_resume - wake up a previously suspended device
4227 *
4228 * Kick the drive back into action by sending it an idle immediate
4229 * command and making sure its transfer mode matches between drive
4230 * and host.
4231 *
4232 */
4233int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4234{
4235 if (ap->flags & ATA_FLAG_SUSPENDED) {
4236 ap->flags &= ~ATA_FLAG_SUSPENDED;
4237 ata_set_mode(ap);
4238 }
4239 if (!ata_dev_present(dev))
4240 return 0;
4241 if (dev->class == ATA_DEV_ATA)
4242 ata_start_drive(ap, dev);
4243
4244 return 0;
4245}
4246
4247/**
4248 * ata_device_suspend - prepare a device for suspend
4249 *
4250 * Flush the cache on the drive, if appropriate, then issue a
4251 * standbynow command.
4252 *
4253 */
4254int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4255{
4256 if (!ata_dev_present(dev))
4257 return 0;
4258 if (dev->class == ATA_DEV_ATA)
4259 ata_flush_cache(ap, dev);
4260
4261 ata_standby_drive(ap, dev);
4262 ap->flags |= ATA_FLAG_SUSPENDED;
4263 return 0;
4264}
4265
4157int ata_port_start (struct ata_port *ap) 4266int ata_port_start (struct ata_port *ap)
4158{ 4267{
4159 struct device *dev = ap->host_set->dev; 4268 struct device *dev = ap->host_set->dev;
@@ -4902,6 +5011,23 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4902 5011
4903 return (tmp == bits->val) ? 1 : 0; 5012 return (tmp == bits->val) ? 1 : 0;
4904} 5013}
5014
5015int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5016{
5017 pci_save_state(pdev);
5018 pci_disable_device(pdev);
5019 pci_set_power_state(pdev, PCI_D3hot);
5020 return 0;
5021}
5022
5023int ata_pci_device_resume(struct pci_dev *pdev)
5024{
5025 pci_set_power_state(pdev, PCI_D0);
5026 pci_restore_state(pdev);
5027 pci_enable_device(pdev);
5028 pci_set_master(pdev);
5029 return 0;
5030}
4905#endif /* CONFIG_PCI */ 5031#endif /* CONFIG_PCI */
4906 5032
4907 5033
@@ -5005,4 +5131,11 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5005EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 5131EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5006EXPORT_SYMBOL_GPL(ata_pci_init_one); 5132EXPORT_SYMBOL_GPL(ata_pci_init_one);
5007EXPORT_SYMBOL_GPL(ata_pci_remove_one); 5133EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5134EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5135EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5008#endif /* CONFIG_PCI */ 5136#endif /* CONFIG_PCI */
5137
5138EXPORT_SYMBOL_GPL(ata_device_suspend);
5139EXPORT_SYMBOL_GPL(ata_device_resume);
5140EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5141EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
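
The new PCI helpers encode the usual ordering: save config space before the device drops to D3hot (where it would be lost), and on resume bring the device back to D0 before replaying the saved state. A sketch of a generic PCI driver (hypothetical "foo" device) following the same sequence:

#include <linux/pci.h>

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        /* quiesce the device first: stop DMA, mask interrupts, ... */
        pci_save_state(pdev);           /* config space is lost in D3hot */
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);      /* power up first */
        pci_restore_state(pdev);                /* then replay config space */
        pci_enable_device(pdev);
        pci_set_master(pdev);                   /* re-enable bus mastering */
        /* ... then reinitialize the device */
        return 0;
}

static struct pci_driver foo_driver = {
        .name    = "foo",
        .suspend = foo_suspend,
        .resume  = foo_resume,
        /* .probe, .remove, .id_table elided */
};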
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index e0439be4b573..cfbceb504718 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -396,6 +396,22 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
396 } 396 }
397} 397}
398 398
399int ata_scsi_device_resume(struct scsi_device *sdev)
400{
401 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
402 struct ata_device *dev = &ap->device[sdev->id];
403
404 return ata_device_resume(ap, dev);
405}
406
407int ata_scsi_device_suspend(struct scsi_device *sdev)
408{
409 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
410 struct ata_device *dev = &ap->device[sdev->id];
411
412 return ata_device_suspend(ap, dev);
413}
414
399/** 415/**
400 * ata_to_sense_error - convert ATA error to SCSI error 416 * ata_to_sense_error - convert ATA error to SCSI error
401 * @id: ATA device number 417 * @id: ATA device number
@@ -1080,11 +1096,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1080 scsicmd[0] == WRITE_16) 1096 scsicmd[0] == WRITE_16)
1081 tf->flags |= ATA_TFLAG_WRITE; 1097 tf->flags |= ATA_TFLAG_WRITE;
1082 1098
1083 /* Calculate the SCSI LBA and transfer length. */ 1099 /* Calculate the SCSI LBA, transfer length and FUA. */
1084 switch (scsicmd[0]) { 1100 switch (scsicmd[0]) {
1085 case READ_10: 1101 case READ_10:
1086 case WRITE_10: 1102 case WRITE_10:
1087 scsi_10_lba_len(scsicmd, &block, &n_block); 1103 scsi_10_lba_len(scsicmd, &block, &n_block);
1104 if (unlikely(scsicmd[1] & (1 << 3)))
1105 tf->flags |= ATA_TFLAG_FUA;
1088 break; 1106 break;
1089 case READ_6: 1107 case READ_6:
1090 case WRITE_6: 1108 case WRITE_6:
@@ -1099,6 +1117,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1099 case READ_16: 1117 case READ_16:
1100 case WRITE_16: 1118 case WRITE_16:
1101 scsi_16_lba_len(scsicmd, &block, &n_block); 1119 scsi_16_lba_len(scsicmd, &block, &n_block);
1120 if (unlikely(scsicmd[1] & (1 << 3)))
1121 tf->flags |= ATA_TFLAG_FUA;
1102 break; 1122 break;
1103 default: 1123 default:
1104 DPRINTK("no-byte command\n"); 1124 DPRINTK("no-byte command\n");
@@ -1142,7 +1162,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1142 tf->device |= (block >> 24) & 0xf; 1162 tf->device |= (block >> 24) & 0xf;
1143 } 1163 }
1144 1164
1145 ata_rwcmd_protocol(qc); 1165 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld;
1146 1167
1147 qc->nsect = n_block; 1168 qc->nsect = n_block;
1148 tf->nsect = n_block & 0xff; 1169 tf->nsect = n_block & 0xff;
@@ -1160,7 +1181,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1160 if ((block >> 28) || (n_block > 256)) 1181 if ((block >> 28) || (n_block > 256))
1161 goto out_of_range; 1182 goto out_of_range;
1162 1183
1163 ata_rwcmd_protocol(qc); 1184 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1185 goto invalid_fld;
1164 1186
1165 /* Convert LBA to CHS */ 1187 /* Convert LBA to CHS */
1166 track = (u32)block / dev->sectors; 1188 track = (u32)block / dev->sectors;
@@ -1695,6 +1717,7 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1695unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, 1717unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1696 unsigned int buflen) 1718 unsigned int buflen)
1697{ 1719{
1720 struct ata_device *dev = args->dev;
1698 u8 *scsicmd = args->cmd->cmnd, *p, *last; 1721 u8 *scsicmd = args->cmd->cmnd, *p, *last;
1699 const u8 sat_blk_desc[] = { 1722 const u8 sat_blk_desc[] = {
1700 0, 0, 0, 0, /* number of blocks: sat unspecified */ 1723 0, 0, 0, 0, /* number of blocks: sat unspecified */
@@ -1703,6 +1726,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1703 }; 1726 };
1704 u8 pg, spg; 1727 u8 pg, spg;
1705 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; 1728 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
1729 u8 dpofua;
1706 1730
1707 VPRINTK("ENTER\n"); 1731 VPRINTK("ENTER\n");
1708 1732
@@ -1771,9 +1795,17 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1771 1795
1772 if (minlen < 1) 1796 if (minlen < 1)
1773 return 0; 1797 return 0;
1798
1799 dpofua = 0;
1800 if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
1801 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
1802 dpofua = 1 << 4;
1803
1774 if (six_byte) { 1804 if (six_byte) {
1775 output_len--; 1805 output_len--;
1776 rbuf[0] = output_len; 1806 rbuf[0] = output_len;
1807 if (minlen > 2)
1808 rbuf[2] |= dpofua;
1777 if (ebd) { 1809 if (ebd) {
1778 if (minlen > 3) 1810 if (minlen > 3)
1779 rbuf[3] = sizeof(sat_blk_desc); 1811 rbuf[3] = sizeof(sat_blk_desc);
@@ -1786,6 +1818,8 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1786 rbuf[0] = output_len >> 8; 1818 rbuf[0] = output_len >> 8;
1787 if (minlen > 1) 1819 if (minlen > 1)
1788 rbuf[1] = output_len; 1820 rbuf[1] = output_len;
1821 if (minlen > 3)
1822 rbuf[3] |= dpofua;
1789 if (ebd) { 1823 if (ebd) {
1790 if (minlen > 7) 1824 if (minlen > 7)
1791 rbuf[7] = sizeof(sat_blk_desc); 1825 rbuf[7] = sizeof(sat_blk_desc);
@@ -2446,7 +2480,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2446 if (xlat_func) 2480 if (xlat_func)
2447 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2481 ata_scsi_translate(ap, dev, cmd, done, xlat_func);
2448 else 2482 else
2449 ata_scsi_simulate(dev->id, cmd, done); 2483 ata_scsi_simulate(ap, dev, cmd, done);
2450 } else 2484 } else
2451 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2485 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
2452 2486
@@ -2469,14 +2503,16 @@ out_unlock:
2469 * spin_lock_irqsave(host_set lock) 2503 * spin_lock_irqsave(host_set lock)
2470 */ 2504 */
2471 2505
2472void ata_scsi_simulate(u16 *id, 2506void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
2473 struct scsi_cmnd *cmd, 2507 struct scsi_cmnd *cmd,
2474 void (*done)(struct scsi_cmnd *)) 2508 void (*done)(struct scsi_cmnd *))
2475{ 2509{
2476 struct ata_scsi_args args; 2510 struct ata_scsi_args args;
2477 const u8 *scsicmd = cmd->cmnd; 2511 const u8 *scsicmd = cmd->cmnd;
2478 2512
2479 args.id = id; 2513 args.ap = ap;
2514 args.dev = dev;
2515 args.id = dev->id;
2480 args.cmd = cmd; 2516 args.cmd = cmd;
2481 args.done = done; 2517 args.done = done;
2482 2518
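
Two bits carry the FUA plumbing above: the initiator requests it through bit 3 of CDB byte 1 in READ/WRITE(10) and (16), and the emulated MODE SENSE advertises support through DPOFUA, bit 4 of the device-specific byte in the mode parameter header. A small standalone decoder (illustration only):

#include <stdio.h>

#define WRITE_10 0x2A

static int cdb_fua(const unsigned char *cdb)
{
        return (cdb[1] >> 3) & 1;       /* FUA bit of READ/WRITE(10)/(16) */
}

int main(void)
{
        /* WRITE(10), FUA set, LBA 0x1000, 8 blocks */
        const unsigned char cdb[10] = {
                WRITE_10, 1 << 3, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00
        };
        const unsigned char dev_specific = 1 << 4;      /* DPOFUA bit */

        printf("FUA requested: %d\n", cdb_fua(cdb));                /* 1 */
        printf("DPOFUA advertised: %d\n", (dev_specific >> 4) & 1); /* 1 */
        return 0;
}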
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 251e53bdc6e0..e03ce48b7b4b 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -32,6 +32,8 @@
32#define DRV_VERSION "1.20" /* must be exactly four chars */ 32#define DRV_VERSION "1.20" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev;
35 u16 *id; 37 u16 *id;
36 struct scsi_cmnd *cmd; 38 struct scsi_cmnd *cmd;
37 void (*done)(struct scsi_cmnd *); 39 void (*done)(struct scsi_cmnd *);
@@ -41,7 +43,7 @@ struct ata_scsi_args {
41extern int atapi_enabled; 43extern int atapi_enabled;
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 44extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 45 struct ata_device *dev);
44extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
45extern void ata_qc_free(struct ata_queued_cmd *qc); 47extern void ata_qc_free(struct ata_queued_cmd *qc);
46extern int ata_qc_issue(struct ata_queued_cmd *qc); 48extern int ata_qc_issue(struct ata_queued_cmd *qc);
47extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 49extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index b2bf16a9bf4b..cd54244058b5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -374,7 +374,6 @@ static struct scsi_host_template mv_sht = {
374 .dma_boundary = MV_DMA_BOUNDARY, 374 .dma_boundary = MV_DMA_BOUNDARY,
375 .slave_configure = ata_scsi_slave_config, 375 .slave_configure = ata_scsi_slave_config,
376 .bios_param = ata_std_bios_param, 376 .bios_param = ata_std_bios_param,
377 .ordered_flush = 1,
378}; 377};
379 378
380static const struct ata_port_operations mv5_ops = { 379static const struct ata_port_operations mv5_ops = {
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 4954896dfdb9..c0cf52cb975a 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -235,7 +235,6 @@ static struct scsi_host_template nv_sht = {
235 .dma_boundary = ATA_DMA_BOUNDARY, 235 .dma_boundary = ATA_DMA_BOUNDARY,
236 .slave_configure = ata_scsi_slave_config, 236 .slave_configure = ata_scsi_slave_config,
237 .bios_param = ata_std_bios_param, 237 .bios_param = ata_std_bios_param,
238 .ordered_flush = 1,
239}; 238};
240 239
241static const struct ata_port_operations nv_ops = { 240static const struct ata_port_operations nv_ops = {
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index da7fa04b8a73..3d1ea09a06a1 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -114,7 +114,6 @@ static struct scsi_host_template pdc_ata_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations pdc_sata_ops = {
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index d2053487c73b..b017f85e6d6a 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -147,7 +147,6 @@ static struct scsi_host_template sil_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations sil_ops = {
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index a0ad3ed2200a..923130185a9e 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -292,7 +292,6 @@ static struct scsi_host_template sil24_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1, /* NCQ not supported yet */
 };
 
 static const struct ata_port_operations sil24_ops = {
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 32e12620b162..2df8c5632ac3 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -99,7 +99,6 @@ static struct scsi_host_template sis_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations sis_ops = {
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 6e7f7c83a75a..668373590aa4 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -303,7 +303,6 @@ static struct scsi_host_template k2_sata_sht = {
 	.proc_info		= k2_sata_proc_info,
 #endif
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 94b253b80da8..bc87c16c80d2 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -194,7 +194,6 @@ static struct scsi_host_template pdc_sata_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations pdc_20621_ops = {
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index b2422a0f25c8..9635ca700977 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -87,7 +87,6 @@ static struct scsi_host_template uli_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations uli_ops = {
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index c76215692da2..6d5b0a794cfd 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -106,7 +106,6 @@ static struct scsi_host_template svia_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 static const struct ata_port_operations svia_sata_ops = {
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index fcfa486965b4..2e2c3b7acb0c 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -235,7 +235,6 @@ static struct scsi_host_template vsc_sata_sht = {
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
 	.bios_param		= ata_std_bios_param,
-	.ordered_flush		= 1,
 };
 
 
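That is the last of eleven identical deletions: every SATA host template drops its .ordered_flush flag. Barrier/flush handling moves out of the SCSI host template and into the block layer's per-queue ordered modes, so it is no longer a host property at all; the disk driver configures it on the request queue instead, as the sd.c hunk further down shows:

    /* Taken from the sd.c hunk later in this diff: ordering is now a
     * per-queue setting chosen by the disk driver, not a host flag. */
    blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);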
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a7f3f0c84db7..ba93d6e66d48 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -308,7 +308,7 @@ struct scsi_io_context {
 
 static kmem_cache_t *scsi_io_context_cache;
 
-static void scsi_end_async(struct request *req)
+static void scsi_end_async(struct request *req, int uptodate)
 {
 	struct scsi_io_context *sioc = req->end_io_data;
 
@@ -791,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (blk_rq_tagged(req))
 		blk_queue_end_tag(q, req);
-	end_that_request_last(req);
+	end_that_request_last(req, uptodate);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	/*
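end_that_request_last() now takes the completion status explicitly instead of recovering it from the request, and end_io callbacks such as scsi_end_async() above grow the matching int parameter. A sketch of the classic two-step completion pattern under the new signature; my_complete_request() is hypothetical, and end_that_request_last() must be called with the queue lock held:

    static void my_complete_request(struct request *req, int uptodate)
    {
        /* uptodate > 0: success; uptodate <= 0: error or partial failure. */
        if (!end_that_request_first(req, uptodate, req->hard_nr_sectors))
            end_that_request_last(req, uptodate);    /* queue lock held */
    }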
@@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
-	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
-		return;
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the
@@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 		return BLKPREP_KILL;
 }
 
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_driver *drv;
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
-		if (drv->prepare_flush)
-			return drv->prepare_flush(q, rq);
-	}
-
-	return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct request *flush_rq = rq->end_io_data;
-	struct scsi_driver *drv;
-
-	if (flush_rq->errors) {
-		printk("scsi: barrier error, disabling flush support\n");
-		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-	}
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-		drv->end_flush(q, rq);
-	}
-}
-
 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 			sector_t *error_sector)
 {
@@ -1703,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
 
-	/*
-	 * ordered tags are superior to flush ordering
-	 */
-	if (shost->ordered_tag)
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
-	else if (shost->ordered_flush) {
-		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
-		q->prepare_flush_fn = scsi_prepare_flush_fn;
-		q->end_flush_fn = scsi_end_flush_fn;
-	}
-
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 15842b1f0f4a..ea7f3a433572 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -263,9 +263,40 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
 	return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
 }
 
+static int scsi_bus_suspend(struct device * dev, pm_message_t state)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_host_template *sht = sdev->host->hostt;
+	int err;
+
+	err = scsi_device_quiesce(sdev);
+	if (err)
+		return err;
+
+	if (sht->suspend)
+		err = sht->suspend(sdev);
+
+	return err;
+}
+
+static int scsi_bus_resume(struct device * dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_host_template *sht = sdev->host->hostt;
+	int err = 0;
+
+	if (sht->resume)
+		err = sht->resume(sdev);
+
+	scsi_device_resume(sdev);
+	return err;
+}
+
 struct bus_type scsi_bus_type = {
 	.name		= "scsi",
 	.match		= scsi_bus_match,
+	.suspend	= scsi_bus_suspend,
+	.resume		= scsi_bus_resume,
 };
 
 int scsi_sysfs_register(void)
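The scsi bus gains power-management entry points: suspend quiesces the device first and then offers it to the host template's optional hook; resume is the mirror image, with scsi_device_resume() running after the hook. A sketch of a low-level driver opting in; my_suspend()/my_resume() and the template are hypothetical, the hook shapes simply mirror how scsi_bus_suspend()/scsi_bus_resume() call them above:

    static int my_suspend(struct scsi_device *sdev)
    {
        /* The device is already quiesced by scsi_device_quiesce(). */
        return 0;    /* nonzero fails the suspend */
    }

    static int my_resume(struct scsi_device *sdev)
    {
        /* Wake the hardware; scsi_device_resume() runs afterwards. */
        return 0;
    }

    static struct scsi_host_template my_sht = {
        .name     = "example_hba",
        .suspend  = my_suspend,
        .resume   = my_resume,
        /* ... usual template fields ... */
    };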
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d3ad7d1b779..32d4d8d7b9f3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -102,6 +102,7 @@ struct scsi_disk {
 	u8		write_prot;
 	unsigned	WCE : 1;	/* state of disk WCE bit */
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
+	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
 };
 
 static DEFINE_IDR(sd_index_idr);
@@ -121,8 +122,7 @@ static void sd_shutdown(struct device *dev);
 static void sd_rescan(struct device *);
 static int sd_init_command(struct scsi_cmnd *);
 static int sd_issue_flush(struct device *, sector_t *);
-static void sd_end_flush(request_queue_t *, struct request *);
-static int sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(request_queue_t *, struct request *);
 static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
 			     unsigned char *buffer);
 
@@ -137,8 +137,6 @@ static struct scsi_driver sd_template = {
 	.rescan			= sd_rescan,
 	.init_command		= sd_init_command,
 	.issue_flush		= sd_issue_flush,
-	.prepare_flush		= sd_prepare_flush,
-	.end_flush		= sd_end_flush,
 };
 
 /*
@@ -346,6 +344,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
 
 	if (block > 0xffffffff) {
 		SCpnt->cmnd[0] += READ_16 - READ_6;
+		SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
 		SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
 		SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
 		SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@@ -365,6 +364,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
 			this_count = 0xffff;
 
 		SCpnt->cmnd[0] += READ_10 - READ_6;
+		SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
 		SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
 		SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
 		SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@@ -373,6 +373,17 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
 		SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
 		SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
 	} else {
+		if (unlikely(blk_fua_rq(rq))) {
+			/*
+			 * This happens only if this drive failed
+			 * 10byte rw command with ILLEGAL_REQUEST
+			 * during operation and thus turned off
+			 * use_10_for_rw.
+			 */
+			printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n");
+			return 0;
+		}
+
 		SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 		SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
 		SCpnt->cmnd[3] = (unsigned char) block & 0xff;
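The three hunks above thread the block layer's FUA flag into the CDB: bit 3 (0x08) of byte 1 is the FUA bit in both READ/WRITE(10) and READ/WRITE(16), while the 6-byte CDB has no such bit, hence the error return. Reduced to its essence (identifiers from the hunks):

    /* FUA = "force unit access": this write bypasses the drive cache. */
    if (blk_fua_rq(rq))
        SCpnt->cmnd[1] |= 0x08;    /* only expressible in 10/16-byte CDBs */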
@@ -729,42 +740,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
 	return ret;
 }
 
-static void sd_end_flush(request_queue_t *q, struct request *flush_rq)
-{
-	struct request *rq = flush_rq->end_io_data;
-	struct scsi_cmnd *cmd = rq->special;
-	unsigned int bytes = rq->hard_nr_sectors << 9;
-
-	if (!flush_rq->errors) {
-		spin_unlock(q->queue_lock);
-		scsi_io_completion(cmd, bytes, 0);
-		spin_lock(q->queue_lock);
-	} else if (blk_barrier_postflush(rq)) {
-		spin_unlock(q->queue_lock);
-		scsi_io_completion(cmd, 0, bytes);
-		spin_lock(q->queue_lock);
-	} else {
-		/*
-		 * force journal abort of barriers
-		 */
-		end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
-		end_that_request_last(rq);
-	}
-}
-
-static int sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(request_queue_t *q, struct request *rq)
 {
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev);
-
-	if (!sdkp || !sdkp->WCE)
-		return 0;
-
 	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->flags |= REQ_BLOCK_PC;
 	rq->timeout = SD_TIMEOUT;
 	rq->cmd[0] = SYNCHRONIZE_CACHE;
-	return 1;
+	rq->cmd_len = 10;
 }
 
 static void sd_rescan(struct device *dev)
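sd_prepare_flush() shrinks to the one thing the new scheme still needs from the driver: turning the block layer's preallocated flush request into a SYNCHRONIZE CACHE packet command. The WCE check and all of sd_end_flush()'s error/post-flush bookkeeping go away because the block layer only issues flushes when the queue was registered with a flushing ordered mode, and it sequences the flush itself. For reference, the CDB this builds (byte meanings per SBC; only the code itself is in the patch):

    /*
     * 10-byte SYNCHRONIZE CACHE CDB, all zeros except the opcode:
     *   byte 0       0x35 (SYNCHRONIZE_CACHE)
     *   bytes 2..5   LBA = 0    \ zero LBA + zero block count =
     *   bytes 7..8   count = 0  / flush the entire medium
     */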
@@ -1427,10 +1409,18 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
 		sdkp->RCD = 0;
 	}
 
+	sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
+	if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+		printk(KERN_NOTICE "SCSI device %s: uses "
+		       "READ/WRITE(6), disabling FUA\n", diskname);
+		sdkp->DPOFUA = 0;
+	}
+
 	ct =  sdkp->RCD + 2*sdkp->WCE;
 
-	printk(KERN_NOTICE "SCSI device %s: drive cache: %s\n",
-	       diskname, types[ct]);
+	printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
+	       diskname, types[ct],
+	       sdkp->DPOFUA ? " w/ FUA" : "");
 
 	return;
 	}
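DPOFUA is read from bit 4 (0x10) of the device-specific parameter byte in the MODE SENSE parameter header, which for direct-access devices advertises DPO/FUA support. Since FUA can only be expressed in 10/16-byte read/write CDBs, a device stuck on READ/WRITE(6) has the flag forced off. The test in isolation (field name from the hunk):

    /* SPC mode parameter header, device-specific parameter byte:
     * bit 4 set means the device accepts DPO and FUA bits in CDBs. */
    int dpofua = (data.device_specific & 0x10) != 0;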
@@ -1462,6 +1452,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	unsigned char *buffer;
+	unsigned ordered;
 
 	SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
 
@@ -1498,7 +1489,21 @@ static int sd_revalidate_disk(struct gendisk *disk)
 		sd_read_write_protect_flag(sdkp, disk->disk_name, buffer);
 		sd_read_cache_type(sdkp, disk->disk_name, buffer);
 	}
 
+	/*
+	 * We now have all cache related info, determine how we deal
+	 * with ordered requests.  Note that as the current SCSI
+	 * dispatch function can alter request order, we cannot use
+	 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
+	 */
+	if (sdkp->WCE)
+		ordered = sdkp->DPOFUA
+			? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
+	else
+		ordered = QUEUE_ORDERED_DRAIN;
+
+	blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
+
 	set_capacity(disk, sdkp->capacity);
 	kfree(buffer);
 
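With the cache bits known, revalidation picks the weakest queue-ordering mode that is still safe. The mapping, summarized (exactly the if/else above):

    /*
     *   WCE=1, DPOFUA=1  ->  QUEUE_ORDERED_DRAIN_FUA    drain, FUA barrier write
     *   WCE=1, DPOFUA=0  ->  QUEUE_ORDERED_DRAIN_FLUSH  drain, SYNCHRONIZE CACHE
     *   WCE=0            ->  QUEUE_ORDERED_DRAIN        drain only, nothing cached
     * Tagged ordering is unusable here because the SCSI dispatcher may
     * reorder requests, per the comment in the hunk.
     */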
@@ -1598,6 +1603,7 @@ static int sd_probe(struct device *dev)
 	strcpy(gd->devfs_name, sdp->devfs_name);
 
 	gd->private_data = &sdkp->driver;
+	gd->queue = sdkp->device->request_queue;
 
 	sd_revalidate_disk(gd);
 
@@ -1605,7 +1611,6 @@ static int sd_probe(struct device *dev)
 	gd->flags = GENHD_FL_DRIVERFS;
 	if (sdp->removable)
 		gd->flags |= GENHD_FL_REMOVABLE;
-	gd->queue = sdkp->device->request_queue;
 
 	dev_set_drvdata(dev, sdkp);
 	add_disk(gd);
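The gd->queue assignment moves up in sd_probe() because sd_revalidate_disk() now ends with a blk_queue_ordered() call on the disk's queue; with the old placement the queue pointer would still be unset when revalidation runs. The required order, reduced (lines taken from the hunks above):

    gd->queue = sdkp->device->request_queue;  /* must precede revalidate ... */
    sd_revalidate_disk(gd);                   /* ... which now sets queue ordering */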
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index b8727d9bf690..1288d6203e94 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -37,11 +37,11 @@
  * by the bootloader or in the platform init code.
  *
  * The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2,
- * and so on). So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so
- * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for
- * the console code : without this 1:1 mapping, at early boot time, when we are
- * parsing the kernel args console=ttyS?, we wouldn't know wich PSC it will be
- * mapped to.
+ * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and
+ * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly
+ * fpr the console code : without this 1:1 mapping, at early boot time, when we
+ * are parsing the kernel args console=ttyPSC?, we wouldn't know wich PSC it
+ * will be mapped to.
  */
 
 #include <linux/config.h>
@@ -65,6 +65,10 @@
 #include <linux/serial_core.h>
 
 
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_PSC_MAJOR	204
+#define SERIAL_PSC_MINOR	148
+
 
 #define ISR_PASS_LIMIT 256	/* Max number of iteration in the interrupt */
 
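The PSC ports leave the shared ttyS/TTY_MAJOR space for their own names and numbers. Assuming the serial core's usual minor = base + line assignment, the resulting device nodes would be:

    /*
     * char major 204 ("Low-density serial ports"), minors from 148:
     *   /dev/ttyPSC0 = 204:148, /dev/ttyPSC1 = 204:149, ...
     * (mapping inferred from SERIAL_PSC_MAJOR/MINOR above; the patch
     * itself does not spell it out)
     */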
@@ -668,15 +672,15 @@ mpc52xx_console_setup(struct console *co, char *options)
 }
 
 
-extern struct uart_driver mpc52xx_uart_driver;
+static struct uart_driver mpc52xx_uart_driver;
 
 static struct console mpc52xx_console = {
-	.name	= "ttyS",
+	.name	= "ttyPSC",
 	.write	= mpc52xx_console_write,
 	.device	= uart_console_device,
 	.setup	= mpc52xx_console_setup,
 	.flags	= CON_PRINTBUFFER,
-	.index	= -1,	/* Specified on the cmdline (e.g. console=ttyS0 ) */
+	.index	= -1,	/* Specified on the cmdline (e.g. console=ttyPSC0 ) */
 	.data	= &mpc52xx_uart_driver,
 };
 
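The extern-to-static change is the standard forward-declaration pattern: the console needs the address of the uart_driver defined later in the same file, and a static tentative definition provides it without exporting the symbol. The pattern, reduced (identifiers from the hunks above and below):

    static struct uart_driver mpc52xx_uart_driver;      /* tentative definition */

    static struct console mpc52xx_console = {
        .data = &mpc52xx_uart_driver,                   /* forward reference resolves */
        /* ... */
    };

    static struct uart_driver mpc52xx_uart_driver = {   /* real initializer below */
        /* ... */
    };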
@@ -703,10 +707,10 @@ console_initcall(mpc52xx_console_init);
 static struct uart_driver mpc52xx_uart_driver = {
 	.owner		= THIS_MODULE,
 	.driver_name	= "mpc52xx_psc_uart",
-	.dev_name	= "ttyS",
-	.devfs_name	= "ttyS",
-	.major		= TTY_MAJOR,
-	.minor		= 64,
+	.dev_name	= "ttyPSC",
+	.devfs_name	= "ttyPSC",
+	.major		= SERIAL_PSC_MAJOR,
+	.minor		= SERIAL_PSC_MINOR,
 	.nr		= MPC52xx_PSC_MAXNUM,
 	.cons		= MPC52xx_PSC_CONSOLE,
 };