author		Jeff Garzik <jgarzik@pobox.com>	2005-09-14 08:12:20 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-14 08:12:20 -0400
commit		165415f700b0c77fa1f8db6198f48582639adf78 (patch)
tree		088e305b0b5b0c6753072e13be1177824c3ed59d /drivers/block
parent		c324b44c34050cf2a9b58830e11c974806bd85d8 (diff)
parent		2f4ba45a75d6383b4a1201169a808ffea416ffa0 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/acsi.c              |   2
-rw-r--r--  drivers/block/acsi_slm.c          |   2
-rw-r--r--  drivers/block/aoe/aoe.h           |  12
-rw-r--r--  drivers/block/ataflop.c           |  14
-rw-r--r--  drivers/block/cciss.c             |   7
-rw-r--r--  drivers/block/cfq-iosched.c       |   3
-rw-r--r--  drivers/block/deadline-iosched.c  |   5
-rw-r--r--  drivers/block/floppy.c            |   4
-rw-r--r--  drivers/block/paride/pcd.c        |   3
-rw-r--r--  drivers/block/paride/pf.c         |   3
-rw-r--r--  drivers/block/paride/pg.c         |   3
-rw-r--r--  drivers/block/paride/pt.c         |   3
-rw-r--r--  drivers/block/ps2esdi.c           |   3
-rw-r--r--  drivers/block/scsi_ioctl.c        |   1
-rw-r--r--  drivers/block/swim3.c             |   9
-rw-r--r--  drivers/block/swim_iop.c          |   3
-rw-r--r--  drivers/block/ub.c                | 273
-rw-r--r--  drivers/block/umem.c              |  11
-rw-r--r--  drivers/block/xd.c                |  21
-rw-r--r--  drivers/block/z2ram.c             |   2
20 files changed, 197 insertions(+), 187 deletions(-)
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
index ce933de48084..0e1f34fef0c8 100644
--- a/drivers/block/acsi.c
+++ b/drivers/block/acsi.c
@@ -371,7 +371,7 @@ static int acsi_revalidate (struct gendisk *disk);
 /************************* End of Prototypes **************************/
 
 
-struct timer_list acsi_timer = TIMER_INITIALIZER(acsi_times_out, 0, 0);
+DEFINE_TIMER(acsi_timer, acsi_times_out, 0, 0);
 
 
 #ifdef CONFIG_ATARI_SLM
diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c
index e3be8c31a74c..a5c1c8e871ec 100644
--- a/drivers/block/acsi_slm.c
+++ b/drivers/block/acsi_slm.c
@@ -268,7 +268,7 @@ static int slm_get_pagesize( int device, int *w, int *h );
 /************************* End of Prototypes **************************/
 
 
-static struct timer_list slm_timer = TIMER_INITIALIZER(slm_test_ready, 0, 0);
+static DEFINE_TIMER(slm_timer, slm_test_ready, 0, 0);
 
 static struct file_operations slm_fops = {
 	.owner =	THIS_MODULE,
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 721ba8086043..0e9e586e9ba3 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
 /* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
-#define VERSION "10"
+#define VERSION "12"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
@@ -7,12 +7,12 @@
  * default is 16, which is 15 partitions plus the whole disk
  */
 #ifndef AOE_PARTITIONS
-#define AOE_PARTITIONS 16
+#define AOE_PARTITIONS (16)
 #endif
 
-#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * 10 + (aoeminor))
-#define AOEMAJOR(sysminor) ((sysminor) / 10)
-#define AOEMINOR(sysminor) ((sysminor) % 10)
+#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * NPERSHELF + (aoeminor))
+#define AOEMAJOR(sysminor) ((sysminor) / NPERSHELF)
+#define AOEMINOR(sysminor) ((sysminor) % NPERSHELF)
 #define WHITESPACE " \t\v\f\n"
 
 enum {
@@ -83,7 +83,7 @@ enum {
 
 enum {
 	MAXATADATA = 1024,
-	NPERSHELF = 10,
+	NPERSHELF = 16,		/* number of slots per shelf address */
 	FREETAG = -1,
 	MIN_BUFS = 8,
 };
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index db05a5a99f35..22bda05fc693 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -371,16 +371,10 @@ static int floppy_release( struct inode * inode, struct file * filp );
 
 /************************* End of Prototypes **************************/
 
-static struct timer_list motor_off_timer =
-	TIMER_INITIALIZER(fd_motor_off_timer, 0, 0);
-static struct timer_list readtrack_timer =
-	TIMER_INITIALIZER(fd_readtrack_check, 0, 0);
-
-static struct timer_list timeout_timer =
-	TIMER_INITIALIZER(fd_times_out, 0, 0);
-
-static struct timer_list fd_timer =
-	TIMER_INITIALIZER(check_change, 0, 0);
+static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer, 0, 0);
+static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
+static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
+static DEFINE_TIMER(fd_timer, check_change, 0, 0);
 
 static inline void start_motor_off_timer(void)
 {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 418b1469d75d..28f2c177a541 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1713,10 +1713,9 @@ static unsigned long pollcomplete(int ctlr)
 
 	for (i = 20 * HZ; i > 0; i--) {
 		done = hba[ctlr]->access.command_completed(hba[ctlr]);
-		if (done == FIFO_EMPTY) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(1);
-		} else
+		if (done == FIFO_EMPTY)
+			schedule_timeout_uninterruptible(1);
+		else
 			return (done);
 	}
 	/* Invalid address to tell caller we ran out of time */
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 30c0903c7cdd..cd056e7e64ec 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -2260,6 +2260,8 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
 	if (!atomic_dec_and_test(&cfqd->ref))
 		return;
 
+	blk_put_queue(q);
+
 	cfq_shutdown_timer_wq(cfqd);
 	q->elevator->elevator_data = NULL;
 
@@ -2316,6 +2318,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	e->elevator_data = cfqd;
 
 	cfqd->queue = q;
+	atomic_inc(&q->refcnt);
 
 	cfqd->max_queued = q->nr_requests / 4;
 	q->nr_batching = cfq_queued;
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 24594c57c323..52a3ae5289a0 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -512,7 +512,10 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
 	/*
 	 * batches are currently reads XOR writes
 	 */
-	drq = dd->next_drq[WRITE] ? : dd->next_drq[READ];
+	if (dd->next_drq[WRITE])
+		drq = dd->next_drq[WRITE];
+	else
+		drq = dd->next_drq[READ];
 
 	if (drq) {
 		/* we have a "next request" */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 888dad5eef34..00895477155e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -628,7 +628,7 @@ static inline void debugt(const char *message) { }
 #endif /* DEBUGT */
 
 typedef void (*timeout_fn) (unsigned long);
-static struct timer_list fd_timeout = TIMER_INITIALIZER(floppy_shutdown, 0, 0);
+static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
 
 static const char *timeout_message;
 
@@ -1012,7 +1012,7 @@ static void schedule_bh(void (*handler) (void))
 	schedule_work(&floppy_work);
 }
 
-static struct timer_list fd_timer = TIMER_INITIALIZER(NULL, 0, 0);
+static DEFINE_TIMER(fd_timer, NULL, 0, 0);
 
 static void cancel_activity(void)
 {
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 7289f67e9568..ac5ba462710b 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -516,8 +516,7 @@ static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
 
 static void pcd_sleep(int cs)
 {
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(cs);
+	schedule_timeout_interruptible(cs);
 }
 
 static int pcd_reset(struct pcd_unit *cd)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 060b1f2a91dd..711d2f314ac3 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -507,8 +507,7 @@ static void pf_eject(struct pf_unit *pf)
 
 static void pf_sleep(int cs)
 {
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(cs);
+	schedule_timeout_interruptible(cs);
 }
 
 /* the ATAPI standard actually specifies the contents of all 7 registers
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 84d8e291ed96..b3982395f22b 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -276,8 +276,7 @@ static inline u8 DRIVE(struct pg *dev)
 
 static void pg_sleep(int cs)
 {
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(cs);
+	schedule_timeout_interruptible(cs);
 }
 
 static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 5fe8ee86f095..d8d35233cf49 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -383,8 +383,7 @@ static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *
 
 static void pt_sleep(int cs)
 {
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(cs);
+	schedule_timeout_interruptible(cs);
 }
 
 static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 29548784cb7b..29d1518be72a 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -99,8 +99,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ps2esdi_int);
 static int no_int_yet;
 static int ps2esdi_drives;
 static u_short io_base;
-static struct timer_list esdi_timer =
-	TIMER_INITIALIZER(ps2esdi_reset_timer, 0, 0);
+static DEFINE_TIMER(esdi_timer, ps2esdi_reset_timer, 0, 0);
 static int reset_status;
 static int ps2esdi_slot = -1;
 static int tp720esdi = 0;	/* Is it Integrated ESDI of ThinkPad-720? */
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index abb2df249fd3..856c2278e9d0 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -123,6 +123,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
 	safe_for_read(READ_12),
 	safe_for_read(READ_16),
 	safe_for_read(READ_BUFFER),
+	safe_for_read(READ_DEFECT_DATA),
 	safe_for_read(READ_LONG),
 	safe_for_read(INQUIRY),
 	safe_for_read(MODE_SENSE),
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index e5f7494c00ee..e425ad3eebba 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -834,8 +834,7 @@ static int fd_eject(struct floppy_state *fs)
 			break;
 		}
 		swim3_select(fs, RELAX);
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(1);
+		schedule_timeout_interruptible(1);
 		if (swim3_readbit(fs, DISK_IN) == 0)
 			break;
 	}
@@ -906,8 +905,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
 			break;
 		}
 		swim3_select(fs, RELAX);
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(1);
+		schedule_timeout_interruptible(1);
 	}
 	if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
 			 || swim3_readbit(fs, DISK_IN) == 0))
@@ -992,8 +990,7 @@ static int floppy_revalidate(struct gendisk *disk)
 		if (signal_pending(current))
 			break;
 		swim3_select(fs, RELAX);
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(1);
+		schedule_timeout_interruptible(1);
 	}
 	ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
 		|| swim3_readbit(fs, DISK_IN) == 0;
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
index a1283f6dc018..89e3c2f8b776 100644
--- a/drivers/block/swim_iop.c
+++ b/drivers/block/swim_iop.c
@@ -338,8 +338,7 @@ static int swimiop_eject(struct floppy_state *fs)
 			err = -EINTR;
 			break;
 		}
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(1);
+		schedule_timeout_interruptible(1);
 	}
 	release_drive(fs);
 	return cmd->error;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index a026567f5d18..aa0bf7ee008d 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -16,9 +16,10 @@
  * -- verify the 13 conditions and do bulk resets
  * -- kill last_pipe and simply do two-state clearing on both pipes
  * -- verify protocol (bulk) from USB descriptors (maybe...)
- * -- highmem and sg
+ * -- highmem
  * -- move top_sense and work_bcs into separate allocations (if they survive)
  *    for cache purists and esoteric architectures.
+ * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
  * -- prune comments, they are too volumnous
  * -- Exterminate P3 printks
  * -- Resove XXX's
@@ -171,7 +172,7 @@ struct bulk_cs_wrap {
  */
 struct ub_dev;
 
-#define UB_MAX_REQ_SG	1
+#define UB_MAX_REQ_SG	4
 #define UB_MAX_SECTORS 64
 
 /*
@@ -234,13 +235,10 @@ struct ub_scsi_cmd {
 
 	int stat_count;			/* Retries getting status. */
 
-	/*
-	 * We do not support transfers from highmem pages
-	 * because the underlying USB framework does not do what we need.
-	 */
-	char *data;			/* Requested buffer */
 	unsigned int len;		/* Requested length */
-	// struct scatterlist sgv[UB_MAX_REQ_SG];
+	unsigned int current_sg;
+	unsigned int nsg;		/* sgv[nsg] */
+	struct scatterlist sgv[UB_MAX_REQ_SG];
 
 	struct ub_lun *lun;
 	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
@@ -389,17 +387,18 @@ struct ub_dev {
 	struct bulk_cs_wrap work_bcs;
 	struct usb_ctrlrequest work_cr;
 
+	int sg_stat[UB_MAX_REQ_SG+1];
 	struct ub_scsi_trace tr;
 };
 
 /*
  */
 static void ub_cleanup(struct ub_dev *sc);
-static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq);
+static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
 static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq);
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq);
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_end_rq(struct request *rq, int uptodate);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -407,6 +406,7 @@ static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
 static void ub_scsi_action(unsigned long _dev);
 static void ub_scsi_dispatch(struct ub_dev *sc);
 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -500,7 +500,8 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 	}
 }
 
-static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page)
+static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
+    char *page)
 {
 	struct usb_interface *intf;
 	struct ub_dev *sc;
@@ -523,6 +524,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, c
 	cnt += sprintf(page + cnt,
 	    "qlen %d qmax %d\n",
 	    sc->cmd_queue.qlen, sc->cmd_queue.qmax);
+	cnt += sprintf(page + cnt,
+	    "sg %d %d %d %d %d\n",
+	    sc->sg_stat[0],
+	    sc->sg_stat[1],
+	    sc->sg_stat[2],
+	    sc->sg_stat[3],
+	    sc->sg_stat[4]);
 
 	list_for_each (p, &sc->luns) {
 		lun = list_entry(p, struct ub_lun, link);
@@ -744,20 +752,20 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
  * The request function is our main entry point
  */
 
-static void ub_bd_rq_fn(request_queue_t *q)
+static void ub_request_fn(request_queue_t *q)
 {
 	struct ub_lun *lun = q->queuedata;
 	struct request *rq;
 
 	while ((rq = elv_next_request(q)) != NULL) {
-		if (ub_bd_rq_fn_1(lun, rq) != 0) {
+		if (ub_request_fn_1(lun, rq) != 0) {
 			blk_stop_queue(q);
 			break;
 		}
 	}
 }
 
-static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
+static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 {
 	struct ub_dev *sc = lun->udev;
 	struct ub_scsi_cmd *cmd;
@@ -774,9 +782,8 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
 	blkdev_dequeue_request(rq);
-
 	if (blk_pc_request(rq)) {
-		rc = ub_cmd_build_packet(sc, cmd, rq);
+		rc = ub_cmd_build_packet(sc, lun, cmd, rq);
 	} else {
 		rc = ub_cmd_build_block(sc, lun, cmd, rq);
 	}
@@ -791,7 +798,7 @@ static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 	cmd->back = rq;
 
 	cmd->tag = sc->tagcnt++;
-	if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
+	if (ub_submit_scsi(sc, cmd) != 0) {
 		ub_put_cmd(lun, cmd);
 		ub_end_rq(rq, 0);
 		return 0;
@@ -804,58 +811,31 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct request *rq)
 {
 	int ub_dir;
-#if 0 /* We use rq->buffer for now */
-	struct scatterlist *sg;
 	int n_elem;
-#endif
 	unsigned int block, nblks;
 
 	if (rq_data_dir(rq) == WRITE)
 		ub_dir = UB_DIR_WRITE;
 	else
 		ub_dir = UB_DIR_READ;
+	cmd->dir = ub_dir;
 
 	/*
 	 * get scatterlist from block layer
 	 */
-#if 0 /* We use rq->buffer for now */
-	sg = &cmd->sgv[0];
-	n_elem = blk_rq_map_sg(q, rq, sg);
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
 	if (n_elem <= 0) {
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		blk_start_queue(q);
-		return 0;		/* request with no s/g entries? */
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    sc->name, n_elem); /* P3 */
+		return -1;		/* request with no s/g entries? */
 	}
-
-	if (n_elem != 1) {		/* Paranoia */
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
 		printk(KERN_WARNING "%s: request with %d segments\n",
 		    sc->name, n_elem);
-		ub_put_cmd(lun, cmd);
-		ub_end_rq(rq, 0);
-		blk_start_queue(q);
-		return 0;
-	}
-#endif
-
-	/*
-	 * XXX Unfortunately, this check does not work. It is quite possible
-	 * to get bogus non-null rq->buffer if you allow sg by mistake.
-	 */
-	if (rq->buffer == NULL) {
-		/*
-		 * This must not happen if we set the queue right.
-		 * The block level must create bounce buffers for us.
-		 */
-		static int do_print = 1;
-		if (do_print) {
-			printk(KERN_WARNING "%s: unmapped block request"
-			    " flags 0x%lx sectors %lu\n",
-			    sc->name, rq->flags, rq->nr_sectors);
-			do_print = 0;
-		}
 		return -1;
 	}
+	cmd->nsg = n_elem;
+	sc->sg_stat[n_elem]++;
 
 	/*
 	 * build the command
@@ -876,30 +856,15 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb[8] = nblks;
 	cmd->cdb_len = 10;
 
-	cmd->dir = ub_dir;
-	cmd->data = rq->buffer;
 	cmd->len = rq->nr_sectors * 512;
 
 	return 0;
 }
 
-static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq)
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq)
 {
-
-	if (rq->data_len != 0 && rq->data == NULL) {
-		static int do_print = 1;
-		if (do_print) {
-			printk(KERN_WARNING "%s: unmapped packet request"
-			    " flags 0x%lx length %d\n",
-			    sc->name, rq->flags, rq->data_len);
-			do_print = 0;
-		}
-		return -1;
-	}
-
-	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
-	cmd->cdb_len = rq->cmd_len;
+	int n_elem;
 
 	if (rq->data_len == 0) {
 		cmd->dir = UB_DIR_NONE;
@@ -908,8 +873,29 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 		cmd->dir = UB_DIR_WRITE;
 	else
 		cmd->dir = UB_DIR_READ;
+
+	}
+
+	/*
+	 * get scatterlist from block layer
+	 */
+	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
+	if (n_elem < 0) {
+		printk(KERN_INFO "%s: failed request map (%d)\n",
+		    sc->name, n_elem); /* P3 */
+		return -1;
 	}
-	cmd->data = rq->data;
+	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
+		printk(KERN_WARNING "%s: request with %d segments\n",
+		    sc->name, n_elem);
+		return -1;
+	}
+	cmd->nsg = n_elem;
+	sc->sg_stat[n_elem]++;
+
+	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
+	cmd->cdb_len = rq->cmd_len;
+
 	cmd->len = rq->data_len;
 
 	return 0;
@@ -919,24 +905,34 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct request *rq = cmd->back;
 	struct ub_lun *lun = cmd->lun;
-	struct gendisk *disk = lun->disk;
-	request_queue_t *q = disk->queue;
 	int uptodate;
 
-	if (blk_pc_request(rq)) {
-		/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
-		memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
-		rq->sense_len = UB_SENSE_SIZE;
-	}
-
-	if (cmd->error == 0)
+	if (cmd->error == 0) {
 		uptodate = 1;
-	else
+
+		if (blk_pc_request(rq)) {
+			if (cmd->act_len >= rq->data_len)
+				rq->data_len = 0;
+			else
+				rq->data_len -= cmd->act_len;
+		}
+	} else {
 		uptodate = 0;
 
+		if (blk_pc_request(rq)) {
+			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
+			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
+			rq->sense_len = UB_SENSE_SIZE;
+			if (sc->top_sense[0] != 0)
+				rq->errors = SAM_STAT_CHECK_CONDITION;
+			else
+				rq->errors = DID_ERROR << 16;
+		}
+	}
+
 	ub_put_cmd(lun, cmd);
 	ub_end_rq(rq, uptodate);
-	blk_start_queue(q);
+	blk_start_queue(lun->disk->queue);
 }
 
 static void ub_end_rq(struct request *rq, int uptodate)
@@ -1014,7 +1010,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	sc->last_pipe = sc->send_bulk_pipe;
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
 	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 
 	/* Fill what we shouldn't be filling, because usb-storage did so. */
 	sc->work_urb.actual_length = 0;
@@ -1103,7 +1099,6 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct urb *urb = &sc->work_urb;
 	struct bulk_cs_wrap *bcs;
-	int pipe;
 	int rc;
 
 	if (atomic_read(&sc->poison)) {
@@ -1204,38 +1199,13 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			goto Bad_End;
 		}
 
-		if (cmd->dir == UB_DIR_NONE) {
+		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
 			ub_state_stat(sc, cmd);
 			return;
 		}
 
-		UB_INIT_COMPLETION(sc->work_done);
-
-		if (cmd->dir == UB_DIR_READ)
-			pipe = sc->recv_bulk_pipe;
-		else
-			pipe = sc->send_bulk_pipe;
-		sc->last_pipe = pipe;
-		usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
-		    cmd->data, cmd->len, ub_urb_complete, sc);
-		sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
-		sc->work_urb.actual_length = 0;
-		sc->work_urb.error_count = 0;
-		sc->work_urb.status = 0;
-
-		if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-			/* XXX Clear stalls */
-			printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
-			ub_complete(&sc->work_done);
-			ub_state_done(sc, cmd, rc);
-			return;
-		}
-
-		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
-		add_timer(&sc->work_timer);
-
-		cmd->state = UB_CMDST_DATA;
-		ub_cmdtr_state(sc, cmd);
+		// udelay(125);		// usb-storage has this
+		ub_data_start(sc, cmd);
 
 	} else if (cmd->state == UB_CMDST_DATA) {
 		if (urb->status == -EPIPE) {
@@ -1257,16 +1227,22 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		if (urb->status == -EOVERFLOW) {
 			/*
 			 * A babble? Failure, but we must transfer CSW now.
+			 * XXX This is going to end in perpetual babble. Reset.
 			 */
 			cmd->error = -EOVERFLOW;	/* A cheap trick... */
-		} else {
-			if (urb->status != 0)
-				goto Bad_End;
+			ub_state_stat(sc, cmd);
+			return;
 		}
+		if (urb->status != 0)
+			goto Bad_End;
 
-		cmd->act_len = urb->actual_length;
+		cmd->act_len += urb->actual_length;
 		ub_cmdtr_act_len(sc, cmd);
 
+		if (++cmd->current_sg < cmd->nsg) {
+			ub_data_start(sc, cmd);
+			return;
+		}
 		ub_state_stat(sc, cmd);
 
 	} else if (cmd->state == UB_CMDST_STAT) {
@@ -1401,6 +1377,46 @@ Bad_End: /* Little Excel is dead */
 
 /*
  * Factorization helper for the command state machine:
+ * Initiate a data segment transfer.
+ */
+static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
+	int pipe;
+	int rc;
+
+	UB_INIT_COMPLETION(sc->work_done);
+
+	if (cmd->dir == UB_DIR_READ)
+		pipe = sc->recv_bulk_pipe;
+	else
+		pipe = sc->send_bulk_pipe;
+	sc->last_pipe = pipe;
+	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
+	    page_address(sg->page) + sg->offset, sg->length,
+	    ub_urb_complete, sc);
+	sc->work_urb.transfer_flags = 0;
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+		/* XXX Clear stalls */
+		printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
+		ub_complete(&sc->work_done);
+		ub_state_done(sc, cmd, rc);
+		return;
+	}
+
+	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
+	add_timer(&sc->work_timer);
+
+	cmd->state = UB_CMDST_DATA;
+	ub_cmdtr_state(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
  * Finish the command.
  */
 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
@@ -1426,7 +1442,7 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	sc->last_pipe = sc->recv_bulk_pipe;
 	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
 	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1484,6 +1500,7 @@ static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
 	struct ub_scsi_cmd *scmd;
+	struct scatterlist *sg;
 	int rc;
 
 	if (cmd->cdb[0] == REQUEST_SENSE) {
@@ -1492,12 +1509,17 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	}
 
 	scmd = &sc->top_rqs_cmd;
+	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
 	scmd->cdb[0] = REQUEST_SENSE;
 	scmd->cdb[4] = UB_SENSE_SIZE;
 	scmd->cdb_len = 6;
 	scmd->dir = UB_DIR_READ;
 	scmd->state = UB_CMDST_INIT;
-	scmd->data = sc->top_sense;
+	scmd->nsg = 1;
+	sg = &scmd->sgv[0];
+	sg->page = virt_to_page(sc->top_sense);
+	sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1);
+	sg->length = UB_SENSE_SIZE;
 	scmd->len = UB_SENSE_SIZE;
 	scmd->lun = cmd->lun;
 	scmd->done = ub_top_sense_done;
@@ -1541,7 +1563,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
 
 	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
 	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
-	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.transfer_flags = 0;
 	sc->work_urb.actual_length = 0;
 	sc->work_urb.error_count = 0;
 	sc->work_urb.status = 0;
@@ -1560,7 +1582,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
  */
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
 {
-	unsigned char *sense = scmd->data;
+	unsigned char *sense = sc->top_sense;
 	struct ub_scsi_cmd *cmd;
 
 	/*
@@ -1852,6 +1874,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret)
 {
 	struct ub_scsi_cmd *cmd;
+	struct scatterlist *sg;
 	char *p;
 	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
 	unsigned long flags;
@@ -1872,7 +1895,11 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb_len = 10;
 	cmd->dir = UB_DIR_READ;
 	cmd->state = UB_CMDST_INIT;
-	cmd->data = p;
+	cmd->nsg = 1;
+	sg = &cmd->sgv[0];
+	sg->page = virt_to_page(p);
+	sg->offset = (unsigned int)p & (PAGE_SIZE-1);
+	sg->length = 8;
 	cmd->len = 8;
 	cmd->lun = lun;
 	cmd->done = ub_probe_done;
@@ -2289,7 +2316,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	disk->driverfs_dev = &sc->intf->dev;	/* XXX Many to one ok? */
 
 	rc = -ENOMEM;
-	if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
+	if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
 		goto err_blkqinit;
 
 	disk->queue = q;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 0c4c121d2e79..0f48301342da 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -34,6 +34,7 @@
  *  - set initialised bit then.
  */
 
+//#define DEBUG /* uncomment if you want debugging info (pr_debug) */
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -58,10 +59,6 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-#define PRINTK(x...) do {} while (0)
-#define dprintk(x...) do {} while (0)
-/*#define dprintk(x...) printk(x) */
-
 #define MM_MAXCARDS 4
 #define MM_RAHEAD 2      /* two sectors */
 #define MM_BLKSIZE 1024  /* 1k blocks */
@@ -299,7 +296,7 @@ static void mm_start_io(struct cardinfo *card)
 
 	/* make the last descriptor end the chain */
 	page = &card->mm_pages[card->Active];
-	PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
+	pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
 	desc = &page->desc[page->cnt-1];
 
 	desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
@@ -532,7 +529,7 @@ static void process_page(unsigned long data)
 		activate(card);
 	} else {
 		/* haven't finished with this one yet */
-		PRINTK("do some more\n");
+		pr_debug("do some more\n");
 		mm_start_io(card);
 	}
  out_unlock:
@@ -555,7 +552,7 @@ static void process_page(unsigned long data)
 static int mm_make_request(request_queue_t *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
-	PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
+	pr_debug("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
 
 	bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
 	spin_lock_irq(&card->lock);
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 1676033da6c6..68b6d7b154cf 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -47,6 +47,7 @@
 #include <linux/wait.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/delay.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -62,7 +63,7 @@ static int xd[5] = { -1,-1,-1,-1, };
 
 #define XD_DONT_USE_DMA		0  /* Initial value. may be overriden using
 				      "nodma" module option */
-#define XD_INIT_DISK_DELAY	(30*HZ/1000)  /* 30 ms delay during disk initialization */
+#define XD_INIT_DISK_DELAY	(30)  /* 30 ms delay during disk initialization */
 
 /* Above may need to be increased if a problem with the 2nd drive detection
    (ST11M controller) or resetting a controller (WD) appears */
@@ -529,10 +530,8 @@ static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long t
 	int success;
 
 	xdc_busy = 1;
-	while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
-	}
+	while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
+		schedule_timeout_uninterruptible(1);
 	xdc_busy = 0;
 	return (success);
 }
@@ -633,14 +632,12 @@ static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
 	for (i = 0; i < XD_MAXDRIVES; i++) {
 		xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
 		if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(XD_INIT_DISK_DELAY);
+			msleep_interruptible(XD_INIT_DISK_DELAY);
 
 			init_drive(count);
 			count++;
 
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(XD_INIT_DISK_DELAY);
+			msleep_interruptible(XD_INIT_DISK_DELAY);
 		}
 	}
 	return (count);
@@ -761,8 +758,7 @@ static void __init xd_wd_init_controller (unsigned int address)
 
 	outb(0,XD_RESET);		/* reset the controller */
 
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(XD_INIT_DISK_DELAY);
+	msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_wd_init_drive (u_char drive)
@@ -936,8 +932,7 @@ If you need non-standard settings use the xd=... command */
 	xd_maxsectors = 0x01;
 	outb(0,XD_RESET);		/* reset the controller */
 
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(XD_INIT_DISK_DELAY);
+	msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_xebec_init_drive (u_char drive)
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 007f6a662439..bb5e8d665a2a 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -296,7 +296,7 @@ z2_open( struct inode *inode, struct file *filp )
     return 0;
 
 err_out_kfree:
-    kfree( z2ram_map );
+    kfree(z2ram_map);
 err_out:
     return rc;
 }