author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-01 12:00:29 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-01 12:00:29 -0500
commit     b1bf9368407ae7e89d8a005bb40beb70a41df539 (patch)
tree       3815c8aab19c6c186736673c624fef5f3faab716 /drivers/block
parent     524df55725217b13d5a232fb5badb5846418ea0e (diff)
parent     4671a1322052425afa38fcb7980d2fd2bb0fc99b (diff)
Merge branch 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block: (38 commits)
block: don't access jiffies when initialising io_context
cfq: remove 8 bytes of padding from cfq_rb_root on 64 bit builds
block: fix for "Consolidate phys_segment and hw_segment limits"
cfq-iosched: quantum check tweak
blktrace: perform cleanup after setup error
blkdev: fix merge_bvec_fn return value checks
cfq-iosched: requests "in flight" vs "in driver" clarification
cciss: Fix problem with scatter gather elements in the scsi half of the driver
cciss: eliminate unnecessary pointer use in cciss scsi code
cciss: do not use void pointer for scsi hba data
cciss: factor out scatter gather chain block mapping code
cciss: fix scatter gather chain block dma direction kludge
cciss: simplify scatter gather code
cciss: factor out scatter gather chain block allocation and freeing
cciss: detect bad alignment of scsi commands at build time
cciss: clarify command list padding calculation
cfq-iosched: rethink seeky detection for SSDs
cfq-iosched: rework seeky detection
block: remove padding from io_context on 64bit builds
block: Consolidate phys_segment and hw_segment limits
...
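As a quick illustration of the queue-limit consolidation listed above ("block: Consolidate phys_segment and hw_segment limits" and the max_sectors rename), here is a minimal sketch of how a block driver's queue setup changes. This is not code from the merge; the helper name example_setup_queue and its parameters are made up for illustration, while the blk_queue_* calls are the ones exercised in the hunks below.

#include <linux/blkdev.h>

/* Illustrative sketch only: before this merge a driver set a physical and a
 * hardware segment limit separately and capped request size with
 * blk_queue_max_sectors(); afterwards a single call sets the segment limit
 * and blk_queue_max_hw_sectors() sets the sector cap. */
static void example_setup_queue(struct request_queue *q,
				unsigned short nsegs,
				unsigned int max_sectors)
{
	/* was: blk_queue_max_hw_segments(q, nsegs);
	 *      blk_queue_max_phys_segments(q, nsegs); */
	blk_queue_max_segments(q, nsegs);
	/* was: blk_queue_max_sectors(q, max_sectors); */
	blk_queue_max_hw_sectors(q, max_sectors);
}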
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/DAC960.c        |    6
-rw-r--r--  drivers/block/brd.c           |    2
-rw-r--r--  drivers/block/cciss.c         |  218
-rw-r--r--  drivers/block/cciss.h         |   21
-rw-r--r--  drivers/block/cciss_cmd.h     |  164
-rw-r--r--  drivers/block/cciss_scsi.c    |  145
-rw-r--r--  drivers/block/cciss_scsi.h    |   18
-rw-r--r--  drivers/block/cpqarray.c      |    5
-rw-r--r--  drivers/block/drbd/drbd_nl.c  |    5
-rw-r--r--  drivers/block/floppy.c        |    2
-rw-r--r--  drivers/block/hd.c            |    2
-rw-r--r--  drivers/block/mg_disk.c       |    2
-rw-r--r--  drivers/block/paride/pd.c     |    2
-rw-r--r--  drivers/block/paride/pf.c     |    3
-rw-r--r--  drivers/block/pktcdvd.c       |   97
-rw-r--r--  drivers/block/ps3disk.c       |    5
-rw-r--r--  drivers/block/ps3vram.c       |    7
-rw-r--r--  drivers/block/sunvdc.c        |    5
-rw-r--r--  drivers/block/sx8.c           |    5
-rw-r--r--  drivers/block/ub.c            |    7
-rw-r--r--  drivers/block/viodasd.c       |    5
-rw-r--r--  drivers/block/virtio_blk.c    |    2
-rw-r--r--  drivers/block/xd.c            |    2
-rw-r--r--  drivers/block/xen-blkfront.c  |    7
-rw-r--r--  drivers/block/xsysace.c       |    2
25 files changed, 299 insertions, 440 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index ce1fa923c414..459f1bc25a7b 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2534,8 +2534,8 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
 	RequestQueue->queuedata = Controller;
 	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-	blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-	blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
 	disk->queue = RequestQueue;
 	sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
 	disk->major = MajorNumber;
@@ -7134,7 +7134,7 @@ static struct DAC960_privdata DAC960_P_privdata = {
 	.MemoryWindowSize =	DAC960_PD_RegisterWindowSize,
 };
 
-static struct pci_device_id DAC960_id_table[] = {
+static const struct pci_device_id DAC960_id_table[] = {
 	{
 		.vendor		= PCI_VENDOR_ID_MYLEX,
 		.device		= PCI_DEVICE_ID_MYLEX_DAC960_GEM,
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4f688434daf1..c6ddeacb77fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
 	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
-	blk_queue_max_sectors(brd->brd_queue, 1024);
+	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9291614ac6b7..9e3af307aae1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -257,6 +257,79 @@ static inline void removeQ(CommandList_struct *c)
 	hlist_del_init(&c->list);
 }
 
+static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
+	int nr_cmds)
+{
+	int i;
+
+	if (!cmd_sg_list)
+		return;
+	for (i = 0; i < nr_cmds; i++) {
+		kfree(cmd_sg_list[i]);
+		cmd_sg_list[i] = NULL;
+	}
+	kfree(cmd_sg_list);
+}
+
+static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
+	ctlr_info_t *h, int chainsize, int nr_cmds)
+{
+	int j;
+	SGDescriptor_struct **cmd_sg_list;
+
+	if (chainsize <= 0)
+		return NULL;
+
+	cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
+	if (!cmd_sg_list)
+		return NULL;
+
+	/* Build up chain blocks for each command */
+	for (j = 0; j < nr_cmds; j++) {
+		/* Need a block of chainsized s/g elements. */
+		cmd_sg_list[j] = kmalloc((chainsize *
+			sizeof(*cmd_sg_list[j])), GFP_KERNEL);
+		if (!cmd_sg_list[j]) {
+			dev_err(&h->pdev->dev, "Cannot get memory "
+				"for s/g chains.\n");
+			goto clean;
+		}
+	}
+	return cmd_sg_list;
+clean:
+	cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
+	return NULL;
+}
+
+static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
+{
+	SGDescriptor_struct *chain_sg;
+	u64bit temp64;
+
+	if (c->Header.SGTotal <= h->max_cmd_sgentries)
+		return;
+
+	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
+	temp64.val32.lower = chain_sg->Addr.lower;
+	temp64.val32.upper = chain_sg->Addr.upper;
+	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
+}
+
+static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
+	SGDescriptor_struct *chain_block, int len)
+{
+	SGDescriptor_struct *chain_sg;
+	u64bit temp64;
+
+	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
+	chain_sg->Ext = CCISS_SG_CHAIN;
+	chain_sg->Len = len;
+	temp64.val = pci_map_single(h->pdev, chain_block, len,
+		PCI_DMA_TODEVICE);
+	chain_sg->Addr.lower = temp64.val32.lower;
+	chain_sg->Addr.upper = temp64.val32.upper;
+}
+
 #include "cciss_scsi.c"		/* For SCSI tape support */
 
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
@@ -1344,26 +1417,27 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			kfree(buff);
 			return -ENOMEM;
 		}
-		// Fill in the command type
+		/* Fill in the command type */
 		c->cmd_type = CMD_IOCTL_PEND;
-		// Fill in Command Header
-		c->Header.ReplyQueue = 0;	// unused in simple mode
-		if (iocommand.buf_size > 0)	// buffer to fill
+		/* Fill in Command Header */
+		c->Header.ReplyQueue = 0;	/* unused in simple mode */
+		if (iocommand.buf_size > 0)	/* buffer to fill */
 		{
 			c->Header.SGList = 1;
 			c->Header.SGTotal = 1;
-		} else	// no buffers to fill
+		} else	/* no buffers to fill */
 		{
 			c->Header.SGList = 0;
 			c->Header.SGTotal = 0;
 		}
 		c->Header.LUN = iocommand.LUN_info;
-		c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
+		/* use the kernel address the cmd block for tag */
+		c->Header.Tag.lower = c->busaddr;
 
-		// Fill in Request block
+		/* Fill in Request block */
 		c->Request = iocommand.Request;
 
-		// Fill in the scatter gather information
+		/* Fill in the scatter gather information */
 		if (iocommand.buf_size > 0) {
 			temp64.val = pci_map_single(host->pdev, buff,
 				iocommand.buf_size,
@@ -1371,7 +1445,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			c->SG[0].Addr.lower = temp64.val32.lower;
 			c->SG[0].Addr.upper = temp64.val32.upper;
 			c->SG[0].Len = iocommand.buf_size;
-			c->SG[0].Ext = 0;	// we are not chaining
+			c->SG[0].Ext = 0;	/* we are not chaining */
 		}
 		c->waiting = &wait;
 
@@ -1670,14 +1744,9 @@ static void cciss_softirq_done(struct request *rq)
 	/* unmap the DMA mapping for all the scatter gather elements */
 	for (i = 0; i < cmd->Header.SGList; i++) {
 		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
-			temp64.val32.lower = cmd->SG[i].Addr.lower;
-			temp64.val32.upper = cmd->SG[i].Addr.upper;
-			pci_dma_sync_single_for_cpu(h->pdev, temp64.val,
-				cmd->SG[i].Len, ddir);
-			pci_unmap_single(h->pdev, temp64.val,
-				cmd->SG[i].Len, ddir);
+			cciss_unmap_sg_chain_block(h, cmd);
 			/* Point to the next block */
-			curr_sg = h->cmd_sg_list[cmd->cmdindex]->sgchain;
+			curr_sg = h->cmd_sg_list[cmd->cmdindex];
 			sg_index = 0;
 		}
 		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
@@ -1796,12 +1865,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
-
-	/* This is a limit in the driver and could be eliminated. */
-	blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
+	blk_queue_max_segments(disk->queue, h->maxsgentries);
 
-	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
+	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
 
 	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
 
@@ -2425,7 +2491,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
 			c->Request.Type.Direction = XFER_READ;
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = cmd;
-			c->Request.CDB[6] = (size >> 24) & 0xFF;	//MSB
+			c->Request.CDB[6] = (size >> 24) & 0xFF;	/* MSB */
 			c->Request.CDB[7] = (size >> 16) & 0xFF;
 			c->Request.CDB[8] = (size >> 8) & 0xFF;
 			c->Request.CDB[9] = size & 0xFF;
@@ -2694,7 +2760,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
 			       "cciss: reading geometry failed, volume "
 			       "does not support reading geometry\n");
 			drv->heads = 255;
-			drv->sectors = 32;	// Sectors per track
+			drv->sectors = 32;	/* Sectors per track */
 			drv->cylinders = total_size + 1;
 			drv->raid_level = RAID_UNKNOWN;
 		} else {
@@ -3082,7 +3148,6 @@ static void do_cciss_request(struct request_queue *q)
 	SGDescriptor_struct *curr_sg;
 	drive_info_struct *drv;
 	int i, dir;
-	int nseg = 0;
 	int sg_index = 0;
 	int chained = 0;
 
@@ -3112,19 +3177,19 @@ static void do_cciss_request(struct request_queue *q)
 
 	/* fill in the request */
 	drv = creq->rq_disk->private_data;
-	c->Header.ReplyQueue = 0;	// unused in simple mode
+	c->Header.ReplyQueue = 0;	/* unused in simple mode */
 	/* got command from pool, so use the command block index instead */
 	/* for direct lookups. */
 	/* The first 2 bits are reserved for controller error reporting. */
 	c->Header.Tag.lower = (c->cmdindex << 3);
 	c->Header.Tag.lower |= 0x04;	/* flag for direct lookup. */
 	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
-	c->Request.CDBLen = 10;	// 12 byte commands not in FW yet;
-	c->Request.Type.Type = TYPE_CMD;	// It is a command.
+	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
+	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
 	c->Request.Type.Attribute = ATTR_SIMPLE;
 	c->Request.Type.Direction =
 	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
-	c->Request.Timeout = 0;	// Don't time out
+	c->Request.Timeout = 0;	/* Don't time out */
 	c->Request.CDB[0] =
 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
 	start_blk = blk_rq_pos(creq);
@@ -3149,13 +3214,8 @@ static void do_cciss_request(struct request_queue *q)
 	for (i = 0; i < seg; i++) {
 		if (((sg_index+1) == (h->max_cmd_sgentries)) &&
 			!chained && ((seg - i) > 1)) {
-			nseg = seg - i;
-			curr_sg[sg_index].Len = (nseg) *
-				sizeof(SGDescriptor_struct);
-			curr_sg[sg_index].Ext = CCISS_SG_CHAIN;
-
 			/* Point to next chain block. */
-			curr_sg = h->cmd_sg_list[c->cmdindex]->sgchain;
+			curr_sg = h->cmd_sg_list[c->cmdindex];
 			sg_index = 0;
 			chained = 1;
 		}
@@ -3166,31 +3226,12 @@ static void do_cciss_request(struct request_queue *q)
 		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
 		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
 		curr_sg[sg_index].Ext = 0;	/* we are not chaining */
-
 		++sg_index;
 	}
-
-	if (chained) {
-		int len;
-		curr_sg = c->SG;
-		sg_index = h->max_cmd_sgentries - 1;
-		len = curr_sg[sg_index].Len;
-		/* Setup pointer to next chain block.
-		 * Fill out last element in current chain
-		 * block with address of next chain block.
-		 */
-		temp64.val = pci_map_single(h->pdev,
-				h->cmd_sg_list[c->cmdindex]->sgchain,
-				len, dir);
-
-		h->cmd_sg_list[c->cmdindex]->sg_chain_dma = temp64.val;
-		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
-		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
-
-		pci_dma_sync_single_for_device(h->pdev,
-				h->cmd_sg_list[c->cmdindex]->sg_chain_dma,
-				len, dir);
-	}
+	if (chained)
+		cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
+			(seg - (h->max_cmd_sgentries - 1)) *
+			sizeof(SGDescriptor_struct));
 
 	/* track how many SG entries we are using */
 	if (seg > h->maxSG)
@@ -3209,11 +3250,11 @@ static void do_cciss_request(struct request_queue *q)
 	if (likely(blk_fs_request(creq))) {
 		if(h->cciss_read == CCISS_READ_10) {
 			c->Request.CDB[1] = 0;
-			c->Request.CDB[2] = (start_blk >> 24) & 0xff;	//MSB
+			c->Request.CDB[2] = (start_blk >> 24) & 0xff;	/* MSB */
 			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
 			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
 			c->Request.CDB[5] = start_blk & 0xff;
-			c->Request.CDB[6] = 0;	// (sect >> 24) & 0xff; MSB
+			c->Request.CDB[6] = 0;	/* (sect >> 24) & 0xff; MSB */
 			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
 			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
@@ -3222,7 +3263,7 @@ static void do_cciss_request(struct request_queue *q)
 
 			c->Request.CDBLen = 16;
 			c->Request.CDB[1]= 0;
-			c->Request.CDB[2]= (upper32 >> 24) & 0xff;	//MSB
+			c->Request.CDB[2]= (upper32 >> 24) & 0xff;	/* MSB */
 			c->Request.CDB[3]= (upper32 >> 16) & 0xff;
 			c->Request.CDB[4]= (upper32 >> 8) & 0xff;
 			c->Request.CDB[5]= upper32 & 0xff;
@@ -4240,37 +4281,10 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 			goto clean4;
 		}
 	}
-	hba[i]->cmd_sg_list = kmalloc(sizeof(struct Cmd_sg_list *) *
-		hba[i]->nr_cmds,
-		GFP_KERNEL);
-	if (!hba[i]->cmd_sg_list) {
-		printk(KERN_ERR "cciss%d: Cannot get memory for "
-			"s/g chaining.\n", i);
+	hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i],
+		hba[i]->chainsize, hba[i]->nr_cmds);
+	if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0)
 		goto clean4;
-	}
-	/* Build up chain blocks for each command */
-	if (hba[i]->chainsize > 0) {
-		for (j = 0; j < hba[i]->nr_cmds; j++) {
-			hba[i]->cmd_sg_list[j] =
-				kmalloc(sizeof(struct Cmd_sg_list),
-					GFP_KERNEL);
-			if (!hba[i]->cmd_sg_list[j]) {
-				printk(KERN_ERR "cciss%d: Cannot get memory "
-					"for chain block.\n", i);
-				goto clean4;
-			}
-			/* Need a block of chainsized s/g elements. */
-			hba[i]->cmd_sg_list[j]->sgchain =
-				kmalloc((hba[i]->chainsize *
-					sizeof(SGDescriptor_struct)),
-					GFP_KERNEL);
-			if (!hba[i]->cmd_sg_list[j]->sgchain) {
-				printk(KERN_ERR "cciss%d: Cannot get memory "
-					"for s/g chains\n", i);
-				goto clean4;
-			}
-		}
-	}
 
 	spin_lock_init(&hba[i]->lock);
 
@@ -4329,16 +4343,7 @@ clean4:
 	for (k = 0; k < hba[i]->nr_cmds; k++)
 		kfree(hba[i]->scatter_list[k]);
 	kfree(hba[i]->scatter_list);
-	/* Only free up extra s/g lists if controller supports them */
-	if (hba[i]->chainsize > 0) {
-		for (j = 0; j < hba[i]->nr_cmds; j++) {
-			if (hba[i]->cmd_sg_list[j]) {
-				kfree(hba[i]->cmd_sg_list[j]->sgchain);
-				kfree(hba[i]->cmd_sg_list[j]);
-			}
-		}
-		kfree(hba[i]->cmd_sg_list);
-	}
+	cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
 	if (hba[i]->cmd_pool)
 		pci_free_consistent(hba[i]->pdev,
 			hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -4456,16 +4461,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 	for (j = 0; j < hba[i]->nr_cmds; j++)
 		kfree(hba[i]->scatter_list[j]);
 	kfree(hba[i]->scatter_list);
-	/* Only free up extra s/g lists if controller supports them */
-	if (hba[i]->chainsize > 0) {
-		for (j = 0; j < hba[i]->nr_cmds; j++) {
-			if (hba[i]->cmd_sg_list[j]) {
-				kfree(hba[i]->cmd_sg_list[j]->sgchain);
-				kfree(hba[i]->cmd_sg_list[j]);
-			}
-		}
-		kfree(hba[i]->cmd_sg_list);
-	}
+	cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
@@ -4498,7 +4494,7 @@ static int __init cciss_init(void)
 	 * boundary. Given that we use pci_alloc_consistent() to allocate an
 	 * array of them, the size must be a multiple of 8 bytes.
 	 */
-	BUILD_BUG_ON(sizeof(CommandList_struct) % 8);
+	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
 
 	printk(KERN_INFO DRIVER_NAME "\n");
 
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 1d95db254069..c5d411174db0 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -55,18 +55,12 @@ typedef struct _drive_info_struct
 	char device_initialized; /* indicates whether dev is initialized */
 } drive_info_struct;
 
-struct Cmd_sg_list {
-	SGDescriptor_struct	*sgchain;
-	dma_addr_t		sg_chain_dma;
-	int			chain_block_size;
-};
-
 struct ctlr_info
 {
 	int	ctlr;
 	char	devname[8];
 	char	*product_name;
-	char	firm_ver[4];	// Firmware version
+	char	firm_ver[4];	/* Firmware version */
 	struct pci_dev *pdev;
 	__u32	board_id;
 	void __iomem *vaddr;
@@ -89,7 +83,7 @@ struct ctlr_info
 	int	maxsgentries;
 	int	chainsize;
 	int	max_cmd_sgentries;
-	struct Cmd_sg_list **cmd_sg_list;
+	SGDescriptor_struct **cmd_sg_list;
 
 #	define DOORBELL_INT	0
 #	define PERF_MODE_INT	1
@@ -103,7 +97,7 @@ struct ctlr_info
 	BYTE	cciss_write;
 	BYTE	cciss_read_capacity;
 
-	// information about each logical volume
+	/* information about each logical volume */
 	drive_info_struct *drv[CISS_MAX_LUN];
 
 	struct access_method access;
@@ -116,7 +110,7 @@ struct ctlr_info
 	unsigned int maxSG;
 	spinlock_t lock;
 
-	//* pointers to command and error info pool */
+	/* pointers to command and error info pool */
 	CommandList_struct	*cmd_pool;
 	dma_addr_t		cmd_pool_dhandle;
 	ErrorInfo_struct	*errinfo_pool;
@@ -134,12 +128,10 @@ struct ctlr_info
 	 */
 	int			next_to_run;
 
-	// Disk structures we need to pass back
+	/* Disk structures we need to pass back */
 	struct gendisk   *gendisk[CISS_MAX_LUN];
 #ifdef CONFIG_CISS_SCSI_TAPE
-	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
-	/* list of block side commands the scsi error handling sucked up */
-	/* and saved for later processing */
+	struct cciss_scsi_adapter_data_t *scsi_ctlr;
 #endif
 	unsigned char alive;
 	struct list_head scan_list;
@@ -315,4 +307,3 @@ struct board_type {
 #define CCISS_LOCK(i)	(&hba[i]->lock)
 
 #endif /* CCISS_H */
-
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 6afa700890ff..e624ff959cb6 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -1,31 +1,16 @@
 #ifndef CCISS_CMD_H
 #define CCISS_CMD_H
-//###########################################################################
-//DEFINES
-//###########################################################################
+
+#include <linux/cciss_defs.h>
+
+/* DEFINES */
 #define CISS_VERSION "1.00"
 
-//general boundary definitions
-#define SENSEINFOBYTES          32//note that this value may vary between host implementations
+/* general boundary definitions */
 #define MAXSGENTRIES            32
 #define CCISS_SG_CHAIN          0x80000000
 #define MAXREPLYQS              256
 
-//Command Status value
-#define CMD_SUCCESS		0x0000
-#define CMD_TARGET_STATUS	0x0001
-#define CMD_DATA_UNDERRUN	0x0002
-#define CMD_DATA_OVERRUN	0x0003
-#define CMD_INVALID		0x0004
-#define CMD_PROTOCOL_ERR	0x0005
-#define CMD_HARDWARE_ERR	0x0006
-#define CMD_CONNECTION_LOST	0x0007
-#define CMD_ABORTED		0x0008
-#define CMD_ABORT_FAILED	0x0009
-#define CMD_UNSOLICITED_ABORT	0x000A
-#define CMD_TIMEOUT		0x000B
-#define CMD_UNABORTABLE		0x000C
-
 /* Unit Attentions ASC's as defined for the MSA2012sa */
 #define POWER_OR_RESET			0x29
 #define STATE_CHANGED			0x2a
@@ -49,30 +34,13 @@
 #define ASYM_ACCESS_CHANGED		0x06
 #define LUN_CAPACITY_CHANGED		0x09
 
-//transfer direction
-#define XFER_NONE		0x00
-#define XFER_WRITE		0x01
-#define XFER_READ		0x02
-#define XFER_RSVD		0x03
-
-//task attribute
-#define ATTR_UNTAGGED		0x00
-#define ATTR_SIMPLE		0x04
-#define ATTR_HEADOFQUEUE	0x05
-#define ATTR_ORDERED		0x06
-#define ATTR_ACA		0x07
-
-//cdb type
-#define TYPE_CMD		0x00
-#define TYPE_MSG		0x01
-
-//config space register offsets
+/* config space register offsets */
 #define CFG_VENDORID		0x00
 #define CFG_DEVICEID		0x02
 #define CFG_I2OBAR		0x10
 #define CFG_MEM1BAR		0x14
 
-//i2o space register offsets
+/* i2o space register offsets */
 #define I2O_IBDB_SET		0x20
 #define I2O_IBDB_CLEAR		0x70
 #define I2O_INT_STATUS		0x30
@@ -81,7 +49,7 @@
 #define I2O_OBPOST_Q		0x44
 #define I2O_DMA1_CFG		0x214
 
-//Configuration Table
+/* Configuration Table */
 #define CFGTBL_ChangeReq	0x00000001l
 #define CFGTBL_AccCmds		0x00000001l
 
@@ -103,24 +71,17 @@ typedef union _u64bit
 	__u64	val;
 } u64bit;
 
-// Type defs used in the following structs
-#define BYTE __u8
-#define WORD __u16
-#define HWORD __u16
-#define DWORD __u32
+/* Type defs used in the following structs */
 #define QWORD vals32
 
-//###########################################################################
-//STRUCTURES
-//###########################################################################
-#define CISS_MAX_LUN 1024
+/* STRUCTURES */
 #define CISS_MAX_PHYS_LUN 1024
-// SCSI-3 Cmmands
+/* SCSI-3 Cmmands */
 
 #pragma pack(1)
 
 #define CISS_INQUIRY 0x12
-//Date returned
+/* Date returned */
 typedef struct _InquiryData_struct
 {
   BYTE data_byte[36];
@@ -128,7 +89,7 @@ typedef struct _InquiryData_struct
 
 #define CISS_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define CISS_REPORT_PHYS 0xc3   /* Report Physical LUNs */
-// Data returned
+/* Data returned */
 typedef struct _ReportLUNdata_struct
 {
   BYTE LUNListLength[4];
@@ -139,8 +100,8 @@ typedef struct _ReportLUNdata_struct
 #define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
 typedef struct _ReadCapdata_struct
 {
-  BYTE total_size[4];	// Total size in blocks
-  BYTE block_size[4];	// Size of blocks in bytes
+  BYTE total_size[4];	/* Total size in blocks */
+  BYTE block_size[4];	/* Size of blocks in bytes */
 } ReadCapdata_struct;
 
 #define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
@@ -172,52 +133,13 @@ typedef struct _ReadCapdata_struct_16
 #define CDB_LEN10	10
 #define CDB_LEN16	16
 
-// BMIC commands
+/* BMIC commands */
 #define BMIC_READ 0x26
 #define BMIC_WRITE 0x27
 #define BMIC_CACHE_FLUSH 0xc2
-#define CCISS_CACHE_FLUSH 0x01	//C2 was already being used by CCISS
-
-//Command List Structure
-typedef union _SCSI3Addr_struct {
-   struct {
-    BYTE Dev;
-    BYTE Bus:6;
-    BYTE Mode:2;        // b00
-  } PeripDev;
-   struct {
-    BYTE DevLSB;
-    BYTE DevMSB:6;
-    BYTE Mode:2;        // b01
-  } LogDev;
-   struct {
-    BYTE Dev:5;
-    BYTE Bus:3;
-    BYTE Targ:6;
-    BYTE Mode:2;        // b10
-  } LogUnit;
-} SCSI3Addr_struct;
-
-typedef struct _PhysDevAddr_struct {
-  DWORD             TargetId:24;
-  DWORD             Bus:6;
-  DWORD             Mode:2;
-  SCSI3Addr_struct  Target[2]; //2 level target device addr
-} PhysDevAddr_struct;
-
-typedef struct _LogDevAddr_struct {
-  DWORD            VolId:30;
-  DWORD            Mode:2;
-  BYTE             reserved[4];
-} LogDevAddr_struct;
-
-typedef union _LUNAddr_struct {
-  BYTE               LunAddrBytes[8];
-  SCSI3Addr_struct   SCSI3Lun[4];
-  PhysDevAddr_struct PhysDev;
-  LogDevAddr_struct  LogDev;
-} LUNAddr_struct;
+#define CCISS_CACHE_FLUSH 0x01	/* C2 was already being used by CCISS */
 
+/* Command List Structure */
 #define CTLR_LUNID "\0\0\0\0\0\0\0\0"
 
 typedef struct _CommandListHeader_struct {
@@ -227,16 +149,6 @@ typedef struct _CommandListHeader_struct {
   QWORD             Tag;
   LUNAddr_struct    LUN;
 } CommandListHeader_struct;
-typedef struct _RequestBlock_struct {
-  BYTE   CDBLen;
-  struct {
-    BYTE Type:3;
-    BYTE Attribute:3;
-    BYTE Direction:2;
-  } Type;
-  HWORD  Timeout;
-  BYTE   CDB[16];
-} RequestBlock_struct;
 typedef struct _ErrDescriptor_struct {
   QWORD  Addr;
   DWORD  Len;
@@ -247,28 +159,6 @@ typedef struct _SGDescriptor_struct {
   DWORD  Ext;
 } SGDescriptor_struct;
 
-typedef union _MoreErrInfo_struct{
-  struct {
-    BYTE  Reserved[3];
-    BYTE  Type;
-    DWORD ErrorInfo;
-  }Common_Info;
-  struct{
-    BYTE  Reserved[2];
-    BYTE  offense_size;//size of offending entry
-    BYTE  offense_num; //byte # of offense 0-base
-    DWORD offense_value;
-  }Invalid_Cmd;
-}MoreErrInfo_struct;
-typedef struct _ErrorInfo_struct {
-  BYTE               ScsiStatus;
-  BYTE               SenseLen;
-  HWORD              CommandStatus;
-  DWORD              ResidualCnt;
-  MoreErrInfo_struct MoreErrInfo;
-  BYTE               SenseInfo[SENSEINFOBYTES];
-} ErrorInfo_struct;
-
 /* Command types */
 #define CMD_RWREQ      0x00
 #define CMD_IOCTL_PEND 0x01
@@ -277,10 +167,18 @@ typedef struct _ErrorInfo_struct {
 #define CMD_MSG_TIMEOUT 0x05
 #define CMD_MSG_STALE	0xff
 
-/* This structure needs to be divisible by 8 for new
- * indexing method.
+/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
+ * because low bits of the address are used to to indicate that
+ * whether the tag contains an index or an address.  PAD_32 and
+ * PAD_64 can be adjusted independently as needed for 32-bit
+ * and 64-bits systems.
  */
-#define PADSIZE (sizeof(long) - 4)
+#define COMMANDLIST_ALIGNMENT (8)
+#define IS_64_BIT ((sizeof(long) - 4)/4)
+#define IS_32_BIT (!IS_64_BIT)
+#define PAD_32 (0)
+#define PAD_64 (4)
+#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
 typedef struct _CommandList_struct {
   CommandListHeader_struct Header;
   RequestBlock_struct      Request;
@@ -300,7 +198,7 @@ typedef struct _CommandList_struct {
   char pad[PADSIZE];
 } CommandList_struct;
 
-//Configuration Table Structure
+/* Configuration Table Structure */
 typedef struct _HostWrite_struct {
   DWORD TransportRequest;
   DWORD Reserved;
@@ -326,4 +224,4 @@ typedef struct _CfgTable_struct {
   DWORD		MaxPhysicalDrivesPerLogicalUnit;
 } CfgTable_struct;
 #pragma pack()
-#endif // CCISS_CMD_H
+#endif /* CCISS_CMD_H */
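An aside on the padding arithmetic above: a small standalone sketch (userspace only, not part of the patch) that evaluates the same macros, showing that PADSIZE resolves to PAD_64 (4 bytes) on an LP64 build and PAD_32 (0 bytes) on a 32-bit build, which is what the BUILD_BUG_ON in cciss_init() checks against COMMANDLIST_ALIGNMENT.

#include <stdio.h>

/* Same macros as cciss_cmd.h above, evaluated outside the kernel for illustration. */
#define COMMANDLIST_ALIGNMENT (8)
#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)

int main(void)
{
	/* On LP64: sizeof(long) == 8, so IS_64_BIT == 1 and PADSIZE == 4.
	 * On 32-bit: sizeof(long) == 4, so IS_64_BIT == 0 and PADSIZE == 0. */
	printf("sizeof(long)=%zu IS_64_BIT=%zu PADSIZE=%zu\n",
	       sizeof(long), (size_t)IS_64_BIT, (size_t)PADSIZE);
	return 0;
}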
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 5d0e46dc3632..e1d0e2cfec72 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = {
 	.queuecommand		= cciss_scsi_queue_command,
 	.can_queue		= SCSI_CCISS_CAN_QUEUE,
 	.this_id		= 7,
-	.sg_tablesize		= MAXSGENTRIES,
 	.cmd_per_lun		= 1,
 	.use_clustering		= DISABLE_CLUSTERING,
 	/* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
@@ -93,11 +92,16 @@ static struct scsi_host_template cciss_driver_template = {
 };
 
 #pragma pack(1)
+
+#define SCSI_PAD_32 0
+#define SCSI_PAD_64 0
+
 struct cciss_scsi_cmd_stack_elem_t {
 	CommandList_struct cmd;
 	ErrorInfo_struct Err;
 	__u32 busaddr;
-	__u32 pad;
+	int cmdindex;
+	u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
 };
 
 #pragma pack()
@@ -118,16 +122,15 @@ struct cciss_scsi_cmd_stack_t {
 struct cciss_scsi_adapter_data_t {
 	struct Scsi_Host *scsi_host;
 	struct cciss_scsi_cmd_stack_t cmd_stack;
+	SGDescriptor_struct **cmd_sg_list;
 	int registered;
 	spinlock_t lock; // to protect ccissscsi[ctlr];
 };
 
 #define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
-	&(((struct cciss_scsi_adapter_data_t *) \
-	hba[ctlr]->scsi_ctlr)->lock), flags);
+	&hba[ctlr]->scsi_ctlr->lock, flags);
 #define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
-	&(((struct cciss_scsi_adapter_data_t *) \
-	hba[ctlr]->scsi_ctlr)->lock), flags);
+	&hba[ctlr]->scsi_ctlr->lock, flags);
 
 static CommandList_struct *
 scsi_cmd_alloc(ctlr_info_t *h)
@@ -143,7 +146,7 @@ scsi_cmd_alloc(ctlr_info_t *h)
 	struct cciss_scsi_cmd_stack_t *stk;
 	u64bit temp64;
 
-	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+	sa = h->scsi_ctlr;
 	stk = &sa->cmd_stack;
 
 	if (stk->top < 0)
@@ -154,6 +157,7 @@ scsi_cmd_alloc(ctlr_info_t *h)
 	memset(&c->Err, 0, sizeof(c->Err));
 	/* set physical addr of cmd and addr of scsi parameters */
 	c->cmd.busaddr = c->busaddr;
+	c->cmd.cmdindex = c->cmdindex;
 	/* (__u32) (stk->cmd_pool_handle +
 		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
 
@@ -182,7 +186,7 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_cmd_stack_t *stk;
 
-	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+	sa = h->scsi_ctlr;
 	stk = &sa->cmd_stack;
 	if (stk->top >= CMD_STACK_SIZE) {
 		printk("cciss: scsi_cmd_free called too many times.\n");
@@ -199,24 +203,31 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
 	struct cciss_scsi_cmd_stack_t *stk;
 	size_t size;
 
+	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr],
+		hba[ctlr]->chainsize, CMD_STACK_SIZE);
+	if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0)
+		return -ENOMEM;
+
 	stk = &sa->cmd_stack;
 	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
 
-	// pci_alloc_consistent guarantees 32-bit DMA address will
-	// be used
-
+	/* Check alignment, see cciss_cmd.h near CommandList_struct def. */
+	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
+	/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
 	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
 		pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
 
 	if (stk->pool == NULL) {
-		printk("stk->pool is null\n");
-		return -1;
+		cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
+		sa->cmd_sg_list = NULL;
+		return -ENOMEM;
 	}
 
 	for (i=0; i<CMD_STACK_SIZE; i++) {
 		stk->elem[i] = &stk->pool[i];
 		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
 			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
+		stk->elem[i]->cmdindex = i;
 	}
 	stk->top = CMD_STACK_SIZE-1;
 	return 0;
@@ -229,7 +240,7 @@ scsi_cmd_stack_free(int ctlr)
 	struct cciss_scsi_cmd_stack_t *stk;
 	size_t size;
 
-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+	sa = hba[ctlr]->scsi_ctlr;
 	stk = &sa->cmd_stack;
 	if (stk->top != CMD_STACK_SIZE-1) {
 		printk( "cciss: %d scsi commands are still outstanding.\n",
@@ -241,6 +252,7 @@ scsi_cmd_stack_free(int ctlr)
 
 	pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
 	stk->pool = NULL;
+	cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
 }
 
 #if 0
@@ -530,8 +542,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 	CPQ_TAPE_LOCK(ctlr, flags);
 
 	if (hostno != -1)  /* if it's not the first time... */
-		sh = ((struct cciss_scsi_adapter_data_t *)
-			hba[ctlr]->scsi_ctlr)->scsi_host;
+		sh = hba[ctlr]->scsi_ctlr->scsi_host;
 
 	/* find any devices in ccissscsi[] that are not in
 	   sd[] and remove them from ccissscsi[] */
@@ -702,7 +713,7 @@ cciss_scsi_setup(int cntl_num)
 		kfree(shba);
 		shba = NULL;
 	}
-	hba[cntl_num]->scsi_ctlr = (void *) shba;
+	hba[cntl_num]->scsi_ctlr = shba;
 	return;
 }
 
@@ -725,6 +736,8 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
 	ctlr = hba[cp->ctlr];
 
 	scsi_dma_unmap(cmd);
+	if (cp->Header.SGTotal > ctlr->max_cmd_sgentries)
+		cciss_unmap_sg_chain_block(ctlr, cp);
 
 	cmd->result = (DID_OK << 16);		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
@@ -847,9 +860,10 @@ cciss_scsi_detect(int ctlr)
 	sh->io_port = 0;	// good enough? FIXME,
 	sh->n_io_port = 0;	// I don't think we use these two...
 	sh->this_id = SELF_SCSI_ID;
+	sh->sg_tablesize = hba[ctlr]->maxsgentries;
 
 	((struct cciss_scsi_adapter_data_t *)
-		hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
+		hba[ctlr]->scsi_ctlr)->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) hba[ctlr];
 	sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
 	sh->unique_id = sh->irq;
@@ -1364,34 +1378,54 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
    dma mapping and fills in the scatter gather entries of the
    cciss command, cp. */
 
-static void
-cciss_scatter_gather(struct pci_dev *pdev,
-		CommandList_struct *cp,
-		struct scsi_cmnd *cmd)
+static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
+	struct scsi_cmnd *cmd)
 {
 	unsigned int len;
 	struct scatterlist *sg;
 	__u64 addr64;
-	int use_sg, i;
-
-	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+	int request_nsgs, i, chained, sg_index;
+	struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
+	SGDescriptor_struct *curr_sg;
 
-	use_sg = scsi_dma_map(cmd);
-	if (use_sg) {	/* not too many addrs? */
-		scsi_for_each_sg(cmd, sg, use_sg, i) {
+	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+
+	chained = 0;
+	sg_index = 0;
+	curr_sg = cp->SG;
+	request_nsgs = scsi_dma_map(cmd);
+	if (request_nsgs) {
+		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
+			if (sg_index + 1 == h->max_cmd_sgentries &&
+				!chained && request_nsgs - i > 1) {
+				chained = 1;
+				sg_index = 0;
+				curr_sg = sa->cmd_sg_list[cp->cmdindex];
+			}
 			addr64 = (__u64) sg_dma_address(sg);
 			len  = sg_dma_len(sg);
-			cp->SG[i].Addr.lower =
-				(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
-			cp->SG[i].Addr.upper =
-				(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
-			cp->SG[i].Len = len;
-			cp->SG[i].Ext = 0;  // we are not chaining
+			curr_sg[sg_index].Addr.lower =
+				(__u32) (addr64 & 0x0FFFFFFFFULL);
+			curr_sg[sg_index].Addr.upper =
+				(__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+			curr_sg[sg_index].Len = len;
+			curr_sg[sg_index].Ext = 0;
+			++sg_index;
 		}
+		if (chained)
+			cciss_map_sg_chain_block(h, cp,
+				sa->cmd_sg_list[cp->cmdindex],
+				(request_nsgs - (h->max_cmd_sgentries - 1)) *
+				sizeof(SGDescriptor_struct));
 	}
-
-	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
-	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
+	/* track how many SG entries we are using */
+	if (request_nsgs > h->maxSG)
+		h->maxSG = request_nsgs;
+	cp->Header.SGTotal = (__u8) request_nsgs + chained;
+	if (request_nsgs > h->max_cmd_sgentries)
+		cp->Header.SGList = h->max_cmd_sgentries;
+	else
+		cp->Header.SGList = cp->Header.SGTotal;
 	return;
 }
 
@@ -1399,7 +1433,7 @@ cciss_scatter_gather(struct pci_dev *pdev,
 static int
 cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 {
-	ctlr_info_t **c;
+	ctlr_info_t *c;
 	int ctlr, rc;
 	unsigned char scsi3addr[8];
 	CommandList_struct *cp;
@@ -1407,8 +1441,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
 
 	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
 	// We violate cmd->host privacy here.  (Is there another way?)
-	c = (ctlr_info_t **) &cmd->device->host->hostdata[0];
-	ctlr = (*c)->ctlr;
+	c = (ctlr_info_t *) cmd->device->host->hostdata[0];
+	ctlr = c->ctlr;
 
 	rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id,
 			cmd->device->lun, scsi3addr);
@@ -1431,7 +1465,7 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
 	   see what the device thinks of it. */
 
 	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	cp = scsi_cmd_alloc(*c);
+	cp = scsi_cmd_alloc(c);
 	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 	if (cp == NULL) {			/* trouble... */
 		printk("scsi_cmd_alloc returned NULL!\n");
@@ -1489,15 +1523,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
 		BUG();
 		break;
 	}
-
-	cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
+	cciss_scatter_gather(c, cp, cmd);
 
 	/* Put the request on the tail of the request queue */
 
 	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	addQ(&(*c)->reqQ, cp);
-	(*c)->Qdepth++;
-	start_io(*c);
+	addQ(&c->reqQ, cp);
+	c->Qdepth++;
+	start_io(c);
 	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 
 	/* the cmd'll come back via intr handler in complete_scsi_command() */
@@ -1514,7 +1547,7 @@ cciss_unregister_scsi(int ctlr)
 	/* we are being forcibly unloaded, and may not refuse. */
 
 	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+	sa = hba[ctlr]->scsi_ctlr;
 	stk = &sa->cmd_stack;
 
 	/* if we weren't ever actually registered, don't unregister */
@@ -1541,7 +1574,7 @@ cciss_engage_scsi(int ctlr)
 	unsigned long flags;
 
 	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+	sa = hba[ctlr]->scsi_ctlr;
 	stk = &sa->cmd_stack;
 
 	if (sa->registered) {
@@ -1654,14 +1687,14 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 	int rc;
 	CommandList_struct *cmd_in_trouble;
 	unsigned char lunaddr[8];
-	ctlr_info_t **c;
+	ctlr_info_t *c;
 	int ctlr;
 
 	/* find the controller to which the command to be aborted was sent */
-	c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];
+	c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
 	if (c == NULL) /* paranoia */
 		return FAILED;
-	ctlr = (*c)->ctlr;
+	ctlr = c->ctlr;
 	printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
 	/* find the command that's giving us trouble */
 	cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
@@ -1671,7 +1704,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1671 | /* send a reset to the SCSI LUN which the command was sent to */ | 1704 | /* send a reset to the SCSI LUN which the command was sent to */ |
1672 | rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, | 1705 | rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, |
1673 | TYPE_MSG); | 1706 | TYPE_MSG); |
1674 | if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0) | 1707 | if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0) |
1675 | return SUCCESS; | 1708 | return SUCCESS; |
1676 | printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); | 1709 | printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); |
1677 | return FAILED; | 1710 | return FAILED; |
@@ -1682,14 +1715,14 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
1682 | int rc; | 1715 | int rc; |
1683 | CommandList_struct *cmd_to_abort; | 1716 | CommandList_struct *cmd_to_abort; |
1684 | unsigned char lunaddr[8]; | 1717 | unsigned char lunaddr[8]; |
1685 | ctlr_info_t **c; | 1718 | ctlr_info_t *c; |
1686 | int ctlr; | 1719 | int ctlr; |
1687 | 1720 | ||
1688 | /* find the controller to which the command to be aborted was sent */ | 1721 | /* find the controller to which the command to be aborted was sent */ |
1689 | c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0]; | 1722 | c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; |
1690 | if (c == NULL) /* paranoia */ | 1723 | if (c == NULL) /* paranoia */ |
1691 | return FAILED; | 1724 | return FAILED; |
1692 | ctlr = (*c)->ctlr; | 1725 | ctlr = c->ctlr; |
1693 | printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr); | 1726 | printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr); |
1694 | 1727 | ||
1695 | /* find the command to be aborted */ | 1728 | /* find the command to be aborted */ |
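The queue_command and error-handler hunks above also drop a level of indirection in how the controller is recovered from the SCSI host: instead of treating &host->hostdata[0] as a ctlr_info_t **, the value stored in hostdata[0] is cast straight back to ctlr_info_t *. A minimal sketch of the two access patterns, using a stand-in struct for Scsi_Host's unsigned long hostdata[] array and a dummy ctlr_info_t:

	/* Sketch only: fake_host models Scsi_Host::hostdata, ctlr_info_t is a dummy. */
	typedef struct { int ctlr; } ctlr_info_t;

	struct fake_host { unsigned long hostdata[1]; };

	/* Old pattern: take the address of hostdata[0] and read it as a pointer slot. */
	static ctlr_info_t *get_ctlr_old(struct fake_host *sh)
	{
		ctlr_info_t **c = (ctlr_info_t **) &sh->hostdata[0];
		return *c;
	}

	/* New pattern: cast the stored value back to the pointer type directly. */
	static ctlr_info_t *get_ctlr_new(struct fake_host *sh)
	{
		return (ctlr_info_t *) sh->hostdata[0];
	}

	int main(void)
	{
		ctlr_info_t h = { 0 };
		struct fake_host sh = { { (unsigned long) &h } };

		/* Both forms read the same slot; the new one just matches how the
		 * pointer value was stored. */
		return get_ctlr_old(&sh) == get_ctlr_new(&sh) ? 0 : 1;
	}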
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h index 7b750245ae76..6d5822fe851a 100644 --- a/drivers/block/cciss_scsi.h +++ b/drivers/block/cciss_scsi.h | |||
@@ -25,16 +25,16 @@ | |||
25 | 25 | ||
26 | #include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */ | 26 | #include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */ |
27 | 27 | ||
28 | // the scsi id of the adapter... | 28 | /* the scsi id of the adapter... */ |
29 | #define SELF_SCSI_ID 15 | 29 | #define SELF_SCSI_ID 15 |
30 | // 15 is somewhat arbitrary, since the scsi-2 bus | 30 | /* 15 is somewhat arbitrary, since the scsi-2 bus |
31 | // that's presented by the driver to the OS is | 31 | that's presented by the driver to the OS is |
32 | // fabricated. The "real" scsi-3 bus the | 32 | fabricated. The "real" scsi-3 bus the |
33 | // hardware presents is fabricated too. | 33 | hardware presents is fabricated too. |
34 | // The actual, honest-to-goodness physical | 34 | The actual, honest-to-goodness physical |
35 | // bus that the devices are attached to is not | 35 | bus that the devices are attached to is not |
36 | // addressible natively, and may in fact turn | 36 | addressible natively, and may in fact turn |
37 | // out to be not scsi at all. | 37 | out to be not scsi at all. */ |
38 | 38 | ||
39 | #define SCSI_CCISS_CAN_QUEUE 2 | 39 | #define SCSI_CCISS_CAN_QUEUE 2 |
40 | 40 | ||
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 6422651ec364..91d11631cec9 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
@@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) | |||
448 | blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask); | 448 | blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask); |
449 | 449 | ||
450 | /* This is a hardware imposed limit. */ | 450 | /* This is a hardware imposed limit. */ |
451 | blk_queue_max_hw_segments(q, SG_MAX); | 451 | blk_queue_max_segments(q, SG_MAX); |
452 | 452 | ||
453 | /* This is a driver limit and could be eliminated. */ | ||
454 | blk_queue_max_phys_segments(q, SG_MAX); | ||
455 | |||
456 | init_timer(&hba[i]->timer); | 453 | init_timer(&hba[i]->timer); |
457 | hba[i]->timer.expires = jiffies + IDA_TIMER; | 454 | hba[i]->timer.expires = jiffies + IDA_TIMER; |
458 | hba[i]->timer.data = (unsigned long)hba[i]; | 455 | hba[i]->timer.data = (unsigned long)hba[i]; |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1292e0620663..4df3b40b1057 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -709,9 +709,8 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu | |||
709 | 709 | ||
710 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); | 710 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); |
711 | 711 | ||
712 | blk_queue_max_sectors(q, max_seg_s >> 9); | 712 | blk_queue_max_hw_sectors(q, max_seg_s >> 9); |
713 | blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS); | 713 | blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); |
714 | blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS); | ||
715 | blk_queue_max_segment_size(q, max_seg_s); | 714 | blk_queue_max_segment_size(q, max_seg_s); |
716 | blk_queue_logical_block_size(q, 512); | 715 | blk_queue_logical_block_size(q, 512); |
717 | blk_queue_segment_boundary(q, PAGE_SIZE-1); | 716 | blk_queue_segment_boundary(q, PAGE_SIZE-1); |
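Most of the remaining driver hunks in this section are one mechanical conversion from the "Consolidate phys_segment and hw_segment limits" change: the separate blk_queue_max_phys_segments() and blk_queue_max_hw_segments() calls collapse into a single blk_queue_max_segments(), and blk_queue_max_sectors() is renamed blk_queue_max_hw_sectors(), with BLK_MAX_SEGMENTS, BLK_MAX_SEGMENT_SIZE and BLK_SAFE_MAX_SECTORS replacing the old default macros where drivers used them. A minimal sketch of a queue setup before and after, with hypothetical mydrv limits; it only builds against a kernel tree of this era:

	#include <linux/blkdev.h>

	#define MYDRV_MAX_SG		64	/* hypothetical hardware SG limit  */
	#define MYDRV_MAX_SECTORS	256	/* hypothetical per-request limit  */

	static void mydrv_setup_queue(struct request_queue *q)
	{
		/*
		 * Pre-2.6.34 style, removed by this series:
		 *   blk_queue_max_phys_segments(q, MYDRV_MAX_SG);
		 *   blk_queue_max_hw_segments(q, MYDRV_MAX_SG);
		 *   blk_queue_max_sectors(q, MYDRV_MAX_SECTORS);
		 */
		blk_queue_max_segments(q, MYDRV_MAX_SG);	/* one segment limit now */
		blk_queue_max_hw_sectors(q, MYDRV_MAX_SECTORS);
	}

The cpqarray and drbd hunks above and the floppy, hd, mg_disk, paride, ps3disk, ps3vram, sunvdc, sx8, ub, viodasd, xd and xen-blkfront hunks that follow are all instances of this substitution.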
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 3266b4f65daa..b9b117059b62 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4234,7 +4234,7 @@ static int __init floppy_init(void) | |||
4234 | err = -ENOMEM; | 4234 | err = -ENOMEM; |
4235 | goto out_unreg_driver; | 4235 | goto out_unreg_driver; |
4236 | } | 4236 | } |
4237 | blk_queue_max_sectors(floppy_queue, 64); | 4237 | blk_queue_max_hw_sectors(floppy_queue, 64); |
4238 | 4238 | ||
4239 | blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, | 4239 | blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, |
4240 | floppy_find, NULL, NULL); | 4240 | floppy_find, NULL, NULL); |
diff --git a/drivers/block/hd.c b/drivers/block/hd.c index d5cdce08ffd2..5116c65c07cb 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c | |||
@@ -719,7 +719,7 @@ static int __init hd_init(void) | |||
719 | return -ENOMEM; | 719 | return -ENOMEM; |
720 | } | 720 | } |
721 | 721 | ||
722 | blk_queue_max_sectors(hd_queue, 255); | 722 | blk_queue_max_hw_sectors(hd_queue, 255); |
723 | init_timer(&device_timer); | 723 | init_timer(&device_timer); |
724 | device_timer.function = hd_times_out; | 724 | device_timer.function = hd_times_out; |
725 | blk_queue_logical_block_size(hd_queue, 512); | 725 | blk_queue_logical_block_size(hd_queue, 512); |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 02b2583df7fc..5416c9a606e4 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c | |||
@@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
980 | __func__, __LINE__); | 980 | __func__, __LINE__); |
981 | goto probe_err_6; | 981 | goto probe_err_6; |
982 | } | 982 | } |
983 | blk_queue_max_sectors(host->breq, MG_MAX_SECTS); | 983 | blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS); |
984 | blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); | 984 | blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); |
985 | 985 | ||
986 | init_timer(&host->timer); | 986 | init_timer(&host->timer); |
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 569e39e8f114..e712cd51af15 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -906,7 +906,7 @@ static int __init pd_init(void) | |||
906 | if (!pd_queue) | 906 | if (!pd_queue) |
907 | goto out1; | 907 | goto out1; |
908 | 908 | ||
909 | blk_queue_max_sectors(pd_queue, cluster); | 909 | blk_queue_max_hw_sectors(pd_queue, cluster); |
910 | 910 | ||
911 | if (register_blkdev(major, name)) | 911 | if (register_blkdev(major, name)) |
912 | goto out2; | 912 | goto out2; |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ea54ea393553..ddb4f9abd480 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -956,8 +956,7 @@ static int __init pf_init(void) | |||
956 | return -ENOMEM; | 956 | return -ENOMEM; |
957 | } | 957 | } |
958 | 958 | ||
959 | blk_queue_max_phys_segments(pf_queue, cluster); | 959 | blk_queue_max_segments(pf_queue, cluster); |
960 | blk_queue_max_hw_segments(pf_queue, cluster); | ||
961 | 960 | ||
962 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { | 961 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
963 | struct gendisk *disk = pf->disk; | 962 | struct gendisk *disk = pf->disk; |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 68b5957f107c..b72935b8f203 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -569,6 +569,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames) | |||
569 | } | 569 | } |
570 | 570 | ||
571 | spin_lock_init(&pkt->lock); | 571 | spin_lock_init(&pkt->lock); |
572 | bio_list_init(&pkt->orig_bios); | ||
572 | 573 | ||
573 | for (i = 0; i < frames; i++) { | 574 | for (i = 0; i < frames; i++) { |
574 | struct bio *bio = pkt_bio_alloc(1); | 575 | struct bio *bio = pkt_bio_alloc(1); |
@@ -721,43 +722,6 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod | |||
721 | } | 722 | } |
722 | 723 | ||
723 | /* | 724 | /* |
724 | * Add a bio to a single linked list defined by its head and tail pointers. | ||
725 | */ | ||
726 | static void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail) | ||
727 | { | ||
728 | bio->bi_next = NULL; | ||
729 | if (*list_tail) { | ||
730 | BUG_ON((*list_head) == NULL); | ||
731 | (*list_tail)->bi_next = bio; | ||
732 | (*list_tail) = bio; | ||
733 | } else { | ||
734 | BUG_ON((*list_head) != NULL); | ||
735 | (*list_head) = bio; | ||
736 | (*list_tail) = bio; | ||
737 | } | ||
738 | } | ||
739 | |||
740 | /* | ||
741 | * Remove and return the first bio from a single linked list defined by its | ||
742 | * head and tail pointers. | ||
743 | */ | ||
744 | static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail) | ||
745 | { | ||
746 | struct bio *bio; | ||
747 | |||
748 | if (*list_head == NULL) | ||
749 | return NULL; | ||
750 | |||
751 | bio = *list_head; | ||
752 | *list_head = bio->bi_next; | ||
753 | if (*list_head == NULL) | ||
754 | *list_tail = NULL; | ||
755 | |||
756 | bio->bi_next = NULL; | ||
757 | return bio; | ||
758 | } | ||
759 | |||
760 | /* | ||
761 | * Send a packet_command to the underlying block device and | 725 | * Send a packet_command to the underlying block device and |
762 | * wait for completion. | 726 | * wait for completion. |
763 | */ | 727 | */ |
@@ -876,13 +840,10 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, | |||
876 | static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) | 840 | static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) |
877 | { | 841 | { |
878 | spin_lock(&pd->iosched.lock); | 842 | spin_lock(&pd->iosched.lock); |
879 | if (bio_data_dir(bio) == READ) { | 843 | if (bio_data_dir(bio) == READ) |
880 | pkt_add_list_last(bio, &pd->iosched.read_queue, | 844 | bio_list_add(&pd->iosched.read_queue, bio); |
881 | &pd->iosched.read_queue_tail); | 845 | else |
882 | } else { | 846 | bio_list_add(&pd->iosched.write_queue, bio); |
883 | pkt_add_list_last(bio, &pd->iosched.write_queue, | ||
884 | &pd->iosched.write_queue_tail); | ||
885 | } | ||
886 | spin_unlock(&pd->iosched.lock); | 847 | spin_unlock(&pd->iosched.lock); |
887 | 848 | ||
888 | atomic_set(&pd->iosched.attention, 1); | 849 | atomic_set(&pd->iosched.attention, 1); |
@@ -917,8 +878,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
917 | int reads_queued, writes_queued; | 878 | int reads_queued, writes_queued; |
918 | 879 | ||
919 | spin_lock(&pd->iosched.lock); | 880 | spin_lock(&pd->iosched.lock); |
920 | reads_queued = (pd->iosched.read_queue != NULL); | 881 | reads_queued = !bio_list_empty(&pd->iosched.read_queue); |
921 | writes_queued = (pd->iosched.write_queue != NULL); | 882 | writes_queued = !bio_list_empty(&pd->iosched.write_queue); |
922 | spin_unlock(&pd->iosched.lock); | 883 | spin_unlock(&pd->iosched.lock); |
923 | 884 | ||
924 | if (!reads_queued && !writes_queued) | 885 | if (!reads_queued && !writes_queued) |
@@ -927,7 +888,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
927 | if (pd->iosched.writing) { | 888 | if (pd->iosched.writing) { |
928 | int need_write_seek = 1; | 889 | int need_write_seek = 1; |
929 | spin_lock(&pd->iosched.lock); | 890 | spin_lock(&pd->iosched.lock); |
930 | bio = pd->iosched.write_queue; | 891 | bio = bio_list_peek(&pd->iosched.write_queue); |
931 | spin_unlock(&pd->iosched.lock); | 892 | spin_unlock(&pd->iosched.lock); |
932 | if (bio && (bio->bi_sector == pd->iosched.last_write)) | 893 | if (bio && (bio->bi_sector == pd->iosched.last_write)) |
933 | need_write_seek = 0; | 894 | need_write_seek = 0; |
@@ -950,13 +911,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
950 | } | 911 | } |
951 | 912 | ||
952 | spin_lock(&pd->iosched.lock); | 913 | spin_lock(&pd->iosched.lock); |
953 | if (pd->iosched.writing) { | 914 | if (pd->iosched.writing) |
954 | bio = pkt_get_list_first(&pd->iosched.write_queue, | 915 | bio = bio_list_pop(&pd->iosched.write_queue); |
955 | &pd->iosched.write_queue_tail); | 916 | else |
956 | } else { | 917 | bio = bio_list_pop(&pd->iosched.read_queue); |
957 | bio = pkt_get_list_first(&pd->iosched.read_queue, | ||
958 | &pd->iosched.read_queue_tail); | ||
959 | } | ||
960 | spin_unlock(&pd->iosched.lock); | 918 | spin_unlock(&pd->iosched.lock); |
961 | 919 | ||
962 | if (!bio) | 920 | if (!bio) |
@@ -992,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
992 | static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) | 950 | static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) |
993 | { | 951 | { |
994 | if ((pd->settings.size << 9) / CD_FRAMESIZE | 952 | if ((pd->settings.size << 9) / CD_FRAMESIZE |
995 | <= queue_max_phys_segments(q)) { | 953 | <= queue_max_segments(q)) { |
996 | /* | 954 | /* |
997 | * The cdrom device can handle one segment/frame | 955 | * The cdrom device can handle one segment/frame |
998 | */ | 956 | */ |
999 | clear_bit(PACKET_MERGE_SEGS, &pd->flags); | 957 | clear_bit(PACKET_MERGE_SEGS, &pd->flags); |
1000 | return 0; | 958 | return 0; |
1001 | } else if ((pd->settings.size << 9) / PAGE_SIZE | 959 | } else if ((pd->settings.size << 9) / PAGE_SIZE |
1002 | <= queue_max_phys_segments(q)) { | 960 | <= queue_max_segments(q)) { |
1003 | /* | 961 | /* |
1004 | * We can handle this case at the expense of some extra memory | 962 | * We can handle this case at the expense of some extra memory |
1005 | * copies during write operations | 963 | * copies during write operations |
@@ -1114,7 +1072,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1114 | int f; | 1072 | int f; |
1115 | char written[PACKET_MAX_SIZE]; | 1073 | char written[PACKET_MAX_SIZE]; |
1116 | 1074 | ||
1117 | BUG_ON(!pkt->orig_bios); | 1075 | BUG_ON(bio_list_empty(&pkt->orig_bios)); |
1118 | 1076 | ||
1119 | atomic_set(&pkt->io_wait, 0); | 1077 | atomic_set(&pkt->io_wait, 0); |
1120 | atomic_set(&pkt->io_errors, 0); | 1078 | atomic_set(&pkt->io_errors, 0); |
@@ -1124,7 +1082,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1124 | */ | 1082 | */ |
1125 | memset(written, 0, sizeof(written)); | 1083 | memset(written, 0, sizeof(written)); |
1126 | spin_lock(&pkt->lock); | 1084 | spin_lock(&pkt->lock); |
1127 | for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { | 1085 | bio_list_for_each(bio, &pkt->orig_bios) { |
1128 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); | 1086 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); |
1129 | int num_frames = bio->bi_size / CD_FRAMESIZE; | 1087 | int num_frames = bio->bi_size / CD_FRAMESIZE; |
1130 | pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); | 1088 | pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); |
@@ -1363,7 +1321,7 @@ try_next_bio: | |||
1363 | break; | 1321 | break; |
1364 | pkt_rbtree_erase(pd, node); | 1322 | pkt_rbtree_erase(pd, node); |
1365 | spin_lock(&pkt->lock); | 1323 | spin_lock(&pkt->lock); |
1366 | pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail); | 1324 | bio_list_add(&pkt->orig_bios, bio); |
1367 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; | 1325 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; |
1368 | spin_unlock(&pkt->lock); | 1326 | spin_unlock(&pkt->lock); |
1369 | } | 1327 | } |
@@ -1409,7 +1367,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1409 | */ | 1367 | */ |
1410 | frames_write = 0; | 1368 | frames_write = 0; |
1411 | spin_lock(&pkt->lock); | 1369 | spin_lock(&pkt->lock); |
1412 | for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { | 1370 | bio_list_for_each(bio, &pkt->orig_bios) { |
1413 | int segment = bio->bi_idx; | 1371 | int segment = bio->bi_idx; |
1414 | int src_offs = 0; | 1372 | int src_offs = 0; |
1415 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); | 1373 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); |
@@ -1472,20 +1430,14 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1472 | 1430 | ||
1473 | static void pkt_finish_packet(struct packet_data *pkt, int uptodate) | 1431 | static void pkt_finish_packet(struct packet_data *pkt, int uptodate) |
1474 | { | 1432 | { |
1475 | struct bio *bio, *next; | 1433 | struct bio *bio; |
1476 | 1434 | ||
1477 | if (!uptodate) | 1435 | if (!uptodate) |
1478 | pkt->cache_valid = 0; | 1436 | pkt->cache_valid = 0; |
1479 | 1437 | ||
1480 | /* Finish all bios corresponding to this packet */ | 1438 | /* Finish all bios corresponding to this packet */ |
1481 | bio = pkt->orig_bios; | 1439 | while ((bio = bio_list_pop(&pkt->orig_bios))) |
1482 | while (bio) { | ||
1483 | next = bio->bi_next; | ||
1484 | bio->bi_next = NULL; | ||
1485 | bio_endio(bio, uptodate ? 0 : -EIO); | 1440 | bio_endio(bio, uptodate ? 0 : -EIO); |
1486 | bio = next; | ||
1487 | } | ||
1488 | pkt->orig_bios = pkt->orig_bios_tail = NULL; | ||
1489 | } | 1441 | } |
1490 | 1442 | ||
1491 | static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) | 1443 | static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) |
@@ -2360,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) | |||
2360 | * even if the size is a multiple of the packet size. | 2312 | * even if the size is a multiple of the packet size. |
2361 | */ | 2313 | */ |
2362 | spin_lock_irq(q->queue_lock); | 2314 | spin_lock_irq(q->queue_lock); |
2363 | blk_queue_max_sectors(q, pd->settings.size); | 2315 | blk_queue_max_hw_sectors(q, pd->settings.size); |
2364 | spin_unlock_irq(q->queue_lock); | 2316 | spin_unlock_irq(q->queue_lock); |
2365 | set_bit(PACKET_WRITABLE, &pd->flags); | 2317 | set_bit(PACKET_WRITABLE, &pd->flags); |
2366 | } else { | 2318 | } else { |
@@ -2567,8 +2519,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
2567 | spin_lock(&pkt->lock); | 2519 | spin_lock(&pkt->lock); |
2568 | if ((pkt->state == PACKET_WAITING_STATE) || | 2520 | if ((pkt->state == PACKET_WAITING_STATE) || |
2569 | (pkt->state == PACKET_READ_WAIT_STATE)) { | 2521 | (pkt->state == PACKET_READ_WAIT_STATE)) { |
2570 | pkt_add_list_last(bio, &pkt->orig_bios, | 2522 | bio_list_add(&pkt->orig_bios, bio); |
2571 | &pkt->orig_bios_tail); | ||
2572 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; | 2523 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; |
2573 | if ((pkt->write_size >= pkt->frames) && | 2524 | if ((pkt->write_size >= pkt->frames) && |
2574 | (pkt->state == PACKET_WAITING_STATE)) { | 2525 | (pkt->state == PACKET_WAITING_STATE)) { |
@@ -2662,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd) | |||
2662 | 2613 | ||
2663 | blk_queue_make_request(q, pkt_make_request); | 2614 | blk_queue_make_request(q, pkt_make_request); |
2664 | blk_queue_logical_block_size(q, CD_FRAMESIZE); | 2615 | blk_queue_logical_block_size(q, CD_FRAMESIZE); |
2665 | blk_queue_max_sectors(q, PACKET_MAX_SECTORS); | 2616 | blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); |
2666 | blk_queue_merge_bvec(q, pkt_merge_bvec); | 2617 | blk_queue_merge_bvec(q, pkt_merge_bvec); |
2667 | q->queuedata = pd; | 2618 | q->queuedata = pd; |
2668 | } | 2619 | } |
@@ -2898,6 +2849,8 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) | |||
2898 | 2849 | ||
2899 | spin_lock_init(&pd->lock); | 2850 | spin_lock_init(&pd->lock); |
2900 | spin_lock_init(&pd->iosched.lock); | 2851 | spin_lock_init(&pd->iosched.lock); |
2852 | bio_list_init(&pd->iosched.read_queue); | ||
2853 | bio_list_init(&pd->iosched.write_queue); | ||
2901 | sprintf(pd->name, DRIVER_NAME"%d", idx); | 2854 | sprintf(pd->name, DRIVER_NAME"%d", idx); |
2902 | init_waitqueue_head(&pd->wqueue); | 2855 | init_waitqueue_head(&pd->wqueue); |
2903 | pd->bio_queue = RB_ROOT; | 2856 | pd->bio_queue = RB_ROOT; |
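The pktcdvd.c hunks above retire the driver's private head/tail singly linked bio lists (pkt_add_list_last() and pkt_get_list_first()) in favour of the generic bio_list helpers, and add the matching bio_list_init() calls for pkt->orig_bios and the iosched read/write queues. A minimal sketch of the pattern the converted code follows, with an illustrative queue name; again this is kernel-only code:

	#include <linux/bio.h>

	struct my_sched {
		struct bio_list read_queue;	/* replaces a head/tail pointer pair */
	};

	static void my_sched_init(struct my_sched *s)
	{
		bio_list_init(&s->read_queue);
	}

	static void my_sched_queue(struct my_sched *s, struct bio *bio)
	{
		bio_list_add(&s->read_queue, bio);	/* append at the tail */
	}

	static void my_sched_drain(struct my_sched *s, int error)
	{
		struct bio *bio;

		if (bio_list_empty(&s->read_queue))
			return;
		while ((bio = bio_list_pop(&s->read_queue)))	/* detach from the head */
			bio_endio(bio, error);
	}

bio_list_peek() and bio_list_for_each() cover the remaining uses above, where the driver inspects the head or walks the list without removing entries.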
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 03a130dca8ab..bc95469d33c1 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
@@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) | |||
474 | 474 | ||
475 | blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); | 475 | blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); |
476 | 476 | ||
477 | blk_queue_max_sectors(queue, dev->bounce_size >> 9); | 477 | blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); |
478 | blk_queue_segment_boundary(queue, -1UL); | 478 | blk_queue_segment_boundary(queue, -1UL); |
479 | blk_queue_dma_alignment(queue, dev->blk_size-1); | 479 | blk_queue_dma_alignment(queue, dev->blk_size-1); |
480 | blk_queue_logical_block_size(queue, dev->blk_size); | 480 | blk_queue_logical_block_size(queue, dev->blk_size); |
@@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) | |||
482 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, | 482 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, |
483 | ps3disk_prepare_flush); | 483 | ps3disk_prepare_flush); |
484 | 484 | ||
485 | blk_queue_max_phys_segments(queue, -1); | 485 | blk_queue_max_segments(queue, -1); |
486 | blk_queue_max_hw_segments(queue, -1); | ||
487 | blk_queue_max_segment_size(queue, dev->bounce_size); | 486 | blk_queue_max_segment_size(queue, dev->bounce_size); |
488 | 487 | ||
489 | gendisk = alloc_disk(PS3DISK_MINORS); | 488 | gendisk = alloc_disk(PS3DISK_MINORS); |
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 1fb6c3135fc8..e44608229972 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c | |||
@@ -751,10 +751,9 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | |||
751 | priv->queue = queue; | 751 | priv->queue = queue; |
752 | queue->queuedata = dev; | 752 | queue->queuedata = dev; |
753 | blk_queue_make_request(queue, ps3vram_make_request); | 753 | blk_queue_make_request(queue, ps3vram_make_request); |
754 | blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS); | 754 | blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); |
755 | blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS); | 755 | blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); |
756 | blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE); | 756 | blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); |
757 | blk_queue_max_sectors(queue, SAFE_MAX_SECTORS); | ||
758 | 757 | ||
759 | gendisk = alloc_disk(1); | 758 | gendisk = alloc_disk(1); |
760 | if (!gendisk) { | 759 | if (!gendisk) { |
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 411f064760b4..48e8fee9f2d4 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
@@ -691,9 +691,8 @@ static int probe_disk(struct vdc_port *port) | |||
691 | 691 | ||
692 | port->disk = g; | 692 | port->disk = g; |
693 | 693 | ||
694 | blk_queue_max_hw_segments(q, port->ring_cookies); | 694 | blk_queue_max_segments(q, port->ring_cookies); |
695 | blk_queue_max_phys_segments(q, port->ring_cookies); | 695 | blk_queue_max_hw_sectors(q, port->max_xfer_size); |
696 | blk_queue_max_sectors(q, port->max_xfer_size); | ||
697 | g->major = vdc_major; | 696 | g->major = vdc_major; |
698 | g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; | 697 | g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; |
699 | strcpy(g->disk_name, port->disk_name); | 698 | strcpy(g->disk_name, port->disk_name); |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index a7c4184f4a63..b70f0fca9a42 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -409,7 +409,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
409 | static void carm_remove_one (struct pci_dev *pdev); | 409 | static void carm_remove_one (struct pci_dev *pdev); |
410 | static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); | 410 | static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); |
411 | 411 | ||
412 | static struct pci_device_id carm_pci_tbl[] = { | 412 | static const struct pci_device_id carm_pci_tbl[] = { |
413 | { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | 413 | { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, |
414 | { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | 414 | { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, |
415 | { } /* terminate list */ | 415 | { } /* terminate list */ |
@@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host) | |||
1518 | break; | 1518 | break; |
1519 | } | 1519 | } |
1520 | disk->queue = q; | 1520 | disk->queue = q; |
1521 | blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG); | 1521 | blk_queue_max_segments(q, CARM_MAX_REQ_SG); |
1522 | blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG); | ||
1523 | blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); | 1522 | blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); |
1524 | 1523 | ||
1525 | q->queuedata = port; | 1524 | q->queuedata = port; |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index c739b203fe91..2e889838e819 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -393,7 +393,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum); | |||
393 | #define ub_usb_ids usb_storage_usb_ids | 393 | #define ub_usb_ids usb_storage_usb_ids |
394 | #else | 394 | #else |
395 | 395 | ||
396 | static struct usb_device_id ub_usb_ids[] = { | 396 | static const struct usb_device_id ub_usb_ids[] = { |
397 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, | 397 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, |
398 | { } | 398 | { } |
399 | }; | 399 | }; |
@@ -2320,10 +2320,9 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) | |||
2320 | disk->queue = q; | 2320 | disk->queue = q; |
2321 | 2321 | ||
2322 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | 2322 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
2323 | blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); | 2323 | blk_queue_max_segments(q, UB_MAX_REQ_SG); |
2324 | blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); | ||
2325 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ | 2324 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ |
2326 | blk_queue_max_sectors(q, UB_MAX_SECTORS); | 2325 | blk_queue_max_hw_sectors(q, UB_MAX_SECTORS); |
2327 | blk_queue_logical_block_size(q, lun->capacity.bsize); | 2326 | blk_queue_logical_block_size(q, lun->capacity.bsize); |
2328 | 2327 | ||
2329 | lun->disk = disk; | 2328 | lun->disk = disk; |
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 1b3def1e8591..788d93882ab9 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c | |||
@@ -462,9 +462,8 @@ retry: | |||
462 | } | 462 | } |
463 | 463 | ||
464 | d->disk = g; | 464 | d->disk = g; |
465 | blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA); | 465 | blk_queue_max_segments(q, VIOMAXBLOCKDMA); |
466 | blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA); | 466 | blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS); |
467 | blk_queue_max_sectors(q, VIODASD_MAXSECTORS); | ||
468 | g->major = VIODASD_MAJOR; | 467 | g->major = VIODASD_MAJOR; |
469 | g->first_minor = dev_no << PARTITION_SHIFT; | 468 | g->first_minor = dev_no << PARTITION_SHIFT; |
470 | if (dev_no >= 26) | 469 | if (dev_no >= 26) |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7eff828b2117..3c64af05fa82 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -435,7 +435,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev) | |||
435 | kfree(vblk); | 435 | kfree(vblk); |
436 | } | 436 | } |
437 | 437 | ||
438 | static struct virtio_device_id id_table[] = { | 438 | static const struct virtio_device_id id_table[] = { |
439 | { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, | 439 | { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, |
440 | { 0 }, | 440 | { 0 }, |
441 | }; | 441 | }; |
diff --git a/drivers/block/xd.c b/drivers/block/xd.c index d1fd032e7514..1a325fb05c92 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c | |||
@@ -242,7 +242,7 @@ static int __init xd_init(void) | |||
242 | } | 242 | } |
243 | 243 | ||
244 | /* xd_maxsectors depends on controller - so set after detection */ | 244 | /* xd_maxsectors depends on controller - so set after detection */ |
245 | blk_queue_max_sectors(xd_queue, xd_maxsectors); | 245 | blk_queue_max_hw_sectors(xd_queue, xd_maxsectors); |
246 | 246 | ||
247 | for (i = 0; i < xd_drives; i++) | 247 | for (i = 0; i < xd_drives; i++) |
248 | add_disk(xd_gendisk[i]); | 248 | add_disk(xd_gendisk[i]); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 05a31e55d278..9c09694b2520 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -346,15 +346,14 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |||
346 | 346 | ||
347 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ | 347 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ |
348 | blk_queue_logical_block_size(rq, sector_size); | 348 | blk_queue_logical_block_size(rq, sector_size); |
349 | blk_queue_max_sectors(rq, 512); | 349 | blk_queue_max_hw_sectors(rq, 512); |
350 | 350 | ||
351 | /* Each segment in a request is up to an aligned page in size. */ | 351 | /* Each segment in a request is up to an aligned page in size. */ |
352 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | 352 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); |
353 | blk_queue_max_segment_size(rq, PAGE_SIZE); | 353 | blk_queue_max_segment_size(rq, PAGE_SIZE); |
354 | 354 | ||
355 | /* Ensure a merged request will fit in a single I/O ring slot. */ | 355 | /* Ensure a merged request will fit in a single I/O ring slot. */ |
356 | blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | 356 | blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); |
357 | blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
358 | 357 | ||
359 | /* Make sure buffer addresses are sector-aligned. */ | 358 | /* Make sure buffer addresses are sector-aligned. */ |
360 | blk_queue_dma_alignment(rq, 511); | 359 | blk_queue_dma_alignment(rq, 511); |
@@ -1050,7 +1049,7 @@ static const struct block_device_operations xlvbd_block_fops = | |||
1050 | }; | 1049 | }; |
1051 | 1050 | ||
1052 | 1051 | ||
1053 | static struct xenbus_device_id blkfront_ids[] = { | 1052 | static const struct xenbus_device_id blkfront_ids[] = { |
1054 | { "vbd" }, | 1053 | { "vbd" }, |
1055 | { "" } | 1054 | { "" } |
1056 | }; | 1055 | }; |
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index e5c5415eb45e..e1c95e208a66 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -1227,7 +1227,7 @@ static int __devexit ace_of_remove(struct of_device *op) | |||
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | /* Match table for of_platform binding */ | 1229 | /* Match table for of_platform binding */ |
1230 | static struct of_device_id ace_of_match[] __devinitdata = { | 1230 | static const struct of_device_id ace_of_match[] __devinitconst = { |
1231 | { .compatible = "xlnx,opb-sysace-1.00.b", }, | 1231 | { .compatible = "xlnx,opb-sysace-1.00.b", }, |
1232 | { .compatible = "xlnx,opb-sysace-1.00.c", }, | 1232 | { .compatible = "xlnx,opb-sysace-1.00.c", }, |
1233 | { .compatible = "xlnx,xps-sysace-1.00.a", }, | 1233 | { .compatible = "xlnx,xps-sysace-1.00.a", }, |
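Several hunks in this section also constify driver ID tables (pci_device_id in sx8, usb_device_id in ub, virtio_device_id in virtio_blk, xenbus_device_id in xen-blkfront, and of_device_id here in xsysace, which switches __devinitdata to __devinitconst to match). A minimal sketch of the resulting pattern for a PCI driver, with hypothetical vendor and device IDs:

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Hypothetical IDs; const lets the table be placed in a read-only section. */
	static const struct pci_device_id mydrv_pci_tbl[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },
		{ }	/* terminate list */
	};
	MODULE_DEVICE_TABLE(pci, mydrv_pci_tbl);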