 block/blk-core.c              |   4
 block/blk-ioc.c               |  37
 block/blk-map.c               |  20
 block/blk-merge.c             |   6
 block/blk-settings.c          |   9
 block/bsg.c                   |   8
 block/cfq-iosched.c           |  38
 block/elevator.c              |  15
 block/scsi_ioctl.c            |   3
 drivers/ata/ahci.c            |   5
 drivers/ata/libata-core.c     | 249
 drivers/ata/libata-scsi.c     |  72
 drivers/ata/pata_icside.c     |   8
 drivers/ata/sata_fsl.c        |  13
 drivers/ata/sata_mv.c         |   6
 drivers/ata/sata_sil24.c      |   5
 drivers/scsi/ipr.c            |   4
 drivers/scsi/libsas/sas_ata.c |   4
 drivers/scsi/scsi_lib.c       |   8
 fs/bio.c                      |   2
 fs/block_dev.c                | 201
 include/linux/aio.h           |   1
 include/linux/bio.h           |   1
 include/linux/blkdev.h        |   8
 include/linux/fs.h            |   1
 include/linux/iocontext.h     |   2
 include/linux/libata.h        |  28
 27 files changed, 191 insertions(+), 567 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e9754dc98ec4..775c8516abf5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,7 +38,7 @@ static int __make_request(struct request_queue *q, struct bio *bio);
 /*
  * For the allocated request tables
  */
-struct kmem_cache *request_cachep;
+static struct kmem_cache *request_cachep;

 /*
  * For queue allocation
@@ -127,6 +127,7 @@ void rq_init(struct request_queue *q, struct request *rq)
        rq->nr_hw_segments = 0;
        rq->ioprio = 0;
        rq->special = NULL;
+       rq->raw_data_len = 0;
        rq->buffer = NULL;
        rq->tag = -1;
        rq->errors = 0;
@@ -2015,6 +2016,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
        rq->buffer = bio_data(bio);
+       rq->raw_data_len = bio->bi_size;
        rq->data_len = bio->bi_size;

        rq->bio = rq->biotail = bio;
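
A note on the new field pair: raw_data_len and data_len start out identical (both are set from bio->bi_size above), and only data_len grows afterwards -- by the DMA-alignment pad added in blk-map.c and by the drain page appended in blk-merge.c further down. A minimal sketch of the resulting invariant (editor's illustration, not code from the patch):

    /* After mapping: data_len == raw_data_len + alignment pad + drain,
     * so user-visible residuals must be computed from raw_data_len.
     */
    static inline unsigned int rq_pad_and_drain(struct request *rq)
    {
        return rq->data_len - rq->raw_data_len;  /* 0 if neither applied */
    }

The bsg.c and scsi_ioctl.c hunks below switch their resid reporting to raw_data_len for exactly this reason.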
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 80245dc30c75..e34df7c9fc36 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -17,17 +17,13 @@ static struct kmem_cache *iocontext_cachep;

 static void cfq_dtor(struct io_context *ioc)
 {
-       struct cfq_io_context *cic[1];
-       int r;
+       if (!hlist_empty(&ioc->cic_list)) {
+               struct cfq_io_context *cic;

-       /*
-        * We don't have a specific key to lookup with, so use the gang
-        * lookup to just retrieve the first item stored. The cfq exit
-        * function will iterate the full tree, so any member will do.
-        */
-       r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-       if (r > 0)
-               cic[0]->dtor(ioc);
+               cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+                                                               cic_list);
+               cic->dtor(ioc);
+       }
 }

 /*
@@ -57,18 +53,16 @@ EXPORT_SYMBOL(put_io_context);

 static void cfq_exit(struct io_context *ioc)
 {
-       struct cfq_io_context *cic[1];
-       int r;
-
        rcu_read_lock();
-       /*
-        * See comment for cfq_dtor()
-        */
-       r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-       rcu_read_unlock();

-       if (r > 0)
-               cic[0]->exit(ioc);
+       if (!hlist_empty(&ioc->cic_list)) {
+               struct cfq_io_context *cic;
+
+               cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+                                                               cic_list);
+               cic->exit(ioc);
+       }
+       rcu_read_unlock();
 }

 /* Called by the exitting task */
@@ -105,6 +99,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
                INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
+               INIT_HLIST_HEAD(&ret->cic_list);
                ret->ioc_data = NULL;
        }

@@ -176,7 +171,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 }
 EXPORT_SYMBOL(copy_io_context);

-int __init blk_ioc_init(void)
+static int __init blk_ioc_init(void)
 {
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
diff --git a/block/blk-map.c b/block/blk-map.c
index 955d75c1a58f..09f7fd0bcb73 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,6 +19,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

+               rq->raw_data_len += bio->bi_size;
                rq->data_len += bio->bi_size;
        }
        return 0;
@@ -139,10 +140,29 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                ubuf += ret;
        }

+       /*
+        * __blk_rq_map_user() copies the buffers if starting address
+        * or length isn't aligned.  As the copied buffer is always
+        * page aligned, we know that there's enough room for padding.
+        * Extend the last bio and update rq->data_len accordingly.
+        *
+        * On unmap, bio_uncopy_user() will use unmodified
+        * bio_map_data pointed to by bio->bi_private.
+        */
+       if (len & queue_dma_alignment(q)) {
+               unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+               struct bio *bio = rq->biotail;
+
+               bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+               bio->bi_size += pad_len;
+               rq->data_len += pad_len;
+       }
+
        rq->buffer = rq->data = NULL;
        return 0;
 unmap_rq:
        blk_rq_unmap_user(bio);
+       rq->bio = NULL;
        return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_user);
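
The pad arithmetic above is a mask trick: queue_dma_alignment() returns an alignment mask such as 3 for 4-byte alignment, and (mask & ~len) + 1 is exactly the distance from len up to the next aligned boundary. A worked sketch, assuming a 4-byte alignment mask (editor's illustration):

    /* Sketch: len = 1030, queue_dma_alignment(q) == 3.
     * len & 3 == 2, so the branch above is taken; 3 & ~1030 == 1,
     * hence pad_len == 2 and 1030 + 2 == 1032 is 4-byte aligned.
     */
    unsigned int len = 1030, mask = 3;
    unsigned int pad_len = (mask & ~len) + 1;  /* == 2 */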
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d3b84bbb776a..7506c4fe0264 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,7 +220,10 @@ new_segment:
                bvprv = bvec;
        } /* segments in rq */

-       if (q->dma_drain_size) {
+       if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+               if (rq->cmd_flags & REQ_RW)
+                       memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
@@ -228,6 +231,7 @@ new_segment:
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
+               rq->data_len += q->dma_drain_size;
        }

        if (sg)
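
Two details of the drain append are easy to miss, restated here with explanatory comments (same calls as above; the 0x02 bit is the scatterlist end-of-list marker) -- an editor's sketch, not additional patch code:

    sg->page_link &= ~0x02;  /* old tail stops terminating the list */
    sg = sg_next(sg);        /* step onto the now-reachable entry */
    sg_set_page(sg, virt_to_page(q->dma_drain_buffer), q->dma_drain_size,
                ((unsigned long)q->dma_drain_buffer) & (PAGE_SIZE - 1));

For writes (REQ_RW) the buffer is zeroed first, so a device that reads past the payload sees zeros rather than stale bytes left over from an earlier request.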
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c8d0c5724098..9a8ffdd0ce3d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf: physically contiguous buffer
  * @size: size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-                       unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+                              dma_drain_needed_fn *dma_drain_needed,
+                              void *buf, unsigned int size)
 {
        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
                return -EINVAL;
        /* make room for appending the drain */
        --q->max_hw_segments;
        --q->max_phys_segments;
+       q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

@@ -386,7 +389,7 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);

-int __init blk_settings_init(void)
+static int __init blk_settings_init(void)
 {
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
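
With the new callback parameter, whether the drain page is actually appended becomes a per-request decision. A hedged sketch of the registration a SCSI low-level driver would perform (my_drain_needed and MY_DRAIN_SZ are illustrative names only; libata's real version appears in the libata-scsi.c hunk below):

    /* Sketch: drain only data-carrying, device-to-host BLOCK_PC requests. */
    static int my_drain_needed(struct request *rq)
    {
        return blk_pc_request(rq) && rq->data_len &&
               !(rq->cmd_flags & REQ_RW);
    }

    static int my_slave_config(struct scsi_device *sdev)
    {
        struct request_queue *q = sdev->request_queue;
        void *buf = kmalloc(MY_DRAIN_SZ, q->bounce_gfp | GFP_KERNEL);

        if (!buf)
            return -ENOMEM;
        return blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SZ);
    }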
diff --git a/block/bsg.c b/block/bsg.c
index 8917c5174dc2..7f3c09549e4b 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
        }

        if (rq->next_rq) {
-               hdr->dout_resid = rq->data_len;
-               hdr->din_resid = rq->next_rq->data_len;
+               hdr->dout_resid = rq->raw_data_len;
+               hdr->din_resid = rq->next_rq->raw_data_len;
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        } else if (rq_data_dir(rq) == READ)
-               hdr->din_resid = rq->data_len;
+               hdr->din_resid = rq->raw_data_len;
        else
-               hdr->dout_resid = rq->data_len;
+               hdr->dout_resid = rq->raw_data_len;

        /*
         * If the request generated a negative error number, return it
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca198e61fa65..0f962ecae91f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1145,38 +1145,19 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 /*
  * Call func for each cic attached to this ioc. Returns number of cic's seen.
  */
-#define CIC_GANG_NR    16
 static unsigned int
 call_for_each_cic(struct io_context *ioc,
                   void (*func)(struct io_context *, struct cfq_io_context *))
 {
-       struct cfq_io_context *cics[CIC_GANG_NR];
-       unsigned long index = 0;
-       unsigned int called = 0;
-       int nr;
+       struct cfq_io_context *cic;
+       struct hlist_node *n;
+       int called = 0;

        rcu_read_lock();
-
-       do {
-               int i;
-
-               /*
-                * Perhaps there's a better way - this just gang lookups from
-                * 0 to the end, restarting after each CIC_GANG_NR from the
-                * last key + 1.
-                */
-               nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
-                                               index, CIC_GANG_NR);
-               if (!nr)
-                       break;
-
-               called += nr;
-               index = 1 + (unsigned long) cics[nr - 1]->key;
-
-               for (i = 0; i < nr; i++)
-                       func(ioc, cics[i]);
-       } while (nr == CIC_GANG_NR);
-
+       hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
+               func(ioc, cic);
+               called++;
+       }
        rcu_read_unlock();

        return called;
@@ -1190,6 +1171,7 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)

        spin_lock_irqsave(&ioc->lock, flags);
        radix_tree_delete(&ioc->radix_root, cic->dead_key);
+       hlist_del_rcu(&cic->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);

        kmem_cache_free(cfq_ioc_pool, cic);
@@ -1280,6 +1262,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic) {
                cic->last_end_request = jiffies;
                INIT_LIST_HEAD(&cic->queue_list);
+               INIT_HLIST_NODE(&cic->cic_list);
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
                elv_ioc_count_inc(ioc_count);
@@ -1501,6 +1484,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
        rcu_assign_pointer(ioc->ioc_data, NULL);

        radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+       hlist_del_rcu(&cic->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);

        cfq_cic_free(cic);
@@ -1561,6 +1545,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
        spin_lock_irqsave(&ioc->lock, flags);
        ret = radix_tree_insert(&ioc->radix_root,
                                        (unsigned long) cfqd, cic);
+       if (!ret)
+               hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);

        radix_tree_preload_end();
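
The hlist does not replace the radix tree: keyed lookup by cfqd still goes through radix_root, while the list exists purely so that "visit every cic" is a plain RCU walk instead of repeated gang lookups. The update discipline the hunks above follow, restated with comments (editor's sketch of the same calls):

    spin_lock_irqsave(&ioc->lock, flags);      /* writers serialize here */
    radix_tree_delete(&ioc->radix_root, key);  /* keyed-lookup structure */
    hlist_del_rcu(&cic->cic_list);             /* iteration structure */
    spin_unlock_irqrestore(&ioc->lock, flags); /* readers hold only RCU */

Both structures are always updated together under ioc->lock, which is what lets call_for_each_cic() walk the list under nothing stronger than rcu_read_lock().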
diff --git a/block/elevator.c b/block/elevator.c
index bafbae0344d3..88318c383608 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -134,6 +134,21 @@ static struct elevator_type *elevator_get(const char *name)
        spin_lock(&elv_list_lock);

        e = elevator_find(name);
+       if (!e) {
+               char elv[ELV_NAME_MAX + strlen("-iosched")];
+
+               spin_unlock(&elv_list_lock);
+
+               if (!strcmp(name, "anticipatory"))
+                       sprintf(elv, "as-iosched");
+               else
+                       sprintf(elv, "%s-iosched", name);
+
+               request_module(elv);
+               spin_lock(&elv_list_lock);
+               e = elevator_find(name);
+       }
+
        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9675b34638d4..e993cac4911d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
        hdr->info = 0;
        if (hdr->masked_status || hdr->host_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
-       hdr->resid = rq->data_len;
+       hdr->resid = rq->raw_data_len;
        hdr->sb_len_wr = 0;

        if (rq->sense_len && hdr->sbp) {
@@ -528,6 +528,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->data = NULL;
+       rq->raw_data_len = 0;
        rq->data_len = 0;
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        memset(rq->cmd, 0, sizeof(rq->cmd));
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 29e71bddd6ff..3c06e457b4dc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1975,16 +1975,11 @@ static int ahci_port_start(struct ata_port *ap)
        struct ahci_port_priv *pp;
        void *mem;
        dma_addr_t mem_dma;
-       int rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

-       rc = ata_pad_alloc(ap, dev);
-       if (rc)
-               return rc;
-
        mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
                                  GFP_KERNEL);
        if (!mem)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f46eb6f6dc9f..def3682f416a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4493,30 +4493,13 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg = qc->sg;
        int dir = qc->dma_dir;
-       void *pad_buf = NULL;

        WARN_ON(sg == NULL);

-       VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
+       VPRINTK("unmapping %u sg elements\n", qc->n_elem);

-       /* if we padded the buffer out to 32-bit bound, and data
-        * xfer direction is from-device, we must copy from the
-        * pad buffer back into the supplied buffer
-        */
-       if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
-               pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-
-       if (qc->mapped_n_elem)
-               dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
-       /* restore last sg */
-       if (qc->last_sg)
-               *qc->last_sg = qc->saved_last_sg;
-       if (pad_buf) {
-               struct scatterlist *psg = &qc->extra_sg[1];
-               void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-               memcpy(addr + psg->offset, pad_buf, qc->pad_len);
-               kunmap_atomic(addr, KM_IRQ0);
-       }
+       if (qc->n_elem)
+               dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

        qc->flags &= ~ATA_QCFLAG_DMAMAP;
        qc->sg = NULL;
@@ -4659,43 +4642,6 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
 }

 /**
- *     atapi_qc_may_overflow - Check whether data transfer may overflow
- *     @qc: ATA command in question
- *
- *     ATAPI commands which transfer variable length data to host
- *     might overflow due to application error or hardare bug. This
- *     function checks whether overflow should be drained and ignored
- *     for @qc.
- *
- *     LOCKING:
- *     None.
- *
- *     RETURNS:
- *     1 if @qc may overflow; otherwise, 0.
- */
-static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
-{
-       if (qc->tf.protocol != ATAPI_PROT_PIO &&
-           qc->tf.protocol != ATAPI_PROT_DMA)
-               return 0;
-
-       if (qc->tf.flags & ATA_TFLAG_WRITE)
-               return 0;
-
-       switch (qc->cdb[0]) {
-       case READ_10:
-       case READ_12:
-       case WRITE_10:
-       case WRITE_12:
-       case GPCMD_READ_CD:
-       case GPCMD_READ_CD_MSF:
-               return 0;
-       }
-
-       return 1;
-}
-
-/**
  * ata_std_qc_defer - Check whether a qc needs to be deferred
  * @qc: ATA command in question
  *
@@ -4782,97 +4728,6 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
        qc->cursg = qc->sg;
 }

-static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
-                                      unsigned int *n_elem_extra,
-                                      unsigned int *nbytes_extra)
-{
-       struct ata_port *ap = qc->ap;
-       unsigned int n_elem = qc->n_elem;
-       struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
-
-       *n_elem_extra = 0;
-       *nbytes_extra = 0;
-
-       /* needs padding? */
-       qc->pad_len = qc->nbytes & 3;
-
-       if (likely(!qc->pad_len))
-               return n_elem;
-
-       /* locate last sg and save it */
-       lsg = sg_last(qc->sg, n_elem);
-       qc->last_sg = lsg;
-       qc->saved_last_sg = *lsg;
-
-       sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
-
-       if (qc->pad_len) {
-               struct scatterlist *psg = &qc->extra_sg[1];
-               void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-               unsigned int offset;
-
-               WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
-
-               memset(pad_buf, 0, ATA_DMA_PAD_SZ);
-
-               /* psg->page/offset are used to copy to-be-written
-                * data in this function or read data in ata_sg_clean.
-                */
-               offset = lsg->offset + lsg->length - qc->pad_len;
-               sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
-                           qc->pad_len, offset_in_page(offset));
-
-               if (qc->tf.flags & ATA_TFLAG_WRITE) {
-                       void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-                       memcpy(pad_buf, addr + psg->offset, qc->pad_len);
-                       kunmap_atomic(addr, KM_IRQ0);
-               }
-
-               sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
-               sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-
-               /* Trim the last sg entry and chain the original and
-                * padding sg lists.
-                *
-                * Because chaining consumes one sg entry, one extra
-                * sg entry is allocated and the last sg entry is
-                * copied to it if the length isn't zero after padded
-                * amount is removed.
-                *
-                * If the last sg entry is completely replaced by
-                * padding sg entry, the first sg entry is skipped
-                * while chaining.
-                */
-               lsg->length -= qc->pad_len;
-               if (lsg->length) {
-                       copy_lsg = &qc->extra_sg[0];
-                       tsg = &qc->extra_sg[0];
-               } else {
-                       n_elem--;
-                       tsg = &qc->extra_sg[1];
-               }
-
-               esg = &qc->extra_sg[1];
-
-               (*n_elem_extra)++;
-               (*nbytes_extra) += 4 - qc->pad_len;
-       }
-
-       if (copy_lsg)
-               sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
-
-       sg_chain(lsg, 1, tsg);
-       sg_mark_end(esg);
-
-       /* sglist can't start with chaining sg entry, fast forward */
-       if (qc->sg == lsg) {
-               qc->sg = tsg;
-               qc->cursg = tsg;
-       }
-
-       return n_elem;
-}
-
 /**
  * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
  * @qc: Command with scatter-gather table to be mapped.
@@ -4889,26 +4744,17 @@ static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
-       unsigned int n_elem, n_elem_extra, nbytes_extra;
+       unsigned int n_elem;

        VPRINTK("ENTER, ata%u\n", ap->print_id);

-       n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
+       n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
+       if (n_elem < 1)
+               return -1;

-       if (n_elem) {
-               n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
-               if (n_elem < 1) {
-                       /* restore last sg */
-                       if (qc->last_sg)
-                               *qc->last_sg = qc->saved_last_sg;
-                       return -1;
-               }
-               DPRINTK("%d sg elements mapped\n", n_elem);
-       }
+       DPRINTK("%d sg elements mapped\n", n_elem);

-       qc->n_elem = qc->mapped_n_elem = n_elem;
-       qc->n_elem += n_elem_extra;
-       qc->nbytes += nbytes_extra;
+       qc->n_elem = n_elem;
        qc->flags |= ATA_QCFLAG_DMAMAP;

        return 0;
@@ -5146,46 +4992,22 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
  */
 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
-       int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+       int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
        struct ata_port *ap = qc->ap;
-       struct ata_eh_info *ehi = &qc->dev->link->eh_info;
+       struct ata_device *dev = qc->dev;
+       struct ata_eh_info *ehi = &dev->link->eh_info;
        struct scatterlist *sg;
        struct page *page;
        unsigned char *buf;
-       unsigned int offset, count;
+       unsigned int offset, count, consumed;

 next_sg:
        sg = qc->cursg;
        if (unlikely(!sg)) {
-               /*
-                * The end of qc->sg is reached and the device expects
-                * more data to transfer. In order not to overrun qc->sg
-                * and fulfill length specified in the byte count register,
-                *    - for read case, discard trailing data from the device
-                *    - for write case, padding zero data to the device
-                */
-               u16 pad_buf[1] = { 0 };
-               unsigned int i;
-
-               if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
-                       ata_ehi_push_desc(ehi, "too much trailing data "
-                                         "buf=%u cur=%u bytes=%u",
-                                         qc->nbytes, qc->curbytes, bytes);
-                       return -1;
-               }
-
-               /* overflow is exptected for misc ATAPI commands */
-               if (bytes && !atapi_qc_may_overflow(qc))
-                       ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
-                                      "trailing data (cdb=%02x nbytes=%u)\n",
-                                      bytes, qc->cdb[0], qc->nbytes);
-
-               for (i = 0; i < (bytes + 1) / 2; i++)
-                       ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
-
-               qc->curbytes += bytes;
-
-               return 0;
+               ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
+                                 "buf=%u cur=%u bytes=%u",
+                                 qc->nbytes, qc->curbytes, bytes);
+               return -1;
        }

        page = sg_page(sg);
@@ -5211,18 +5033,16 @@ next_sg:
                buf = kmap_atomic(page, KM_IRQ0);

                /* do the actual data transfer */
-               ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
+               consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

                kunmap_atomic(buf, KM_IRQ0);
                local_irq_restore(flags);
        } else {
                buf = page_address(page);
-               ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
+               consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
        }

-       bytes -= count;
-       if ((count & 1) && bytes)
-               bytes--;
+       bytes -= min(bytes, consumed);
        qc->curbytes += count;
        qc->cursg_ofs += count;

@@ -5231,9 +5051,11 @@ next_sg:
                qc->cursg_ofs = 0;
        }

+       /* consumed can be larger than count only for the last transfer */
+       WARN_ON(qc->cursg && count != consumed);
+
        if (bytes)
                goto next_sg;
-
        return 0;
 }

@@ -5251,6 +5073,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
        struct ata_device *dev = qc->dev;
+       struct ata_eh_info *ehi = &dev->link->eh_info;
        unsigned int ireason, bc_lo, bc_hi, bytes;
        int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

@@ -5268,26 +5091,28 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)

        /* shall be cleared to zero, indicating xfer of data */
        if (unlikely(ireason & (1 << 0)))
-               goto err_out;
+               goto atapi_check;

        /* make sure transfer direction matches expected */
        i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
        if (unlikely(do_write != i_write))
-               goto err_out;
+               goto atapi_check;

        if (unlikely(!bytes))
-               goto err_out;
+               goto atapi_check;

        VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

-       if (__atapi_pio_bytes(qc, bytes))
+       if (unlikely(__atapi_pio_bytes(qc, bytes)))
                goto err_out;
        ata_altstatus(ap); /* flush */

        return;

-err_out:
-       ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
+ atapi_check:
+       ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
+                         ireason, bytes);
+ err_out:
        qc->err_mask |= AC_ERR_HSM;
        ap->hsm_task_state = HSM_ST_ERR;
 }
@@ -5972,9 +5797,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
         */
        BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

-       /* ata_sg_setup() may update nbytes */
-       qc->raw_nbytes = qc->nbytes;
-
        if (ata_is_dma(prot) || (ata_is_pio(prot) &&
                                (ap->flags & ATA_FLAG_PIO_DMA)))
                if (ata_sg_setup(qc))
@@ -6583,19 +6405,12 @@ void ata_host_resume(struct ata_host *host)
 int ata_port_start(struct ata_port *ap)
 {
        struct device *dev = ap->dev;
-       int rc;

        ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
                                      GFP_KERNEL);
        if (!ap->prd)
                return -ENOMEM;

-       rc = ata_pad_alloc(ap, dev);
-       if (rc)
-               return rc;
-
-       DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
-               (unsigned long long)ap->prd_dma);
        return 0;
 }

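The rewritten __atapi_pio_bytes() relies on a changed ->data_xfer contract: the hook now reports how many bytes actually moved on the wire, which can exceed the requested count by one on an odd-sized final chunk because PIO moves whole 16-bit words. A hedged sketch of such a hook, modeled on the common taskfile PIO helper (the exact helper is not part of this patch; details vary by driver):

    static unsigned int sketch_data_xfer(struct ata_device *dev,
                                         unsigned char *buf,
                                         unsigned int buflen, int rw)
    {
        void __iomem *data_addr = dev->link->ap->ioaddr.data_addr;
        unsigned int words = buflen >> 1;

        if (rw == READ)
            ioread16_rep(data_addr, buf, words);
        else
            iowrite16_rep(data_addr, buf, words);

        /* an odd trailing byte still crosses the bus as a full word */
        if (unlikely(buflen & 0x01)) {
            u16 align_buf[1] = { 0 };
            unsigned char *trailing_buf = buf + buflen - 1;

            if (rw == READ) {
                align_buf[0] = cpu_to_le16(ioread16(data_addr));
                memcpy(trailing_buf, align_buf, 1);
            } else {
                memcpy(align_buf, trailing_buf, 1);
                iowrite16(le16_to_cpu(align_buf[0]), data_addr);
            }
            words++;
        }
        return words << 1;  /* bytes consumed, not bytes requested */
    }

This is why the caller subtracts min(bytes, consumed) and only warns when count != consumed in the middle of a transfer.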
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1cea18f62abc..dd41b1a1b304 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -826,30 +826,61 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
        sdev->max_device_blocked = 1;
 }

-static void ata_scsi_dev_config(struct scsi_device *sdev,
-                               struct ata_device *dev)
+/**
+ *     atapi_drain_needed - Check whether data transfer may overflow
+ *     @request: request to be checked
+ *
+ *     ATAPI commands which transfer variable length data to host
+ *     might overflow due to application error or hardware bug.  This
+ *     function checks whether overflow should be drained and ignored
+ *     for @request.
+ *
+ *     LOCKING:
+ *     None.
+ *
+ *     RETURNS:
+ *     1 if @request may overflow; otherwise, 0.
+ */
+static int atapi_drain_needed(struct request *rq)
+{
+       if (likely(!blk_pc_request(rq)))
+               return 0;
+
+       if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+               return 0;
+
+       return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+}
+
+static int ata_scsi_dev_config(struct scsi_device *sdev,
+                              struct ata_device *dev)
 {
        /* configure max sectors */
        blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);

-       /* SATA DMA transfers must be multiples of 4 byte, so
-        * we need to pad ATAPI transfers using an extra sg.
-        * Decrement max hw segments accordingly.
-        */
        if (dev->class == ATA_DEV_ATAPI) {
                struct request_queue *q = sdev->request_queue;
-               blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+               void *buf;

                /* set the min alignment */
                blk_queue_update_dma_alignment(sdev->request_queue,
                                               ATA_DMA_PAD_SZ - 1);
-       } else
+
+               /* configure draining */
+               buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
+               if (!buf) {
+                       ata_dev_printk(dev, KERN_ERR,
+                                      "drain buffer allocation failed\n");
+                       return -ENOMEM;
+               }
+
+               blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
+       } else {
                /* ATA devices must be sector aligned */
                blk_queue_update_dma_alignment(sdev->request_queue,
                                               ATA_SECT_SIZE - 1);
-
-       if (dev->class == ATA_DEV_ATA)
                sdev->manage_start_stop = 1;
+       }

        if (dev->flags & ATA_DFLAG_AN)
                set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -861,6 +892,8 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
                depth = min(ATA_MAX_QUEUE - 1, depth);
                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
        }
+
+       return 0;
 }

 /**
@@ -879,13 +912,14 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 {
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+       int rc = 0;

        ata_scsi_sdev_config(sdev);

        if (dev)
-               ata_scsi_dev_config(sdev, dev);
+               rc = ata_scsi_dev_config(sdev, dev);

-       return 0;
+       return rc;
 }

 /**
@@ -905,6 +939,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 void ata_scsi_slave_destroy(struct scsi_device *sdev)
 {
        struct ata_port *ap = ata_shost_to_port(sdev->host);
+       struct request_queue *q = sdev->request_queue;
        unsigned long flags;
        struct ata_device *dev;

@@ -920,6 +955,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
                ata_port_schedule_eh(ap);
        }
        spin_unlock_irqrestore(ap->lock, flags);
+
+       kfree(q->dma_drain_buffer);
+       q->dma_drain_buffer = NULL;
+       q->dma_drain_size = 0;
 }

 /**
@@ -2500,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
         * want to set it properly, and for DMA where it is
         * effectively meaningless.
         */
-       nbytes = min(qc->nbytes, (unsigned int)63 * 1024);
+       nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);

        /* Most ATAPI devices which honor transfer chunk size don't
         * behave according to the spec when odd chunk size which
@@ -3555,7 +3594,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  * @ap: Port to initialize
  *
  *     Called just after data structures for each port are
- *     initialized.  Allocates DMA pad.
+ *     initialized.
  *
  *     May be used as the port_start() entry in ata_port_operations.
  *
@@ -3564,7 +3603,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  */
 int ata_sas_port_start(struct ata_port *ap)
 {
-       return ata_pad_alloc(ap, ap->dev);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_start);

@@ -3572,8 +3611,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
  * ata_port_stop - Undo ata_sas_port_start()
  * @ap: Port to shut down
  *
- *     Frees the DMA pad.
- *
  *     May be used as the port_stop() entry in ata_port_operations.
  *
  * LOCKING:
@@ -3582,7 +3619,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);

 void ata_sas_port_stop(struct ata_port *ap)
 {
-       ata_pad_free(ap, ap->dev);
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_stop);

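atapi_drain_needed() leans on atapi_cmd_type() to separate bounded transfers from variable-length stragglers: only the leftover ATAPI_MISC class is drained, which matches the command list the deleted atapi_qc_may_overflow() special-cased. A sketch of the classification assumed here (the mapping is atapi_cmd_type()'s table in include/linux/libata.h; opcodes per the standard SCSI/MMC definitions):

    atapi_cmd_type(GPCMD_READ_10);   /* ATAPI_READ: bounded, never drained */
    atapi_cmd_type(GPCMD_READ_CD);   /* ATAPI_READ_CD: bounded as well */
    atapi_cmd_type(INQUIRY);         /* ATAPI_MISC: drained on overflow */

The drain buffer's lifetime brackets the scsi_device: allocated in ata_scsi_dev_config(), freed in ata_scsi_slave_destroy() with the queue's dma_drain_size zeroed so blk_rq_map_sg() never touches the freed page.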
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 5b8586dac63b..f97068be2d79 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -304,12 +304,6 @@ static int icside_dma_init(struct pata_icside_info *info)
 }


-static int pata_icside_port_start(struct ata_port *ap)
-{
-       /* No PRD to alloc */
-       return ata_pad_alloc(ap, ap->dev);
-}
-
 static struct scsi_host_template pata_icside_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
@@ -389,8 +383,6 @@ static struct ata_port_operations pata_icside_port_ops = {
        .irq_clear              = ata_dummy_noret,
        .irq_on                 = ata_irq_on,

-       .port_start             = pata_icside_port_start,
-
        .bmdma_stop             = pata_icside_bmdma_stop,
        .bmdma_status           = pata_icside_bmdma_status,
 };
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index efcb66b6ccef..9323dd0c7d8d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -601,21 +601,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
        if (!pp)
                return -ENOMEM;

-       /*
-        * allocate per command dma alignment pad buffer, which is used
-        * internally by libATA to ensure that all transfers ending on
-        * unaligned boundaries are padded, to align on Dword boundaries
-        */
-       retval = ata_pad_alloc(ap, dev);
-       if (retval) {
-               kfree(pp);
-               return retval;
-       }
-
        mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
                                 GFP_KERNEL);
        if (!mem) {
-               ata_pad_free(ap, dev);
                kfree(pp);
                return -ENOMEM;
        }
@@ -694,7 +682,6 @@ static void sata_fsl_port_stop(struct ata_port *ap)
        dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
                          pp->cmdslot, pp->cmdslot_paddr);

-       ata_pad_free(ap, dev);
        kfree(pp);
 }

diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 2ecd44db4142..1c1fbf375d9a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1158,17 +1158,13 @@ static int mv_port_start(struct ata_port *ap)
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        unsigned long flags;
-       int tag, rc;
+       int tag;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        ap->private_data = pp;

-       rc = ata_pad_alloc(ap, dev);
-       if (rc)
-               return rc;
-
        pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
        if (!pp->crqb)
                return -ENOMEM;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index b4b1f91ea693..df7988df7908 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1234,7 +1234,6 @@ static int sil24_port_start(struct ata_port *ap)
        union sil24_cmd_block *cb;
        size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
        dma_addr_t cb_dma;
-       int rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
@@ -1247,10 +1246,6 @@ static int sil24_port_start(struct ata_port *ap)
                return -ENOMEM;
        memset(cb, 0, cb_size);

-       rc = ata_pad_alloc(ap, dev);
-       if (rc)
-               return rc;
-
        pp->cmd_block = cb;
        pp->cmd_block_dma = cb_dma;

diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2074701f7e76..c72014a3e7d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5140,7 +5140,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
        struct ipr_ioadl_desc *last_ioadl = NULL;
-       int len = qc->nbytes + qc->pad_len;
+       int len = qc->nbytes;
        struct scatterlist *sg;
        unsigned int si;

@@ -5206,7 +5206,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
        ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
-       ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+       ipr_cmd->dma_use_sg = qc->n_elem;

        ipr_build_ata_ioadl(ipr_cmd, qc);
        regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0996f866f14c..7cd05b599a12 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -178,8 +178,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
        task->uldd_task = qc;
        if (ata_is_atapi(qc->tf.protocol)) {
                memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
-               task->total_xfer_len = qc->nbytes + qc->pad_len;
-               task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+               task->total_xfer_len = qc->nbytes;
+               task->num_scatter = qc->n_elem;
        } else {
                for_each_sg(qc->sg, sg, qc->n_elem, si)
                        xfer += sg->length;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 135c1d054701..ba21d97d1855 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1014,10 +1014,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
        }

        req->buffer = NULL;
-       if (blk_pc_request(req))
-               sdb->length = req->data_len;
-       else
-               sdb->length = req->nr_sectors << 9;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
@@ -1026,6 +1022,10 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
+       if (blk_pc_request(req))
+               sdb->length = req->data_len;
+       else
+               sdb->length = req->nr_sectors << 9;
        return BLKPREP_OK;
 }

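The relocation above is about ordering, not style: for BLOCK_PC requests it is blk_rq_map_sg() itself that may append the drain page and grow req->data_len (see the blk-merge.c hunk), so reading data_len before the mapping would undercount the buffer the HBA is about to be handed. Restated as a sketch:

    count = blk_rq_map_sg(req->q, req, sdb->table.sgl);  /* may grow data_len */
    sdb->table.nents = count;
    sdb->length = blk_pc_request(req) ? req->data_len    /* includes drain */
                                      : req->nr_sectors << 9;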
diff --git a/fs/bio.c b/fs/bio.c
index 242e409dab4b..3312fcc3c098 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -903,7 +903,7 @@ void bio_set_pages_dirty(struct bio *bio)
        }
 }

-void bio_release_pages(struct bio *bio)
+static void bio_release_pages(struct bio *bio)
 {
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 67fe72ce6ac7..7d822fae7765 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -31,6 +31,8 @@ struct bdev_inode {
        struct inode vfs_inode;
 };

+static const struct address_space_operations def_blk_aops;
+
 static inline struct bdev_inode *BDEV_I(struct inode *inode)
 {
        return container_of(inode, struct bdev_inode, vfs_inode);
@@ -171,203 +173,6 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                                iov, offset, nr_segs, blkdev_get_blocks, NULL);
 }

-#if 0
-static void blk_end_aio(struct bio *bio, int error)
-{
-       struct kiocb *iocb = bio->bi_private;
-       atomic_t *bio_count = &iocb->ki_bio_count;
-
-       if (bio_data_dir(bio) == READ)
-               bio_check_pages_dirty(bio);
-       else {
-               bio_release_pages(bio);
-               bio_put(bio);
-       }
-
-       /* iocb->ki_nbytes stores error code from LLDD */
-       if (error)
-               iocb->ki_nbytes = -EIO;
-
-       if (atomic_dec_and_test(bio_count)) {
-               if ((long)iocb->ki_nbytes < 0)
-                       aio_complete(iocb, iocb->ki_nbytes, 0);
-               else
-                       aio_complete(iocb, iocb->ki_left, 0);
-       }
-
-       return 0;
-}
-
-#define VEC_SIZE       16
-struct pvec {
-       unsigned short nr;
-       unsigned short idx;
-       struct page *page[VEC_SIZE];
-};
-
-#define PAGES_SPANNED(addr, len)       \
-       (DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);
-
-/*
- * get page pointer for user addr, we internally cache struct page array for
- * (addr, count) range in pvec to avoid frequent call to get_user_pages.  If
- * internal page list is exhausted, a batch count of up to VEC_SIZE is used
- * to get next set of page struct.
- */
-static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
-                                struct pvec *pvec)
-{
-       int ret, nr_pages;
-       if (pvec->idx == pvec->nr) {
-               nr_pages = PAGES_SPANNED(addr, count);
-               nr_pages = min(nr_pages, VEC_SIZE);
-               down_read(&current->mm->mmap_sem);
-               ret = get_user_pages(current, current->mm, addr, nr_pages,
-                                    rw == READ, 0, pvec->page, NULL);
-               up_read(&current->mm->mmap_sem);
-               if (ret < 0)
-                       return ERR_PTR(ret);
-               pvec->nr = ret;
-               pvec->idx = 0;
-       }
-       return pvec->page[pvec->idx++];
-}
-
-/* return a page back to pvec array */
-static void blk_unget_page(struct page *page, struct pvec *pvec)
-{
-       pvec->page[--pvec->idx] = page;
-}
-
-static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                loff_t pos, unsigned long nr_segs)
-{
-       struct inode *inode = iocb->ki_filp->f_mapping->host;
-       unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
-       unsigned blocksize_mask = (1 << blkbits) - 1;
-       unsigned long seg = 0;  /* iov segment iterator */
-       unsigned long nvec;     /* number of bio vec needed */
-       unsigned long cur_off;  /* offset into current page */
-       unsigned long cur_len;  /* I/O len of current page, up to PAGE_SIZE */
-
-       unsigned long addr;     /* user iovec address */
-       size_t count;           /* user iovec len */
-       size_t nbytes = iocb->ki_nbytes = iocb->ki_left;        /* total xfer size */
-       loff_t size;            /* size of block device */
-       struct bio *bio;
-       atomic_t *bio_count = &iocb->ki_bio_count;
-       struct page *page;
-       struct pvec pvec;
-
-       pvec.nr = 0;
-       pvec.idx = 0;
-
-       if (pos & blocksize_mask)
-               return -EINVAL;
-
-       size = i_size_read(inode);
-       if (pos + nbytes > size) {
-               nbytes = size - pos;
-               iocb->ki_left = nbytes;
-       }
-
-       /*
-        * check first non-zero iov alignment, the remaining
-        * iov alignment is checked inside bio loop below.
-        */
-       do {
-               addr = (unsigned long) iov[seg].iov_base;
-               count = min(iov[seg].iov_len, nbytes);
-               if (addr & blocksize_mask || count & blocksize_mask)
-                       return -EINVAL;
-       } while (!count && ++seg < nr_segs);
-       atomic_set(bio_count, 1);
-
-       while (nbytes) {
-               /* roughly estimate number of bio vec needed */
-               nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
-               nvec = max(nvec, nr_segs - seg);
-               nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);
-
-               /* bio_alloc should not fail with GFP_KERNEL flag */
-               bio = bio_alloc(GFP_KERNEL, nvec);
-               bio->bi_bdev = I_BDEV(inode);
-               bio->bi_end_io = blk_end_aio;
-               bio->bi_private = iocb;
-               bio->bi_sector = pos >> blkbits;
-same_bio:
-               cur_off = addr & ~PAGE_MASK;
-               cur_len = PAGE_SIZE - cur_off;
-               if (count < cur_len)
-                       cur_len = count;
-
-               page = blk_get_page(addr, count, rw, &pvec);
-               if (unlikely(IS_ERR(page)))
-                       goto backout;
-
-               if (bio_add_page(bio, page, cur_len, cur_off)) {
-                       pos += cur_len;
-                       addr += cur_len;
-                       count -= cur_len;
-                       nbytes -= cur_len;
-
-                       if (count)
-                               goto same_bio;
-                       while (++seg < nr_segs) {
-                               addr = (unsigned long) iov[seg].iov_base;
-                               count = iov[seg].iov_len;
-                               if (!count)
-                                       continue;
-                               if (unlikely(addr & blocksize_mask ||
-                                            count & blocksize_mask)) {
-                                       page = ERR_PTR(-EINVAL);
-                                       goto backout;
-                               }
-                               count = min(count, nbytes);
-                               goto same_bio;
-                       }
-               } else {
-                       blk_unget_page(page, &pvec);
-               }
-
-               /* bio is ready, submit it */
-               if (rw == READ)
-                       bio_set_pages_dirty(bio);
-               atomic_inc(bio_count);
-               submit_bio(rw, bio);
-       }
-
-completion:
-       iocb->ki_left -= nbytes;
-       nbytes = iocb->ki_left;
-       iocb->ki_pos += nbytes;
-
-       blk_run_address_space(inode->i_mapping);
-       if (atomic_dec_and_test(bio_count))
-               aio_complete(iocb, nbytes, 0);
-
-       return -EIOCBQUEUED;
-
-backout:
-       /*
-        * back out nbytes count constructed so far for this bio,
-        * we will throw away current bio.
-        */
-       nbytes += bio->bi_size;
-       bio_release_pages(bio);
-       bio_put(bio);
-
-       /*
-        * if no bio was submmitted, return the error code.
-        * otherwise, proceed with pending I/O completion.
-        */
-       if (atomic_read(bio_count) == 1)
-               return PTR_ERR(page);
-       goto completion;
-}
-#endif
-
 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 {
        return block_write_full_page(page, blkdev_get_block, wbc);
@@ -1334,7 +1139,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
 }

-const struct address_space_operations def_blk_aops = {
+static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
diff --git a/include/linux/aio.h b/include/linux/aio.h
index a9931e2e5624..0d0b7f629bd3 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -105,7 +105,6 @@ struct kiocb {
        wait_queue_t            ki_wait;
        loff_t                  ki_pos;

-       atomic_t                ki_bio_count;   /* num bio used for this iocb */
        void                    *private;
        /* State that we remember to be able to restart/retry  */
        unsigned short          ki_opcode;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4da441337d6e..4c59bdccd3ee 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -326,7 +326,6 @@ extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
-extern void bio_release_pages(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e1888cc5b8ae..6fe67d1939c2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -216,6 +216,7 @@ struct request {
        unsigned int cmd_len;
        unsigned char cmd[BLK_MAX_CDB];

+       unsigned int raw_data_len;
        unsigned int data_len;
        unsigned int sense_len;
        void *data;
@@ -258,6 +259,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);

 enum blk_queue_state {
        Queue_down,
@@ -294,6 +296,7 @@ struct request_queue
        merge_bvec_fn           *merge_bvec_fn;
        prepare_flush_fn        *prepare_flush_fn;
        softirq_done_fn         *softirq_done_fn;
+       dma_drain_needed_fn     *dma_drain_needed;

        /*
         * Dispatch queue sorting
@@ -698,8 +701,9 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-                              unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+                              dma_drain_needed_fn *dma_drain_needed,
+                              void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 98ffb6ead434..b84b848431f2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1590,7 +1590,6 @@ extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern struct block_device *open_by_devnum(dev_t, unsigned);
-extern const struct address_space_operations def_blk_aops;
 #else
 static inline void bd_forget(struct inode *inode) {}
 #endif
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 593b222d9dcc..1b4ccf25b4d2 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -50,6 +50,7 @@ struct cfq_io_context {
        sector_t seek_mean;

        struct list_head queue_list;
+       struct hlist_node cic_list;

        void (*dtor)(struct io_context *); /* destructor */
        void (*exit)(struct io_context *); /* called on task exit */
@@ -77,6 +78,7 @@ struct io_context {

        struct as_io_context *aic;
        struct radix_tree_root radix_root;
+       struct hlist_head cic_list;
        void *ioc_data;
 };

diff --git a/include/linux/libata.h b/include/linux/libata.h
index bc5a8d0c7090..2e098f940cec 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -278,7 +278,6 @@ enum {

        /* size of buffer to pad xfers ending on unaligned boundaries */
        ATA_DMA_PAD_SZ          = 4,
-       ATA_DMA_PAD_BUF_SZ      = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,

        /* ering size */
        ATA_ERING_SIZE          = 32,
@@ -457,24 +456,18 @@ struct ata_queued_cmd {
        unsigned long           flags;          /* ATA_QCFLAG_xxx */
        unsigned int            tag;
        unsigned int            n_elem;
-       unsigned int            mapped_n_elem;

        int                     dma_dir;

-       unsigned int            pad_len;
        unsigned int            sect_size;

        unsigned int            nbytes;
-       unsigned int            raw_nbytes;
        unsigned int            curbytes;

        struct scatterlist      *cursg;
        unsigned int            cursg_ofs;

-       struct scatterlist      *last_sg;
-       struct scatterlist      saved_last_sg;
        struct scatterlist      sgent;
-       struct scatterlist      extra_sg[2];

        struct scatterlist      *sg;

@@ -619,9 +612,6 @@ struct ata_port {
        struct ata_prd          *prd;    /* our SG list */
        dma_addr_t              prd_dma; /* and its DMA mapping */

-       void                    *pad;   /* array of DMA pad buffers */
-       dma_addr_t              pad_dma;
-
        struct ata_ioports      ioaddr; /* ATA cmd/ctl/dma register blocks */

        u8                      ctl;    /* cache of ATA control register */
@@ -1363,12 +1353,9 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
        qc->flags = 0;
        qc->cursg = NULL;
        qc->cursg_ofs = 0;
-       qc->nbytes = qc->raw_nbytes = qc->curbytes = 0;
+       qc->nbytes = qc->curbytes = 0;
        qc->n_elem = 0;
-       qc->mapped_n_elem = 0;
        qc->err_mask = 0;
-       qc->pad_len = 0;
-       qc->last_sg = NULL;
        qc->sect_size = ATA_SECT_SIZE;

        ata_tf_init(qc->dev, &qc->tf);
@@ -1423,19 +1410,6 @@ static inline unsigned int __ac_err_mask(u8 status)
        return mask;
 }

-static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
-{
-       ap->pad_dma = 0;
-       ap->pad = dmam_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
-                                     &ap->pad_dma, GFP_KERNEL);
-       return (ap->pad == NULL) ? -ENOMEM : 0;
-}
-
-static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
-{
-       dmam_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
-}
-
 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
 {
        return *(struct ata_port **)&host->hostdata[0];