 arch/ia64/Kconfig                                     |   4
 arch/ia64/lib/Makefile                                |   2
 arch/x86_64/kernel/Makefile                           |   2
 drivers/block/aoe/aoecmd.c                            |  13
 drivers/block/genhd.c                                 |  29
 drivers/block/ll_rw_blk.c                             |  47
 drivers/md/linear.c                                   |  10
 drivers/md/md.c                                       |   4
 drivers/md/multipath.c                                |  10
 drivers/md/raid0.c                                    |  10
 drivers/md/raid1.c                                    |  12
 drivers/md/raid10.c                                   |  12
 drivers/md/raid5.c                                    |  10
 drivers/md/raid6main.c                                |  12
 fs/partitions/check.c                                 |   7
 include/asm-x86_64/dma-mapping.h                      |  31
 include/asm-x86_64/swiotlb.h                          |   8
 include/linux/genhd.h                                 |  10
 lib/Makefile                                          |   2
 lib/swiotlb.c (renamed from arch/ia64/lib/swiotlb.c)  | 142
 20 files changed, 209 insertions(+), 168 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1642375fb14e..3b4248cff9a7 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,6 +26,10 @@ config MMU
 	bool
 	default y
 
+config SWIOTLB
+	bool
+	default y
+
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index cb1af597370b..ac64664a1807 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -9,7 +9,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 	bitop.o checksum.o clear_page.o csum_partial_copy.o \
 	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
 	flush.o ip_fast_csum.o do_csum.o \
-	memset.o strlen.o swiotlb.o
+	memset.o strlen.o
 
 lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index bcdd0a805fe7..14328cab5d3a 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
 obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
 
@@ -41,7 +40,6 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0
 bootflag-y += ../../i386/kernel/bootflag.o
 cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
 topology-y += ../../i386/mach-default/topology.o
-swiotlb-$(CONFIG_SWIOTLB) += ../../ia64/lib/swiotlb.o
 microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
 intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
 quirks-y += ../../i386/kernel/quirks.o
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 5c9c7c1a3d4c..326ca3876b68 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -468,16 +468,11 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 		unsigned long duration = jiffies - buf->start_time;
 		unsigned long n_sect = buf->bio->bi_size >> 9;
 		struct gendisk *disk = d->gd;
+		const int rw = bio_data_dir(buf->bio);
 
-		if (bio_data_dir(buf->bio) == WRITE) {
-			disk_stat_inc(disk, writes);
-			disk_stat_add(disk, write_ticks, duration);
-			disk_stat_add(disk, write_sectors, n_sect);
-		} else {
-			disk_stat_inc(disk, reads);
-			disk_stat_add(disk, read_ticks, duration);
-			disk_stat_add(disk, read_sectors, n_sect);
-		}
+		disk_stat_inc(disk, ios[rw]);
+		disk_stat_add(disk, ticks[rw], duration);
+		disk_stat_add(disk, sectors[rw], n_sect);
 		disk_stat_add(disk, io_ticks, duration);
 		n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
 		bio_endio(buf->bio, buf->bio->bi_size, n);
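The aoe hunk above shows the transformation that repeats in every driver touched by this patch: the separate read/write branches collapse into one path indexed by the I/O direction, since bio_data_dir()/rq_data_dir() evaluate to READ (0) or WRITE (1). A minimal standalone sketch of that convention, with demo_* names purely illustrative (the real array fields are added to struct disk_stats in include/linux/genhd.h further down):

/* Illustrative sketch only, not part of the patch. */
struct demo_disk_stats {
	unsigned sectors[2];	/* [0] = read, [1] = write */
	unsigned ios[2];
	unsigned merges[2];
	unsigned ticks[2];
};

static void demo_account_io(struct demo_disk_stats *st, int rw,
			    unsigned nr_sectors, unsigned duration)
{
	/* one code path covers both directions */
	st->ios[rw]++;
	st->sectors[rw] += nr_sectors;
	st->ticks[rw] += duration;
}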
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 486ce1fdeb8c..54aec4a1ae13 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -391,13 +391,12 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
 		"%8u %8u %8llu %8u "
 		"%8u %8u %8u"
 		"\n",
-		disk_stat_read(disk, reads), disk_stat_read(disk, read_merges),
-		(unsigned long long)disk_stat_read(disk, read_sectors),
-		jiffies_to_msecs(disk_stat_read(disk, read_ticks)),
-		disk_stat_read(disk, writes),
-		disk_stat_read(disk, write_merges),
-		(unsigned long long)disk_stat_read(disk, write_sectors),
-		jiffies_to_msecs(disk_stat_read(disk, write_ticks)),
+		disk_stat_read(disk, ios[0]), disk_stat_read(disk, merges[0]),
+		(unsigned long long)disk_stat_read(disk, sectors[0]),
+		jiffies_to_msecs(disk_stat_read(disk, ticks[0])),
+		disk_stat_read(disk, ios[1]), disk_stat_read(disk, merges[1]),
+		(unsigned long long)disk_stat_read(disk, sectors[1]),
+		jiffies_to_msecs(disk_stat_read(disk, ticks[1])),
 		disk->in_flight,
 		jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
 		jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
@@ -583,12 +582,12 @@ static int diskstats_show(struct seq_file *s, void *v)
 	preempt_enable();
 	seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n",
 		gp->major, n + gp->first_minor, disk_name(gp, n, buf),
-		disk_stat_read(gp, reads), disk_stat_read(gp, read_merges),
-		(unsigned long long)disk_stat_read(gp, read_sectors),
-		jiffies_to_msecs(disk_stat_read(gp, read_ticks)),
-		disk_stat_read(gp, writes), disk_stat_read(gp, write_merges),
-		(unsigned long long)disk_stat_read(gp, write_sectors),
-		jiffies_to_msecs(disk_stat_read(gp, write_ticks)),
+		disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
+		(unsigned long long)disk_stat_read(gp, sectors[0]),
+		jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
+		disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
+		(unsigned long long)disk_stat_read(gp, sectors[1]),
+		jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
 		gp->in_flight,
 		jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
 		jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
@@ -601,8 +600,8 @@ static int diskstats_show(struct seq_file *s, void *v)
 		seq_printf(s, "%4d %4d %s %u %u %u %u\n",
 			gp->major, n + gp->first_minor + 1,
 			disk_name(gp, n + 1, buf),
-			hd->reads, hd->read_sectors,
-			hd->writes, hd->write_sectors);
+			hd->ios[0], hd->sectors[0],
+			hd->ios[1], hd->sectors[1]);
 	}
 
 	return 0;
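For reference, the per-disk line emitted by diskstats_show() above keeps its existing field order; only the backing counters are renamed. The mapping, with index 0 for reads and index 1 for writes:

/*
 *  major minor name
 *  ios[0] merges[0] sectors[0] jiffies_to_msecs(ticks[0])   <- reads
 *  ios[1] merges[1] sectors[1] jiffies_to_msecs(ticks[1])   <- writes
 *  in_flight jiffies_to_msecs(io_ticks) jiffies_to_msecs(time_in_queue)
 */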
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 0af73512b9a8..2747741677fb 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2387,16 +2387,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	if (rw == READ) {
-		__disk_stat_add(rq->rq_disk, read_sectors, nr_sectors);
-		if (!new_io)
-			__disk_stat_inc(rq->rq_disk, read_merges);
-	} else if (rw == WRITE) {
-		__disk_stat_add(rq->rq_disk, write_sectors, nr_sectors);
-		if (!new_io)
-			__disk_stat_inc(rq->rq_disk, write_merges);
-	}
-	if (new_io) {
+	if (!new_io) {
+		__disk_stat_inc(rq->rq_disk, merges[rw]);
+	} else {
 		disk_round_stats(rq->rq_disk);
 		rq->rq_disk->in_flight++;
 	}
@@ -2791,17 +2784,11 @@ static inline void blk_partition_remap(struct bio *bio)
 
 	if (bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;
+		const int rw = bio_data_dir(bio);
+
+		p->sectors[rw] += bio_sectors(bio);
+		p->ios[rw]++;
 
-		switch (bio_data_dir(bio)) {
-		case READ:
-			p->read_sectors += bio_sectors(bio);
-			p->reads++;
-			break;
-		case WRITE:
-			p->write_sectors += bio_sectors(bio);
-			p->writes++;
-			break;
-		}
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 	}
@@ -3048,6 +3035,12 @@ static int __end_that_request_first(struct request *req, int uptodate,
 				(unsigned long long)req->sector);
 	}
 
+	if (blk_fs_request(req) && req->rq_disk) {
+		const int rw = rq_data_dir(req);
+
+		__disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+	}
+
 	total_bytes = bio_nbytes = 0;
 	while ((bio = req->bio) != NULL) {
 		int nbytes;
@@ -3176,16 +3169,10 @@ void end_that_request_last(struct request *req)
 
 	if (disk && blk_fs_request(req)) {
 		unsigned long duration = jiffies - req->start_time;
-		switch (rq_data_dir(req)) {
-		case WRITE:
-			__disk_stat_inc(disk, writes);
-			__disk_stat_add(disk, write_ticks, duration);
-			break;
-		case READ:
-			__disk_stat_inc(disk, reads);
-			__disk_stat_add(disk, read_ticks, duration);
-			break;
-		}
+		const int rw = rq_data_dir(req);
+
+		__disk_stat_inc(disk, ios[rw]);
+		__disk_stat_add(disk, ticks[rw], duration);
 		disk_round_stats(disk);
 		disk->in_flight--;
 	}
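One behavioural detail is visible in the ll_rw_blk.c hunks above: drive_stat_acct() no longer bumps the sector counters when a request is queued; instead __end_that_request_first() adds to sectors[rw] as bytes actually complete, converting the completed byte count to 512-byte sectors. A trivial sketch of that conversion, illustrative only:

static inline unsigned long demo_bytes_to_sectors(unsigned int nr_bytes)
{
	return nr_bytes >> 9;	/* one sector == 512 bytes */
}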
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index bb279fad2fd2..946efef3a8f5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -271,6 +271,7 @@ static int linear_stop (mddev_t *mddev)
 
 static int linear_make_request (request_queue_t *q, struct bio *bio)
 {
+	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *tmp_dev;
 	sector_t block;
@@ -280,13 +281,8 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
 		return 0;
 	}
 
-	if (bio_data_dir(bio)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
 
 	tmp_dev = which_dev(mddev, bio->bi_sector);
 	block = bio->bi_sector >> 1;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2a8a5696bf8a..9ecf51ee596f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3466,8 +3466,8 @@ static int is_mddev_idle(mddev_t *mddev)
 	idle = 1;
 	ITERATE_RDEV(mddev,rdev,tmp) {
 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-		curr_events = disk_stat_read(disk, read_sectors) +
-			      disk_stat_read(disk, write_sectors) -
+		curr_events = disk_stat_read(disk, sectors[0]) +
+			      disk_stat_read(disk, sectors[1]) -
 			atomic_read(&disk->sync_io);
 		/* Allow some slack between valud of curr_events and last_events,
 		 * as there are some uninteresting races.
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1151c3ed3006..c06f4474192b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -168,6 +168,7 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
 	multipath_conf_t *conf = mddev_to_conf(mddev);
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
+	const int rw = bio_data_dir(bio);
 
 	if (unlikely(bio_barrier(bio))) {
 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -179,13 +180,8 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
 	mp_bh->master_bio = bio;
 	mp_bh->mddev = mddev;
 
-	if (bio_data_dir(bio)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
 
 	mp_bh->path = multipath_map(conf);
 	if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f6757259ce7f..fece3277c2a5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -403,19 +403,15 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 	mdk_rdev_t *tmp_dev;
 	unsigned long chunk;
 	sector_t block, rsect;
+	const int rw = bio_data_dir(bio);
 
 	if (unlikely(bio_barrier(bio))) {
 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
 		return 0;
 	}
 
-	if (bio_data_dir(bio)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
 
 	chunk_size = mddev->chunk_size >> 10;
 	chunk_sects = mddev->chunk_size >> 9;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0e1f148dd41d..e16f473bcf46 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -647,6 +647,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	unsigned long flags;
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
+	const int rw = bio_data_dir(bio);
 
 	if (unlikely(bio_barrier(bio))) {
 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -665,13 +666,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	conf->nr_pending++;
 	spin_unlock_irq(&conf->resync_lock);
 
-	if (bio_data_dir(bio)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
 
 	/*
 	 * make_request() can abort the operation when READA is being
@@ -686,7 +682,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	r1_bio->mddev = mddev;
 	r1_bio->sector = bio->bi_sector;
 
-	if (bio_data_dir(bio) == READ) {
+	if (rw == READ) {
 		/*
 		 * read balancing logic:
 		 */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 28dd028415e4..bbe40e9cf923 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -668,6 +668,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	struct bio *read_bio;
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
+	const int rw = bio_data_dir(bio);
 
 	if (unlikely(bio_barrier(bio))) {
 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -718,13 +719,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	conf->nr_pending++;
 	spin_unlock_irq(&conf->resync_lock);
 
-	if (bio_data_dir(bio)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
 
 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
@@ -734,7 +730,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	r10_bio->mddev = mddev;
 	r10_bio->sector = bio->bi_sector;
 
-	if (bio_data_dir(bio) == READ) {
+	if (rw == READ) {
 		/*
 		 * read balancing logic:
 		 */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4683ca24c046..6497295ebfb9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1462,6 +1462,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
 	sector_t new_sector;
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
+	const int rw = bio_data_dir(bi);
 
 	if (unlikely(bio_barrier(bi))) {
 		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
@@ -1470,13 +1471,8 @@ static int make_request (request_queue_t *q, struct bio * bi)
 
 	md_write_start(mddev, bi);
 
-	if (bio_data_dir(bi)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bi));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 267eb1430c83..6437a95ffc1c 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1621,6 +1621,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
 	sector_t new_sector;
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
+	const int rw = bio_data_dir(bi);
 
 	if (unlikely(bio_barrier(bi))) {
 		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
@@ -1629,13 +1630,8 @@ static int make_request (request_queue_t *q, struct bio * bi)
 
 	md_write_start(mddev, bi);
 
-	if (bio_data_dir(bi)==WRITE) {
-		disk_stat_inc(mddev->gendisk, writes);
-		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
-	} else {
-		disk_stat_inc(mddev->gendisk, reads);
-		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bi));
-	}
+	disk_stat_inc(mddev->gendisk, ios[rw]);
+	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -1682,7 +1678,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
 	if (--bi->bi_phys_segments == 0) {
 		int bytes = bi->bi_size;
 
-		if ( bio_data_dir(bi) == WRITE )
+		if (rw == WRITE )
 			md_write_end(mddev);
 		bi->bi_size = 0;
 		bi->bi_end_io(bi, bytes, 0);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 9c06c5434ec4..8dc1822a7022 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -246,8 +246,8 @@ static ssize_t part_size_read(struct hd_struct * p, char *page)
 static ssize_t part_stat_read(struct hd_struct * p, char *page)
 {
 	return sprintf(page, "%8u %8llu %8u %8llu\n",
-		       p->reads, (unsigned long long)p->read_sectors,
-		       p->writes, (unsigned long long)p->write_sectors);
+		       p->ios[0], (unsigned long long)p->sectors[0],
+		       p->ios[1], (unsigned long long)p->sectors[1]);
 }
 static struct part_attribute part_attr_uevent = {
 	.attr = {.name = "uevent", .mode = S_IWUSR },
@@ -303,7 +303,8 @@ void delete_partition(struct gendisk *disk, int part)
 	disk->part[part-1] = NULL;
 	p->start_sect = 0;
 	p->nr_sects = 0;
-	p->reads = p->writes = p->read_sectors = p->write_sectors = 0;
+	p->ios[0] = p->ios[1] = 0;
+	p->sectors[0] = p->sectors[1] = 0;
 	devfs_remove("%s/part%d", disk->devfs_name, part);
 	kobject_unregister(&p->kobj);
 }
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 54a380efed41..36d16dfbac88 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -85,10 +85,33 @@ static inline void dma_sync_single_for_device(struct device *hwdev,
 	flush_write_buffers();
 }
 
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
-	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
-	dma_sync_single_for_device(dev, dma_handle, size, dir)
+static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
+						 dma_addr_t dma_handle,
+						 unsigned long offset,
+						 size_t size, int direction)
+{
+	if (direction == DMA_NONE)
+		out_of_line_bug();
+
+	if (swiotlb)
+		return swiotlb_sync_single_range_for_cpu(hwdev,dma_handle,offset,size,direction);
+
+	flush_write_buffers();
+}
+
+static inline void dma_sync_single_range_for_device(struct device *hwdev,
+						    dma_addr_t dma_handle,
+						    unsigned long offset,
+						    size_t size, int direction)
+{
+	if (direction == DMA_NONE)
+		out_of_line_bug();
+
+	if (swiotlb)
+		return swiotlb_sync_single_range_for_device(hwdev,dma_handle,offset,size,direction);
+
+	flush_write_buffers();
+}
 
 static inline void dma_sync_sg_for_cpu(struct device *hwdev,
 					struct scatterlist *sg,
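A hypothetical driver fragment showing how the two new inline helpers above are meant to be used: sync only a sub-range of a larger streaming mapping around CPU access instead of the whole buffer. The my_*/inspect_header names are illustrative and not part of this patch; the helper signatures match the declarations above.

#include <linux/device.h>
#include <linux/dma-mapping.h>

extern void inspect_header(void *hdr, size_t len);	/* hypothetical helper */

static void my_peek_rx_header(struct device *my_dev, dma_addr_t my_buf_dma,
			      void *my_buf_cpu, unsigned long offset,
			      size_t len)
{
	/* make just the header bytes coherent for the CPU */
	dma_sync_single_range_for_cpu(my_dev, my_buf_dma, offset, len,
				      DMA_FROM_DEVICE);

	inspect_header(my_buf_cpu + offset, len);

	/* hand the same sub-range back to the device */
	dma_sync_single_range_for_device(my_dev, my_buf_dma, offset, len,
					 DMA_FROM_DEVICE);
}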
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 7cbfd10ecc3c..dddf1b218681 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -15,6 +15,14 @@ extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
 extern void swiotlb_sync_single_for_device(struct device *hwdev,
 					    dma_addr_t dev_addr,
 					    size_t size, int dir);
+extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
+					       dma_addr_t dev_addr,
+					       unsigned long offset,
+					       size_t size, int dir);
+extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
+						  dma_addr_t dev_addr,
+						  unsigned long offset,
+						  size_t size, int dir);
 extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
 				    struct scatterlist *sg, int nelems,
 				    int dir);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index eabdb5cce357..8eeaa53a68c9 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -78,7 +78,7 @@ struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
 	struct kobject kobj;
-	unsigned reads, read_sectors, writes, write_sectors;
+	unsigned ios[2], sectors[2];
 	int policy, partno;
 };
 
@@ -89,10 +89,10 @@
 #define GENHD_FL_SUPPRESS_PARTITION_INFO	32
 
 struct disk_stats {
-	unsigned read_sectors, write_sectors;
-	unsigned reads, writes;
-	unsigned read_merges, write_merges;
-	unsigned read_ticks, write_ticks;
+	unsigned sectors[2];
+	unsigned ios[2];
+	unsigned merges[2];
+	unsigned ticks[2];
 	unsigned io_ticks;
 	unsigned time_in_queue;
 };
diff --git a/lib/Makefile b/lib/Makefile
index 44a46750690a..8535f4d7d1c3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -44,6 +44,8 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 
+obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/arch/ia64/lib/swiotlb.c b/lib/swiotlb.c
index 96edcc0fdcd9..57216f3544ca 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
 /*
  * Dynamic DMA mapping support.
  *
- * This implementation is for IA-64 platforms that do not support
+ * This implementation is for IA-64 and EM64T platforms that do not support
  * I/O TLBs (aka DMA address translation hardware).
  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -11,21 +11,23 @@
  * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
  * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
  *			unnecessary i-cache flushing.
  * 04/07/.. ak		Better overflow handling. Assorted fixes.
+ * 05/09/10 linville	Add support for syncing ranges, support syncing for
+ *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
  */
 
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 
 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/dma.h>
+#include <asm/scatterlist.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -58,6 +60,14 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+	SYNC_FOR_CPU = 0,
+	SYNC_FOR_DEVICE = 1,
+};
+
 int swiotlb_force;
 
 /*
@@ -117,7 +127,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the PCI DMA API.
+ * structures for the software IO TLB used to implement the DMA API.
  */
 void
 swiotlb_init_with_default_size (size_t default_size)
@@ -397,21 +407,28 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 }
 
 static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size,
+	    int dir, int target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	char *buffer = io_tlb_orig_addr[index];
 
-	/*
-	 * bounce... copy the data back into/from the original buffer
-	 * XXX How do you handle DMA_BIDIRECTIONAL here ?
-	 */
-	if (dir == DMA_FROM_DEVICE)
-		memcpy(buffer, dma_addr, size);
-	else if (dir == DMA_TO_DEVICE)
-		memcpy(dma_addr, buffer, size);
-	else
+	switch (target) {
+	case SYNC_FOR_CPU:
+		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+			memcpy(buffer, dma_addr, size);
+		else if (dir != DMA_TO_DEVICE)
+			BUG();
+		break;
+	case SYNC_FOR_DEVICE:
+		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+			memcpy(dma_addr, buffer, size);
+		else if (dir != DMA_FROM_DEVICE)
+			BUG();
+		break;
+	default:
 		BUG();
+	}
 }
 
 void *
@@ -485,24 +502,24 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
 	 * Unfortunately the drivers cannot handle this operation properly.
-	 * unless they check for pci_dma_mapping_error (most don't)
+	 * unless they check for dma_mapping_error (most don't)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
 
 	if (size > io_tlb_overflow && do_panic) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Random memory would be DMAed\n");
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Memory would be corrupted\n");
+		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Random memory would be DMAed\n");
 	}
 }
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
- * PCI address to use is returned.
+ * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
@@ -589,39 +606,73 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  * after a transfer.
  *
  * If you perform a swiotlb_map_single() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
- * call this function before doing so. At the next point you give the PCI dma
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so. At the next point you give the dma
  * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-			    size_t size, int dir)
+static inline void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+		    size_t size, int dir, int target)
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
 	if (dir == DMA_NONE)
 		BUG();
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		sync_single(hwdev, dma_addr, size, dir);
+		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		mark_clean(dma_addr, size);
 }
 
 void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+			    size_t size, int dir)
+{
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 			       size_t size, int dir)
 {
-	char *dma_addr = phys_to_virt(dev_addr);
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+/*
+ * Same as above, but for a sub-range of the mapping.
+ */
+static inline void
+swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
+			  unsigned long offset, size_t size,
+			  int dir, int target)
+{
+	char *dma_addr = phys_to_virt(dev_addr) + offset;
 
 	if (dir == DMA_NONE)
 		BUG();
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		sync_single(hwdev, dma_addr, size, dir);
+		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		mark_clean(dma_addr, size);
 }
 
+void
+swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+				  unsigned long offset, size_t size, int dir)
+{
+	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+				  SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
+				     unsigned long offset, size_t size, int dir)
+{
+	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+				  SYNC_FOR_DEVICE);
+}
+
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -696,9 +747,9 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
  * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
  * and usage.
  */
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			int nelems, int dir)
+static inline void
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
+		int nelems, int dir, int target)
 {
 	int i;
 
@@ -708,22 +759,21 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			sync_single(hwdev, (void *) sg->dma_address,
-				    sg->dma_length, dir);
+				    sg->dma_length, dir, target);
+}
+
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+			int nelems, int dir)
+{
+	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			   int nelems, int dir)
 {
-	int i;
-
-	if (dir == DMA_NONE)
-		BUG();
-
-	for (i = 0; i < nelems; i++, sg++)
-		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address,
-				    sg->dma_length, dir);
+	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
 int
@@ -733,9 +783,9 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 }
 
 /*
- * Return whether the given PCI device DMA address mask can be supported
+ * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
 int
@@ -751,6 +801,8 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 EXPORT_SYMBOL(swiotlb_unmap_sg);
 EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
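The lib/swiotlb.c rework above follows one pattern: the exported sync routines become thin wrappers around common static inline helpers that carry an extra target argument, and sync_single() uses that target, rather than the mapping direction alone, to choose the bounce-copy direction; this is what makes syncing DMA_BIDIRECTIONAL mappings (the removed XXX comment) well defined. A compressed, standalone sketch of that copy-direction choice, with demo_* names purely illustrative:

#include <string.h>	/* standalone illustration, not kernel code */

enum demo_target { DEMO_SYNC_FOR_CPU, DEMO_SYNC_FOR_DEVICE };

/* orig is the driver's buffer, bounce the swiotlb slot */
static void demo_bounce(char *orig, char *bounce, size_t size,
			enum demo_target target)
{
	if (target == DEMO_SYNC_FOR_CPU)
		memcpy(orig, bounce, size);	/* CPU is about to read the data */
	else
		memcpy(bounce, orig, size);	/* device is about to read the data */
}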
