 arch/ia64/ia32/sys_ia32.c    |  10
 arch/ia64/kernel/efi.c       |  36
 arch/ia64/kernel/perfmon.c   |  11
 arch/ia64/kernel/setup.c     |  30
 arch/ia64/lib/Makefile       |   3
 arch/ia64/mm/contig.c        |   5
 arch/ia64/mm/discontig.c     |   6
 include/asm-ia64/meminit.h   |   6
 include/asm-ia64/resource.h  |   1
 include/asm-ia64/swiotlb.h   |   9
 include/asm-x86_64/swiotlb.h |   1
 lib/swiotlb.c                | 184
 12 files changed, 121 insertions(+), 181 deletions(-)
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index d430d36ae49d..0afb4fe7c35b 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1267,6 +1267,10 @@ sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned i
         struct getdents32_callback buf;
         int error;
 
+        error = -EFAULT;
+        if (!access_ok(VERIFY_WRITE, dirent, count))
+                goto out;
+
         error = -EBADF;
         file = fget(fd);
         if (!file)
@@ -1283,10 +1287,10 @@ sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned i
         error = buf.error;
         lastdirent = buf.previous;
         if (lastdirent) {
-                error = -EINVAL;
                 if (put_user(file->f_pos, &lastdirent->d_off))
-                        goto out_putf;
-                error = count - buf.count;
+                        error = -EFAULT;
+                else
+                        error = count - buf.count;
         }
 
 out_putf:
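
The two hunks above tighten sys32_getdents() error handling: the user buffer is now validated up front with access_ok(), and a failed put_user() of the final d_off is reported as -EFAULT rather than the misleading -EINVAL. A condensed userspace model of the resulting error flow (stub parameters only; the real syscall works through fget()/vfs_readdir() and struct file):

    #include <stdio.h>

    #define EFAULT 14

    /* Model of the revised sys32_getdents() error path. user_buf_ok
     * stands in for access_ok(), put_user_fails for put_user(). */
    static int getdents_model(int user_buf_ok, int put_user_fails,
                              unsigned int count, unsigned int remaining)
    {
            if (!user_buf_ok)               /* new up-front check */
                    return -EFAULT;
            /* ... fget(fd), vfs_readdir() fill the buffer ... */
            if (put_user_fails)             /* d_off write-back failed */
                    return -EFAULT;         /* was -EINVAL before the fix */
            return count - remaining;       /* bytes consumed by entries */
    }

    int main(void)
    {
            printf("%d\n", getdents_model(1, 0, 4096, 1024));  /* 3072 */
            printf("%d\n", getdents_model(0, 0, 4096, 1024));  /* -14 */
            return 0;
    }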
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 772ba6fe110f..4061593e5b17 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -21,6 +21,7 @@
  * Skip non-WB memory and ignore empty memory ranges.
  */
 #include <linux/module.h>
+#include <linux/bootmem.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -1009,6 +1010,11 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
         } else
                 ae = efi_md_end(md);
 
+#ifdef CONFIG_CRASH_DUMP
+        /* saved_max_pfn should ignore max_addr= command line arg */
+        if (saved_max_pfn < (ae >> PAGE_SHIFT))
+                saved_max_pfn = (ae >> PAGE_SHIFT);
+#endif
         /* keep within max_addr= and min_addr= command line arg */
         as = max(as, min_addr);
         ae = min(ae, max_addr);
@@ -1177,3 +1183,33 @@ kdump_find_rsvd_region (unsigned long size,
         return ~0UL;
 }
 #endif
+
+#ifdef CONFIG_PROC_VMCORE
+/* locate the size of the descriptor at a certain address */
+unsigned long
+vmcore_find_descriptor_size (unsigned long address)
+{
+        void *efi_map_start, *efi_map_end, *p;
+        efi_memory_desc_t *md;
+        u64 efi_desc_size;
+        unsigned long ret = 0;
+
+        efi_map_start = __va(ia64_boot_param->efi_memmap);
+        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+        efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+                md = p;
+                if (efi_wb(md) && md->type == EFI_LOADER_DATA
+                    && md->phys_addr == address) {
+                        ret = efi_md_size(md);
+                        break;
+                }
+        }
+
+        if (ret == 0)
+                printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
+
+        return ret;
+}
+#endif
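
vmcore_find_descriptor_size() exists because a kdump kernel learns the ELF core header's address from the elfcorehdr= option but not its size; the size has to be recovered from the EFI memory map entry (an EFI_LOADER_DATA descriptor) whose base matches that address. A standalone model of the lookup (the descriptor layout here is made up, not EFI's):

    #include <stdio.h>

    struct desc {
            unsigned long base, size;
            int loader_data;  /* stands in for md->type == EFI_LOADER_DATA */
    };

    /* Return the size of the loader-data descriptor starting at addr,
     * or 0 when no such descriptor exists (the "cannot locate" case). */
    static unsigned long find_descriptor_size(const struct desc *map, int n,
                                              unsigned long addr)
    {
            for (int i = 0; i < n; i++)
                    if (map[i].loader_data && map[i].base == addr)
                            return map[i].size;
            return 0;
    }

    int main(void)
    {
            const struct desc map[] = {
                    { 0x01000000, 0x200000, 0 },
                    { 0x04000000, 0x010000, 1 },  /* ELF core header region */
            };
            printf("%#lx\n", find_descriptor_size(map, 2, 0x04000000));
            return 0;
    }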
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9ddf896a137a..abc7ad035886 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2299,7 +2299,7 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad
  * allocate a sampling buffer and remaps it into the user address space of the task
  */
 static int
-pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
+pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
 {
         struct mm_struct *mm = task->mm;
         struct vm_area_struct *vma = NULL;
@@ -2349,6 +2349,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
          * partially initialize the vma for the sampling buffer
          */
         vma->vm_mm = mm;
+        vma->vm_file = filp;
         vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
         vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
 
@@ -2387,6 +2388,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
                 goto error;
         }
 
+        get_file(filp);
+
         /*
          * now insert the vma in the vm list for the process, must be
          * done with mmap lock held
@@ -2464,7 +2467,7 @@ pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 }
 
 static int
-pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
+pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
                      unsigned int cpu, pfarg_context_t *arg)
 {
         pfm_buffer_fmt_t *fmt = NULL;
@@ -2505,7 +2508,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int
         /*
          * buffer is always remapped into the caller's address space
          */
-        ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
+        ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
         if (ret) goto error;
 
         /* keep track of user address of buffer */
@@ -2716,7 +2719,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
          * does the user want to sample?
          */
         if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
-                ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
+                ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
                 if (ret) goto buffer_error;
         }
 
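
The perfmon change threads the context's struct file down to pfm_smpl_buffer_alloc() so the sampling-buffer vma carries a real vm_file, and takes a matching get_file() reference: unmapping fput()s once per vma that points at a file, so a vma holding an unreferenced file pointer would underflow the count. A toy model of the invariant being restored (the real code uses struct file with atomic counters):

    #include <assert.h>

    struct file { int count; };

    static void get_file(struct file *f) { f->count++; }
    static void fput(struct file *f)     { f->count--; assert(f->count >= 0); }

    int main(void)
    {
            struct file f = { .count = 1 };  /* reference held by the fd */

            get_file(&f);  /* vma->vm_file = filp now owns a reference */
            fput(&f);      /* unmapping the sampling buffer drops it */
            fput(&f);      /* closing the fd drops the original */
            assert(f.count == 0);  /* balanced: no leak, no underflow */
            return 0;
    }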
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5fa09d141ab7..7d6fe65c93f4 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -251,6 +251,12 @@ reserve_memory (void)
         }
 #endif
 
+#ifdef CONFIG_PROC_VMCORE
+        if (reserve_elfcorehdr(&rsvd_region[n].start,
+                               &rsvd_region[n].end) == 0)
+                n++;
+#endif
+
         efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
         n++;
 
@@ -453,6 +459,30 @@ static int __init parse_elfcorehdr(char *arg)
         return 0;
 }
 early_param("elfcorehdr", parse_elfcorehdr);
+
+int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
+{
+        unsigned long length;
+
+        /* We get the address using the kernel command line,
+         * but the size is extracted from the EFI tables.
+         * Both address and size are required for reservation
+         * to work properly.
+         */
+
+        if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+                return -EINVAL;
+
+        if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
+                elfcorehdr_addr = ELFCORE_ADDR_MAX;
+                return -EINVAL;
+        }
+
+        *start = (unsigned long)__va(elfcorehdr_addr);
+        *end = *start + length;
+        return 0;
+}
+
 #endif /* CONFIG_PROC_VMCORE */
 
 void __init
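
reserve_elfcorehdr() is the glue between the pieces above: parse_elfcorehdr() records the address from the elfcorehdr= option, the size comes from vmcore_find_descriptor_size(), and only when both are valid does reserve_memory() consume an rsvd_region slot. A userspace model of that decision (find_size() stands in for the EFI lookup; addresses are made up):

    #include <stdio.h>

    #define ELFCORE_ADDR_MAX (~0UL)

    static unsigned long elfcorehdr_addr = 0x04000000;  /* from elfcorehdr= */

    static unsigned long find_size(unsigned long addr)  /* EFI lookup stub */
    {
            return addr == 0x04000000 ? 0x10000 : 0;
    }

    static int reserve_elfcorehdr_model(unsigned long *start,
                                        unsigned long *end)
    {
            unsigned long length;

            if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
                    return -1;                 /* no address given */
            length = find_size(elfcorehdr_addr);
            if (length == 0) {
                    elfcorehdr_addr = ELFCORE_ADDR_MAX;  /* disable vmcore */
                    return -1;
            }
            *start = elfcorehdr_addr;          /* kernel maps via __va() */
            *end = *start + length;
            return 0;
    }

    int main(void)
    {
            unsigned long s, e;
            if (reserve_elfcorehdr_model(&s, &e) == 0)
                    printf("reserve %#lx-%#lx\n", s, e);
            return 0;
    }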
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 38fa6e49e791..46edf8444c7e 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -9,12 +9,11 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
         checksum.o clear_page.o csum_partial_copy.o \
         clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
         flush.o ip_fast_csum.o do_csum.o \
-        memset.o strlen.o
+        memset.o strlen.o xor.o
 
 lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID456) += xor.o
 
 AFLAGS___divdi3.o =
 AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index ca4d41e5f177..fb0f4698f5d0 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -197,11 +197,6 @@ find_memory (void)
 
         find_initrd();
 
-#ifdef CONFIG_CRASH_DUMP
-        /* If we are doing a crash dump, we still need to know the real mem
-         * size before original memory map is reset. */
-        saved_max_pfn = max_pfn;
-#endif
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 16835108bb5b..11a2d8825d89 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -480,12 +480,6 @@ void __init find_memory(void)
         max_pfn = max_low_pfn;
 
         find_initrd();
-
-#ifdef CONFIG_CRASH_DUMP
-        /* If we are doing a crash dump, we still need to know the real mem
-         * size before original memory map is reset. */
-        saved_max_pfn = max_pfn;
-#endif
 }
 
 #ifdef CONFIG_SMP
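
Both find_memory() deletions pair with the efi.c hunk above: saved_max_pfn used to be copied from max_pfn here, after the memory map had already been clamped by the max_addr= option, so a dump kernel booted with max_addr= under-reported the machine's real size. Capturing it per descriptor in efi_memmap_init(), before the clamp, avoids that; a toy illustration of the ordering (pfn values invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned long real_end_pfn = 0x100000;  /* true end of memory */
            unsigned long clamp_pfn    = 0x040000;  /* from max_addr= */

            /* new scheme: capture before clamping */
            unsigned long saved_max_pfn = real_end_pfn;
            unsigned long max_pfn = real_end_pfn < clamp_pfn
                                    ? real_end_pfn : clamp_pfn;

            printf("max_pfn=%#lx saved_max_pfn=%#lx\n",
                   max_pfn, saved_max_pfn);
            return 0;
    }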
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 6dd476b652c6..21ec5f3d23de 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -17,10 +17,11 @@
  *      - kernel code & data
  *      - crash dumping code reserved region
  *      - Kernel memory map built from EFI memory map
+ *      - ELF core header
  *
  * More could be added if necessary
  */
-#define IA64_MAX_RSVD_REGIONS 7
+#define IA64_MAX_RSVD_REGIONS 8
 
 struct rsvd_region {
         unsigned long start;    /* virtual address of beginning of element */
@@ -36,6 +37,9 @@ extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
 extern void efi_memmap_init(unsigned long *, unsigned long *);
 
+extern unsigned long vmcore_find_descriptor_size(unsigned long address);
+extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
+
 /*
  * For rounding an address to the next IA64_GRANULE_SIZE or order
 */
diff --git a/include/asm-ia64/resource.h b/include/asm-ia64/resource.h
index 77b1eee01f30..ba2272a87fc7 100644
--- a/include/asm-ia64/resource.h
+++ b/include/asm-ia64/resource.h
@@ -2,7 +2,6 @@
 #define _ASM_IA64_RESOURCE_H
 
 #include <asm/ustack.h>
-#define _STK_LIM_MAX DEFAULT_USER_STACK_SIZE
 #include <asm-generic/resource.h>
 
 #endif /* _ASM_IA64_RESOURCE_H */
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h
deleted file mode 100644
index 452c162dee4e..000000000000
--- a/include/asm-ia64/swiotlb.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_SWIOTLB_H
-#define _ASM_SWIOTLB_H 1
-
-#include <asm/machvec.h>
-
-#define SWIOTLB_ARCH_NEED_LATE_INIT
-#define SWIOTLB_ARCH_NEED_ALLOC
-
-#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ab913ffcad56..f9c589539a82 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -44,7 +44,6 @@ extern void swiotlb_init(void);
 extern int swiotlb_force;
 
 #ifdef CONFIG_SWIOTLB
-#define SWIOTLB_ARCH_NEED_ALLOC
 extern int swiotlb;
 #else
 #define swiotlb 0
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 623a68af8b18..9970e55c90bd 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,7 +28,6 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/scatterlist.h>
-#include <asm/swiotlb.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -36,10 +35,8 @@
 #define OFFSET(val,align) ((unsigned long) \
                           ( (val) & ( (align) - 1)))
 
-#ifndef SG_ENT_VIRT_ADDRESS
 #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
 #define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-#endif
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -104,25 +101,13 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
-typedef char *io_tlb_addr_t;
-#define swiotlb_orig_addr_null(buffer) (!(buffer))
-#define ptr_to_io_tlb_addr(ptr) (ptr)
-#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
-#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
-#endif
-static io_tlb_addr_t *io_tlb_orig_addr;
+static unsigned char **io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
 */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
-#ifdef SWIOTLB_EXTRA_VARIABLES
-SWIOTLB_EXTRA_VARIABLES;
-#endif
-
-#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -137,25 +122,9 @@ setup_io_tlb_npages(char *str)
                 swiotlb_force = 1;
         return 1;
 }
-#endif
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-#ifndef swiotlb_adjust_size
-#define swiotlb_adjust_size(size) ((void)0)
-#endif
-
-#ifndef swiotlb_adjust_seg
-#define swiotlb_adjust_seg(start, size) ((void)0)
-#endif
-
-#ifndef swiotlb_print_info
-#define swiotlb_print_info(bytes) \
-        printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
-               "0x%lx\n", bytes >> 20, \
-               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
-#endif
-
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -169,8 +138,6 @@ swiotlb_init_with_default_size(size_t default_size)
                 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
         }
-        swiotlb_adjust_size(io_tlb_nslabs);
-        swiotlb_adjust_size(io_tlb_overflow);
 
         bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
@@ -188,14 +155,10 @@ swiotlb_init_with_default_size(size_t default_size)
          * between io_tlb_start and io_tlb_end.
          */
         io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-        for (i = 0; i < io_tlb_nslabs; i++) {
-                if ( !(i % IO_TLB_SEGSIZE) )
-                        swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
-                                IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+        for (i = 0; i < io_tlb_nslabs; i++)
                 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-        }
         io_tlb_index = 0;
-        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
+        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
 
         /*
          * Get the overflow emergency buffer
@@ -203,21 +166,17 @@ swiotlb_init_with_default_size(size_t default_size)
         io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
         if (!io_tlb_overflow_buffer)
                 panic("Cannot allocate SWIOTLB overflow buffer!\n");
-        swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
 
-        swiotlb_print_info(bytes);
+        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
-#ifndef __swiotlb_init_with_default_size
-#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
-#endif
 
 void __init
 swiotlb_init(void)
 {
-        __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
 }
 
-#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
 /*
  * Systems with larger DMA zones (those that don't support ISA) can
  * initialize the swiotlb later using the slab allocator if needed.
@@ -275,12 +234,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
                 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
         io_tlb_index = 0;
 
-        io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
-                                  get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+        io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+                                  get_order(io_tlb_nslabs * sizeof(char *)));
         if (!io_tlb_orig_addr)
                 goto cleanup3;
 
-        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
+        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
 
         /*
          * Get the overflow emergency buffer
@@ -290,17 +249,19 @@ swiotlb_late_init_with_default_size(size_t default_size)
         if (!io_tlb_overflow_buffer)
                 goto cleanup4;
 
-        swiotlb_print_info(bytes);
+        printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+               "0x%lx\n", bytes >> 20,
+               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
         return 0;
 
 cleanup4:
-        free_pages((unsigned long)io_tlb_orig_addr,
-                   get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+        free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+                                                              sizeof(char *)));
         io_tlb_orig_addr = NULL;
 cleanup3:
-        free_pages((unsigned long)io_tlb_list,
-                   get_order(io_tlb_nslabs * sizeof(int)));
+        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                         sizeof(int)));
         io_tlb_list = NULL;
 cleanup2:
         io_tlb_end = NULL;
@@ -310,9 +271,7 @@ cleanup1:
         io_tlb_nslabs = req_nslabs;
         return -ENOMEM;
 }
-#endif
 
-#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
 static int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
@@ -323,35 +282,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
         return (addr & ~mask) != 0;
 }
 
-static inline int range_needs_mapping(const void *ptr, size_t size)
-{
-        return swiotlb_force;
-}
-
-static inline int order_needs_mapping(unsigned int order)
-{
-        return 0;
-}
-#endif
-
-static void
-__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
-{
-#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
-        if (dir == DMA_TO_DEVICE)
-                memcpy(dma_addr, buffer, size);
-        else
-                memcpy(buffer, dma_addr, size);
-#else
-        __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
-#endif
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
 */
 static void *
-map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 {
         unsigned long flags;
         char *dma_addr;
@@ -424,7 +359,7 @@ map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
          */
         io_tlb_orig_addr[index] = buffer;
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-                __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+                memcpy(dma_addr, buffer, size);
 
         return dma_addr;
 }
@@ -438,18 +373,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-        io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+        char *buffer = io_tlb_orig_addr[index];
 
         /*
          * First, sync the memory before unmapping the entry
          */
-        if (!swiotlb_orig_addr_null(buffer)
-            && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+        if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                 /*
                  * bounce... copy the data back into the original buffer * and
                  * delete the bounce buffer.
                  */
-                __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+                memcpy(buffer, dma_addr, size);
 
         /*
          * Return the buffer to the free list by setting the corresponding
@@ -482,18 +416,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
             int dir, int target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-        io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+        char *buffer = io_tlb_orig_addr[index];
 
         switch (target) {
         case SYNC_FOR_CPU:
                 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                        __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+                        memcpy(buffer, dma_addr, size);
                 else
                         BUG_ON(dir != DMA_TO_DEVICE);
                 break;
         case SYNC_FOR_DEVICE:
                 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                        __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+                        memcpy(dma_addr, buffer, size);
                 else
                         BUG_ON(dir != DMA_FROM_DEVICE);
                 break;
@@ -502,8 +436,6 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
         }
 }
 
-#ifdef SWIOTLB_ARCH_NEED_ALLOC
-
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flags)
@@ -519,10 +451,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
          */
         flags |= GFP_DMA;
 
-        if (!order_needs_mapping(order))
-                ret = (void *)__get_free_pages(flags, order);
-        else
-                ret = NULL;
+        ret = (void *)__get_free_pages(flags, order);
         if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
                 /*
                  * The allocated memory isn't reachable by the device.
@@ -560,7 +489,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         *dma_handle = dev_addr;
         return ret;
 }
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -573,9 +501,6 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-#endif
 
 static void
 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -617,14 +542,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!range_needs_mapping(ptr, size)
-            && !address_needs_mapping(hwdev, dev_addr))
+        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
                 return dev_addr;
 
         /*
          * Oh well, have to allocate and map a bounce buffer.
          */
-        map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
+        map = map_single(hwdev, ptr, size, dir);
         if (!map) {
                 swiotlb_full(hwdev, size, dir, 1);
                 map = io_tlb_overflow_buffer;
@@ -752,16 +676,17 @@ int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
                int dir)
 {
+        void *addr;
         dma_addr_t dev_addr;
         int i;
 
         BUG_ON(dir == DMA_NONE);
 
         for (i = 0; i < nelems; i++, sg++) {
-                dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-                if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
-                    || address_needs_mapping(hwdev, dev_addr)) {
-                        void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
+                addr = SG_ENT_VIRT_ADDRESS(sg);
+                dev_addr = virt_to_bus(addr);
+                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+                        void *map = map_single(hwdev, addr, sg->length, dir);
                         if (!map) {
                                 /* Don't panic here, we expect map_sg users
                                    to do proper error handling. */
@@ -835,44 +760,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
         swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
-
-dma_addr_t
-swiotlb_map_page(struct device *hwdev, struct page *page,
-                 unsigned long offset, size_t size,
-                 enum dma_data_direction direction)
-{
-        dma_addr_t dev_addr;
-        char *map;
-
-        dev_addr = page_to_bus(page) + offset;
-        if (address_needs_mapping(hwdev, dev_addr)) {
-                map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
-                if (!map) {
-                        swiotlb_full(hwdev, size, direction, 1);
-                        map = io_tlb_overflow_buffer;
-                }
-                dev_addr = virt_to_bus(map);
-        }
-
-        return dev_addr;
-}
-
-void
-swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-                   size_t size, enum dma_data_direction direction)
-{
-        char *dma_addr = bus_to_virt(dev_addr);
-
-        BUG_ON(direction == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                unmap_single(hwdev, dma_addr, size, direction);
-        else if (direction == DMA_FROM_DEVICE)
-                dma_mark_clean(dma_addr, size);
-}
-
-#endif
-
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
@@ -885,13 +772,10 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
  * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
 */
-#ifndef __swiotlb_dma_supported
-#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
-#endif
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-        return __swiotlb_dma_supported(hwdev, mask);
+        return virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
@@ -906,4 +790,6 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);
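
With the per-arch hooks gone from lib/swiotlb.c, a bounce is once again a plain memcpy() whose direction follows the DMA direction, which is what the __sync_single() removal reduces to. A userspace model of that data flow (fixed-size buffers standing in for the IO TLB slots):

    #include <stdio.h>
    #include <string.h>

    enum dir { DMA_TO_DEVICE, DMA_FROM_DEVICE };

    /* What sync_single()/map_single() now do inline: copy toward the
     * device-visible bounce slot, or back toward the original buffer. */
    static void bounce(char *orig, char *slot, size_t len, enum dir d)
    {
            if (d == DMA_TO_DEVICE)
                    memcpy(slot, orig, len);  /* device reads the slot */
            else
                    memcpy(orig, slot, len);  /* device wrote the slot */
    }

    int main(void)
    {
            char orig[8] = "payload", slot[8] = "";

            bounce(orig, slot, sizeof(orig), DMA_TO_DEVICE);
            printf("%s\n", slot);             /* payload */

            memcpy(slot, "updated", 8);       /* pretend the device wrote */
            bounce(orig, slot, sizeof(orig), DMA_FROM_DEVICE);
            printf("%s\n", orig);             /* updated */
            return 0;
    }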
