diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-05 19:34:48 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-11-05 19:34:48 -0500 |
commit | 2c302e7e41050dbc174d50b58ad42eedf5dbd6fa (patch) | |
tree | 922bb84e205ea41f0f6bcae28cc43d9a8668936f | |
parent | 933425fb0010bd02bd459b41e63082756818ffce (diff) | |
parent | 52708d690b8be132ba9d294464625dbbdb9fa5df (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc updates from David Miller:
"Just a couple of fixes/cleanups:
- Correct NUMA latency calculations on sparc64, from Nitin Gupta.
- ASI_ST_BLKINIT_MRU_S value was wrong, from Rob Gardner.
- Fix non-faulting load handling of non-quad values, also from Rob
Gardner.
- Cleanup VISsave assembler, from Sam Ravnborg.
- Fix iommu-common code so it doesn't emit ridiculous warnings on
some architectures, particularly ARM"
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
sparc64: Fix numa distance values
sparc64: Don't restrict fp regs for no-fault loads
iommu-common: Fix error code used in iommu_tbl_range_{alloc,free}().
sparc64: use ENTRY/ENDPROC in VISsave
sparc64: Fix incorrect ASI_ST_BLKINIT_MRU_S value
-rw-r--r-- | arch/sparc/include/asm/topology_64.h | 3 | ||||
-rw-r--r-- | arch/sparc/include/uapi/asm/asi.h | 2 | ||||
-rw-r--r-- | arch/sparc/kernel/iommu.c | 12 | ||||
-rw-r--r-- | arch/sparc/kernel/ldc.c | 2 | ||||
-rw-r--r-- | arch/sparc/kernel/pci_sun4v.c | 18 | ||||
-rw-r--r-- | arch/sparc/kernel/unaligned_64.c | 22 | ||||
-rw-r--r-- | arch/sparc/lib/VISsave.S | 10 | ||||
-rw-r--r-- | arch/sparc/mm/init_64.c | 70 | ||||
-rw-r--r-- | include/linux/iommu-common.h | 1 | ||||
-rw-r--r-- | lib/iommu-common.c | 10 |
10 files changed, 113 insertions, 37 deletions
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index 01d17046225a..bec481aaca16 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h | |||
@@ -31,6 +31,9 @@ static inline int pcibus_to_node(struct pci_bus *pbus) | |||
31 | cpu_all_mask : \ | 31 | cpu_all_mask : \ |
32 | cpumask_of_node(pcibus_to_node(bus))) | 32 | cpumask_of_node(pcibus_to_node(bus))) |
33 | 33 | ||
34 | int __node_distance(int, int); | ||
35 | #define node_distance(a, b) __node_distance(a, b) | ||
36 | |||
34 | #else /* CONFIG_NUMA */ | 37 | #else /* CONFIG_NUMA */ |
35 | 38 | ||
36 | #include <asm-generic/topology.h> | 39 | #include <asm-generic/topology.h> |
diff --git a/arch/sparc/include/uapi/asm/asi.h b/arch/sparc/include/uapi/asm/asi.h index aace6f313716..7ad7203deaec 100644 --- a/arch/sparc/include/uapi/asm/asi.h +++ b/arch/sparc/include/uapi/asm/asi.h | |||
@@ -279,7 +279,7 @@ | |||
279 | * Most-Recently-Used, primary, | 279 | * Most-Recently-Used, primary, |
280 | * implicit | 280 | * implicit |
281 | */ | 281 | */ |
282 | #define ASI_ST_BLKINIT_MRU_S 0xf2 /* (NG4) init-store, twin load, | 282 | #define ASI_ST_BLKINIT_MRU_S 0xf3 /* (NG4) init-store, twin load, |
283 | * Most-Recently-Used, secondary, | 283 | * Most-Recently-Used, secondary, |
284 | * implicit | 284 | * implicit |
285 | */ | 285 | */ |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 5320689c06e9..37686828c3d9 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -161,7 +161,7 @@ static inline iopte_t *alloc_npages(struct device *dev, | |||
161 | 161 | ||
162 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, | 162 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, |
163 | (unsigned long)(-1), 0); | 163 | (unsigned long)(-1), 0); |
164 | if (unlikely(entry == DMA_ERROR_CODE)) | 164 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
165 | return NULL; | 165 | return NULL; |
166 | 166 | ||
167 | return iommu->page_table + entry; | 167 | return iommu->page_table + entry; |
@@ -253,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
253 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | 253 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; |
254 | iommu = dev->archdata.iommu; | 254 | iommu = dev->archdata.iommu; |
255 | 255 | ||
256 | iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); | 256 | iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); |
257 | 257 | ||
258 | order = get_order(size); | 258 | order = get_order(size); |
259 | if (order < 10) | 259 | if (order < 10) |
@@ -426,7 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
426 | iommu_free_ctx(iommu, ctx); | 426 | iommu_free_ctx(iommu, ctx); |
427 | spin_unlock_irqrestore(&iommu->lock, flags); | 427 | spin_unlock_irqrestore(&iommu->lock, flags); |
428 | 428 | ||
429 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); | 429 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); |
430 | } | 430 | } |
431 | 431 | ||
432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | 432 | static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, |
@@ -492,7 +492,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | |||
492 | &handle, (unsigned long)(-1), 0); | 492 | &handle, (unsigned long)(-1), 0); |
493 | 493 | ||
494 | /* Handle failure */ | 494 | /* Handle failure */ |
495 | if (unlikely(entry == DMA_ERROR_CODE)) { | 495 | if (unlikely(entry == IOMMU_ERROR_CODE)) { |
496 | if (printk_ratelimit()) | 496 | if (printk_ratelimit()) |
497 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" | 497 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" |
498 | " npages %lx\n", iommu, paddr, npages); | 498 | " npages %lx\n", iommu, paddr, npages); |
@@ -571,7 +571,7 @@ iommu_map_failed: | |||
571 | iopte_make_dummy(iommu, base + j); | 571 | iopte_make_dummy(iommu, base + j); |
572 | 572 | ||
573 | iommu_tbl_range_free(&iommu->tbl, vaddr, npages, | 573 | iommu_tbl_range_free(&iommu->tbl, vaddr, npages, |
574 | DMA_ERROR_CODE); | 574 | IOMMU_ERROR_CODE); |
575 | 575 | ||
576 | s->dma_address = DMA_ERROR_CODE; | 576 | s->dma_address = DMA_ERROR_CODE; |
577 | s->dma_length = 0; | 577 | s->dma_length = 0; |
@@ -648,7 +648,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
648 | iopte_make_dummy(iommu, base + i); | 648 | iopte_make_dummy(iommu, base + i); |
649 | 649 | ||
650 | iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, | 650 | iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, |
651 | DMA_ERROR_CODE); | 651 | IOMMU_ERROR_CODE); |
652 | sg = sg_next(sg); | 652 | sg = sg_next(sg); |
653 | } | 653 | } |
654 | 654 | ||
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 1ae5eb1bb045..59d503866431 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c | |||
@@ -1953,7 +1953,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu, | |||
1953 | 1953 | ||
1954 | entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table, | 1954 | entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table, |
1955 | npages, NULL, (unsigned long)-1, 0); | 1955 | npages, NULL, (unsigned long)-1, 0); |
1956 | if (unlikely(entry < 0)) | 1956 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
1957 | return NULL; | 1957 | return NULL; |
1958 | 1958 | ||
1959 | return iommu->page_table + entry; | 1959 | return iommu->page_table + entry; |
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index d2fe57dad433..836e8cef47e2 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -159,7 +159,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
159 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, | 159 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, |
160 | (unsigned long)(-1), 0); | 160 | (unsigned long)(-1), 0); |
161 | 161 | ||
162 | if (unlikely(entry == DMA_ERROR_CODE)) | 162 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
163 | goto range_alloc_fail; | 163 | goto range_alloc_fail; |
164 | 164 | ||
165 | *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); | 165 | *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); |
@@ -187,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
187 | return ret; | 187 | return ret; |
188 | 188 | ||
189 | iommu_map_fail: | 189 | iommu_map_fail: |
190 | iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE); | 190 | iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); |
191 | 191 | ||
192 | range_alloc_fail: | 192 | range_alloc_fail: |
193 | free_pages(first_page, order); | 193 | free_pages(first_page, order); |
@@ -226,7 +226,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
226 | devhandle = pbm->devhandle; | 226 | devhandle = pbm->devhandle; |
227 | entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); | 227 | entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); |
228 | dma_4v_iommu_demap(&devhandle, entry, npages); | 228 | dma_4v_iommu_demap(&devhandle, entry, npages); |
229 | iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); | 229 | iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); |
230 | order = get_order(size); | 230 | order = get_order(size); |
231 | if (order < 10) | 231 | if (order < 10) |
232 | free_pages((unsigned long)cpu, order); | 232 | free_pages((unsigned long)cpu, order); |
@@ -256,7 +256,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | |||
256 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, | 256 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, |
257 | (unsigned long)(-1), 0); | 257 | (unsigned long)(-1), 0); |
258 | 258 | ||
259 | if (unlikely(entry == DMA_ERROR_CODE)) | 259 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
260 | goto bad; | 260 | goto bad; |
261 | 261 | ||
262 | bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); | 262 | bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); |
@@ -288,7 +288,7 @@ bad: | |||
288 | return DMA_ERROR_CODE; | 288 | return DMA_ERROR_CODE; |
289 | 289 | ||
290 | iommu_map_fail: | 290 | iommu_map_fail: |
291 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); | 291 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); |
292 | return DMA_ERROR_CODE; | 292 | return DMA_ERROR_CODE; |
293 | } | 293 | } |
294 | 294 | ||
@@ -317,7 +317,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
317 | bus_addr &= IO_PAGE_MASK; | 317 | bus_addr &= IO_PAGE_MASK; |
318 | entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; | 318 | entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; |
319 | dma_4v_iommu_demap(&devhandle, entry, npages); | 319 | dma_4v_iommu_demap(&devhandle, entry, npages); |
320 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); | 320 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 323 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
@@ -376,7 +376,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
376 | &handle, (unsigned long)(-1), 0); | 376 | &handle, (unsigned long)(-1), 0); |
377 | 377 | ||
378 | /* Handle failure */ | 378 | /* Handle failure */ |
379 | if (unlikely(entry == DMA_ERROR_CODE)) { | 379 | if (unlikely(entry == IOMMU_ERROR_CODE)) { |
380 | if (printk_ratelimit()) | 380 | if (printk_ratelimit()) |
381 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" | 381 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" |
382 | " npages %lx\n", iommu, paddr, npages); | 382 | " npages %lx\n", iommu, paddr, npages); |
@@ -451,7 +451,7 @@ iommu_map_failed: | |||
451 | npages = iommu_num_pages(s->dma_address, s->dma_length, | 451 | npages = iommu_num_pages(s->dma_address, s->dma_length, |
452 | IO_PAGE_SIZE); | 452 | IO_PAGE_SIZE); |
453 | iommu_tbl_range_free(&iommu->tbl, vaddr, npages, | 453 | iommu_tbl_range_free(&iommu->tbl, vaddr, npages, |
454 | DMA_ERROR_CODE); | 454 | IOMMU_ERROR_CODE); |
455 | /* XXX demap? XXX */ | 455 | /* XXX demap? XXX */ |
456 | s->dma_address = DMA_ERROR_CODE; | 456 | s->dma_address = DMA_ERROR_CODE; |
457 | s->dma_length = 0; | 457 | s->dma_length = 0; |
@@ -496,7 +496,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
496 | entry = ((dma_handle - tbl->table_map_base) >> shift); | 496 | entry = ((dma_handle - tbl->table_map_base) >> shift); |
497 | dma_4v_iommu_demap(&devhandle, entry, npages); | 497 | dma_4v_iommu_demap(&devhandle, entry, npages); |
498 | iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, | 498 | iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, |
499 | DMA_ERROR_CODE); | 499 | IOMMU_ERROR_CODE); |
500 | sg = sg_next(sg); | 500 | sg = sg_next(sg); |
501 | } | 501 | } |
502 | 502 | ||
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 62098a89bbbf..d89e97b374cf 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c | |||
@@ -436,24 +436,26 @@ extern void sun4v_data_access_exception(struct pt_regs *regs, | |||
436 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) | 436 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) |
437 | { | 437 | { |
438 | unsigned long addr = compute_effective_address(regs, insn, 0); | 438 | unsigned long addr = compute_effective_address(regs, insn, 0); |
439 | int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); | 439 | int freg; |
440 | struct fpustate *f = FPUSTATE; | 440 | struct fpustate *f = FPUSTATE; |
441 | int asi = decode_asi(insn, regs); | 441 | int asi = decode_asi(insn, regs); |
442 | int flag = (freg < 32) ? FPRS_DL : FPRS_DU; | 442 | int flag; |
443 | 443 | ||
444 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | 444 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
445 | 445 | ||
446 | save_and_clear_fpu(); | 446 | save_and_clear_fpu(); |
447 | current_thread_info()->xfsr[0] &= ~0x1c000; | 447 | current_thread_info()->xfsr[0] &= ~0x1c000; |
448 | if (freg & 3) { | ||
449 | current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; | ||
450 | do_fpother(regs); | ||
451 | return 0; | ||
452 | } | ||
453 | if (insn & 0x200000) { | 448 | if (insn & 0x200000) { |
454 | /* STQ */ | 449 | /* STQ */ |
455 | u64 first = 0, second = 0; | 450 | u64 first = 0, second = 0; |
456 | 451 | ||
452 | freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); | ||
453 | flag = (freg < 32) ? FPRS_DL : FPRS_DU; | ||
454 | if (freg & 3) { | ||
455 | current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; | ||
456 | do_fpother(regs); | ||
457 | return 0; | ||
458 | } | ||
457 | if (current_thread_info()->fpsaved[0] & flag) { | 459 | if (current_thread_info()->fpsaved[0] & flag) { |
458 | first = *(u64 *)&f->regs[freg]; | 460 | first = *(u64 *)&f->regs[freg]; |
459 | second = *(u64 *)&f->regs[freg+2]; | 461 | second = *(u64 *)&f->regs[freg+2]; |
@@ -513,6 +515,12 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
513 | case 0x100000: size = 4; break; | 515 | case 0x100000: size = 4; break; |
514 | default: size = 2; break; | 516 | default: size = 2; break; |
515 | } | 517 | } |
518 | if (size == 1) | ||
519 | freg = (insn >> 25) & 0x1f; | ||
520 | else | ||
521 | freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); | ||
522 | flag = (freg < 32) ? FPRS_DL : FPRS_DU; | ||
523 | |||
516 | for (i = 0; i < size; i++) | 524 | for (i = 0; i < size; i++) |
517 | data[i] = 0; | 525 | data[i] = 0; |
518 | 526 | ||
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index a063d84336d6..62c2647bd5ce 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S | |||
@@ -6,24 +6,23 @@ | |||
6 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) | 6 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/linkage.h> | ||
10 | |||
9 | #include <asm/asi.h> | 11 | #include <asm/asi.h> |
10 | #include <asm/page.h> | 12 | #include <asm/page.h> |
11 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
12 | #include <asm/visasm.h> | 14 | #include <asm/visasm.h> |
13 | #include <asm/thread_info.h> | 15 | #include <asm/thread_info.h> |
14 | 16 | ||
15 | .text | ||
16 | .globl VISenter, VISenterhalf | ||
17 | |||
18 | /* On entry: %o5=current FPRS value, %g7 is callers address */ | 17 | /* On entry: %o5=current FPRS value, %g7 is callers address */ |
19 | /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ | 18 | /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ |
20 | 19 | ||
21 | /* Nothing special need be done here to handle pre-emption, this | 20 | /* Nothing special need be done here to handle pre-emption, this |
22 | * FPU save/restore mechanism is already preemption safe. | 21 | * FPU save/restore mechanism is already preemption safe. |
23 | */ | 22 | */ |
24 | 23 | .text | |
25 | .align 32 | 24 | .align 32 |
26 | VISenter: | 25 | ENTRY(VISenter) |
27 | ldub [%g6 + TI_FPDEPTH], %g1 | 26 | ldub [%g6 + TI_FPDEPTH], %g1 |
28 | brnz,a,pn %g1, 1f | 27 | brnz,a,pn %g1, 1f |
29 | cmp %g1, 1 | 28 | cmp %g1, 1 |
@@ -79,3 +78,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 | |||
79 | .align 32 | 78 | .align 32 |
80 | 80: jmpl %g7 + %g0, %g0 | 79 | 80: jmpl %g7 + %g0, %g0 |
81 | nop | 80 | nop |
81 | ENDPROC(VISenter) | ||
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 4ac88b757514..3025bd57f7ab 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -93,6 +93,8 @@ static unsigned long cpu_pgsz_mask; | |||
93 | static struct linux_prom64_registers pavail[MAX_BANKS]; | 93 | static struct linux_prom64_registers pavail[MAX_BANKS]; |
94 | static int pavail_ents; | 94 | static int pavail_ents; |
95 | 95 | ||
96 | u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES]; | ||
97 | |||
96 | static int cmp_p64(const void *a, const void *b) | 98 | static int cmp_p64(const void *a, const void *b) |
97 | { | 99 | { |
98 | const struct linux_prom64_registers *x = a, *y = b; | 100 | const struct linux_prom64_registers *x = a, *y = b; |
@@ -1157,6 +1159,48 @@ static struct mdesc_mlgroup * __init find_mlgroup(u64 node) | |||
1157 | return NULL; | 1159 | return NULL; |
1158 | } | 1160 | } |
1159 | 1161 | ||
1162 | int __node_distance(int from, int to) | ||
1163 | { | ||
1164 | if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) { | ||
1165 | pr_warn("Returning default NUMA distance value for %d->%d\n", | ||
1166 | from, to); | ||
1167 | return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
1168 | } | ||
1169 | return numa_latency[from][to]; | ||
1170 | } | ||
1171 | |||
1172 | static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) | ||
1173 | { | ||
1174 | int i; | ||
1175 | |||
1176 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
1177 | struct node_mem_mask *n = &node_masks[i]; | ||
1178 | |||
1179 | if ((grp->mask == n->mask) && (grp->match == n->val)) | ||
1180 | break; | ||
1181 | } | ||
1182 | return i; | ||
1183 | } | ||
1184 | |||
1185 | static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, | ||
1186 | int index) | ||
1187 | { | ||
1188 | u64 arc; | ||
1189 | |||
1190 | mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { | ||
1191 | int tnode; | ||
1192 | u64 target = mdesc_arc_target(md, arc); | ||
1193 | struct mdesc_mlgroup *m = find_mlgroup(target); | ||
1194 | |||
1195 | if (!m) | ||
1196 | continue; | ||
1197 | tnode = find_best_numa_node_for_mlgroup(m); | ||
1198 | if (tnode == MAX_NUMNODES) | ||
1199 | continue; | ||
1200 | numa_latency[index][tnode] = m->latency; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1160 | static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, | 1204 | static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, |
1161 | int index) | 1205 | int index) |
1162 | { | 1206 | { |
@@ -1220,9 +1264,16 @@ static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, | |||
1220 | static int __init numa_parse_mdesc(void) | 1264 | static int __init numa_parse_mdesc(void) |
1221 | { | 1265 | { |
1222 | struct mdesc_handle *md = mdesc_grab(); | 1266 | struct mdesc_handle *md = mdesc_grab(); |
1223 | int i, err, count; | 1267 | int i, j, err, count; |
1224 | u64 node; | 1268 | u64 node; |
1225 | 1269 | ||
1270 | /* Some sane defaults for numa latency values */ | ||
1271 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
1272 | for (j = 0; j < MAX_NUMNODES; j++) | ||
1273 | numa_latency[i][j] = (i == j) ? | ||
1274 | LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
1275 | } | ||
1276 | |||
1226 | node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); | 1277 | node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); |
1227 | if (node == MDESC_NODE_NULL) { | 1278 | if (node == MDESC_NODE_NULL) { |
1228 | mdesc_release(md); | 1279 | mdesc_release(md); |
@@ -1245,6 +1296,23 @@ static int __init numa_parse_mdesc(void) | |||
1245 | count++; | 1296 | count++; |
1246 | } | 1297 | } |
1247 | 1298 | ||
1299 | count = 0; | ||
1300 | mdesc_for_each_node_by_name(md, node, "group") { | ||
1301 | find_numa_latencies_for_group(md, node, count); | ||
1302 | count++; | ||
1303 | } | ||
1304 | |||
1305 | /* Normalize numa latency matrix according to ACPI SLIT spec. */ | ||
1306 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
1307 | u64 self_latency = numa_latency[i][i]; | ||
1308 | |||
1309 | for (j = 0; j < MAX_NUMNODES; j++) { | ||
1310 | numa_latency[i][j] = | ||
1311 | (numa_latency[i][j] * LOCAL_DISTANCE) / | ||
1312 | self_latency; | ||
1313 | } | ||
1314 | } | ||
1315 | |||
1248 | add_node_ranges(); | 1316 | add_node_ranges(); |
1249 | 1317 | ||
1250 | for (i = 0; i < num_node_masks; i++) { | 1318 | for (i = 0; i < num_node_masks; i++) { |
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h index bbced83b32ee..376a27c9cc6a 100644 --- a/include/linux/iommu-common.h +++ b/include/linux/iommu-common.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #define IOMMU_POOL_HASHBITS 4 | 8 | #define IOMMU_POOL_HASHBITS 4 |
9 | #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) | 9 | #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) |
10 | #define IOMMU_ERROR_CODE (~(unsigned long) 0) | ||
10 | 11 | ||
11 | struct iommu_pool { | 12 | struct iommu_pool { |
12 | unsigned long start; | 13 | unsigned long start; |
diff --git a/lib/iommu-common.c b/lib/iommu-common.c index b1c93e94ca7a..858dc1aae478 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c | |||
@@ -11,10 +11,6 @@ | |||
11 | #include <linux/dma-mapping.h> | 11 | #include <linux/dma-mapping.h> |
12 | #include <linux/hash.h> | 12 | #include <linux/hash.h> |
13 | 13 | ||
14 | #ifndef DMA_ERROR_CODE | ||
15 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
16 | #endif | ||
17 | |||
18 | static unsigned long iommu_large_alloc = 15; | 14 | static unsigned long iommu_large_alloc = 15; |
19 | 15 | ||
20 | static DEFINE_PER_CPU(unsigned int, iommu_hash_common); | 16 | static DEFINE_PER_CPU(unsigned int, iommu_hash_common); |
@@ -123,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, | |||
123 | /* Sanity check */ | 119 | /* Sanity check */ |
124 | if (unlikely(npages == 0)) { | 120 | if (unlikely(npages == 0)) { |
125 | WARN_ON_ONCE(1); | 121 | WARN_ON_ONCE(1); |
126 | return DMA_ERROR_CODE; | 122 | return IOMMU_ERROR_CODE; |
127 | } | 123 | } |
128 | 124 | ||
129 | if (largealloc) { | 125 | if (largealloc) { |
@@ -206,7 +202,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, | |||
206 | goto again; | 202 | goto again; |
207 | } else { | 203 | } else { |
208 | /* give up */ | 204 | /* give up */ |
209 | n = DMA_ERROR_CODE; | 205 | n = IOMMU_ERROR_CODE; |
210 | goto bail; | 206 | goto bail; |
211 | } | 207 | } |
212 | } | 208 | } |
@@ -259,7 +255,7 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, | |||
259 | unsigned long flags; | 255 | unsigned long flags; |
260 | unsigned long shift = iommu->table_shift; | 256 | unsigned long shift = iommu->table_shift; |
261 | 257 | ||
262 | if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */ | 258 | if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */ |
263 | entry = (dma_addr - iommu->table_map_base) >> shift; | 259 | entry = (dma_addr - iommu->table_map_base) >> shift; |
264 | pool = get_pool(iommu, entry); | 260 | pool = get_pool(iommu, entry); |
265 | 261 | ||