 arch/powerpc/platforms/cell/iommu.c | 269 ++++++++++++++++++++++++++++++++++--
 1 file changed, 267 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 7316226a5f17..df330666ccc9 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1,7 +1,7 @@
 /*
  * IOMMU implementation for Cell Broadband Processor Architecture
  *
- * (C) Copyright IBM Corporation 2006
+ * (C) Copyright IBM Corporation 2006-2008
  *
  * Author: Jeremy Kerr <jk@ozlabs.org>
  *
@@ -523,6 +523,9 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
 
 static unsigned long cell_dma_direct_offset;
 
+static unsigned long dma_iommu_fixed_base;
+struct dma_mapping_ops dma_iommu_fixed_ops;
+
 static void cell_dma_dev_setup_iommu(struct device *dev)
 {
         struct iommu_window *window;
@@ -545,11 +548,16 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
         archdata->dma_data = &window->table;
 }
 
+static void cell_dma_dev_setup_static(struct device *dev);
+
 static void cell_dma_dev_setup(struct device *dev)
 {
         struct dev_archdata *archdata = &dev->archdata;
 
-        if (get_pci_dma_ops() == &dma_iommu_ops)
+        /* Order is important here, these are not mutually exclusive */
+        if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
+                cell_dma_dev_setup_static(dev);
+        else if (get_pci_dma_ops() == &dma_iommu_ops)
                 cell_dma_dev_setup_iommu(dev);
         else if (get_pci_dma_ops() == &dma_direct_ops)
                 archdata->dma_data = (void *)cell_dma_direct_offset;
@@ -752,6 +760,260 @@ static int __init cell_iommu_init_disabled(void)
         return 0;
 }
 
+/*
+ * Fixed IOMMU mapping support
+ *
+ * This code adds support for setting up a fixed IOMMU mapping on certain
+ * cell machines. For 64-bit devices this avoids the performance overhead of
+ * mapping and unmapping pages at runtime. 32-bit devices are unable to use
+ * the fixed mapping.
+ *
+ * The fixed mapping is established at boot, and maps all of physical memory
+ * 1:1 into device space at some offset. On machines with < 30 GB of memory
+ * we set up the fixed mapping immediately above the normal IOMMU window.
+ *
+ * For example a machine with 4GB of memory would end up with the normal
+ * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
+ * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
+ * 3GB, plus any offset required by firmware. The firmware offset is encoded
+ * in the "dma-ranges" property.
+ *
+ * On machines with 30GB or more of memory, we are unable to place the fixed
+ * mapping above the normal IOMMU window as we would run out of address space.
+ * Instead we move the normal IOMMU window to coincide with the hash page
+ * table; this region does not need to be part of the fixed mapping as no
+ * device should ever be DMA'ing to it. We then set up the fixed mapping
+ * from 0 to 32GB.
+ */
+
+static u64 cell_iommu_get_fixed_address(struct device *dev)
+{
+        u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
+        struct device_node *tmp, *np;
+        const u32 *ranges = NULL;
+        int i, len, best;
+
+        np = dev->archdata.of_node;
+        of_node_get(np);
+        ranges = of_get_property(np, "dma-ranges", &len);
+        while (!ranges && np) {
+                tmp = of_get_parent(np);
+                of_node_put(np);
+                np = tmp;
+                ranges = of_get_property(np, "dma-ranges", &len);
+        }
+
+        if (!ranges) {
+                dev_dbg(dev, "iommu: no dma-ranges found\n");
+                goto out;
+        }
+
+        len /= sizeof(u32);
+
+        /* dma-ranges format:
+         * 1 cell:  pci space
+         * 2 cells: pci address
+         * 2 cells: parent address
+         * 2 cells: size
+         */
+        for (i = 0, best = -1, best_size = 0; i < len; i += 7) {
+                cpu_addr = of_translate_dma_address(np, ranges + i + 3);
+                size = of_read_number(ranges + i + 5, 2);
+
+                if (cpu_addr == 0 && size > best_size) {
+                        best = i;
+                        best_size = size;
+                }
+        }
+
+        if (best >= 0)
+                pci_addr = of_read_number(ranges + best + 1, 2);
+        else
+                dev_dbg(dev, "iommu: no suitable range found!\n");
+
+out:
+        of_node_put(np);
+
+        return pci_addr;
+}
+
+static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
+{
+        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+                return -EIO;
+
+        if (dma_mask == DMA_BIT_MASK(64)) {
+                if (cell_iommu_get_fixed_address(dev) == OF_BAD_ADDR)
+                        dev_dbg(dev, "iommu: 64-bit OK, but bad addr\n");
+                else {
+                        dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+                        set_dma_ops(dev, &dma_iommu_fixed_ops);
+                        cell_dma_dev_setup(dev);
+                }
+        } else {
+                dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
+                set_dma_ops(dev, get_pci_dma_ops());
+        }
+
+        *dev->dma_mask = dma_mask;
+
+        return 0;
+}
+
+static void cell_dma_dev_setup_static(struct device *dev)
+{
+        struct dev_archdata *archdata = &dev->archdata;
+        u64 addr;
+
+        addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
+        archdata->dma_data = (void *)addr;
+
+        dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
+}
+
+static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
+        struct device_node *np, unsigned long dbase, unsigned long dsize,
+        unsigned long fbase, unsigned long fsize)
+{
+        unsigned long base_pte, uaddr, *io_pte;
+        int i;
+
+        dma_iommu_fixed_base = fbase;
+
+        /* convert from bytes into page table indices */
+        dbase = dbase >> IOMMU_PAGE_SHIFT;
+        dsize = dsize >> IOMMU_PAGE_SHIFT;
+        fbase = fbase >> IOMMU_PAGE_SHIFT;
+        fsize = fsize >> IOMMU_PAGE_SHIFT;
+
+        pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
+
+        io_pte = iommu->ptab;
+        base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
+                | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
+
+        uaddr = 0;
+        for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+                /* Don't touch the dynamic region */
+                if (i >= dbase && i < (dbase + dsize)) {
+                        pr_debug("iommu: static/dynamic overlap, skipping\n");
+                        continue;
+                }
+                io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+        }
+
+        mb();
+}
+
+static int __init cell_iommu_fixed_mapping_init(void)
+{
+        unsigned long dbase, dsize, fbase, fsize, hbase, hend;
+        struct cbe_iommu *iommu;
+        struct device_node *np;
+
+        /* The fixed mapping is only supported on axon machines */
+        np = of_find_node_by_name(NULL, "axon");
+        if (!np) {
+                pr_debug("iommu: fixed mapping disabled, no axons found\n");
+                return -1;
+        }
+
+        /* The default setup is to have the fixed mapping sit after the
+         * dynamic region, so find the top of the largest IOMMU window
+         * on any axon, then add the size of RAM and that's our max value.
+         * If that is > 32GB we have to do other shenanigans.
+         */
+        fbase = 0;
+        for_each_node_by_name(np, "axon") {
+                cell_iommu_get_window(np, &dbase, &dsize);
+                fbase = max(fbase, dbase + dsize);
+        }
+
+        fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+        fsize = lmb_phys_mem_size();
+
+        if ((fbase + fsize) <= 0x800000000ul)
+                hbase = 0; /* use the device tree window */
+        else {
+                /* If we're over 32 GB we need to cheat. We can't map all of
+                 * RAM with the fixed mapping, and also fit the dynamic
+                 * region. So try to place the dynamic region where the hash
+                 * table sits; drivers never need to DMA to it, so we don't
+                 * need a fixed mapping for that area.
+                 */
+                if (!htab_address) {
+                        pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
+                        return -1;
+                }
+                hbase = __pa(htab_address);
+                hend = hbase + htab_size_bytes;
+
+                /* The window must start and end on a segment boundary */
+                if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+                    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+                        pr_debug("iommu: hash window not segment aligned\n");
+                        return -1;
+                }
+
+                /* Check the hash window fits inside the real DMA window */
+                for_each_node_by_name(np, "axon") {
+                        cell_iommu_get_window(np, &dbase, &dsize);
+
+                        if (hbase < dbase || (hend > (dbase + dsize))) {
+                                pr_debug("iommu: hash window doesn't fit in "
+                                         "real DMA window\n");
+                                return -1;
+                        }
+                }
+
+                fbase = 0;
+        }
+
+        /* Setup the dynamic regions */
+        for_each_node_by_name(np, "axon") {
+                iommu = cell_iommu_alloc(np);
+                BUG_ON(!iommu);
+
+                if (hbase == 0)
+                        cell_iommu_get_window(np, &dbase, &dsize);
+                else {
+                        dbase = hbase;
+                        dsize = htab_size_bytes;
+                }
+
+                pr_debug("iommu: setting up %d, dynamic window %lx-%lx "
+                         "fixed window %lx-%lx\n", iommu->nid, dbase,
+                         dbase + dsize, fbase, fbase + fsize);
+
+                cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
+                cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
+                                            fbase, fsize);
+                cell_iommu_enable_hardware(iommu);
+                cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
+        }
+
+        dma_iommu_fixed_ops = dma_direct_ops;
+        dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;
+
+        dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
+        set_pci_dma_ops(&dma_iommu_ops);
+
+        printk(KERN_DEBUG "IOMMU fixed mapping established.\n");
+
+        return 0;
+}
+
+static int iommu_fixed_disabled;
+
+static int __init setup_iommu_fixed(char *str)
+{
+        if (strcmp(str, "off") == 0)
+                iommu_fixed_disabled = 1;
+
+        return 1;
+}
+__setup("iommu_fixed=", setup_iommu_fixed);
+
 static int __init cell_iommu_init(void)
 {
         struct device_node *np;
@@ -771,6 +1033,9 @@ static int __init cell_iommu_init(void)
         ppc_md.tce_build = tce_build_cell;
         ppc_md.tce_free = tce_free_cell;
 
+        if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
+                goto bail;
+
         /* Create an iommu for each /axon node. */
         for_each_node_by_name(np, "axon") {
                 if (np->parent == NULL || np->parent->parent != NULL)
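
A few notes and sketches on the patch follow. The comment block at the top of the new fixed-mapping code is easiest to follow with concrete numbers. This standalone C sketch reproduces its 4GB example (all values are illustrative, not taken from real firmware): dynamic window 0-2GB, fixed window starting immediately above it, so a driver DMA to CPU address 1GB is issued to bus address 3GB plus whatever offset "dma-ranges" adds.

/* Worked example of the fixed-mapping arithmetic from the comment block.
 * All values are illustrative; real ones come from the device tree. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t dynamic_size    = 0x80000000ULL; /* 2GB dynamic IOMMU window */
        uint64_t fixed_base      = dynamic_size;  /* fixed window sits just above it */
        uint64_t firmware_offset = 0;             /* would come from "dma-ranges" */
        uint64_t cpu_addr        = 0x40000000ULL; /* driver wants to DMA to 1GB */

        /* device-visible address = cpu address + fixed window base + fw offset */
        uint64_t dev_addr = cpu_addr + fixed_base + firmware_offset;

        printf("cpu 0x%llx -> bus 0x%llx\n",
               (unsigned long long)cpu_addr, (unsigned long long)dev_addr);
        return 0; /* prints: cpu 0x40000000 -> bus 0xc0000000, i.e. 3GB */
}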
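cell_iommu_get_fixed_address() walks up the device tree until it finds a "dma-ranges" property, then scans its 7-cell entries for the largest range that starts at CPU address 0. Here is a standalone sketch of that scan with an invented property; note the real code translates the parent address through of_translate_dma_address() rather than reading the two cells raw as done here.

/* Sketch of the 7-cell "dma-ranges" scan: pci space (1 cell),
 * pci address (2), parent address (2), size (2). Invented values. */
#include <stdio.h>
#include <stdint.h>

static const uint32_t ranges[] = {
        0x02000000, 0x0, 0x80000000, 0x0, 0x80000000, 0x0, 0x80000000, /* 2GB at cpu 2GB */
        0x02000000, 0x1, 0x00000000, 0x0, 0x00000000, 0x1, 0x00000000, /* 4GB at cpu 0   */
};

static uint64_t read_number(const uint32_t *cells) /* like of_read_number(cells, 2) */
{
        return ((uint64_t)cells[0] << 32) | cells[1];
}

int main(void)
{
        int i, best = -1, len = sizeof(ranges) / sizeof(ranges[0]);
        uint64_t best_size = 0, pci_addr = ~0ULL; /* ~0 stands in for OF_BAD_ADDR */

        for (i = 0; i < len; i += 7) {
                uint64_t cpu_addr = read_number(ranges + i + 3); /* parent address */
                uint64_t size     = read_number(ranges + i + 5);

                /* only ranges starting at cpu address 0 are usable; keep the biggest */
                if (cpu_addr == 0 && size > best_size) {
                        best = i;
                        best_size = size;
                }
        }

        if (best >= 0)
                pci_addr = read_number(ranges + best + 1);

        printf("fixed window pci address: 0x%llx\n", (unsigned long long)pci_addr);
        return 0; /* picks the second entry: 0x100000000 */
}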
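From a driver's point of view the switch is driven entirely by the DMA mask: dma_set_mask_and_switch() is installed as the set_dma_mask hook, so asking for a 64-bit mask is what moves a device onto dma_iommu_fixed_ops. A hedged sketch of the driver-side flow; the probe function is hypothetical, while pci_set_dma_mask() is the standard helper of this kernel era.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe fragment: on these machines the 64-bit request
 * is routed through dma_set_mask_and_switch() above. */
static int example_probe(struct pci_dev *pdev)
{
        /* Switches the device onto the fixed 1:1 window if its
         * "dma-ranges" advertises a usable range... */
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
                return 0;

        /* ...otherwise stay on the 32-bit dynamic IOMMU window. */
        return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}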
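cell_iommu_setup_fixed_ptab() converts the byte-granular window boundaries into page-table indices and writes a 1:1 PTE for every IOMMU page of the fixed window, skipping indices owned by the dynamic window. The index math is worth seeing in isolation; this sketch uses the >32GB layout (fixed window at 0, dynamic window relocated onto the hash table), which is the case where the overlap skip actually fires, and assumes 4K IOMMU pages with an invented hash-table location.

#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12 /* assumed 4K IOMMU pages */

int main(void)
{
        /* fixed window from 0-32GB; dynamic window placed over a
         * hypothetical 256MB hash table at 4GB */
        unsigned long long dbase = 0x100000000ULL >> IOMMU_PAGE_SHIFT;
        unsigned long long dsize = 0x010000000ULL >> IOMMU_PAGE_SHIFT;
        unsigned long long fbase = 0x000000000ULL >> IOMMU_PAGE_SHIFT;
        unsigned long long fsize = 0x800000000ULL >> IOMMU_PAGE_SHIFT;
        unsigned long long i, mapped = 0;

        for (i = fbase; i < fbase + fsize; i++) {
                /* don't touch the dynamic region, exactly as the kernel
                 * loop checks before writing io_pte[i] */
                if (i >= dbase && i < dbase + dsize)
                        continue;
                mapped++; /* kernel: io_pte[i] = base_pte | pa(uaddr) */
        }

        printf("wrote 0x%llx of 0x%llx ptes (0x%llx skipped)\n",
               mapped, fsize, fsize - mapped);
        return 0;
}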
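Finally, the __setup() hook gives administrators an opt-out: booting with

        iommu_fixed=off

on the kernel command line sets iommu_fixed_disabled, so cell_iommu_init() never calls cell_iommu_fixed_mapping_init() and the machine falls back to the purely dynamic IOMMU setup. Note that the handler only matches the exact string "off"; any other value leaves the fixed mapping enabled.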
