Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--   arch/powerpc/platforms/iseries/htab.c    |  65
-rw-r--r--   arch/powerpc/platforms/iseries/hvlog.c   |   4
-rw-r--r--   arch/powerpc/platforms/iseries/iommu.c   |  74
-rw-r--r--   arch/powerpc/platforms/iseries/setup.c   |  13
-rw-r--r--   arch/powerpc/platforms/iseries/vio.c     |  39
-rw-r--r--   arch/powerpc/platforms/iseries/viopath.c |  16
-rw-r--r--   arch/powerpc/platforms/pseries/lpar.c    | 115
7 files changed, 170 insertions, 156 deletions
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index b3c6c3374ca6..30bdcf3925d9 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -39,15 +39,16 @@ static inline void iSeries_hunlock(unsigned long slot)
         spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
 }
 
-static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
-                unsigned long prpn, unsigned long vflags,
-                unsigned long rflags)
+long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
+                unsigned long pa, unsigned long rflags,
+                unsigned long vflags, int psize)
 {
-        unsigned long arpn;
         long slot;
         hpte_t lhpte;
         int secondary = 0;
 
+        BUG_ON(psize != MMU_PAGE_4K);
+
         /*
          * The hypervisor tries both primary and secondary.
          * If we are being called to insert in the secondary,
@@ -59,8 +60,19 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 
         iSeries_hlock(hpte_group);
 
-        slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
-        BUG_ON(lhpte.v & HPTE_V_VALID);
+        slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
+        if (unlikely(lhpte.v & HPTE_V_VALID)) {
+                if (vflags & HPTE_V_BOLTED) {
+                        HvCallHpt_setSwBits(slot, 0x10, 0);
+                        HvCallHpt_setPp(slot, PP_RWXX);
+                        iSeries_hunlock(hpte_group);
+                        if (slot < 0)
+                                return 0x8 | (slot & 7);
+                        else
+                                return slot & 7;
+                }
+                BUG();
+        }
 
         if (slot == -1) { /* No available entry found in either group */
                 iSeries_hunlock(hpte_group);
@@ -73,10 +85,9 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
                 slot &= 0x7fffffffffffffff;
         }
 
-        arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
 
-        lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
-        lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
+        lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID;
+        lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
 
         /* Now fill in the actual HPTE */
         HvCallHpt_addValidate(slot, secondary, &lhpte);
@@ -86,25 +97,6 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
         return (secondary << 3) | (slot & 7);
 }
 
-long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
-                unsigned long va, unsigned long prpn, unsigned long vflags,
-                unsigned long rflags)
-{
-        long slot;
-        hpte_t lhpte;
-
-        slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
-
-        if (lhpte.v & HPTE_V_VALID) {
-                /* Bolt the existing HPTE */
-                HvCallHpt_setSwBits(slot, 0x10, 0);
-                HvCallHpt_setPp(slot, PP_RWXX);
-                return 0;
-        }
-
-        return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
-}
-
 static unsigned long iSeries_hpte_getword0(unsigned long slot)
 {
         hpte_t hpte;
@@ -150,15 +142,17 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
  * bits 61..63 : PP2,PP1,PP0
  */
 static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
-                unsigned long va, int large, int local)
+                unsigned long va, int psize, int local)
 {
         hpte_t hpte;
-        unsigned long avpn = va >> 23;
+        unsigned long want_v;
 
         iSeries_hlock(slot);
 
         HvCallHpt_get(&hpte, slot);
-        if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
+        want_v = hpte_encode_v(va, MMU_PAGE_4K);
+
+        if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
                 /*
                  * Hypervisor expects bits as NPPP, which is
                  * different from how they are mapped in our PP.
@@ -210,14 +204,17 @@ static long iSeries_hpte_find(unsigned long vpn)
  *
  * No need to lock here because we should be the only user.
  */
-static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
+static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+                int psize)
 {
         unsigned long vsid,va,vpn;
         long slot;
 
+        BUG_ON(psize != MMU_PAGE_4K);
+
         vsid = get_kernel_vsid(ea);
         va = (vsid << 28) | (ea & 0x0fffffff);
-        vpn = va >> PAGE_SHIFT;
+        vpn = va >> HW_PAGE_SHIFT;
         slot = iSeries_hpte_find(vpn);
         if (slot == -1)
                 panic("updateboltedpp: Could not find page to bolt\n");
@@ -225,7 +222,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 }
 
 static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
-                int large, int local)
+                int psize, int local)
 {
         unsigned long hpte_v;
         unsigned long avpn = va >> 23;
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
index 62ec73479687..f476d71194fa 100644
--- a/arch/powerpc/platforms/iseries/hvlog.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -22,7 +22,7 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
 
         while (len) {
                 hv_buf.addr = cur;
-                left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur;
+                left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
                 if (left_this_page > len)
                         left_this_page = len;
                 hv_buf.len = left_this_page;
@@ -30,6 +30,6 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
                 HvCall2(HvCallBaseWriteLogBuffer,
                                 virt_to_abs(&hv_buf),
                                 left_this_page);
-                cur = (cur & PAGE_MASK) + PAGE_SIZE;
+                cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
         }
 }
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 1a6845b5c5a4..bf081b345820 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -43,9 +43,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
         u64 rc;
         union tce_entry tce;
 
+        index <<= TCE_PAGE_FACTOR;
+        npages <<= TCE_PAGE_FACTOR;
+
         while (npages--) {
                 tce.te_word = 0;
-                tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
+                tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
 
                 if (tbl->it_type == TCE_VB) {
                         /* Virtual Bus */
@@ -66,7 +69,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
                         panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
                                         rc);
                 index++;
-                uaddr += PAGE_SIZE;
+                uaddr += TCE_PAGE_SIZE;
         }
 }
 
@@ -74,6 +77,9 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
 {
         u64 rc;
 
+        npages <<= TCE_PAGE_FACTOR;
+        index <<= TCE_PAGE_FACTOR;
+
         while (npages--) {
                 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
                 if (rc)
@@ -83,27 +89,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
         }
 }
 
-#ifdef CONFIG_PCI
-/*
- * This function compares the known tables to find an iommu_table
- * that has already been built for hardware TCEs.
- */
-static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
-{
-        struct pci_dn *pdn;
-
-        list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
-                struct iommu_table *it = pdn->iommu_table;
-                if ((it != NULL) &&
-                    (it->it_type == TCE_PCI) &&
-                    (it->it_offset == tbl->it_offset) &&
-                    (it->it_index == tbl->it_index) &&
-                    (it->it_size == tbl->it_size))
-                        return it;
-        }
-        return NULL;
-}
-
 /*
  * Call Hv with the architected data structure to get TCE table info.
  * info. Put the returned data into the Linux representation of the
@@ -113,8 +98,10 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
  * 2. TCE table per Bus.
  * 3. TCE Table per IOA.
  */
-static void iommu_table_getparms(struct pci_dn *pdn,
-                struct iommu_table* tbl)
+void iommu_table_getparms_iSeries(unsigned long busno,
+                unsigned char slotno,
+                unsigned char virtbus,
+                struct iommu_table* tbl)
 {
         struct iommu_table_cb *parms;
 
@@ -124,9 +111,9 @@ static void iommu_table_getparms(struct pci_dn *pdn,
 
         memset(parms, 0, sizeof(*parms));
 
-        parms->itc_busno = pdn->busno;
-        parms->itc_slotno = pdn->LogicalSlot;
-        parms->itc_virtbus = 0;
+        parms->itc_busno = busno;
+        parms->itc_slotno = slotno;
+        parms->itc_virtbus = virtbus;
 
         HvCallXm_getTceTableParms(iseries_hv_addr(parms));
 
@@ -134,17 +121,40 @@
                 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
 
         /* itc_size is in pages worth of table, it_size is in # of entries */
-        tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry);
+        tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
+                        sizeof(union tce_entry)) >> TCE_PAGE_FACTOR;
         tbl->it_busno = parms->itc_busno;
-        tbl->it_offset = parms->itc_offset;
+        tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
         tbl->it_index = parms->itc_index;
         tbl->it_blocksize = 1;
-        tbl->it_type = TCE_PCI;
+        tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
 
         kfree(parms);
 }
 
 
+#ifdef CONFIG_PCI
+/*
+ * This function compares the known tables to find an iommu_table
+ * that has already been built for hardware TCEs.
+ */
+static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
+{
+        struct pci_dn *pdn;
+
+        list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
+                struct iommu_table *it = pdn->iommu_table;
+                if ((it != NULL) &&
+                    (it->it_type == TCE_PCI) &&
+                    (it->it_offset == tbl->it_offset) &&
+                    (it->it_index == tbl->it_index) &&
+                    (it->it_size == tbl->it_size))
+                        return it;
+        }
+        return NULL;
+}
+
+
 void iommu_devnode_init_iSeries(struct device_node *dn)
 {
         struct iommu_table *tbl;
@@ -152,7 +162,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
 
         tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
 
-        iommu_table_getparms(pdn, tbl);
+        iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl);
 
         /* Look for existing tce table */
         pdn->iommu_table = iommu_table_find(tbl);
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 36f89e9ec7d0..d3e4bf756c83 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -316,11 +316,11 @@ static void __init iSeries_init_early(void)
          */
         if (naca.xRamDisk) {
                 initrd_start = (unsigned long)__va(naca.xRamDisk);
-                initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
+                initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
                 initrd_below_start_ok = 1; // ramdisk in kernel space
                 ROOT_DEV = Root_RAM0;
-                if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
-                        rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
+                if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
+                        rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
         } else
 #endif /* CONFIG_BLK_DEV_INITRD */
         {
@@ -466,13 +466,14 @@ static void __init build_iSeries_Memory_Map(void)
          */
         hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
         hptSizePages = (u32)HvCallHpt_getHptPages();
-        hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
+        hptSizeChunks = hptSizePages >>
+                (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
         hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
 
         printk("HPT absolute addr = %016lx, size = %dK\n",
                         chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
 
-        ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
+        ppc64_pft_size = __ilog2(hptSizePages * HW_PAGE_SIZE);
 
         /*
          * The actual hashed page table is in the hypervisor,
@@ -625,7 +626,7 @@ static void __init iSeries_fixup_klimit(void)
          */
         if (naca.xRamDisk)
                 klimit = KERNELBASE + (u64)naca.xRamDisk +
-                        (naca.xRamDiskSize * PAGE_SIZE);
+                        (naca.xRamDiskSize * HW_PAGE_SIZE);
         else {
                 /*
                  * No ram disk was included - check and see if there
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
index c27a66876c2c..384360ee06ec 100644
--- a/arch/powerpc/platforms/iseries/vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -30,41 +30,14 @@ static struct iommu_table vio_iommu_table;
 
 static void __init iommu_vio_init(void)
 {
-        struct iommu_table *t;
-        struct iommu_table_cb cb;
-        unsigned long cbp;
-        unsigned long itc_entries;
+        iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+        veth_iommu_table.it_size /= 2;
+        vio_iommu_table = veth_iommu_table;
+        vio_iommu_table.it_offset += veth_iommu_table.it_size;
 
-        cb.itc_busno = 255; /* Bus 255 is the virtual bus */
-        cb.itc_virtbus = 0xff; /* Ask for virtual bus */
-
-        cbp = virt_to_abs(&cb);
-        HvCallXm_getTceTableParms(cbp);
-
-        itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
-        veth_iommu_table.it_size = itc_entries / 2;
-        veth_iommu_table.it_busno = cb.itc_busno;
-        veth_iommu_table.it_offset = cb.itc_offset;
-        veth_iommu_table.it_index = cb.itc_index;
-        veth_iommu_table.it_type = TCE_VB;
-        veth_iommu_table.it_blocksize = 1;
-
-        t = iommu_init_table(&veth_iommu_table);
-
-        if (!t)
+        if (!iommu_init_table(&veth_iommu_table))
                 printk("Virtual Bus VETH TCE table failed.\n");
-
-        vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
-        vio_iommu_table.it_busno = cb.itc_busno;
-        vio_iommu_table.it_offset = cb.itc_offset +
-                veth_iommu_table.it_size;
-        vio_iommu_table.it_index = cb.itc_index;
-        vio_iommu_table.it_type = TCE_VB;
-        vio_iommu_table.it_blocksize = 1;
-
-        t = iommu_init_table(&vio_iommu_table);
-
-        if (!t)
+        if (!iommu_init_table(&vio_iommu_table))
                 printk("Virtual Bus VIO TCE table failed.\n");
 }
 
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index fe97bfbf7463..842672695598 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(statuslock);
  * For each kind of event we allocate a buffer that is
  * guaranteed not to cross a page boundary
  */
-static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
+static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
+        __attribute__((__aligned__(4096)));
 static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
 static int event_buffer_initialised;
 
@@ -116,12 +117,12 @@ static int proc_viopath_show(struct seq_file *m, void *v)
         HvLpEvent_Rc hvrc;
         DECLARE_MUTEX_LOCKED(Semaphore);
 
-        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+        buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
         if (!buf)
                 return 0;
-        memset(buf, 0, PAGE_SIZE);
+        memset(buf, 0, HW_PAGE_SIZE);
 
-        handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
+        handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
                         DMA_FROM_DEVICE);
 
         hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
@@ -131,7 +132,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
                         viopath_sourceinst(viopath_hostLp),
                         viopath_targetinst(viopath_hostLp),
                         (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
-                        ((u64)handle) << 32, PAGE_SIZE, 0, 0);
+                        ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
 
         if (hvrc != HvLpEvent_Rc_Good)
                 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
@@ -140,7 +141,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
 
         vlanMap = HvLpConfig_getVirtualLanIndexMap();
 
-        buf[PAGE_SIZE-1] = '\0';
+        buf[HW_PAGE_SIZE-1] = '\0';
         seq_printf(m, "%s", buf);
         seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
         seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
@@ -152,7 +153,8 @@ static int proc_viopath_show(struct seq_file *m, void *v)
                         e2a(xItExtVpdPanel.systemSerial[4]),
                         e2a(xItExtVpdPanel.systemSerial[5]));
 
-        dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
+        dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
+                        DMA_FROM_DEVICE);
         kfree(buf);
 
         return 0;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8a42006370c5..a50e5f3f396d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -19,7 +19,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#define DEBUG
+#undef DEBUG_LOW
 
 #include <linux/config.h>
 #include <linux/kernel.h>
@@ -42,10 +42,10 @@
 
 #include "plpar_wrappers.h"
 
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while(0)
 #else
-#define DBG(fmt...)
+#define DBG_LOW(fmt...) do { } while(0)
 #endif
 
 /* in pSeries_hvCall.S */
@@ -277,8 +277,9 @@ void vpa_init(int cpu)
 }
 
 long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-                unsigned long va, unsigned long prpn,
-                unsigned long vflags, unsigned long rflags)
+                unsigned long va, unsigned long pa,
+                unsigned long rflags, unsigned long vflags,
+                int psize)
 {
         unsigned long lpar_rc;
         unsigned long flags;
@@ -286,11 +287,28 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
         unsigned long hpte_v, hpte_r;
         unsigned long dummy0, dummy1;
 
-        hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID;
-        if (vflags & HPTE_V_LARGE)
-                hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
-
-        hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
+        if (!(vflags & HPTE_V_BOLTED))
+                DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+                        "rflags=%lx, vflags=%lx, psize=%d)\n",
+                        hpte_group, va, pa, rflags, vflags, psize);
+
+        hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+        hpte_r = hpte_encode_r(pa, psize) | rflags;
+
+        if (!(vflags & HPTE_V_BOLTED))
+                DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
+
+#if 1
+        {
+                int i;
+                for (i=0;i<8;i++) {
+                        unsigned long w0, w1;
+                        plpar_pte_read(0, hpte_group, &w0, &w1);
+                        BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
+                                && (w0 & HPTE_V_VALID));
+                }
+        }
+#endif
 
         /* Now fill in the actual HPTE */
         /* Set CEC cookie to 0 */
@@ -300,23 +318,30 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
         /* Exact = 0 */
         flags = 0;
 
-        /* XXX why is this here? - Anton */
+        /* Make pHyp happy */
         if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
                 hpte_r &= ~_PAGE_COHERENT;
 
         lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
                         hpte_r, &slot, &dummy0, &dummy1);
-
-        if (unlikely(lpar_rc == H_PTEG_Full))
+        if (unlikely(lpar_rc == H_PTEG_Full)) {
+                if (!(vflags & HPTE_V_BOLTED))
+                        DBG_LOW(" full\n");
                 return -1;
+        }
 
         /*
          * Since we try and ioremap PHBs we don't own, the pte insert
          * will fail. However we must catch the failure in hash_page
          * or we will loop forever, so return -2 in this case.
          */
-        if (unlikely(lpar_rc != H_Success))
+        if (unlikely(lpar_rc != H_Success)) {
+                if (!(vflags & HPTE_V_BOLTED))
+                        DBG_LOW(" lpar err %d\n", lpar_rc);
                 return -2;
+        }
+        if (!(vflags & HPTE_V_BOLTED))
+                DBG_LOW(" -> slot: %d\n", slot & 7);
 
         /* Because of iSeries, we have to pass down the secondary
          * bucket bit here as well
@@ -341,10 +366,8 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
                 /* don't remove a bolted entry */
                 lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
                                 (0x1UL << 4), &dummy1, &dummy2);
-
                 if (lpar_rc == H_Success)
                         return i;
-
                 BUG_ON(lpar_rc != H_Not_Found);
 
                 slot_offset++;
@@ -372,20 +395,28 @@ static void pSeries_lpar_hptab_clear(void)
  * We can probably optimize here and assume the high bits of newpp are
  * already zero. For now I am paranoid.
  */
-static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
-                unsigned long va, int large, int local)
+static long pSeries_lpar_hpte_updatepp(unsigned long slot,
+                unsigned long newpp,
+                unsigned long va,
+                int psize, int local)
 {
         unsigned long lpar_rc;
         unsigned long flags = (newpp & 7) | H_AVPN;
-        unsigned long avpn = va >> 23;
+        unsigned long want_v;
 
-        if (large)
-                avpn &= ~0x1UL;
+        want_v = hpte_encode_v(va, psize);
 
-        lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7));
+        DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... ",
+                want_v & HPTE_V_AVPN, slot, flags, psize);
 
-        if (lpar_rc == H_Not_Found)
+        lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN);
+
+        if (lpar_rc == H_Not_Found) {
+                DBG_LOW("not found !\n");
                 return -1;
+        }
+
+        DBG_LOW("ok\n");
 
         BUG_ON(lpar_rc != H_Success);
 
@@ -411,21 +442,22 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
         return dword0;
 }
 
-static long pSeries_lpar_hpte_find(unsigned long vpn)
+static long pSeries_lpar_hpte_find(unsigned long va, int psize)
 {
         unsigned long hash;
         unsigned long i, j;
         long slot;
-        unsigned long hpte_v;
+        unsigned long want_v, hpte_v;
 
-        hash = hpt_hash(vpn, 0);
+        hash = hpt_hash(va, mmu_psize_defs[psize].shift);
+        want_v = hpte_encode_v(va, psize);
 
         for (j = 0; j < 2; j++) {
                 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                 for (i = 0; i < HPTES_PER_GROUP; i++) {
                         hpte_v = pSeries_lpar_hpte_getword0(slot);
 
-                        if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+                        if (HPTE_V_COMPARE(hpte_v, want_v)
                             && (hpte_v & HPTE_V_VALID)
                             && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
                                 /* HPTE matches */
@@ -442,17 +474,15 @@ static long pSeries_lpar_hpte_find(unsigned long vpn)
 }
 
 static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
-                unsigned long ea)
+                unsigned long ea,
+                int psize)
 {
-        unsigned long lpar_rc;
-        unsigned long vsid, va, vpn, flags;
-        long slot;
+        unsigned long lpar_rc, slot, vsid, va, flags;
 
         vsid = get_kernel_vsid(ea);
         va = (vsid << 28) | (ea & 0x0fffffff);
-        vpn = va >> PAGE_SHIFT;
 
-        slot = pSeries_lpar_hpte_find(vpn);
+        slot = pSeries_lpar_hpte_find(va, psize);
         BUG_ON(slot == -1);
 
         flags = newpp & 7;
@@ -462,18 +492,18 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
 }
 
 static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
-                int large, int local)
+                int psize, int local)
 {
-        unsigned long avpn = va >> 23;
+        unsigned long want_v;
         unsigned long lpar_rc;
         unsigned long dummy1, dummy2;
 
-        if (large)
-                avpn &= ~0x1UL;
-
-        lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
-                        &dummy2);
+        DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d",
+                slot, va, psize, local);
 
+        want_v = hpte_encode_v(va, psize);
+        lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN,
+                        &dummy1, &dummy2);
         if (lpar_rc == H_Not_Found)
                 return;
 
@@ -495,7 +525,8 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local)
                 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
         for (i = 0; i < number; i++)
-                flush_hash_page(batch->vaddr[i], batch->pte[i], local);
+                flush_hash_page(batch->vaddr[i], batch->pte[i],
+                                batch->psize, local);
 
         if (lock_tlbie)
                 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);