Diffstat (limited to 'arch/powerpc/platforms/iseries')
-rw-r--r--  arch/powerpc/platforms/iseries/htab.c     |  65
-rw-r--r--  arch/powerpc/platforms/iseries/hvlog.c    |   4
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c    |  74
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c    |  13
-rw-r--r--  arch/powerpc/platforms/iseries/vio.c      |  39
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c  |  16
6 files changed, 97 insertions(+), 114 deletions(-)
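
The hunks below share one theme: the iSeries hypervisor always works in fixed 4K hardware pages and 4K TCE entries, so the platform code is moved off the kernel's configurable PAGE_* macros onto the HW_PAGE_*/TCE_* constants, and the hash-table callbacks gain an explicit page-size argument (asserted to be MMU_PAGE_4K). As orientation only, the constants the patch leans on are assumed to be defined roughly as follows; the real definitions live in the powerpc page and TCE headers, not in this diff:

/* Sketch of assumed definitions -- not part of this patch. */
#define HW_PAGE_SHIFT	12				/* hypervisor pages are always 4K */
#define HW_PAGE_SIZE	(1UL << HW_PAGE_SHIFT)
#define HW_PAGE_MASK	(~(HW_PAGE_SIZE - 1))

#define TCE_SHIFT	12				/* one TCE maps one 4K page */
#define TCE_PAGE_SIZE	(1UL << TCE_SHIFT)
#define TCE_PAGE_FACTOR	(PAGE_SHIFT - TCE_SHIFT)	/* 4K TCEs per kernel page, as a shift */
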
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index b3c6c3374ca6..30bdcf3925d9 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -39,15 +39,16 @@ static inline void iSeries_hunlock(unsigned long slot)
 	spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
 }
 
-static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
-			 unsigned long prpn, unsigned long vflags,
-			 unsigned long rflags)
+long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
+			 unsigned long pa, unsigned long rflags,
+			 unsigned long vflags, int psize)
 {
-	unsigned long arpn;
 	long slot;
 	hpte_t lhpte;
 	int secondary = 0;
 
+	BUG_ON(psize != MMU_PAGE_4K);
+
 	/*
 	 * The hypervisor tries both primary and secondary.
 	 * If we are being called to insert in the secondary,
@@ -59,8 +60,19 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 
 	iSeries_hlock(hpte_group);
 
-	slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
-	BUG_ON(lhpte.v & HPTE_V_VALID);
+	slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
+	if (unlikely(lhpte.v & HPTE_V_VALID)) {
+		if (vflags & HPTE_V_BOLTED) {
+			HvCallHpt_setSwBits(slot, 0x10, 0);
+			HvCallHpt_setPp(slot, PP_RWXX);
+			iSeries_hunlock(hpte_group);
+			if (slot < 0)
+				return 0x8 | (slot & 7);
+			else
+				return slot & 7;
+		}
+		BUG();
+	}
 
 	if (slot == -1) { /* No available entry found in either group */
 		iSeries_hunlock(hpte_group);
@@ -73,10 +85,9 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 		slot &= 0x7fffffffffffffff;
 	}
 
-	arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
 
-	lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
-	lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
+	lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID;
+	lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
 
 	/* Now fill in the actual HPTE */
 	HvCallHpt_addValidate(slot, secondary, &lhpte);
@@ -86,25 +97,6 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 	return (secondary << 3) | (slot & 7);
 }
 
-long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
-		unsigned long va, unsigned long prpn, unsigned long vflags,
-		unsigned long rflags)
-{
-	long slot;
-	hpte_t lhpte;
-
-	slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
-
-	if (lhpte.v & HPTE_V_VALID) {
-		/* Bolt the existing HPTE */
-		HvCallHpt_setSwBits(slot, 0x10, 0);
-		HvCallHpt_setPp(slot, PP_RWXX);
-		return 0;
-	}
-
-	return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
-}
-
 static unsigned long iSeries_hpte_getword0(unsigned long slot)
 {
 	hpte_t hpte;
@@ -150,15 +142,17 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
  * bits 61..63 : PP2,PP1,PP0
  */
 static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				  unsigned long va, int large, int local)
+				  unsigned long va, int psize, int local)
 {
 	hpte_t hpte;
-	unsigned long avpn = va >> 23;
+	unsigned long want_v;
 
 	iSeries_hlock(slot);
 
 	HvCallHpt_get(&hpte, slot);
-	if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
+	want_v = hpte_encode_v(va, MMU_PAGE_4K);
+
+	if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
 		/*
 		 * Hypervisor expects bits as NPPP, which is
 		 * different from how they are mapped in our PP.
@@ -210,14 +204,17 @@ static long iSeries_hpte_find(unsigned long vpn)
  *
  * No need to lock here because we should be the only user.
  */
-static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
+static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+					int psize)
 {
 	unsigned long vsid,va,vpn;
 	long slot;
 
+	BUG_ON(psize != MMU_PAGE_4K);
+
 	vsid = get_kernel_vsid(ea);
 	va = (vsid << 28) | (ea & 0x0fffffff);
-	vpn = va >> PAGE_SHIFT;
+	vpn = va >> HW_PAGE_SHIFT;
 	slot = iSeries_hpte_find(vpn);
 	if (slot == -1)
 		panic("updateboltedpp: Could not find page to bolt\n");
@@ -225,7 +222,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 }
 
 static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
-				    int large, int local)
+				    int psize, int local)
 {
 	unsigned long hpte_v;
 	unsigned long avpn = va >> 23;
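
Two structural changes in htab.c stand out: iSeries_hpte_bolt_or_insert() is deleted, with the "bolt an already-valid entry" case folded into iSeries_hpte_insert() behind the HPTE_V_BOLTED flag, and the open-coded AVPN/RPN packing is replaced by hpte_encode_v()/hpte_encode_r(). Inferred purely from the expressions they replace, the 4K-only behaviour of those helpers is assumed to reduce to something like the sketch below; the real helpers also handle other page sizes and are defined in the generic hash-MMU headers, not here:

/* Hedged sketch only -- inferred from the removed open-coded expressions. */
static inline unsigned long hpte_encode_v_4k(unsigned long va)
{
	return (va >> 23) << HPTE_V_AVPN_SHIFT;			/* AVPN of a 4K page */
}

static inline unsigned long hpte_encode_r_4k(unsigned long abs_pa)
{
	return (abs_pa >> HW_PAGE_SHIFT) << HPTE_R_RPN_SHIFT;	/* real page number */
}
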
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
index 62ec73479687..f476d71194fa 100644
--- a/arch/powerpc/platforms/iseries/hvlog.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -22,7 +22,7 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
 
 	while (len) {
 		hv_buf.addr = cur;
-		left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur;
+		left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
 		if (left_this_page > len)
 			left_this_page = len;
 		hv_buf.len = left_this_page;
@@ -30,6 +30,6 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
 		HvCall2(HvCallBaseWriteLogBuffer,
 				virt_to_abs(&hv_buf),
 				left_this_page);
-		cur = (cur & PAGE_MASK) + PAGE_SIZE;
+		cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
 	}
 }
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 1a6845b5c5a4..bf081b345820 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -43,9 +43,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
 	u64 rc;
 	union tce_entry tce;
 
+	index <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	while (npages--) {
 		tce.te_word = 0;
-		tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
+		tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
 
 		if (tbl->it_type == TCE_VB) {
 			/* Virtual Bus */
@@ -66,7 +69,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
 			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
 					rc);
 		index++;
-		uaddr += PAGE_SIZE;
+		uaddr += TCE_PAGE_SIZE;
 	}
 }
 
@@ -74,6 +77,9 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
 {
 	u64 rc;
 
+	npages <<= TCE_PAGE_FACTOR;
+	index <<= TCE_PAGE_FACTOR;
+
 	while (npages--) {
 		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
 		if (rc)
@@ -83,27 +89,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
 	}
 }
 
-#ifdef CONFIG_PCI
-/*
- * This function compares the known tables to find an iommu_table
- * that has already been built for hardware TCEs.
- */
-static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
-{
-	struct pci_dn *pdn;
-
-	list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
-		struct iommu_table *it = pdn->iommu_table;
-		if ((it != NULL) &&
-		    (it->it_type == TCE_PCI) &&
-		    (it->it_offset == tbl->it_offset) &&
-		    (it->it_index == tbl->it_index) &&
-		    (it->it_size == tbl->it_size))
-			return it;
-	}
-	return NULL;
-}
-
 /*
  * Call Hv with the architected data structure to get TCE table info.
  * info. Put the returned data into the Linux representation of the
@@ -113,8 +98,10 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
  * 2. TCE table per Bus.
  * 3. TCE Table per IOA.
  */
-static void iommu_table_getparms(struct pci_dn *pdn,
-				 struct iommu_table* tbl)
+void iommu_table_getparms_iSeries(unsigned long busno,
+				  unsigned char slotno,
+				  unsigned char virtbus,
+				  struct iommu_table* tbl)
 {
 	struct iommu_table_cb *parms;
 
@@ -124,9 +111,9 @@ static void iommu_table_getparms(struct pci_dn *pdn,
 
 	memset(parms, 0, sizeof(*parms));
 
-	parms->itc_busno = pdn->busno;
-	parms->itc_slotno = pdn->LogicalSlot;
-	parms->itc_virtbus = 0;
+	parms->itc_busno = busno;
+	parms->itc_slotno = slotno;
+	parms->itc_virtbus = virtbus;
 
 	HvCallXm_getTceTableParms(iseries_hv_addr(parms));
 
@@ -134,17 +121,40 @@ static void iommu_table_getparms(struct pci_dn *pdn,
 		panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
 
 	/* itc_size is in pages worth of table, it_size is in # of entries */
-	tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry);
+	tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
+			sizeof(union tce_entry)) >> TCE_PAGE_FACTOR;
 	tbl->it_busno = parms->itc_busno;
-	tbl->it_offset = parms->itc_offset;
+	tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
 	tbl->it_index = parms->itc_index;
 	tbl->it_blocksize = 1;
-	tbl->it_type = TCE_PCI;
+	tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
 
 	kfree(parms);
 }
 
 
+#ifdef CONFIG_PCI
+/*
+ * This function compares the known tables to find an iommu_table
+ * that has already been built for hardware TCEs.
+ */
+static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
+{
+	struct pci_dn *pdn;
+
+	list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
+		struct iommu_table *it = pdn->iommu_table;
+		if ((it != NULL) &&
+		    (it->it_type == TCE_PCI) &&
+		    (it->it_offset == tbl->it_offset) &&
+		    (it->it_index == tbl->it_index) &&
+		    (it->it_size == tbl->it_size))
+			return it;
+	}
+	return NULL;
+}
+
+
 void iommu_devnode_init_iSeries(struct device_node *dn)
 {
 	struct iommu_table *tbl;
@@ -152,7 +162,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
 
 	tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
 
-	iommu_table_getparms(pdn, tbl);
+	iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl);
 
 	/* Look for existing tce table */
 	pdn->iommu_table = iommu_table_find(tbl);
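
In iommu.c the TCE path now always programs 4K TCEs regardless of the kernel page size: iommu_table_getparms_iSeries() (no longer static, and taking busno/slotno/virtbus instead of a pci_dn so vio.c can reuse it) converts the hypervisor's reply into kernel-page-sized iommu slots, while tce_build/tce_free scale back up by TCE_PAGE_FACTOR before calling the hypervisor. A minimal sketch of that scaling, assuming a 64K kernel page (PAGE_SHIFT == 16, so TCE_PAGE_FACTOR == 4) and a hypothetical set_tce() stand-in for the real HvCallXm_setTce() call:

/* Illustration only: one 64K kernel page -> 16 consecutive 4K TCEs. */
static void map_one_kernel_page(struct iommu_table *tbl, long index,
				unsigned long uaddr)
{
	long tce_index = index << TCE_PAGE_FACTOR;	/* first 4K TCE slot */
	long ntce = 1 << TCE_PAGE_FACTOR;		/* 16 TCEs per kernel page */

	while (ntce--) {
		/* each TCE carries the real page number of one 4K piece */
		set_tce(tbl, tce_index++, virt_to_abs(uaddr) >> TCE_SHIFT);
		uaddr += TCE_PAGE_SIZE;
	}
}
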
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index fda712b42168..c5207064977d 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -320,11 +320,11 @@ static void __init iSeries_init_early(void)
 	 */
 	if (naca.xRamDisk) {
 		initrd_start = (unsigned long)__va(naca.xRamDisk);
-		initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
+		initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
 		initrd_below_start_ok = 1;	// ramdisk in kernel space
 		ROOT_DEV = Root_RAM0;
-		if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
-			rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
+		if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
+			rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
 	} else
 #endif /* CONFIG_BLK_DEV_INITRD */
 	{
@@ -470,13 +470,14 @@ static void __init build_iSeries_Memory_Map(void)
 	 */
 	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
 	hptSizePages = (u32)HvCallHpt_getHptPages();
-	hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
+	hptSizeChunks = hptSizePages >>
+		(MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
 	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
 
 	printk("HPT absolute addr = %016lx, size = %dK\n",
 		chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
 
-	ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
+	ppc64_pft_size = __ilog2(hptSizePages * HW_PAGE_SIZE);
 
 	/*
 	 * The actual hashed page table is in the hypervisor,
@@ -629,7 +630,7 @@ static void __init iSeries_fixup_klimit(void)
 	 */
 	if (naca.xRamDisk)
 		klimit = KERNELBASE + (u64)naca.xRamDisk +
-			(naca.xRamDiskSize * PAGE_SIZE);
+			(naca.xRamDiskSize * HW_PAGE_SIZE);
 	else {
 		/*
 		 * No ram disk was included - check and see if there
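
The setup.c changes keep the early memory-map arithmetic in hypervisor pages: HvCallHpt_getHptPages() and naca.xRamDiskSize count 4K pages, so they are scaled by HW_PAGE_SIZE rather than the (possibly 64K) kernel PAGE_SIZE. Judging by the "hptSizeChunks * 256" in the printk, an MS chunk is 256K, so the reworked shift presumably amounts to a divide by 64:

/* Worked example under the assumption MSCHUNKS_CHUNK_SHIFT == 18 (256K chunks):
 * 18 - HW_PAGE_SHIFT = 6, i.e. 64 hardware 4K pages per chunk. */
static inline u32 hpt_pages_to_chunks(u32 hpt_pages)
{
	return hpt_pages >> (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
}
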
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
index c27a66876c2c..384360ee06ec 100644
--- a/arch/powerpc/platforms/iseries/vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -30,41 +30,14 @@ static struct iommu_table vio_iommu_table;
 
 static void __init iommu_vio_init(void)
 {
-	struct iommu_table *t;
-	struct iommu_table_cb cb;
-	unsigned long cbp;
-	unsigned long itc_entries;
+	iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+	veth_iommu_table.it_size /= 2;
+	vio_iommu_table = veth_iommu_table;
+	vio_iommu_table.it_offset += veth_iommu_table.it_size;
 
-	cb.itc_busno = 255;    /* Bus 255 is the virtual bus */
-	cb.itc_virtbus = 0xff; /* Ask for virtual bus */
-
-	cbp = virt_to_abs(&cb);
-	HvCallXm_getTceTableParms(cbp);
-
-	itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
-	veth_iommu_table.it_size = itc_entries / 2;
-	veth_iommu_table.it_busno = cb.itc_busno;
-	veth_iommu_table.it_offset = cb.itc_offset;
-	veth_iommu_table.it_index = cb.itc_index;
-	veth_iommu_table.it_type = TCE_VB;
-	veth_iommu_table.it_blocksize = 1;
-
-	t = iommu_init_table(&veth_iommu_table);
-
-	if (!t)
+	if (!iommu_init_table(&veth_iommu_table))
 		printk("Virtual Bus VETH TCE table failed.\n");
-
-	vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
-	vio_iommu_table.it_busno = cb.itc_busno;
-	vio_iommu_table.it_offset = cb.itc_offset +
-					  veth_iommu_table.it_size;
-	vio_iommu_table.it_index = cb.itc_index;
-	vio_iommu_table.it_type = TCE_VB;
-	vio_iommu_table.it_blocksize = 1;
-
-	t = iommu_init_table(&vio_iommu_table);
-
-	if (!t)
+	if (!iommu_init_table(&vio_iommu_table))
 		printk("Virtual Bus VIO TCE table failed.\n");
 }
 
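
iommu_vio_init() stops parsing the HvCallXm_getTceTableParms() control block by hand: it asks the now-shared iommu_table_getparms_iSeries() for bus 255 with virtbus 0xff, then splits the single virtual-bus table in half between veth and the generic VIO table. A rough sketch of the resulting split, using a trimmed-down stand-in struct (not the real struct iommu_table) and made-up sizes:

/* Illustration of the halving above: e.g. an 8192-entry table at offset 0
 * becomes veth: 4096 entries @ 0, vio: 4096 entries @ 4096. */
struct vtbl { unsigned long it_size, it_offset; };

static void split_virtual_bus_table(struct vtbl *veth, struct vtbl *vio)
{
	veth->it_size /= 2;			/* first half for virtual ethernet */
	*vio = *veth;				/* same bus/index, same size */
	vio->it_offset += veth->it_size;	/* second half for other VIO devices */
}
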
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index fe97bfbf7463..842672695598 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(statuslock);
  * For each kind of event we allocate a buffer that is
  * guaranteed not to cross a page boundary
  */
-static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
+static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
+	__attribute__((__aligned__(4096)));
 static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
 static int event_buffer_initialised;
 
@@ -116,12 +117,12 @@ static int proc_viopath_show(struct seq_file *m, void *v)
 	HvLpEvent_Rc hvrc;
 	DECLARE_MUTEX_LOCKED(Semaphore);
 
-	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
 	if (!buf)
 		return 0;
-	memset(buf, 0, PAGE_SIZE);
+	memset(buf, 0, HW_PAGE_SIZE);
 
-	handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
+	handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
 				DMA_FROM_DEVICE);
 
 	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
@@ -131,7 +132,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
 			viopath_sourceinst(viopath_hostLp),
 			viopath_targetinst(viopath_hostLp),
 			(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
-			((u64)handle) << 32, PAGE_SIZE, 0, 0);
+			((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
 
 	if (hvrc != HvLpEvent_Rc_Good)
 		printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
@@ -140,7 +141,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
 
 	vlanMap = HvLpConfig_getVirtualLanIndexMap();
 
-	buf[PAGE_SIZE-1] = '\0';
+	buf[HW_PAGE_SIZE-1] = '\0';
 	seq_printf(m, "%s", buf);
 	seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
 	seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
@@ -152,7 +153,8 @@ static int proc_viopath_show(struct seq_file *m, void *v)
 		   e2a(xItExtVpdPanel.systemSerial[4]),
 		   e2a(xItExtVpdPanel.systemSerial[5]));
 
-	dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
+			 DMA_FROM_DEVICE);
 	kfree(buf);
 
 	return 0;