Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c         |  34
-rw-r--r--  arch/powerpc/kernel/head_64.S           |  30
-rw-r--r--  arch/powerpc/kernel/ibmebus.c           |  27
-rw-r--r--  arch/powerpc/kernel/iommu.c             |  25
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c  |   9
-rw-r--r--  arch/powerpc/kernel/misc_64.S           |   9
-rw-r--r--  arch/powerpc/kernel/of_device.c         |   1
-rw-r--r--  arch/powerpc/kernel/pci-common.c        | 110
-rw-r--r--  arch/powerpc/kernel/pci_64.c            |   2
-rw-r--r--  arch/powerpc/kernel/prom_init.c         |  10
-rw-r--r--  arch/powerpc/kernel/setup_64.c          |   6
-rw-r--r--  arch/powerpc/kernel/signal_32.c         |  36
-rw-r--r--  arch/powerpc/kernel/signal_64.c         |  33
-rw-r--r--  arch/powerpc/kernel/vio.c               |  25
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S       |   5
15 files changed, 177 insertions(+), 185 deletions(-)
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 49248f89ce23..14183af1b3fb 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -30,28 +30,26 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
- * contiguous real kernel storage (not vmalloc). The address of the buffer
- * passed here is the kernel (virtual) address of the buffer. The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc). The address passed here
+ * comprises a page address and offset into that page. The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
  */
-static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
-				       size_t size,
-				       enum dma_data_direction direction,
-				       struct dma_attrs *attrs)
+static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction direction,
+				     struct dma_attrs *attrs)
 {
-	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
-				device_to_mask(dev), direction, attrs);
+	return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
+			      device_to_mask(dev), direction, attrs);
 }
 
 
-static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
-				   size_t size,
-				   enum dma_data_direction direction,
-				   struct dma_attrs *attrs)
+static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+				 size_t size, enum dma_data_direction direction,
+				 struct dma_attrs *attrs)
 {
-	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
-			   attrs);
+	iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+			 attrs);
 }
 
 
@@ -94,10 +92,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 struct dma_mapping_ops dma_iommu_ops = {
 	.alloc_coherent = dma_iommu_alloc_coherent,
 	.free_coherent = dma_iommu_free_coherent,
-	.map_single = dma_iommu_map_single,
-	.unmap_single = dma_iommu_unmap_single,
 	.map_sg = dma_iommu_map_sg,
 	.unmap_sg = dma_iommu_unmap_sg,
 	.dma_supported = dma_iommu_dma_supported,
+	.map_page = dma_iommu_map_page,
+	.unmap_page = dma_iommu_unmap_page,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
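
Note on the conversion pattern used throughout this series: a map_single-style request can always be expressed through the new map_page hook by splitting the kernel virtual address into a page and an offset. A minimal sketch, not part of the patch (example_map_single() is a hypothetical name; it assumes the buffer is contiguous lowmem, as the comment above requires, so virt_to_page() is valid):

#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only: rebuild a map_single-
 * style call on top of the page-based hook installed above. */
static inline dma_addr_t example_map_single(struct device *dev, void *vaddr,
					    size_t size,
					    enum dma_data_direction dir,
					    struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(vaddr);	/* lowmem only */
	unsigned long offset = (unsigned long)vaddr & ~PAGE_MASK;

	return dma_iommu_ops.map_page(dev, page, offset, size, dir, attrs);
}
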
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 69489bd3210c..b4bcf5a930fa 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -97,12 +97,6 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
 	.llong	0x0
 
-	/* This flag is set by purgatory if we should be a kdump kernel. */
-	/* Do not move this variable as purgatory knows about it. */
-	.globl __kdump_flag
-__kdump_flag:
-	.llong	0x0
-
 #ifdef CONFIG_PPC_ISERIES
 	/*
 	 * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -112,6 +106,20 @@ __kdump_flag:
 	.llong hvReleaseData-KERNELBASE
 #endif /* CONFIG_PPC_ISERIES */
 
+#ifdef CONFIG_CRASH_DUMP
+	/* This flag is set to 1 by a loader if the kernel should run
+	 * at the loaded address instead of the linked address.  This
+	 * is used by kexec-tools to keep the kdump kernel in the
+	 * crash_kernel region.  The loader is responsible for
+	 * observing the alignment requirement.
+	 */
+	/* Do not move this variable as kexec-tools knows about it. */
+	. = 0x5c
+	.globl	__run_at_load
+__run_at_load:
+	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
+#endif
+
 	. = 0x60
 /*
  * The following code is used to hold secondary processors
@@ -1391,8 +1399,8 @@ _STATIC(__after_prom_start)
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
 #ifdef CONFIG_CRASH_DUMP
-	ld	r7,__kdump_flag-_stext(r26)
-	cmpldi	cr0,r7,1	/* kdump kernel ? - stay where we are */
+	lwz	r7,__run_at_load-_stext(r26)
+	cmplwi	cr0,r7,1	/* kdump kernel ? - stay where we are */
 	bne	1f
 	add	r25,r25,r26
 #endif
@@ -1416,11 +1424,11 @@ _STATIC(__after_prom_start)
 #ifdef CONFIG_CRASH_DUMP
 /*
  * Check if the kernel has to be running as relocatable kernel based on the
- * variable __kdump_flag, if it is set the kernel is treated as relocatable
+ * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
-	ld	r7,__kdump_flag-_stext(r26)
-	cmpldi	cr0,r7,1
+	lwz	r7,__run_at_load-_stext(r26)
+	cmplwi	cr0,r7,1
 	bne	3f
 
 	li	r5,__end_interrupts - _stext	/* just copy interrupts */
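
The __run_at_load word is pinned at offset 0x5c precisely so an external loader can patch it in the built image. A hypothetical loader-side sketch (mark_run_at_load() is not from kexec-tools; the offset and the meaning of the value 1 follow from the code above):

#include <stdint.h>
#include <string.h>

#define RUN_AT_LOAD_OFFSET 0x5c	/* fixed by head_64.S above */

/* Hypothetical sketch: store 1 (big-endian, matching the 64-bit
 * PowerPC image) so the kernel stays at its load address. */
static void mark_run_at_load(unsigned char *image)
{
	static const unsigned char one_be[4] = { 0, 0, 0, 1 };

	memcpy(image + RUN_AT_LOAD_OFFSET, one_be, sizeof(one_be));
}
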
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index a06362223f8d..64299d28f364 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -79,20 +79,21 @@ static void ibmebus_free_coherent(struct device *dev,
 	kfree(vaddr);
 }
 
-static dma_addr_t ibmebus_map_single(struct device *dev,
-				     void *ptr,
-				     size_t size,
-				     enum dma_data_direction direction,
-				     struct dma_attrs *attrs)
+static dma_addr_t ibmebus_map_page(struct device *dev,
+				   struct page *page,
+				   unsigned long offset,
+				   size_t size,
+				   enum dma_data_direction direction,
+				   struct dma_attrs *attrs)
 {
-	return (dma_addr_t)(ptr);
+	return (dma_addr_t)(page_address(page) + offset);
 }
 
-static void ibmebus_unmap_single(struct device *dev,
+static void ibmebus_unmap_page(struct device *dev,
 			       dma_addr_t dma_addr,
 			       size_t size,
 			       enum dma_data_direction direction,
 			       struct dma_attrs *attrs)
 {
 	return;
 }
@@ -129,11 +130,11 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
 static struct dma_mapping_ops ibmebus_dma_ops = {
 	.alloc_coherent = ibmebus_alloc_coherent,
 	.free_coherent = ibmebus_free_coherent,
-	.map_single = ibmebus_map_single,
-	.unmap_single = ibmebus_unmap_single,
 	.map_sg = ibmebus_map_sg,
 	.unmap_sg = ibmebus_unmap_sg,
 	.dma_supported = ibmebus_dma_supported,
+	.map_page = ibmebus_map_page,
+	.unmap_page = ibmebus_unmap_page,
 };
 
 static int ibmebus_match_path(struct device *dev, void *data)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 3857d7e2af0c..1bfa706b96e7 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -32,6 +32,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
 #include <linux/iommu-helper.h>
+#include <linux/crash_dump.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -460,7 +461,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 static void iommu_table_clear(struct iommu_table *tbl)
 {
-	if (!__kdump_flag) {
+	if (!is_kdump_kernel()) {
 		/* Clear the table in case firmware left allocations in it */
 		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 		return;
@@ -564,21 +565,23 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
- * contiguous real kernel storage (not vmalloc). The address of the buffer
- * passed here is the kernel (virtual) address of the buffer. The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc). The address passed here
+ * comprises a page address and offset into that page. The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
  */
-dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
-			    void *vaddr, size_t size, unsigned long mask,
-			    enum dma_data_direction direction, struct dma_attrs *attrs)
+dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+			  struct page *page, unsigned long offset, size_t size,
+			  unsigned long mask, enum dma_data_direction direction,
+			  struct dma_attrs *attrs)
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
+	void *vaddr;
 	unsigned long uaddr;
 	unsigned int npages, align;
 
 	BUG_ON(direction == DMA_NONE);
 
+	vaddr = page_address(page) + offset;
 	uaddr = (unsigned long)vaddr;
 	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
 
@@ -604,9 +607,9 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 	return dma_handle;
 }
 
-void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
+void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction,
 		struct dma_attrs *attrs)
 {
 	unsigned int npages;
 
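
iommu_table_clear() now keys off is_kdump_kernel() from the newly included <linux/crash_dump.h> rather than the removed __kdump_flag. For reference, the helper boils down to checking whether an ELF core header was handed to this kernel; roughly (an illustrative sketch, not the verbatim header):

/* Sketch: a kdump kernel is one that received elfcorehdr= on its
 * command line, i.e. elfcorehdr_addr was moved off its sentinel. */
extern unsigned long long elfcorehdr_addr;

static inline int is_kdump_kernel(void)
{
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
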
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index e6efec788c4d..3c4ca046e854 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -255,14 +255,11 @@ static union thread_union kexec_stack
 /* Our assembly helper, in kexec_stub.S */
 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
 					void *image, void *control,
-					void (*clear_all)(void),
-					unsigned long kdump_flag) ATTRIB_NORET;
+					void (*clear_all)(void)) ATTRIB_NORET;
 
 /* too late to fail here */
 void default_machine_kexec(struct kimage *image)
 {
-	unsigned long kdump_flag = 0;
-
 	/* prepare control code if any */
 
 	/*
@@ -275,8 +272,6 @@ void default_machine_kexec(struct kimage *image)
 
 	if (crashing_cpu == -1)
 		kexec_prepare_cpus();
-	else
-		kdump_flag = KDUMP_SIGNATURE;
 
 	/* switch to a staticly allocated stack.  Based on irq stack code.
 	 * XXX: the task struct will likely be invalid once we do the copy!
@@ -289,7 +284,7 @@ void default_machine_kexec(struct kimage *image)
 	 */
 	kexec_sequence(&kexec_stack, image->start, image,
 			page_address(image->control_code_page),
-			ppc_md.hpte_clear_all, kdump_flag);
+			ppc_md.hpte_clear_all);
 	/* NOTREACHED */
 }
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a243fd072a77..3053fe5c62f2 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -611,12 +611,10 @@ real_mode: /* assume normal blr return */
 
 
 /*
- * kexec_sequence(newstack, start, image, control, clear_all(), kdump_flag)
+ * kexec_sequence(newstack, start, image, control, clear_all())
  *
  * does the grungy work with stack switching and real mode switches
  * also does simple calls to other code
- *
- * kdump_flag says whether the next kernel should be a kdump kernel.
  */
 
 _GLOBAL(kexec_sequence)
@@ -649,7 +647,7 @@ _GLOBAL(kexec_sequence)
 	mr	r29,r5	/* image (virt) */
 	mr	r28,r6	/* control, unused */
 	mr	r27,r7	/* clear_all() fn desc */
-	mr	r26,r8	/* kdump flag */
+	mr	r26,r8	/* spare */
 	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
 
 	/* disable interrupts, we are overwriting kernel data next */
@@ -711,6 +709,5 @@ _GLOBAL(kexec_sequence)
 	mr	r4,r30	# start, aka phys mem offset
 	mtlr	4
 	li	r5,0
-	mr	r6,r26	/* kdump_flag */
-	blr	/* image->start(physid, image->start, 0, kdump_flag); */
+	blr	/* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
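
With the flag argument gone, kexec_sequence() enters the next image with a plain three-argument convention, as the final comment notes. Written as a C prototype purely for illustration (kexec_entry_t is a hypothetical name; r3/r4/r5 carry the arguments per the powerpc ABI):

/* Illustration of the register state at the final blr above:
 * r3 = physical cpu id, r4 = image->start, r5 = 0. */
typedef void (*kexec_entry_t)(unsigned long physid, unsigned long start,
			      unsigned long zero);
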
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 93ae5b169f41..f3c9cae01dd5 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -78,7 +78,6 @@ struct of_device *of_device_alloc(struct device_node *np,
 	dev->dev.parent = parent;
 	dev->dev.release = of_release_dev;
 	dev->dev.archdata.of_node = np;
-	set_dev_node(&dev->dev, of_node_to_nid(np));
 
 	if (bus_id)
 		strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 1ec73938a00f..f36936d9fda3 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1239,69 +1239,66 @@ static int __init reparent_resources(struct resource *parent,
  * as well.
  */
 
-static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
+void pcibios_allocate_bus_resources(struct pci_bus *bus)
 {
-	struct pci_bus *bus;
+	struct pci_bus *b;
 	int i;
 	struct resource *res, *pr;
 
-	/* Depth-First Search on bus tree */
-	list_for_each_entry(bus, bus_list, node) {
-		for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
-			if ((res = bus->resource[i]) == NULL || !res->flags
-			    || res->start > res->end)
-				continue;
-			if (bus->parent == NULL)
-				pr = (res->flags & IORESOURCE_IO) ?
-					&ioport_resource : &iomem_resource;
-			else {
-				/* Don't bother with non-root busses when
-				 * re-assigning all resources. We clear the
-				 * resource flags as if they were colliding
-				 * and as such ensure proper re-allocation
-				 * later.
-				 */
-				if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
-					goto clear_resource;
-				pr = pci_find_parent_resource(bus->self, res);
-				if (pr == res) {
-					/* this happens when the generic PCI
-					 * code (wrongly) decides that this
-					 * bridge is transparent -- paulus
-					 */
-					continue;
-				}
-			}
-
-			DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
-			    "[0x%x], parent %p (%s)\n",
-			    bus->self ? pci_name(bus->self) : "PHB",
-			    bus->number, i,
-			    (unsigned long long)res->start,
-			    (unsigned long long)res->end,
-			    (unsigned int)res->flags,
-			    pr, (pr && pr->name) ? pr->name : "nil");
-
-			if (pr && !(pr->flags & IORESOURCE_UNSET)) {
-				if (request_resource(pr, res) == 0)
-					continue;
-				/*
-				 * Must be a conflict with an existing entry.
-				 * Move that entry (or entries) under the
-				 * bridge resource and try again.
-				 */
-				if (reparent_resources(pr, res) == 0)
-					continue;
-			}
-			printk(KERN_WARNING
-			       "PCI: Cannot allocate resource region "
-			       "%d of PCI bridge %d, will remap\n",
-			       i, bus->number);
-clear_resource:
-			res->flags = 0;
-		}
-		pcibios_allocate_bus_resources(&bus->children);
-	}
+	for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
+		if ((res = bus->resource[i]) == NULL || !res->flags
+		    || res->start > res->end)
+			continue;
+		if (bus->parent == NULL)
+			pr = (res->flags & IORESOURCE_IO) ?
+				&ioport_resource : &iomem_resource;
+		else {
+			/* Don't bother with non-root busses when
+			 * re-assigning all resources. We clear the
+			 * resource flags as if they were colliding
+			 * and as such ensure proper re-allocation
+			 * later.
+			 */
+			if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
+				goto clear_resource;
+			pr = pci_find_parent_resource(bus->self, res);
+			if (pr == res) {
+				/* this happens when the generic PCI
+				 * code (wrongly) decides that this
+				 * bridge is transparent -- paulus
+				 */
+				continue;
+			}
+		}
+
+		DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
+		    "[0x%x], parent %p (%s)\n",
+		    bus->self ? pci_name(bus->self) : "PHB",
+		    bus->number, i,
+		    (unsigned long long)res->start,
+		    (unsigned long long)res->end,
+		    (unsigned int)res->flags,
+		    pr, (pr && pr->name) ? pr->name : "nil");
+
+		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+			if (request_resource(pr, res) == 0)
+				continue;
+			/*
+			 * Must be a conflict with an existing entry.
+			 * Move that entry (or entries) under the
+			 * bridge resource and try again.
+			 */
+			if (reparent_resources(pr, res) == 0)
+				continue;
+		}
+		printk(KERN_WARNING "PCI: Cannot allocate resource region "
+		       "%d of PCI bridge %d, will remap\n", i, bus->number);
+clear_resource:
+		res->flags = 0;
+	}
+
+	list_for_each_entry(b, &bus->children, node)
+		pcibios_allocate_bus_resources(b);
 }
 
 static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1372 1369
1373void __init pcibios_resource_survey(void) 1370void __init pcibios_resource_survey(void)
1374{ 1371{
1372 struct pci_bus *b;
1373
1375 /* Allocate and assign resources. If we re-assign everything, then 1374 /* Allocate and assign resources. If we re-assign everything, then
1376 * we skip the allocate phase 1375 * we skip the allocate phase
1377 */ 1376 */
1378 pcibios_allocate_bus_resources(&pci_root_buses); 1377 list_for_each_entry(b, &pci_root_buses, node)
1378 pcibios_allocate_bus_resources(b);
1379 1379
1380 if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) { 1380 if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) {
1381 pcibios_allocate_resources(0); 1381 pcibios_allocate_resources(0);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 8247cff1cb3e..3502b9101e6b 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -426,7 +426,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 			 pci_name(bus->self));
 
 	__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
-				 res->end - res->start + 1);
+				 res->end + _IO_BASE + 1);
 	return 0;
 }
 
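
The point of this one-liner: __flush_hash_table_range() takes start and end virtual addresses, not a (start, length) pair, so the end bound must be offset into the remapped I/O space as well. Sketch of the intended bounds (illustrative only):

unsigned long start = res->start + _IO_BASE;	/* first byte to flush */
unsigned long end   = res->end + _IO_BASE + 1;	/* one past the last byte */
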
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 23e0db203329..2445945d3761 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -671,7 +671,7 @@ static struct fake_elf {
 		u32 ignore_me;
 	} rpadesc;
 } rpanote;
-} fake_elf __section(.fakeelf) = {
+} fake_elf = {
 	.elfhdr = {
 		.e_ident = { 0x7f, 'E', 'L', 'F',
 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
@@ -713,13 +713,13 @@ static struct fake_elf {
 		.type = 0x12759999,
 		.name = "IBM,RPA-Client-Config",
 		.rpadesc = {
-			.lpar_affinity = 1,
-			.min_rmo_size = 128,	/* in megabytes */
+			.lpar_affinity = 0,
+			.min_rmo_size = 64,	/* in megabytes */
 			.min_rmo_percent = 0,
-			.max_pft_size = 46,	/* 2^46 bytes max PFT size */
+			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
 			.splpar = 1,
 			.min_load = ~0U,
-			.new_mem_def = 1
+			.new_mem_def = 0
 		}
 	}
 };
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 843c0af210d0..169d74cef157 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -444,9 +444,9 @@ void __init setup_system(void)
 	if (htab_address)
 		printk("htab_address = 0x%p\n", htab_address);
 	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#if PHYSICAL_START > 0
-	printk("physical_start = 0x%lx\n", PHYSICAL_START);
-#endif
+	if (PHYSICAL_START > 0)
+		printk("physical_start = 0x%lx\n",
+		       PHYSICAL_START);
 	printk("-----------------------------------------------------\n");
 
 	DBG(" <- setup_system()\n");
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3e80aa32b8b0..a6a43103655e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -410,7 +410,7 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
  * altivec/spe instructions at some point.
  */
 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
-		int sigret)
+		int sigret, int ctx_has_vsx_region)
 {
 	unsigned long msr = regs->msr;
 
@@ -451,7 +451,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	 * the saved MSR value to indicate that frame->mc_vregs
 	 * contains valid data
 	 */
-	if (current->thread.used_vsr) {
+	if (current->thread.used_vsr && ctx_has_vsx_region) {
 		__giveup_vsx(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
@@ -858,11 +858,11 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 	frame = &rt_sf->uc.uc_mcontext;
 	addr = frame;
 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
-		if (save_user_regs(regs, frame, 0))
+		if (save_user_regs(regs, frame, 0, 1))
 			goto badframe;
 		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
 	} else {
-		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
+		if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
 			goto badframe;
 		regs->link = (unsigned long) frame->tramp;
 	}
@@ -936,12 +936,13 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
 		int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
 {
 	unsigned char tmp;
+	int ctx_has_vsx_region = 0;
 
 #ifdef CONFIG_PPC64
 	unsigned long new_msr = 0;
 
 	if (new_ctx &&
-	    __get_user(new_msr, &new_ctx->uc_mcontext.mc_gregs[PT_MSR]))
+	    get_user(new_msr, &new_ctx->uc_mcontext.mc_gregs[PT_MSR]))
 		return -EFAULT;
 	/*
 	 * Check that the context is not smaller than the original
@@ -956,16 +957,9 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
 	if ((ctx_size < sizeof(struct ucontext)) &&
 	    (new_msr & MSR_VSX))
 		return -EINVAL;
-#ifdef CONFIG_VSX
-	/*
-	 * If userspace doesn't provide enough room for VSX data,
-	 * but current thread has used VSX, we don't have anywhere
-	 * to store the full context back into.
-	 */
-	if ((ctx_size < sizeof(struct ucontext)) &&
-	    (current->thread.used_vsr && old_ctx))
-		return -EINVAL;
-#endif
+	/* Does the context have enough room to store VSX data? */
+	if (ctx_size >= sizeof(struct ucontext))
+		ctx_has_vsx_region = 1;
 #else
 	/* Context size is for future use. Right now, we only make sure
 	 * we are passed something we understand
@@ -985,17 +979,17 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
 	 */
 		mctx = (struct mcontext __user *)
 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
-		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
-		    || save_user_regs(regs, mctx, 0)
+		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
+		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
 			return -EFAULT;
 	}
 	if (new_ctx == NULL)
 		return 0;
-	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
+	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
 	    || __get_user(tmp, (u8 __user *) new_ctx)
-	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
+	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
 		return -EFAULT;
 
 	/*
@@ -1196,11 +1190,11 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		goto badframe;
 
 	if (vdso32_sigtramp && current->mm->context.vdso_base) {
-		if (save_user_regs(regs, &frame->mctx, 0))
+		if (save_user_regs(regs, &frame->mctx, 0, 1))
 			goto badframe;
 		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
 	} else {
-		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
+		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
 			goto badframe;
 		regs->link = (unsigned long) frame->mctx.tramp;
 	}
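
A detail shared by both signal files: new_msr is now read with get_user() instead of __get_user(). At that point new_ctx has not yet been range-checked (the access_ok() call against ctx_size comes later), and __get_user() performs no check of its own. Roughly, and only as an illustration of the distinction (not the verbatim uaccess code):

/* Sketch: get_user() = access_ok() + __get_user(). */
#define get_user_sketch(x, ptr)					\
	(access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))		\
		? __get_user((x), (ptr)) : -EFAULT)
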
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c6a8f2326b6f..e132891d3cea 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -74,7 +74,8 @@ static const char fmt64[] = KERN_INFO \
  */
 
 static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-		 int signr, sigset_t *set, unsigned long handler)
+		 int signr, sigset_t *set, unsigned long handler,
+		 int ctx_has_vsx_region)
 {
 	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
 	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -121,7 +122,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	 * then out to userspace. Update v_regs to point after the
 	 * VMX data.
 	 */
-	if (current->thread.used_vsr) {
+	if (current->thread.used_vsr && ctx_has_vsx_region) {
 		__giveup_vsx(current);
 		v_regs += ELF_NVRREG;
 		err |= copy_vsx_to_user(v_regs, current);
@@ -282,9 +283,10 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 	unsigned char tmp;
 	sigset_t set;
 	unsigned long new_msr = 0;
+	int ctx_has_vsx_region = 0;
 
 	if (new_ctx &&
-	    __get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
+	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
 		return -EFAULT;
 	/*
 	 * Check that the context is not smaller than the original
@@ -299,28 +301,23 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 	if ((ctx_size < sizeof(struct ucontext)) &&
 	    (new_msr & MSR_VSX))
 		return -EINVAL;
-#ifdef CONFIG_VSX
-	/*
-	 * If userspace doesn't provide enough room for VSX data,
-	 * but current thread has used VSX, we don't have anywhere
-	 * to store the full context back into.
-	 */
-	if ((ctx_size < sizeof(struct ucontext)) &&
-	    (current->thread.used_vsr && old_ctx))
-		return -EINVAL;
-#endif
+	/* Does the context have enough room to store VSX data? */
+	if (ctx_size >= sizeof(struct ucontext))
+		ctx_has_vsx_region = 1;
+
 	if (old_ctx != NULL) {
-		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
-		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0)
+		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
+		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
+					ctx_has_vsx_region)
 		    || __copy_to_user(&old_ctx->uc_sigmask,
 				      &current->blocked, sizeof(sigset_t)))
 			return -EFAULT;
 	}
 	if (new_ctx == NULL)
 		return 0;
-	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
+	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
 	    || __get_user(tmp, (u8 __user *) new_ctx)
-	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
+	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
 		return -EFAULT;
 
 	/*
@@ -423,7 +420,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 			  &frame->uc.uc_stack.ss_flags);
 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL,
-				(unsigned long)ka->sa.sa_handler);
+				(unsigned long)ka->sa.sa_handler, 1);
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
 		goto badframe;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 434c92a85c03..a11e6bc59b30 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -516,10 +516,10 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
 	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 }
 
-static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
-					   size_t size,
+static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
+					 unsigned long offset, size_t size,
 					   enum dma_data_direction direction,
 					   struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 	dma_addr_t ret = DMA_ERROR_CODE;
@@ -529,7 +529,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
 		return ret;
 	}
 
-	ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
+	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
 	if (unlikely(dma_mapping_error(dev, ret))) {
 		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -538,14 +538,14 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
 	return ret;
 }
 
-static void vio_dma_iommu_unmap_single(struct device *dev,
-				       dma_addr_t dma_handle, size_t size,
+static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+				     size_t size,
 				       enum dma_data_direction direction,
 				       struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 
-	dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs);
+	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
 	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
 }
@@ -603,10 +603,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 struct dma_mapping_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent  = vio_dma_iommu_free_coherent,
-	.map_single     = vio_dma_iommu_map_single,
-	.unmap_single   = vio_dma_iommu_unmap_single,
 	.map_sg         = vio_dma_iommu_map_sg,
 	.unmap_sg       = vio_dma_iommu_unmap_sg,
+	.map_page       = vio_dma_iommu_map_page,
+	.unmap_page     = vio_dma_iommu_unmap_page,
+
 };
 
 /**
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b39c27ed7919..2412c056baa4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -187,6 +187,7 @@ SECTIONS
 		*(.machine.desc)
 		__machine_desc_end = . ;
 	}
+#ifdef CONFIG_RELOCATABLE
 	. = ALIGN(8);
 	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
 	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
@@ -202,9 +203,7 @@ SECTIONS
 		__rela_dyn_start = .;
 		*(.rela*)
 	}
-
-	/* Fake ELF header containing RPA note; for addnote */
-	.fakeelf : AT(ADDR(.fakeelf) - LOAD_OFFSET) { *(.fakeelf) }
+#endif
 
 	/* freed after init ends here */
 	. = ALIGN(PAGE_SIZE);