author     Tim Abbott <tabbott@ksplice.com>         2009-09-20 18:14:15 -0400
committer  Sam Ravnborg <sam@ravnborg.org>          2009-09-21 00:27:08 -0400
commit     abe1ee3a221d53778c3e58747bbec6e518e5471b (patch)
tree       5a3f7ee7bbc93ac893f6c77dedfd93c4a7277f04 /arch
parent     02b7da37f7acd49277dea1481dc0c5c246c09732 (diff)
Use macros for .data.page_aligned section.
This patch changes the remaining direct references to .data.page_aligned
in C and assembly code to use the macros in include/linux/linkage.h.

Signed-off-by: Tim Abbott <tabbott@ksplice.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
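[Editorial note] For context, the helpers this patch switches to are the page-aligned
section macros in include/linux/linkage.h. Below is a minimal sketch of those
2009-era definitions; it is an illustration based on kernel headers of that
period, not a verbatim copy, so consult the in-tree header for the authoritative
text:

/*
 * Sketch of the page-aligned section helpers in include/linux/linkage.h.
 * Illustration only; the real header is authoritative.
 */

/* C form: place a variable in .data.page_aligned and page-align it. */
#define __page_aligned_data	__section(.data.page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss	__section(.bss.page_aligned) __aligned(PAGE_SIZE)

/*
 * Assembly form: switch the assembler to the page-aligned data section.
 * Callers still supply their own alignment directive (e.g. .balign PAGE_SIZE).
 */
#define __PAGE_ALIGNED_DATA	.section ".data.page_aligned", "aw"
#define __PAGE_ALIGNED_BSS	.section ".bss.page_aligned", "aw"

Typical usage matches the hunks below: in C, a declaration such as
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data; replaces the open-coded
__attribute__((section(".data.page_aligned"))), and in assembly files
__PAGE_ALIGNED_DATA replaces .section ".data.page_aligned" once
<linux/linkage.h> is included.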
Diffstat (limited to 'arch')
-rw-r--r--  arch/avr32/mm/init.c                        | 4
-rw-r--r--  arch/powerpc/kernel/vdso.c                  | 3
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32_wrapper.S | 3
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64_wrapper.S | 3
-rw-r--r--  arch/s390/kernel/vdso.c                     | 2
-rw-r--r--  arch/s390/kernel/vdso32/vdso32_wrapper.S    | 3
-rw-r--r--  arch/s390/kernel/vdso64/vdso64_wrapper.S    | 3
-rw-r--r--  arch/x86/include/asm/cache.h                | 4
-rw-r--r--  arch/x86/kernel/head_32.S                   | 2
9 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index e819fa69a90e..cc60d10cf8f7 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -24,11 +24,9 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 
-#define __page_aligned	__attribute__((section(".data.page_aligned")))
-
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned;
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
 
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index a0abce251d0a..3faaf29bdb29 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -1,3 +1,4 @@
+
 /*
  * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
  *                    <benh@kernel.crashing.org>
@@ -74,7 +75,7 @@ static int vdso_ready;
 static union {
 	struct vdso_data	data;
 	u8			page[PAGE_SIZE];
-} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+} vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = &vdso_data_store.data;
 
 /* Format of the patch table */
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 556f0caa5d84..6e8f507ed32b 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-	.section ".data.page_aligned"
+	__PAGE_ALIGNED_DATA
 
 	.globl vdso32_start, vdso32_end
 	.balign PAGE_SIZE
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 0529cb9e3b97..b8553d62b792 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-	.section ".data.page_aligned"
+	__PAGE_ALIGNED_DATA
 
 	.globl vdso64_start, vdso64_end
 	.balign PAGE_SIZE
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 45e1708b70fd..45a3e9a7ae21 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -75,7 +75,7 @@ __setup("vdso=", vdso_setup);
 static union {
 	struct vdso_data	data;
 	u8			page[PAGE_SIZE];
-} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+} vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = &vdso_data_store.data;
 
 /*
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S
index 61639a89e70b..ae42f8ce350b 100644
--- a/arch/s390/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-	.section ".data.page_aligned"
+	__PAGE_ALIGNED_DATA
 
 	.globl vdso32_start, vdso32_end
 	.balign PAGE_SIZE
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S
index d8e2ac14d564..c245842b516f 100644
--- a/arch/s390/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-	.section ".data.page_aligned"
+	__PAGE_ALIGNED_DATA
 
 	.globl vdso64_start, vdso64_end
 	.balign PAGE_SIZE
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 5d367caa0e36..549860d3be8f 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_CACHE_H
 #define _ASM_X86_CACHE_H
 
+#include <linux/linkage.h>
+
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
@@ -13,7 +15,7 @@
 #ifdef CONFIG_SMP
 #define __cacheline_aligned_in_smp					\
 	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))	\
-	__attribute__((__section__(".data.page_aligned")))
+	__page_aligned_data
 #endif
 #endif
 
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 1dac23958427..218aad7ee76e 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -626,7 +626,7 @@ ENTRY(empty_zero_page)
  * This starts the data section.
  */
 #ifdef CONFIG_X86_PAE
-.section ".data.page_aligned","wa"
+__PAGE_ALIGNED_DATA
 	/* Page-aligned for the benefit of paravirt? */
 	.align PAGE_SIZE_asm
 ENTRY(swapper_pg_dir)