author     Takashi Iwai <tiwai@suse.de>  2010-06-02 08:18:06 -0400
committer  Takashi Iwai <tiwai@suse.de>  2010-06-02 08:18:06 -0400
commit     c7a441bba9de3b4e166b6a4449208bc906d70558 (patch)
tree       346fdf11e464c8201a9aaa8abdd1c1b6dc4f86e0 /include
parent     ead54d878465291746c91c95749990d62742a6cf (diff)
parent     e4caa8bab3862a7694ab7c6dfede223227ad7fc5 (diff)
Merge branch 'fix/hda' into for-linus
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/percpu.h         10
-rw-r--r--  include/asm-generic/vmlinux.lds.h    38
-rw-r--r--  include/drm/i915_drm.h                5
-rw-r--r--  include/linux/cache.h                 2
-rw-r--r--  include/linux/init.h                  2
-rw-r--r--  include/linux/init_task.h             2
-rw-r--r--  include/linux/linkage.h               8
-rw-r--r--  include/linux/netfilter/x_tables.h    2
-rw-r--r--  include/linux/percpu-defs.h           4
-rw-r--r--  include/linux/skbuff.h                2
-rw-r--r--  include/linux/spinlock.h              2
-rw-r--r--  include/net/sock.h                   15
12 files changed, 41 insertions, 51 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7b..b5043a9890d8 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
 
 #ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
 #define PER_CPU_BASE_SECTION ".data"
 #endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
 #define PER_CPU_ALIGNED_SECTION ""
 #else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #define PER_CPU_FIRST_SECTION ""
 
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef779c6fc3d7..48c5299cbf26 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
 #define NOSAVE_DATA \
         . = ALIGN(PAGE_SIZE); \
         VMLINUX_SYMBOL(__nosave_begin) = .; \
-        *(.data.nosave) \
+        *(.data..nosave) \
         . = ALIGN(PAGE_SIZE); \
         VMLINUX_SYMBOL(__nosave_end) = .;
 
 #define PAGE_ALIGNED_DATA(page_align) \
         . = ALIGN(page_align); \
-        *(.data.page_aligned)
+        *(.data..page_aligned)
 
 #define READ_MOSTLY_DATA(align) \
         . = ALIGN(align); \
-        *(.data.read_mostly)
+        *(.data..read_mostly)
 
 #define CACHELINE_ALIGNED_DATA(align) \
         . = ALIGN(align); \
-        *(.data.cacheline_aligned)
+        *(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align) \
         . = ALIGN(align); \
-        *(.data.init_task)
+        *(.data..init_task)
 
 /*
  * Read only Data
@@ -435,7 +435,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align) \
         . = ALIGN(align); \
-        .data.init_task : { \
+        .data..init_task : { \
                 INIT_TASK_DATA(align) \
         }
 
@@ -499,7 +499,7 @@
 #define BSS(bss_align) \
         . = ALIGN(bss_align); \
         .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
-                *(.bss.page_aligned) \
+                *(.bss..page_aligned) \
                 *(.dynbss) \
                 *(.bss) \
                 *(COMMON) \
@@ -666,16 +666,16 @@
  */
 #define PERCPU_VADDR(vaddr, phdr) \
         VMLINUX_SYMBOL(__per_cpu_load) = .; \
-        .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                 - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__per_cpu_start) = .; \
-                *(.data.percpu.first) \
-                *(.data.percpu.page_aligned) \
-                *(.data.percpu) \
-                *(.data.percpu.shared_aligned) \
+                *(.data..percpu..first) \
+                *(.data..percpu..page_aligned) \
+                *(.data..percpu) \
+                *(.data..percpu..shared_aligned) \
                 VMLINUX_SYMBOL(__per_cpu_end) = .; \
         } phdr \
-        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
  *
  * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
  * that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
  * configuration.
  */
 #define PERCPU(align) \
         . = ALIGN(align); \
-        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+        .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__per_cpu_load) = .; \
                 VMLINUX_SYMBOL(__per_cpu_start) = .; \
-                *(.data.percpu.first) \
-                *(.data.percpu.page_aligned) \
-                *(.data.percpu) \
-                *(.data.percpu.shared_aligned) \
+                *(.data..percpu..first) \
+                *(.data..percpu..page_aligned) \
+                *(.data..percpu) \
+                *(.data..percpu..shared_aligned) \
                 VMLINUX_SYMBOL(__per_cpu_end) = .; \
         }
 
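For context, the __per_cpu_load/__per_cpu_start/__per_cpu_end symbols emitted by the PERCPU()/PERCPU_VADDR() macros above bracket the static per-CPU template in the renamed .data..percpu output section. A minimal sketch (not part of this diff, simplified from how the percpu setup code measures the template) of consuming those linker symbols from C:

    /* Sketch only: linker-provided symbols bracketing .data..percpu. */
    extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];

    static size_t percpu_static_size(void)
    {
            /* Size of the compiled-in per-CPU template copied for each CPU. */
            return __per_cpu_end - __per_cpu_start;
    }
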
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7cdf6d..7f0028e1010b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -275,6 +275,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_OVERLAY           7
 #define I915_PARAM_HAS_PAGEFLIPPING      8
 #define I915_PARAM_HAS_EXECBUF2          9
+#define I915_PARAM_HAS_BSD              10
 
 typedef struct drm_i915_getparam {
         int param;
@@ -616,7 +617,9 @@ struct drm_i915_gem_execbuffer2 {
         __u32 num_cliprects;
         /** This is a struct drm_clip_rect *cliprects */
         __u64 cliprects_ptr;
-        __u64 flags; /* currently unused */
+#define I915_EXEC_RENDER                 (1<<0)
+#define I915_EXEC_BSD                    (1<<1)
+        __u64 flags;
         __u64 rsvd1;
         __u64 rsvd2;
 };
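As a hypothetical illustration of the new ABI bits above (not part of this diff), userspace would first probe I915_PARAM_HAS_BSD and then select the BSD ring through the now-used flags field of execbuffer2. A rough sketch, assuming libdrm's drmIoctl() and an already-open DRM fd; buffer and relocation setup are omitted:

    /* Sketch only: probe for BSD-ring support, then target it in execbuf2. */
    static void submit_on_bsd_if_available(int fd)
    {
            int has_bsd = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_HAS_BSD,
                    .value = &has_bsd,
            };
            struct drm_i915_gem_execbuffer2 execbuf = { 0 };

            drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
            execbuf.flags = has_bsd ? I915_EXEC_BSD : I915_EXEC_RENDER;
            drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }
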
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c6..4c570653ab84 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned \
   __attribute__((__aligned__(SMP_CACHE_BYTES), \
-                 __section__(".data.cacheline_aligned")))
+                 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
diff --git a/include/linux/init.h b/include/linux/init.h
index ab1d31f9352b..de994304e0bb 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -301,7 +301,7 @@ void __init parse_early_options(char *cmdline);
 #endif
 
 /* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data.nosave)
+#define __nosavedata __section(.data..nosave)
 
 /* This means "can be init if no module support, otherwise module load
    may call it." */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2beaa13492be..1f43fa56f600 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -183,7 +183,7 @@ extern struct cred init_cred;
 }
 
 /* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data.init_task")))
+#define __init_task_data __attribute__((__section__(".data..init_task")))
 
 
 #endif
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5126cceb6ae9..7135ebc8428c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -18,8 +18,8 @@
 # define asmregparm
 #endif
 
-#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
 
 /*
  * For assembly routines.
@@ -27,8 +27,8 @@
  * Note when using these that you must specify the appropriate
  * alignment directives yourself
  */
-#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw"
-#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw"
+#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw"
 
 /*
  * This is used by architectures to keep arguments on the stack
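A hedged usage sketch (hypothetical objects, not from this diff): the __page_aligned_data and __page_aligned_bss markers keep placing page-aligned objects in dedicated sections; only the section names change with this series.

    /* Sketch only: after the rename these land in .data..page_aligned
     * and .bss..page_aligned respectively. */
    static unsigned long demo_table[512] __page_aligned_data = { 1 };
    static char demo_scratch[PAGE_SIZE] __page_aligned_bss;
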
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c00cc0c4d0b7..24e5d01d27d0 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -397,7 +397,7 @@ struct xt_table_info {
          * @stacksize jumps (number of user chains) can possibly be made.
          */
         unsigned int stacksize;
-        unsigned int *stackptr;
+        unsigned int __percpu *stackptr;
         void ***jumpstack;
         /* ipt_entry tables: one per CPU */
         /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
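For context on the __percpu annotation added above: it lets sparse check that stackptr is only dereferenced through the per-CPU accessors. A minimal, hypothetical sketch (not the actual ip_tables traversal code):

    /* Sketch only: access the per-CPU stack pointer via this_cpu_ptr(). */
    static unsigned int current_stack_depth(const struct xt_table_info *info)
    {
            return *this_cpu_ptr(info->stackptr);
    }
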
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68567c0b3a5d..ce2dc655cd1d 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -131,11 +131,11 @@
  * Declaration/definition used for per-CPU variables that must be page aligned.
  */
 #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
-        DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \
+        DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \
         __aligned(PAGE_SIZE)
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-        DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \
+        DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
         __aligned(PAGE_SIZE)
 
 /*
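A hedged usage sketch (hypothetical variable, not from this diff): the "..page_aligned" suffix above is appended to the per-CPU base section, so under SMP the variable is emitted into .data..percpu..page_aligned.

    /* Sketch only: a page-aligned per-CPU scratch page. */
    struct demo_scratch_page {
            char data[PAGE_SIZE];
    };
    DEFINE_PER_CPU_PAGE_ALIGNED(struct demo_scratch_page, demo_scratch);
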
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7cdfb4d52847..bf243fc54959 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -501,7 +501,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
         return __alloc_skb(size, priority, 1, -1);
 }
 
-extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 89fac6a3f78b..f8854655860e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -60,7 +60,7 @@
 /*
  * Must define these before including other files, inline functions need them
  */
-#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
 
 #define LOCK_SECTION_START(extra) \
         ".subsection 1\n\t" \
diff --git a/include/net/sock.h b/include/net/sock.h
index ca241ea14875..731150d52799 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1524,20 +1524,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
-{
-        /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-           number of warnings when compiling with -W --ANK
-         */
-        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-            (unsigned)sk->sk_rcvbuf)
-                return -ENOMEM;
-        skb_set_owner_r(skb, sk);
-        skb_queue_tail(&sk->sk_error_queue, skb);
-        if (!sock_flag(sk, SOCK_DEAD))
-                sk->sk_data_ready(sk, skb->len);
-        return 0;
-}
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  * Recover an error report and clear atomically
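For reference, sock_queue_err_skb() is only un-inlined here; a hedged sketch of the equivalent out-of-line logic, based on the inline body removed above (the real version, presumably in net/core, may differ in detail):

    /* Sketch only: mirrors the inline body deleted in this hunk. */
    int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
    {
            if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned int)sk->sk_rcvbuf)
                    return -ENOMEM;

            skb_set_owner_r(skb, sk);
            skb_queue_tail(&sk->sk_error_queue, skb);
            if (!sock_flag(sk, SOCK_DEAD))
                    sk->sk_data_ready(sk, skb->len);
            return 0;
    }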