Diffstat (limited to 'include/linux')
35 files changed, 374 insertions, 141 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 12e9a2957ca..b97cdc516a8 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -41,6 +41,7 @@ header-y += baycom.h
 header-y += bfs_fs.h
 header-y += blkpg.h
 header-y += bpqether.h
+header-y += bsg.h
 header-y += can.h
 header-y += cdk.h
 header-y += chio.h
@@ -89,7 +90,6 @@ header-y += if_ppp.h
 header-y += if_slip.h
 header-y += if_strip.h
 header-y += if_tun.h
-header-y += if_tunnel.h
 header-y += in_route.h
 header-y += ioctl.h
 header-y += ip6_tunnel.h
@@ -235,6 +235,7 @@ unifdef-y += if_phonet.h
 unifdef-y += if_pppol2tp.h
 unifdef-y += if_pppox.h
 unifdef-y += if_tr.h
+unifdef-y += if_tunnel.h
 unifdef-y += if_vlan.h
 unifdef-y += igmp.h
 unifdef-y += inet_diag.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6fce2fc2d12..d59f0fa4d77 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
 typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
 
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __init __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
 int acpi_boot_table_init (void);
diff --git a/include/linux/async.h b/include/linux/async.h
index c4ecacd0b32..68a9530196f 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -17,9 +17,11 @@ typedef u64 async_cookie_t;
 typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
 
 extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
-extern async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *list);
+extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+					    struct list_head *list);
 extern void async_synchronize_full(void);
-extern void async_synchronize_full_special(struct list_head *list);
+extern void async_synchronize_full_domain(struct list_head *list);
 extern void async_synchronize_cookie(async_cookie_t cookie);
-extern void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *list);
+extern void async_synchronize_cookie_domain(async_cookie_t cookie,
+					    struct list_head *list);
 
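The hunk above only renames the "special" async API to "domain". As a rough illustration of how the renamed calls are used (a sketch, not part of the patch; my_probe_domain, my_probe_one and my_probe_all are invented names), a driver that wants to wait only for its own asynchronous work could do:

#include <linux/async.h>
#include <linux/list.h>

static LIST_HEAD(my_probe_domain);	/* hypothetical per-driver domain list */

static void my_probe_one(void *data, async_cookie_t cookie)
{
	/* slow per-device probe work runs here */
}

static void my_probe_all(void **devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule_domain(my_probe_one, devs[i], &my_probe_domain);

	/* waits only for work queued on this domain, not the global list */
	async_synchronize_full_domain(&my_probe_domain);
}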
diff --git a/include/linux/ata.h b/include/linux/ata.h
index a53318b8cbd..08a86d5cdf1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -731,12 +731,17 @@ static inline int ata_id_current_chs_valid(const u16 *id)
 
 static inline int ata_id_is_cfa(const u16 *id)
 {
-	if (id[ATA_ID_CONFIG] == 0x848A)	/* Standard CF */
+	if (id[ATA_ID_CONFIG] == 0x848A)	/* Traditional CF */
 		return 1;
-	/* Could be CF hiding as standard ATA */
-	if (ata_id_major_version(id) >= 3 &&
-	    id[ATA_ID_COMMAND_SET_1] != 0xFFFF &&
-	   (id[ATA_ID_COMMAND_SET_1] & (1 << 2)))
+	/*
+	 * CF specs don't require specific value in the word 0 anymore and yet
+	 * they forbid to report the ATA version in the word 80 and require the
+	 * CFA feature set support to be indicated in the word 83 in this case.
+	 * Unfortunately, some cards only follow either of this requirements,
+	 * and while those that don't indicate CFA feature support need some
+	 * sort of quirk list, it seems impractical for the ones that do...
+	 */
+	if ((id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004)
 		return 1;
 	return 0;
 }
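For readers decoding the new test: 0xC004 masks bits 15, 14 and 2 of word 83 (ATA_ID_COMMAND_SET_2), and 0x4004 requires bit 15 clear with bit 14 set (word contents valid) plus bit 2 set (CFA feature set). A sketch that spells the same check out step by step (an illustration with an invented helper name, not kernel code):

#include <linux/ata.h>

static inline int my_id_cfa_via_word83(const u16 *id)
{
	u16 w83 = id[ATA_ID_COMMAND_SET_2];	/* IDENTIFY word 83 */

	return (w83 & 0xC000) == 0x4000 &&	/* bits 15:14 == 01b: word is valid */
	       (w83 & (1 << 2));		/* bit 2: CFA feature set supported */
}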
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 18462c5b8ff..2aa283ab062 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -144,7 +144,7 @@ struct bio {
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 *	Insert a serialization point in the IO queue, forcing previously
-*	submitted IO to be completed before this oen is issued.
+*	submitted IO to be completed before this one is issued.
 * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
 *	Note that this does NOT indicate that the IO itself is sync, just
 *	that the block layer will not postpone issue of this IO by plugging.
@@ -163,12 +163,33 @@ struct bio {
 #define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
 #define BIO_RW_BARRIER	2
-#define BIO_RW_SYNC	3
-#define BIO_RW_META	4
-#define BIO_RW_DISCARD	5
-#define BIO_RW_FAILFAST_DEV	6
-#define BIO_RW_FAILFAST_TRANSPORT	7
-#define BIO_RW_FAILFAST_DRIVER	8
+#define BIO_RW_SYNCIO	3
+#define BIO_RW_UNPLUG	4
+#define BIO_RW_META	5
+#define BIO_RW_DISCARD	6
+#define BIO_RW_FAILFAST_DEV	7
+#define BIO_RW_FAILFAST_TRANSPORT	8
+#define BIO_RW_FAILFAST_DRIVER	9
+
+#define BIO_RW_SYNC	(BIO_RW_SYNCIO | BIO_RW_UNPLUG)
+
+#define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))
+
+/*
+ * Old defines, these should eventually be replaced by direct usage of
+ * bio_rw_flagged()
+ */
+#define bio_barrier(bio)	bio_rw_flagged(bio, BIO_RW_BARRIER)
+#define bio_sync(bio)		bio_rw_flagged(bio, BIO_RW_SYNCIO)
+#define bio_unplug(bio)		bio_rw_flagged(bio, BIO_RW_UNPLUG)
+#define bio_failfast_dev(bio)	bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
+#define bio_failfast_transport(bio)	\
+	bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
+#define bio_failfast_driver(bio)	\
+	bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
+#define bio_rw_ahead(bio)	bio_rw_flagged(bio, BIO_RW_AHEAD)
+#define bio_rw_meta(bio)	bio_rw_flagged(bio, BIO_RW_META)
+#define bio_discard(bio)	bio_rw_flagged(bio, BIO_RW_DISCARD)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -193,15 +214,6 @@ struct bio {
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
-#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast_dev(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
-#define bio_failfast_transport(bio)	\
-	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
-#define bio_failfast_driver(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
-#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
-#define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
 #define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
 static inline unsigned int bio_cur_sectors(struct bio *bio)
@@ -312,7 +324,6 @@ struct bio_integrity_payload {
 	void		*bip_buf;	/* generated integrity data */
 	bio_end_io_t	*bip_end_io;	/* saved I/O completion fn */
 
-	int		bip_error;	/* saved I/O error */
 	unsigned int	bip_size;
 
 	unsigned short	bip_pool;	/* pool the ivec came from */
@@ -440,12 +451,13 @@ extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 #ifdef CONFIG_HIGHMEM
 /*
- * remember to add offset! and never ever reenable interrupts between a
- * bvec_kmap_irq and bvec_kunmap_irq!!
+ * remember never ever reenable interrupts between a bvec_kmap_irq and
+ * bvec_kunmap_irq!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
+					   unsigned long *flags)
 {
 	unsigned long addr;
 
@@ -461,7 +473,8 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 	return (char *) addr + bvec->bv_offset;
 }
 
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static __always_inline void bvec_kunmap_irq(char *buffer,
+					    unsigned long *flags)
 {
 	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
 
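With the old BIO_RW_SYNC bit split into BIO_RW_SYNCIO and BIO_RW_UNPLUG, callers set the two bits explicitly and test them through the new bio_rw_flagged() helper. A minimal sketch (illustrative only; mark_bio_sync() and bio_wants_priority() are invented names):

#include <linux/bio.h>

static void mark_bio_sync(struct bio *bio)
{
	/* synchronous I/O hint plus an immediate unplug of the queue */
	bio->bi_rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
}

static int bio_wants_priority(struct bio *bio)
{
	return bio_rw_flagged(bio, BIO_RW_SYNCIO) ||
	       bio_rw_flagged(bio, BIO_RW_META);
}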
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 044467ef7b1..dcaa0fd84b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,6 +108,7 @@ enum rq_flag_bits {
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
+	__REQ_UNPLUG,		/* unplug queue on submission */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -134,6 +135,7 @@ enum rq_flag_bits {
 #define REQ_RW_META	(1 << __REQ_RW_META)
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
+#define REQ_UNPLUG	(1 << __REQ_UNPLUG)
 
 #define BLK_MAX_CDB	16
 
@@ -449,6 +451,11 @@ struct request_queue
 #define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
 #define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
+				 (1 << QUEUE_FLAG_CLUSTER) |	\
+				 (1 << QUEUE_FLAG_STACKABLE))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -565,6 +572,7 @@ enum {
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
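QUEUE_FLAG_IO_STAT makes per-queue I/O accounting switchable, and QUEUE_FLAG_DEFAULT bundles it with the usual startup flags. A sketch of how this might be consumed (an assumption modelled on how the block core applies defaults, not taken from this diff; both function names are invented):

#include <linux/blkdev.h>

static void my_init_queue_flags(struct request_queue *q)
{
	/* start from the stock defaults, which now include IO_STAT */
	q->queue_flags = QUEUE_FLAG_DEFAULT;
}

static void my_account_request(struct request_queue *q, struct request *rq)
{
	if (!blk_queue_io_stat(q))
		return;		/* accounting disabled for this queue */
	/* ... update disk statistics for rq here ... */
}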
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e4e8e117d27..499900d0cee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -378,6 +378,7 @@ struct cgroup_subsys {
 	 * - initiating hotplug events
 	 */
 	struct mutex hierarchy_mutex;
+	struct lock_class_key subsys_key;
 
 	/*
 	 * Link to parent, and list entry in parent's children.
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index cea153697ec..3a1dbba4d3a 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -36,6 +36,7 @@ enum clock_event_nofitiers
 	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
 	CLOCK_EVT_NOTIFY_SUSPEND,
 	CLOCK_EVT_NOTIFY_RESUME,
+	CLOCK_EVT_NOTIFY_CPU_DYING,
 	CLOCK_EVT_NOTIFY_CPU_DEAD,
 };
 
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3bacd71509f..1f2e9020acc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -552,7 +552,12 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
 				     const struct crypto_type *frontend,
 				     u32 type, u32 mask);
 struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
-void crypto_free_tfm(struct crypto_tfm *tfm);
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
+
+static inline void crypto_free_tfm(struct crypto_tfm *tfm)
+{
+	return crypto_destroy_tfm(tfm, tfm);
+}
 
 int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
 
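Callers keep using the crypto_alloc_base()/crypto_free_tfm() pair; only the free path changes internally, now routing through crypto_destroy_tfm() with the memory block to release. A small usage sketch (the algorithm name and function are just examples):

#include <linux/crypto.h>
#include <linux/err.h>

static int my_use_transform(void)
{
	struct crypto_tfm *tfm = crypto_alloc_base("sha1", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... use the transform ... */
	crypto_free_tfm(tfm);	/* now expands to crypto_destroy_tfm(tfm, tfm) */
	return 0;
}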
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3e0f64c335c..3e68469c188 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -282,6 +282,18 @@ static inline void dmaengine_put(void)
 }
 #endif
 
+#ifdef CONFIG_NET_DMA
+#define net_dmaengine_get()	dmaengine_get()
+#define net_dmaengine_put()	dmaengine_put()
+#else
+static inline void net_dmaengine_get(void)
+{
+}
+static inline void net_dmaengine_put(void)
+{
+}
+#endif
+
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 5ca54d77079..7605c5e9589 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
 #endif
 }
 
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+	ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+	elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
 static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
 {
 #ifdef ELF_CORE_COPY_TASK_REGS
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 818fe21257e..31527e17076 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -960,6 +960,21 @@ extern struct fb_info *registered_fb[FB_MAX];
 extern int num_registered_fb;
 extern struct class *fb_class;
 
+static inline int lock_fb_info(struct fb_info *info)
+{
+	mutex_lock(&info->lock);
+	if (!info->fbops) {
+		mutex_unlock(&info->lock);
+		return 0;
+	}
+	return 1;
+}
+
+static inline void unlock_fb_info(struct fb_info *info)
+{
+	mutex_unlock(&info->lock);
+}
+
 static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 					   u8 *src, u32 s_pitch, u32 height)
 {
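lock_fb_info() returns 0 when the framebuffer has already been unregistered (info->fbops cleared under info->lock), so callers can bail out instead of touching a dead device. Typical caller shape (a sketch, not taken from the patch; the function name is invented):

#include <linux/fb.h>
#include <linux/errno.h>

static int my_fb_ioctl_like_path(struct fb_info *info)
{
	if (!lock_fb_info(info))
		return -ENODEV;		/* device went away */
	/* ... safely dereference info->fbops, info->var, ... */
	unlock_fb_info(info);
	return 0;
}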
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f1d2fba19ea..03be7f29ca0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,7 +33,8 @@ unsigned long hugetlb_total_pages(void);
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
 int hugetlb_reserve_pages(struct inode *inode, long from, long to,
-						struct vm_area_struct *vma);
+						struct vm_area_struct *vma,
+						int acctflags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 
 extern unsigned long hugepages_treat_as_movable;
@@ -138,7 +139,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 
 extern const struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t);
+struct file *hugetlb_file_setup(const char *name, size_t, int);
 int hugetlb_get_quota(struct address_space *mapping, long delta);
 void hugetlb_put_quota(struct address_space *mapping, long delta);
 
@@ -158,9 +159,9 @@ static inline void set_file_hugepages(struct file *file)
 }
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)		0
 #define set_file_hugepages(file)	BUG()
-#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)
+#define hugetlb_file_setup(name,size,acctflag)	ERR_PTR(-ENOSYS)
 
 #endif /* !CONFIG_HUGETLBFS */
 
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index aeab2cb32a9..82c43624c06 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -2,7 +2,10 @@
 #define _IF_TUNNEL_H_
 
 #include <linux/types.h>
+
+#ifdef __KERNEL__
 #include <linux/ip.h>
+#endif
 
 #define SIOCGETTUNNEL   (SIOCDEVPRIVATE + 0)
 #define SIOCADDTUNNEL   (SIOCDEVPRIVATE + 1)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index ea0ea1a4c36..e752d973fa2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -48,12 +48,11 @@ extern struct fs_struct init_fs;
 	.posix_timers	= LIST_HEAD_INIT(sig.posix_timers),		\
 	.cpu_timers	= INIT_CPU_TIMERS(sig.cpu_timers),		\
 	.rlim		= INIT_RLIMITS,					\
-	.cputime	= { .totals = {					\
-		.utime = cputime_zero,					\
-		.stime = cputime_zero,					\
-		.sum_exec_runtime = 0,					\
-		.lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock),	\
-	}, },								\
+	.cputimer	= {						\
+		.cputime = INIT_CPUTIME,				\
+		.running = 0,						\
+		.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
+	},								\
 }
 
 extern struct nsproxy init_nsproxy;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9127f6b51a3..472f11765f6 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f899b502f18..27a67536511 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -182,11 +182,11 @@ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
+	cpumask_var_t		affinity;
 	unsigned int		cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @cpu:	cpu which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+								bool boot)
+{
+	int node;
+
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	node = cpu_to_node(cpu);
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Insures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASKS_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+								bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif	/* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
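The new helpers are meant to be called wherever an irq_desc is created or moved. A condensed sketch of a caller (modelled on the kernel/irq code this supports, so treat the surrounding functions as assumptions):

#include <linux/irq.h>

static bool my_setup_irq_desc(struct irq_desc *desc, int cpu, bool early_boot)
{
	/* allocates affinity (and pending_mask) when cpumasks are off-stack */
	if (!init_alloc_desc_masks(desc, cpu, early_boot))
		return false;
	/* ... remaining descriptor initialisation ... */
	return true;
}

static void my_move_irq_desc(struct irq_desc *old_desc, struct irq_desc *new_desc)
{
	/* no-op when the cpumasks are embedded in the descriptor itself */
	init_copy_desc_masks(old_desc, new_desc);
}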
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 86af92e9e84..887477bc2ab 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)		\
 	for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b45109c61fb..b28b37eb11c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buffer_head *bh);
 		int val = (expr);				\
 		if (!val) {					\
 			printk(KERN_ERR				\
-				"EXT3-fs unexpected failure: %s;\n",# expr); \
+			       "JBD2 unexpected failure: %s: %s;\n", \
+			       __func__, #expr);		\
 			printk(KERN_ERR why "\n");		\
 		}						\
 		val;						\
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 343df9ef241..7fa371898e3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -480,7 +480,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 /*
  * swap - swap value of @a and @b
  */
-#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })
+#define swap(a, b) \
+	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
 
 /**
  * container_of - cast a member of a structure out to the containing structure
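The swap() change turns a GCC statement expression into a plain do/while statement, so it can no longer be (mis)used where a value is expected; ordinary statement use is unaffected. For illustration only (the helper is invented):

#include <linux/kernel.h>

static void my_sort2(int *a, int *b)
{
	if (*a > *b)
		swap(*a, *b);	/* expands to the do { ... } while (0) block */
}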
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bca3ba25f52..5d87bc09a1f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -380,6 +380,7 @@ enum {
 	ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
						    not multiple of 16 bytes */
 	ATA_HORKAGE_FIRMWARE_WARN = (1 << 12),	/* firwmare update warning */
+	ATA_HORKAGE_1_5_GBPS	= (1 << 13),	/* force 1.5 Gbps */
 
 	/* DMA mask for user DMA control: User visible values; DO NOT
	   renumber */
@@ -580,7 +581,7 @@ struct ata_device {
 	acpi_handle		acpi_handle;
 	union acpi_object	*gtf_cache;
 #endif
-	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
+	/* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
 	u64			n_sectors;	/* size of device, if ATA */
 	unsigned int		class;		/* ATA_DEV_xxx */
 	unsigned long		unpark_deadline;
@@ -605,20 +606,22 @@ struct ata_device {
 	u16			heads;		/* Number of heads */
 	u16			sectors;	/* Number of sectors per track */
 
-	/* error history */
-	int			spdn_cnt;
-	struct ata_ering	ering;
-
 	union {
 		u16		id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
 		u32		gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
 	};
+
+	/* error history */
+	int			spdn_cnt;
+	/* ering is CLEAR_END, read comment above CLEAR_END */
+	struct ata_ering	ering;
 };
 
-/* Offset into struct ata_device.  Fields above it are maintained
- * acress device init.  Fields below are zeroed.
+/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
+ * cleared to zero on ata_dev_init().
 */
-#define ATA_DEVICE_CLEAR_OFFSET	offsetof(struct ata_device, n_sectors)
+#define ATA_DEVICE_CLEAR_BEGIN	offsetof(struct ata_device, n_sectors)
+#define ATA_DEVICE_CLEAR_END	offsetof(struct ata_device, ering)
 
 struct ata_eh_info {
 	struct ata_device	*dev;		/* offending device */
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 0b4df7eba85..5b4e28bcb78 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -49,4 +49,5 @@
 #define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
 #define INOTIFYFS_SUPER_MAGIC	0x2BAD1DEA
 
+#define STACK_END_MAGIC		0x57AC6E9D
 #endif /* __LINUX_MAGIC_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e8ddc98b840..7dc04ff5ab8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1129,8 +1129,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long flag, unsigned long pgoff);
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long flags,
-	unsigned int vm_flags, unsigned long pgoff,
-	int accountable);
+	unsigned int vm_flags, unsigned long pgoff);
 
 static inline unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
@@ -1305,5 +1304,6 @@ void vmemmap_populate_print_last(void);
 
 extern void *alloc_locked_buffer(size_t size);
 extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 4f7ea12463d..145a75528cc 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol);
 
 #endif
 
-struct module_ref
-{
-	local_t count;
-} ____cacheline_aligned;
-
 enum module_state
 {
 	MODULE_STATE_LIVE,
@@ -344,8 +339,11 @@ struct module
 	/* Destruction function. */
 	void (*exit)(void);
 
-	/* Reference counts */
-	struct module_ref ref[NR_CPUS];
+#ifdef CONFIG_SMP
+	char *refptr;
+#else
+	local_t ref;
+#endif
 #endif
 };
 #ifndef MODULE_ARCH_INIT
@@ -395,13 +393,21 @@ void __symbol_put(const char *symbol);
 #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
 void symbol_put_addr(void *addr);
 
+static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+{
+#ifdef CONFIG_SMP
+	return (local_t *) (mod->refptr + per_cpu_offset(cpu));
+#else
+	return &mod->ref;
+#endif
+}
+
 /* Sometimes we know we already have a refcount, and it's easier not
    to handle the error case (which only happens with rmmod --wait). */
 static inline void __module_get(struct module *module)
 {
 	if (module) {
-		BUG_ON(module_refcount(module) == 0);
-		local_inc(&module->ref[get_cpu()].count);
+		local_inc(__module_ref_addr(module, get_cpu()));
 		put_cpu();
 	}
 }
@@ -413,7 +419,7 @@ static inline int try_module_get(struct module *module)
 	if (module) {
 		unsigned int cpu = get_cpu();
 		if (likely(module_is_live(module)))
-			local_inc(&module->ref[cpu].count);
+			local_inc(__module_ref_addr(module, cpu));
 		else
 			ret = 0;
 		put_cpu();
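With the refcount moved behind __module_ref_addr() (dynamically allocated per-CPU memory on SMP, a single local_t otherwise), reading the total count means summing every CPU's slot. A sketch of that read side, modelled on what kernel/module.c would have to do (an assumption; the function name is ours and, like __module_ref_addr(), it only makes sense with CONFIG_MODULE_UNLOAD):

#include <linux/module.h>
#include <linux/cpumask.h>
#include <asm/local.h>

static unsigned int my_module_refcount(struct module *mod)
{
	unsigned int total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += local_read(__module_ref_addr(mod, cpu));
	return total;
}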
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 48890cf3f96..7bd624bfdcf 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -684,7 +684,7 @@ int pci_enable_rom(struct pci_dev *pdev);
 void pci_disable_rom(struct pci_dev *pdev);
 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-size_t pci_get_rom_size(void __iomem *rom, size_t size);
+size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
 
 /* Power management related routines */
 int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index febc10ed385..52a9fe08451 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2425,6 +2425,7 @@
 #define PCI_DEVICE_ID_INTEL_ICH7_0	0x27b8
 #define PCI_DEVICE_ID_INTEL_ICH7_1	0x27b9
 #define PCI_DEVICE_ID_INTEL_ICH7_30	0x27b0
+#define PCI_DEVICE_ID_INTEL_TGP_LPC	0x27bc
 #define PCI_DEVICE_ID_INTEL_ICH7_31	0x27bd
 #define PCI_DEVICE_ID_INTEL_ICH7_17	0x27da
 #define PCI_DEVICE_ID_INTEL_ICH7_19	0x27dd
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873..3577ffd90d4 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -8,35 +8,46 @@
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name)					\
-	__attribute__((__section__(".data.percpu")))			\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifdef CONFIG_SMP
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
-	__attribute__((__section__(SHARED_ALIGNED_SECTION)))		\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name		\
-	____cacheline_aligned_in_smp
+#else
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
-	__attribute__((__section__(".data.percpu.page_aligned")))	\
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
+#define DEFINE_PER_CPU_SECTION(type, name, section)			\
+	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
 	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
-#define DEFINE_PER_CPU(type, name)					\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
-	DEFINE_PER_CPU(type, name)
+#define DEFINE_PER_CPU(type, name)					\
+	DEFINE_PER_CPU_SECTION(type, name, "")
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
-	DEFINE_PER_CPU(type, name)
-#endif
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
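The DEFINE_PER_CPU_* family now funnels through DEFINE_PER_CPU_SECTION, which only selects the linker section; declaring and using a per-CPU variable looks the same as before. For instance (an illustrative sketch with made-up names):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_event_count);

static void my_count_event(void)
{
	get_cpu_var(my_event_count)++;	/* disables preemption, touches this CPU's copy */
	put_cpu_var(my_event_count);
}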
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a7c7638873..f0a50b20e8a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -453,23 +453,33 @@ struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
-	spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp	stime
 #define virt_exp	utime
 #define sched_exp	sum_exec_runtime
 
+#define INIT_CPUTIME	\
+	(struct task_cputime) {					\
+		.utime = cputime_zero,				\
+		.stime = cputime_zero,				\
+		.sum_exec_runtime = 0,				\
+	}
+
 /**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals:		thread group interval timers; substructure for
- *			uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime:		thread group interval timers.
+ * @running:		non-zero when there are timers running and
+ *			@cputime receives updates.
+ * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
 */
-struct thread_group_cputime {
-	struct task_cputime totals;
+struct thread_group_cputimer {
+	struct task_cputime cputime;
+	int running;
+	spinlock_t lock;
 };
 
 /*
@@ -518,10 +528,10 @@ struct signal_struct {
 	cputime_t it_prof_incr, it_virt_incr;
 
 	/*
-	 * Thread group totals for process CPU clocks.
-	 * See thread_group_cputime(), et al, for details.
+	 * Thread group totals for process CPU timers.
+	 * See thread_group_cputimer(), et al, for details.
 	 */
-	struct thread_group_cputime cputime;
+	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
 	struct task_cputime cputime_expires;
@@ -558,7 +568,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t cutime, cstime;
+	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -567,6 +577,14 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
+	 * Cumulative ns of schedule CPU time fo dead threads in the
+	 * group, not including a zombie group leader, (This only differs
+	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
+	 * other than jiffies.)
+	 */
+	unsigned long long sum_sched_runtime;
+
+	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -1160,10 +1178,9 @@ struct task_struct {
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -2069,6 +2086,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do { 	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -2182,27 +2212,14 @@ static inline int spin_needbreak(spinlock_t *lock)
 /*
  * Thread group CPU time accounting.
  */
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
-	struct task_cputime *totals = &tsk->signal->cputime.totals;
-	unsigned long flags;
-
-	spin_lock_irqsave(&totals->lock, flags);
-	*times = *totals;
-	spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = (struct task_cputime){
-		.utime = cputime_zero,
-		.stime = cputime_zero,
-		.sum_exec_runtime = 0,
-	};
-
-	spin_lock_init(&sig->cputime.totals.lock);
+	sig->cputimer.cputime = INIT_CPUTIME;
+	spin_lock_init(&sig->cputimer.lock);
+	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
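thread_group_cputime() is now out of line and the cached totals live in signal->cputimer; readers still just take a task_cputime snapshot. Sketch of a reader (an assumed caller, not from the diff):

#include <linux/sched.h>

static void my_report_group_times(struct task_struct *tsk)
{
	struct task_cputime times;

	thread_group_cputime(tsk, &times);
	/* times.utime, times.stime and times.sum_exec_runtime now hold the totals */
}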
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 39c3a5eb8eb..6ca6a7b66d7 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -43,10 +43,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			i++;
 #include <linux/kmalloc_sizes.h>
 #undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
+		return NULL;
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
@@ -77,10 +74,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			i++;
 #include <linux/kmalloc_sizes.h>
 #undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
+		return NULL;
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d6..bbacb7baa44 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
 #define put_cpu()		preempt_enable()
 #define put_cpu_no_resched()	preempt_enable_no_resched()
 
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
+
 void smp_setup_processor_id(void);
 
 #endif /* __LINUX_SMP_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fccced4..a0c66a2e00a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -124,7 +124,12 @@ do { \
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define spin_is_contended(lock) ((lock)->break_lock)
 #else
+
+#ifdef __raw_spin_is_contended
 #define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#else
+#define spin_is_contended(lock)	(((void)(lock), 0))
+#endif /*__raw_spin_is_contended*/
 #endif
 
 /**
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644
index 00000000000..6f3e54c704c
--- /dev/null
+++ b/include/linux/stackprotector.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0eda02ff241..f9f900cfd06 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -95,13 +95,13 @@ struct old_linux_dirent;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
 #define SYSCALL_DEFINE0(name)	   asmlinkage long sys_##name(void)
-#define SYSCALL_DEFINE1(...)	   SYSCALL_DEFINEx(1, __VA_ARGS__)
-#define SYSCALL_DEFINE2(...)	   SYSCALL_DEFINEx(2, __VA_ARGS__)
-#define SYSCALL_DEFINE3(...)	   SYSCALL_DEFINEx(3, __VA_ARGS__)
-#define SYSCALL_DEFINE4(...)	   SYSCALL_DEFINEx(4, __VA_ARGS__)
-#define SYSCALL_DEFINE5(...)	   SYSCALL_DEFINEx(5, __VA_ARGS__)
-#define SYSCALL_DEFINE6(...)	   SYSCALL_DEFINEx(6, __VA_ARGS__)
+#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
 
 #ifdef CONFIG_PPC64
 #define SYSCALL_ALIAS(alias, name)	\
@@ -121,21 +121,21 @@ struct old_linux_dirent;
 
 #define SYSCALL_DEFINE(name) static inline long SYSC_##name
 #define SYSCALL_DEFINEx(x, name, ...)				\
-	asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__));	\
-	static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__)); \
-	asmlinkage long SyS_##name(__SC_LONG##x(__VA_ARGS__))	\
+	asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__));	\
+	static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
+	asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__))	\
 	{							\
 		__SC_TEST##x(__VA_ARGS__);			\
-		return (long) SYSC_##name(__SC_CAST##x(__VA_ARGS__)); \
+		return (long) SYSC##name(__SC_CAST##x(__VA_ARGS__)); \
 	}							\
-	SYSCALL_ALIAS(sys_##name, SyS_##name);			\
-	static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__))
+	SYSCALL_ALIAS(sys##name, SyS##name);			\
+	static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__))
 
 #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
 
 #define SYSCALL_DEFINE(name) asmlinkage long sys_##name
 #define SYSCALL_DEFINEx(x, name, ...)				\
-	asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__))
+	asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
 
 #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
 
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e632d29f054..a16b9e06f2e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)	cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)	cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)	cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ef609f842fa..a210ede73b5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -132,6 +132,8 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
 	list_del(&old->task_list);
 }
 
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int sync, void *key);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
 extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -333,16 +335,19 @@ do { \
 	for (;;) {						\
 		prepare_to_wait_exclusive(&wq, &__wait,		\
 					TASK_INTERRUPTIBLE);	\
-		if (condition)					\
+		if (condition) {				\
+			finish_wait(&wq, &__wait);		\
 			break;					\
+		}						\
 		if (!signal_pending(current)) {			\
 			schedule();				\
 			continue;				\
 		}						\
 		ret = -ERESTARTSYS;				\
+		abort_exclusive_wait(&wq, &__wait,		\
+				TASK_INTERRUPTIBLE, NULL);	\
 		break;						\
 	}							\
-	finish_wait(&wq, &__wait);				\
 } while (0)
 
 #define wait_event_interruptible_exclusive(wq, condition)	\
@@ -431,6 +436,8 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+			unsigned int mode, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 