Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ahci_platform.h                   5
-rw-r--r--  include/linux/bio.h                            13
-rw-r--r--  include/linux/blk-mq.h                          2
-rw-r--r--  include/linux/blkdev.h                          3
-rw-r--r--  include/linux/cgroup.h                        165
-rw-r--r--  include/linux/clk-provider.h                    5
-rw-r--r--  include/linux/clk/clk-conf.h                   20
-rw-r--r--  include/linux/cpufreq.h                         4
-rw-r--r--  include/linux/crypto.h                          8
-rw-r--r--  include/linux/elevator.h                        3
-rw-r--r--  include/linux/fs.h                              8
-rw-r--r--  include/linux/ftrace.h                         68
-rw-r--r--  include/linux/ftrace_event.h                    3
-rw-r--r--  include/linux/hugetlb.h                         1
-rw-r--r--  include/linux/init_task.h                       9
-rw-r--r--  include/linux/irq_work.h                        5
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h            200
-rw-r--r--  include/linux/kernfs.h                          2
-rw-r--r--  include/linux/kthread.h                        13
-rw-r--r--  include/linux/libata.h                          1
-rw-r--r--  include/linux/mlx4/device.h                     4
-rw-r--r--  include/linux/msi.h                             3
-rw-r--r--  include/linux/mutex.h                           8
-rw-r--r--  include/linux/nmi.h                            12
-rw-r--r--  include/linux/of_fdt.h                          3
-rw-r--r--  include/linux/of_mdio.h                         8
-rw-r--r--  include/linux/osq_lock.h                       27
-rw-r--r--  include/linux/page-flags.h                      3
-rw-r--r--  include/linux/pagemap.h                        12
-rw-r--r--  include/linux/pci.h                             4
-rw-r--r--  include/linux/pci_ids.h                         4
-rw-r--r--  include/linux/percpu-defs.h                   384
-rw-r--r--  include/linux/percpu-refcount.h                64
-rw-r--r--  include/linux/percpu.h                        673
-rw-r--r--  include/linux/phy.h                             9
-rw-r--r--  include/linux/platform_data/ata-samsung_cf.h    1
-rw-r--r--  include/linux/profile.h                         1
-rw-r--r--  include/linux/ptrace.h                          3
-rw-r--r--  include/linux/rcupdate.h                       91
-rw-r--r--  include/linux/regulator/consumer.h              5
-rw-r--r--  include/linux/rtmutex.h                         6
-rw-r--r--  include/linux/rwsem-spinlock.h                  8
-rw-r--r--  include/linux/rwsem.h                          34
-rw-r--r--  include/linux/sched.h                          22
-rw-r--r--  include/linux/seqlock.h                         2
-rw-r--r--  include/linux/socket.h                          4
-rw-r--r--  include/linux/sunrpc/sched.h                    2
-rw-r--r--  include/linux/suspend.h                         2
-rw-r--r--  include/linux/tick.h                           29
-rw-r--r--  include/linux/trace_seq.h                      36
-rw-r--r--  include/linux/uio.h                            19
-rw-r--r--  include/linux/usb_usual.h                       4
-rw-r--r--  include/linux/wait.h                          125
-rw-r--r--  include/linux/writeback.h                       3
54 files changed, 1145 insertions(+), 1008 deletions(-)
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 6dfd51a04d77..09a947e8bc87 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -43,10 +43,7 @@ struct ahci_host_priv *ahci_platform_get_resources(
 					struct platform_device *pdev);
 int ahci_platform_init_host(struct platform_device *pdev,
 			    struct ahci_host_priv *hpriv,
-			    const struct ata_port_info *pi_template,
-			    unsigned long host_flags,
-			    unsigned int force_port_map,
-			    unsigned int mask_port_map);
+			    const struct ata_port_info *pi_template);
 
 int ahci_platform_suspend_host(struct device *dev);
 int ahci_platform_resume_host(struct device *dev);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a645769f020..d2633ee099d9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
 	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
@@ -644,10 +653,6 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-
-
-#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
-
 #define bip_for_each_vec(bvl, bip, iter)				\
 	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
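
The new bvec_gap_to_prev() helper is pure offset arithmetic, so it can be exercised outside the kernel. A minimal user-space sketch, assuming a 4K page and a stand-in bio_vec (names mirror the hunk above, but this is illustrative code, not kernel code):

/* Illustrative only: mimics bvec_gap_to_prev() with a 4K page. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct bio_vec { unsigned int bv_offset, bv_len; };

static bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
{
	/* A gap exists if the new vec doesn't start at offset 0, or the
	 * previous vec doesn't end exactly on a page boundary. */
	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
}

int main(void)
{
	struct bio_vec prev = { .bv_offset = 0, .bv_len = PAGE_SIZE };
	printf("%d\n", bvec_gap_to_prev(&prev, 0));	/* 0: ends on a boundary, no gap */
	prev.bv_len = PAGE_SIZE / 2;
	printf("%d\n", bvec_gap_to_prev(&prev, 0));	/* 1: short vec leaves a gap */
	return 0;
}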
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a002cf191427..eb726b9c5762 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx {
 	unsigned int		nr_ctx;
 	struct blk_mq_ctx	**ctxs;
 
-	unsigned int		wait_index;
+	atomic_t		wait_index;
 
 	struct blk_mq_tags	*tags;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 31e11051f1ba..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -512,6 +512,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -920,7 +921,7 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 						  sector_t offset)
 {
 	if (!q->limits.chunk_sectors)
-		return q->limits.max_hw_sectors;
+		return q->limits.max_sectors;
 
 	return q->limits.chunk_sectors -
 			(offset & (q->limits.chunk_sectors - 1));
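
blk_max_size_offset() relies on chunk_sectors being a power of two: masking the offset with chunk_sectors - 1 gives the position inside the current chunk, and subtracting that from chunk_sectors gives the room left before the next boundary. A standalone sketch of the same arithmetic, with made-up values:

/* Illustrative only: distance to the next chunk boundary, power-of-two chunks. */
#include <stdio.h>

static unsigned int max_size_at(unsigned int chunk_sectors, unsigned long long offset)
{
	/* offset & (chunk - 1) is the position within the current chunk */
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

int main(void)
{
	printf("%u\n", max_size_at(8, 5));	/* 3: three sectors left in the chunk */
	printf("%u\n", max_size_at(8, 16));	/* 8: exactly on a boundary, full chunk */
	return 0;
}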
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8a111dd42d7a..b5223c570eba 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -203,7 +203,15 @@ struct cgroup {
 	struct kernfs_node *kn;		/* cgroup kernfs entry */
 	struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
 
-	/* the bitmask of subsystems enabled on the child cgroups */
+	/*
+	 * The bitmask of subsystems enabled on the child cgroups.
+	 * ->subtree_control is the one configured through
+	 * "cgroup.subtree_control" while ->child_subsys_mask is the
+	 * effective one which may have more subsystems enabled.
+	 * Controller knobs are made available iff it's enabled in
+	 * ->subtree_control.
+	 */
+	unsigned int subtree_control;
 	unsigned int child_subsys_mask;
 
 	/* Private pointers for each registered subsystem */
@@ -248,73 +256,9 @@ struct cgroup {
 
 /* cgroup_root->flags */
 enum {
-	/*
-	 * Unfortunately, cgroup core and various controllers are riddled
-	 * with idiosyncrasies and pointless options. The following flag,
-	 * when set, will force sane behavior - some options are forced on,
-	 * others are disallowed, and some controllers will change their
-	 * hierarchical or other behaviors.
-	 *
-	 * The set of behaviors affected by this flag are still being
-	 * determined and developed and the mount option for this flag is
-	 * prefixed with __DEVEL__. The prefix will be dropped once we
-	 * reach the point where all behaviors are compatible with the
-	 * planned unified hierarchy, which will automatically turn on this
-	 * flag.
-	 *
-	 * The followings are the behaviors currently affected this flag.
-	 *
-	 * - Mount options "noprefix", "xattr", "clone_children",
-	 *   "release_agent" and "name" are disallowed.
-	 *
-	 * - When mounting an existing superblock, mount options should
-	 *   match.
-	 *
-	 * - Remount is disallowed.
-	 *
-	 * - rename(2) is disallowed.
-	 *
-	 * - "tasks" is removed. Everything should be at process
-	 *   granularity. Use "cgroup.procs" instead.
-	 *
-	 * - "cgroup.procs" is not sorted. pids will be unique unless they
-	 *   got recycled inbetween reads.
-	 *
-	 * - "release_agent" and "notify_on_release" are removed.
-	 *   Replacement notification mechanism will be implemented.
-	 *
-	 * - "cgroup.clone_children" is removed.
-	 *
-	 * - "cgroup.subtree_populated" is available. Its value is 0 if
-	 *   the cgroup and its descendants contain no task; otherwise, 1.
-	 *   The file also generates kernfs notification which can be
-	 *   monitored through poll and [di]notify when the value of the
-	 *   file changes.
-	 *
-	 * - If mount is requested with sane_behavior but without any
-	 *   subsystem, the default unified hierarchy is mounted.
-	 *
-	 * - cpuset: tasks will be kept in empty cpusets when hotplug happens
-	 *   and take masks of ancestors with non-empty cpus/mems, instead of
-	 *   being moved to an ancestor.
-	 *
-	 * - cpuset: a task can be moved into an empty cpuset, and again it
-	 *   takes masks of ancestors.
-	 *
-	 * - memcg: use_hierarchy is on by default and the cgroup file for
-	 *   the flag is not created.
-	 *
-	 * - blkcg: blk-throttle becomes properly hierarchical.
-	 *
-	 * - debug: disallowed on the default hierarchy.
-	 */
-	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0),
-
+	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0), /* __DEVEL__sane_behavior specified */
 	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
 	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
-
-	/* mount options live below bit 16 */
-	CGRP_ROOT_OPTION_MASK	= (1 << 16) - 1,
 };
 
320/* 264/*
@@ -440,9 +384,11 @@ struct css_set {
 enum {
 	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
 	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
-	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
 	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
-	CFTYPE_ONLY_ON_DFL	= (1 << 4),	/* only on default hierarchy */
+
+	/* internal flags, do not use outside cgroup core proper */
+	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
+	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
 };
 
 #define MAX_CFTYPE_NAME		64
@@ -526,20 +472,64 @@ struct cftype {
 extern struct cgroup_root cgrp_dfl_root;
 extern struct css_set init_css_set;
 
+/**
+ * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
+ * @cgrp: the cgroup of interest
+ *
+ * The default hierarchy is the v2 interface of cgroup and this function
+ * can be used to test whether a cgroup is on the default hierarchy for
+ * cases where a subsystem should behave differnetly depending on the
+ * interface version.
+ *
+ * The set of behaviors which change on the default hierarchy are still
+ * being determined and the mount option is prefixed with __DEVEL__.
+ *
+ * List of changed behaviors:
+ *
+ * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
+ *   and "name" are disallowed.
+ *
+ * - When mounting an existing superblock, mount options should match.
+ *
+ * - Remount is disallowed.
+ *
+ * - rename(2) is disallowed.
+ *
+ * - "tasks" is removed. Everything should be at process granularity. Use
+ *   "cgroup.procs" instead.
+ *
+ * - "cgroup.procs" is not sorted. pids will be unique unless they got
+ *   recycled inbetween reads.
+ *
+ * - "release_agent" and "notify_on_release" are removed. Replacement
+ *   notification mechanism will be implemented.
+ *
+ * - "cgroup.clone_children" is removed.
+ *
+ * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
+ *   and its descendants contain no task; otherwise, 1. The file also
+ *   generates kernfs notification which can be monitored through poll and
+ *   [di]notify when the value of the file changes.
+ *
+ * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
+ *   take masks of ancestors with non-empty cpus/mems, instead of being
+ *   moved to an ancestor.
+ *
+ * - cpuset: a task can be moved into an empty cpuset, and again it takes
+ *   masks of ancestors.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for the flag
+ *   is not created.
+ *
+ * - blkcg: blk-throttle becomes properly hierarchical.
+ *
+ * - debug: disallowed on the default hierarchy.
+ */
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
 	return cgrp->root == &cgrp_dfl_root;
 }
 
-/*
- * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
- * function can be called as long as @cgrp is accessible.
- */
-static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
-{
-	return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
-}
-
 /* no synchronization, the result can only be used as a hint */
 static inline bool cgroup_has_tasks(struct cgroup *cgrp)
 {
@@ -602,7 +592,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 
 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 
-int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
@@ -634,6 +625,7 @@ struct cgroup_subsys {
 	int (*css_online)(struct cgroup_subsys_state *css);
 	void (*css_offline)(struct cgroup_subsys_state *css);
 	void (*css_free)(struct cgroup_subsys_state *css);
+	void (*css_reset)(struct cgroup_subsys_state *css);
 
 	int (*can_attach)(struct cgroup_subsys_state *css,
 			  struct cgroup_taskset *tset);
@@ -682,8 +674,21 @@ struct cgroup_subsys {
 	 */
 	struct list_head cfts;
 
-	/* base cftypes, automatically registered with subsys itself */
-	struct cftype *base_cftypes;
+	/*
+	 * Base cftypes which are automatically registered. The two can
+	 * point to the same array.
+	 */
+	struct cftype *dfl_cftypes;	/* for the default hierarchy */
+	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */
+
+	/*
+	 * A subsystem may depend on other subsystems. When such subsystem
+	 * is enabled on a cgroup, the depended-upon subsystems are enabled
+	 * together if available. Subsystems enabled due to dependency are
+	 * not visible to userland until explicitly enabled. The following
+	 * specifies the mask of subsystems that this one depends on.
+	 */
+	unsigned int depends_on;
 };
 
 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
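
Since dfl_cftypes and legacy_cftypes may point to the same array, a controller that behaves identically on both interfaces can register one table for both. A hypothetical wiring, sketched from the fields in the hunk above (the demo names are invented, and a real subsystem would also set css_alloc and the other callbacks):

/* Sketch: one cftype table exposed on both hierarchies. */
static struct cftype demo_files[] = {
	{ .name = "demo.stat" },
	{ }	/* sentinel terminates the array */
};

struct cgroup_subsys demo_cgrp_subsys = {
	.dfl_cftypes	= demo_files,	/* for the default hierarchy */
	.legacy_cftypes	= demo_files,	/* for the legacy hierarchies */
};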
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0c287dbbb144..411dd7eb2653 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -619,5 +619,10 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
 
 #endif	/* platform dependent I/O accessors */
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+				void *data, const struct file_operations *fops);
+#endif
+
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
new file mode 100644
index 000000000000..f3050e15f833
--- /dev/null
+++ b/include/linux/clk/clk-conf.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct device_node;
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
+#else
+static inline int of_clk_set_defaults(struct device_node *node,
+				      bool clk_supplier)
+{
+	return 0;
+}
+#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *********************************************************************/
 
 /* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID	~0
-#define CPUFREQ_TABLE_END	~1
+#define CPUFREQ_ENTRY_INVALID	~0u
+#define CPUFREQ_TABLE_END	~1u
 /* Special Values of .flags field */
 #define CPUFREQ_BOOST_FREQ	(1 << 0)
 
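
The added 'u' suffix matters because the .frequency field is an unsigned int: plain ~0 is a signed int with value -1, so comparing it against the field forces an implicit signed-to-unsigned conversion and trips -Wsign-compare, while ~0u is already UINT_MAX with the right type. A tiny user-space illustration (not kernel code):

/* Illustrative only: why the 'u' suffix matters for these sentinels. */
#include <stdio.h>

int main(void)
{
	unsigned int freq = ~0u;	/* UINT_MAX sentinel */

	/* freq == ~0 would compare unsigned against signed int (-1),
	 * converting implicitly and warning under -Wsign-compare;
	 * ~0u needs no conversion at all. */
	printf("%d\n", freq == ~0u);	/* 1 */
	return 0;
}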
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b92eadf92d72..d45e949699ea 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 
 static inline void ablkcipher_request_set_callback(
 	struct ablkcipher_request *req,
-	u32 flags, crypto_completion_t complete, void *data)
+	u32 flags, crypto_completion_t compl, void *data)
 {
-	req->base.complete = complete;
+	req->base.complete = compl;
 	req->base.data = data;
 	req->base.flags = flags;
 }
@@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req)
 
 static inline void aead_request_set_callback(struct aead_request *req,
 					     u32 flags,
-					     crypto_completion_t complete,
+					     crypto_completion_t compl,
 					     void *data)
 {
-	req->base.complete = complete;
+	req->base.complete = compl;
 	req->base.data = data;
 	req->base.flags = flags;
 }
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4ff262e2bf37..45a91474487d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
-extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
 			   struct bio *bio, gfp_t gfp_mask);
@@ -144,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int __init elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 338e6f758c6d..2daccaf4b547 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -833,7 +833,7 @@ static inline struct file *get_file(struct file *f)
  *
  * Lockd stuffs a "host" pointer into this.
  */
-typedef struct files_struct *fl_owner_t;
+typedef void *fl_owner_t;
 
 struct file_lock_operations {
 	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
@@ -1921,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 
 static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
+	/*
+	 * Since this check is lockless, we must ensure that any refcounts
+	 * taken are done before checking inode->i_flock. Otherwise, we could
+	 * end up racing with tasks trying to set a new lease on this file.
+	 */
+	smp_mb();
 	if (inode->i_flock)
 		return __break_lease(inode, mode, FL_DELEG);
 	return 0;
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 404a686a3644..6bb5e3f2a3b4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -33,8 +33,7 @@
  * features, then it must call an indirect function that
  * does. Or at least does enough to prevent any unwelcomed side effects.
  */
-#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
-	!ARCH_SUPPORTS_FTRACE_OPS
+#if !ARCH_SUPPORTS_FTRACE_OPS
 # define FTRACE_FORCE_LIST_FUNC 1
 #else
 # define FTRACE_FORCE_LIST_FUNC 0
@@ -118,17 +117,18 @@ struct ftrace_ops {
 	ftrace_func_t			func;
 	struct ftrace_ops		*next;
 	unsigned long			flags;
-	int __percpu			*disabled;
 	void				*private;
+	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
+	int				nr_trampolines;
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
+	struct ftrace_hash		*tramp_hash;
 	struct mutex			regex_lock;
+	unsigned long			trampoline;
 #endif
 };
 
-extern int function_trace_stop;
-
 /*
  * Type of the current tracing.
  */
@@ -140,32 +140,6 @@ enum ftrace_tracing_type_t {
 /* Current tracing type, default is FTRACE_TYPE_ENTER */
 extern enum ftrace_tracing_type_t ftrace_tracing_type;
 
-/**
- * ftrace_stop - stop function tracer.
- *
- * A quick way to stop the function tracer. Note this an on off switch,
- * it is not something that is recursive like preempt_disable.
- * This does not disable the calling of mcount, it only stops the
- * calling of functions from mcount.
- */
-static inline void ftrace_stop(void)
-{
-	function_trace_stop = 1;
-}
-
-/**
- * ftrace_start - start the function tracer.
- *
- * This function is the inverse of ftrace_stop. This does not enable
- * the function tracing if the function tracer is disabled. This only
- * sets the function tracer flag to continue calling the functions
- * from mcount.
- */
-static inline void ftrace_start(void)
-{
-	function_trace_stop = 0;
-}
-
 /*
  * The ftrace_ops must be a static and should also
  * be read_mostly. These functions do modify read_mostly variables
@@ -242,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void)
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
-static inline void ftrace_stop(void) { }
-static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -317,13 +289,20 @@ extern int ftrace_nr_registered_ops(void);
  * from tracing that function.
  */
 enum {
-	FTRACE_FL_ENABLED	= (1UL << 29),
+	FTRACE_FL_ENABLED	= (1UL << 31),
 	FTRACE_FL_REGS		= (1UL << 30),
-	FTRACE_FL_REGS_EN	= (1UL << 31)
+	FTRACE_FL_REGS_EN	= (1UL << 29),
+	FTRACE_FL_TRAMP		= (1UL << 28),
+	FTRACE_FL_TRAMP_EN	= (1UL << 27),
 };
 
-#define FTRACE_FL_MASK		(0x7UL << 29)
-#define FTRACE_REF_MAX		((1UL << 29) - 1)
+#define FTRACE_REF_MAX_SHIFT	27
+#define FTRACE_FL_BITS		5
+#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
+#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
+
+#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
 
 struct dyn_ftrace {
 	unsigned long		ip; /* address of mcount call-site */
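
The reworked layout packs five flag bits (31..27) above a 27-bit reference count, which ftrace_rec_count() extracts by masking the flag bits off. The arithmetic is easy to check in isolation; this mirrors the macros above as a stand-alone program, not kernel code:

/* Illustrative only: mirrors the FTRACE_FL_* mask arithmetic. */
#include <stdio.h>

#define REF_MAX_SHIFT	27
#define FL_BITS		5
#define FL_MASKED_BITS	((1UL << FL_BITS) - 1)
#define FL_MASK		(FL_MASKED_BITS << REF_MAX_SHIFT)
#define REF_MAX		((1UL << REF_MAX_SHIFT) - 1)

int main(void)
{
	/* Top five bits (31..27) are flags, low 27 bits the refcount. */
	printf("mask   = 0x%08lx\n", FL_MASK);	/* 0xf8000000 */
	printf("refmax = 0x%08lx\n", REF_MAX);	/* 0x07ffffff */

	unsigned long flags = (1UL << 31) | 3;	/* ENABLED flag + refcount 3 */
	printf("count  = %lu\n", flags & ~FL_MASK);	/* 3 */
	return 0;
}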
@@ -431,6 +410,10 @@ void ftrace_modify_all_code(int command);
 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
 #endif
 
+#ifndef FTRACE_GRAPH_ADDR
+#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
+#endif
+
 #ifndef FTRACE_REGS_ADDR
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
@@ -439,6 +422,16 @@ void ftrace_modify_all_code(int command);
 #endif
 #endif
 
+/*
+ * If an arch would like functions that are only traced
+ * by the function graph tracer to jump directly to its own
+ * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
+ * to be that address to jump to.
+ */
+#ifndef FTRACE_GRAPH_TRAMP_ADDR
+#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -736,6 +729,7 @@ extern char __irqentry_text_end[];
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
+extern bool ftrace_graph_is_dead(void);
 extern void ftrace_graph_stop(void);
 
 /* The current handlers in use */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index cff3106ffe2c..06c6faa9e5cc 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -272,7 +272,6 @@ struct ftrace_event_call {
 	struct trace_event	event;
 	const char		*print_fmt;
 	struct event_filter	*filter;
-	struct list_head	*files;
 	void			*mod;
 	void			*data;
 	/*
@@ -404,8 +403,6 @@ enum event_trigger_type {
 	ETT_EVENT_ENABLE	= (1 << 3),
 };
 
-extern void destroy_preds(struct ftrace_event_file *file);
-extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
 extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc0754..a23c096b3080 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6df7f9fe0d01..2bb4c4f3531a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,12 +102,6 @@ extern struct group_info init_groups;
 #define INIT_IDS
 #endif
 
-#ifdef CONFIG_RCU_BOOST
-#define INIT_TASK_RCU_BOOST()						\
-	.rcu_boost_mutex = NULL,
-#else
-#define INIT_TASK_RCU_BOOST()
-#endif
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_TREE_PREEMPT()					\
 	.rcu_blocked_node = NULL,
@@ -119,8 +113,7 @@ extern struct group_info init_groups;
 	.rcu_read_lock_nesting = 0,					\
 	.rcu_read_unlock_special = 0,					\
 	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
-	INIT_TASK_RCU_TREE_PREEMPT()					\
-	INIT_TASK_RCU_BOOST()
+	INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 19ae05d4b8ec..bf9422c3aefe 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -33,6 +33,11 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
 bool irq_work_queue(struct irq_work *work);
+
+#ifdef CONFIG_SMP
+bool irq_work_queue_on(struct irq_work *work, int cpu);
+#endif
+
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
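
A hypothetical caller of the new CPU-targeted variant might look like the sketch below (kernel-context code with invented demo names; irq_work_queue_on() only exists on SMP builds, hence the fallback):

/* Sketch: raising irq_work on a specific CPU via irq_work_queue_on(). */
static void demo_fn(struct irq_work *work)
{
	/* runs from the irq_work interrupt on the CPU it was queued on */
}

static DEFINE_IRQ_WORK(demo_work, demo_fn);

static void demo_kick_cpu(int cpu)
{
#ifdef CONFIG_SMP
	irq_work_queue_on(&demo_work, cpu);	/* raise on @cpu */
#else
	irq_work_queue(&demo_work);		/* UP: local CPU only */
#endif
}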
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644
index 000000000000..03a4ea37ba86
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR			0x0000
+#define GICD_TYPER			0x0004
+#define GICD_IIDR			0x0008
+#define GICD_STATUSR			0x0010
+#define GICD_SETSPI_NSR			0x0040
+#define GICD_CLRSPI_NSR			0x0048
+#define GICD_SETSPI_SR			0x0050
+#define GICD_CLRSPI_SR			0x0058
+#define GICD_SEIR			0x0068
+#define GICD_ISENABLER			0x0100
+#define GICD_ICENABLER			0x0180
+#define GICD_ISPENDR			0x0200
+#define GICD_ICPENDR			0x0280
+#define GICD_ISACTIVER			0x0300
+#define GICD_ICACTIVER			0x0380
+#define GICD_IPRIORITYR			0x0400
+#define GICD_ICFGR			0x0C00
+#define GICD_IROUTER			0x6000
+#define GICD_PIDR2			0xFFE8
+
+#define GICD_CTLR_RWP			(1U << 31)
+#define GICD_CTLR_ARE_NS		(1U << 4)
+#define GICD_CTLR_ENABLE_G1A		(1U << 1)
+#define GICD_CTLR_ENABLE_G1		(1U << 0)
+
+#define GICD_IROUTER_SPI_MODE_ONE	(0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY	(1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK		0xf0
+#define GIC_PIDR2_ARCH_GICv3		0x30
+#define GIC_PIDR2_ARCH_GICv4		0x40
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR			GICD_CTLR
+#define GICR_IIDR			0x0004
+#define GICR_TYPER			0x0008
+#define GICR_STATUSR			GICD_STATUSR
+#define GICR_WAKER			0x0014
+#define GICR_SETLPIR			0x0040
+#define GICR_CLRLPIR			0x0048
+#define GICR_SEIR			GICD_SEIR
+#define GICR_PROPBASER			0x0070
+#define GICR_PENDBASER			0x0078
+#define GICR_INVLPIR			0x00A0
+#define GICR_INVALLR			0x00B0
+#define GICR_SYNCR			0x00C0
+#define GICR_MOVLPIR			0x0100
+#define GICR_MOVALLR			0x0110
+#define GICR_PIDR2			GICD_PIDR2
+
+#define GICR_WAKER_ProcessorSleep	(1U << 1)
+#define GICR_WAKER_ChildrenAsleep	(1U << 2)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_ISENABLER0			GICD_ISENABLER
+#define GICR_ICENABLER0			GICD_ICENABLER
+#define GICR_ISPENDR0			GICD_ISPENDR
+#define GICR_ICPENDR0			GICD_ICPENDR
+#define GICR_ISACTIVER0			GICD_ISACTIVER
+#define GICR_ICACTIVER0			GICD_ICACTIVER
+#define GICR_IPRIORITYR0		GICD_IPRIORITYR
+#define GICR_ICFGR0			GICD_ICFGR
+
+#define GICR_TYPER_VLPIS		(1U << 1)
+#define GICR_TYPER_LAST			(1U << 4)
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << 1)
+#define ICC_CTLR_EL1_EOImode_drop	(1U << 1)
+#define ICC_SRE_EL1_SRE			(1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define ICH_LR_VIRTUAL_ID_MASK		((1UL << 32) - 1)
+
+#define ICH_LR_EOI			(1UL << 41)
+#define ICH_LR_GROUP			(1UL << 60)
+#define ICH_LR_STATE			(3UL << 62)
+#define ICH_LR_PENDING_BIT		(1UL << 62)
+#define ICH_LR_ACTIVE_BIT		(1UL << 63)
+
+#define ICH_MISR_EOI			(1 << 0)
+#define ICH_MISR_U			(1 << 1)
+
+#define ICH_HCR_EN			(1 << 0)
+#define ICH_HCR_UIE			(1 << 1)
+
+#define ICH_VMCR_CTLR_SHIFT		0
+#define ICH_VMCR_CTLR_MASK		(0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT		18
+#define ICH_VMCR_BPR1_MASK		(7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT		21
+#define ICH_VMCR_BPR0_MASK		(7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT		24
+#define ICH_VMCR_PMR_MASK		(0xffUL << ICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1			sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1			sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1			sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1			sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1			sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1			sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1			sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1_SPURIOUS		0x3ff
+
+#define ICC_SRE_EL2			sys_reg(3, 4, 12, 9, 5)
+
+#define ICC_SRE_EL2_SRE			(1 << 0)
+#define ICC_SRE_EL2_ENABLE		(1 << 3)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2			sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2			sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2			sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2			sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2			sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2			sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2			sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x)			sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x)			sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2			__LR0_EL2(0)
+#define ICH_LR1_EL2			__LR0_EL2(1)
+#define ICH_LR2_EL2			__LR0_EL2(2)
+#define ICH_LR3_EL2			__LR0_EL2(3)
+#define ICH_LR4_EL2			__LR0_EL2(4)
+#define ICH_LR5_EL2			__LR0_EL2(5)
+#define ICH_LR6_EL2			__LR0_EL2(6)
+#define ICH_LR7_EL2			__LR0_EL2(7)
+#define ICH_LR8_EL2			__LR8_EL2(0)
+#define ICH_LR9_EL2			__LR8_EL2(1)
+#define ICH_LR10_EL2			__LR8_EL2(2)
+#define ICH_LR11_EL2			__LR8_EL2(3)
+#define ICH_LR12_EL2			__LR8_EL2(4)
+#define ICH_LR13_EL2			__LR8_EL2(5)
+#define ICH_LR14_EL2			__LR8_EL2(6)
+#define ICH_LR15_EL2			__LR8_EL2(7)
+
+#define __AP0Rx_EL2(x)			sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2			__AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2			__AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2			__AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2			__AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x)			sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2			__AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2			__AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2			__AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2			__AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+static inline void gic_write_eoir(u64 irq)
+{
+	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+	isb();
+}
+
+#endif
+
+#endif
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 17aa1cce6f8e..30faf797c2c3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -91,6 +91,7 @@ struct kernfs_elem_attr {
 	const struct kernfs_ops	*ops;
 	struct kernfs_open_node	*open;
 	loff_t			size;
+	struct kernfs_node	*notify_next;	/* for kernfs_notify() */
 };
 
 /*
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
 			       struct kernfs_root *root, unsigned long magic,
 			       bool *new_sb_created, const void *ns);
 void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
 
 void kernfs_init(void);
 
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 7dcef3317689..13d55206ccf6 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -73,7 +73,6 @@ struct kthread_worker {
 struct kthread_work {
 	struct list_head	node;
 	kthread_work_func_t	func;
-	wait_queue_head_t	done;
 	struct kthread_worker	*worker;
 };
 
@@ -85,7 +84,6 @@ struct kthread_work {
 #define KTHREAD_WORK_INIT(work, fn)	{				\
 	.node = LIST_HEAD_INIT((work).node),				\
 	.func = (fn),							\
-	.done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),		\
 	}
 
 #define DEFINE_KTHREAD_WORKER(worker)					\
@@ -95,22 +93,16 @@ struct kthread_work {
 	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
 /*
- * kthread_worker.lock and kthread_work.done need their own lockdep class
- * keys if they are defined on stack with lockdep enabled. Use the
- * following macros when defining them on stack.
+ * kthread_worker.lock needs its own lockdep class key when defined on
+ * stack with lockdep enabled. Use the following macros in such cases.
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
 	({ init_kthread_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
 	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-# define KTHREAD_WORK_INIT_ONSTACK(work, fn)				\
-	({ init_kthread_work((&work), fn); work; })
-# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)				\
-	struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
 #endif
 
 extern void __init_kthread_worker(struct kthread_worker *worker,
@@ -127,7 +119,6 @@ extern void __init_kthread_worker(struct kthread_worker *worker,
 		memset((work), 0, sizeof(struct kthread_work));		\
 		INIT_LIST_HEAD(&(work)->node);				\
 		(work)->func = (fn);					\
-		init_waitqueue_head(&(work)->done);			\
 	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
 	struct device 		*dev;
 	void __iomem * const	*iomap;
 	unsigned int		n_ports;
+	unsigned int		n_tags;			/* nr of NCQ tags */
 	void			*private_data;
 	struct ata_port_operations *ops;
 	unsigned long		flags;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bbd064c..35b51e7af886 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@ struct mlx4_cq {
 	u32			cons_index;
 
 	u16                     irq;
-	bool                    irq_affinity_change;
-
 	__be32		       *set_ci_db;
 	__be32		       *arm_db;
 	int			arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 		   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 92a2f991262a..8103f32f6d87 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 struct msi_desc {
 	struct {
 		__u8	is_msix	: 1;
-		__u8	multiple: 3;	/* log2 number of messages */
+		__u8	multiple: 3;	/* log2 num of messages allocated */
+		__u8	multi_cap : 3;	/* log2 num of messages supported */
 		__u8	maskbit	: 1;	/* mask-pending bit supported ? */
 		__u8	is_64	: 1;	/* Address size: 0=32bit 1=64bit */
 		__u8	pos;		/* Location of the msi capability */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..8d5535c58cc2 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_queue	*osq;	/* Spinner MCS lock */
+	struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char 		*name;
@@ -176,8 +176,4 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef arch_mutex_cpu_relax
-# define arch_mutex_cpu_relax() cpu_relax()
-#endif
-
 #endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 6a45fb583ff1..447775ee2c4b 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-	arch_trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace(true);
 
 	return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace(false);
+	return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
 	return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
 			   void __user *, size_t *, loff_t *);
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 05117899fcb4..0ff360d5b3b3 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
 				     int depth, void *data);
 
 extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
 
 extern const char *of_flat_dt_get_machine_name(void);
 extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
 extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c9493d55a..d449018d0726 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
-				   struct phy_device *phydev);
-
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
 	return NULL;
 }
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
-					  struct phy_device *phydev)
-{
-}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+	/*
+	 * Stores an encoded value of the CPU # of the tail node in the queue.
+	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+	 */
+	atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
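
This pairs with the mutex.h change earlier in the series, where struct mutex now embeds the queue by value instead of holding a pointer. A hypothetical sleeping lock would initialize the embedded OSQ in place, roughly as follows (kernel-context sketch, demo names invented):

/* Sketch: embedding and initializing the OSQ, per the mutex change above. */
struct demo_sleeping_lock {
	atomic_t			count;
	struct optimistic_spin_queue	osq;	/* embedded, no longer a pointer */
};

static void demo_lock_init(struct demo_sleeping_lock *lock)
{
	atomic_set(&lock->count, 1);
	osq_lock_init(&lock->osq);	/* tail = OSQ_UNLOCKED_VAL, queue empty */
}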
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3c545b48aeab..8304959ad336 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
 	ClearPageHead(page);
 }
 #endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 }
 
 /*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+	if (unlikely(PageHeadHuge(page)))
+		return page->index << compound_order(page);
+	else
+		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
+/*
  * Return byte-offset into filesystem object for page.
  */
 static inline loff_t page_offset(struct page *page)
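
page_to_pgoff() normalizes ->index to PAGE_SIZE units by shifting: hugetlb head pages keep ->index in huge-page units, while regular page-cache pages use PAGE_CACHE_SIZE units. The conversion itself is plain arithmetic; a stand-alone illustration with hypothetical values (2MB hugepage, i.e. order 9, on a 4K-page system):

/* Illustrative only: the unit conversion done by page_to_pgoff(). */
#include <stdio.h>

int main(void)
{
	unsigned long index = 3;	/* hypothetical page->index */

	/* hugetlb head page, order 9: index counts 2MB units,
	 * so the 4K-page offset is 3 << 9 = 1536 */
	printf("%lu\n", index << 9);

	/* regular page cache: PAGE_CACHE_SHIFT == PAGE_SHIFT,
	 * so the shift is zero and the index is unchanged */
	printf("%lu\n", index << 0);
	return 0;
}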
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 466bcd111d85..6ed3647b38df 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -978,6 +978,8 @@ int pci_try_reset_slot(struct pci_slot *slot);
 int pci_probe_reset_bus(struct pci_bus *bus);
 int pci_reset_bus(struct pci_bus *bus);
 int pci_try_reset_bus(struct pci_bus *bus);
+void pci_reset_secondary_bus(struct pci_dev *dev);
+void pcibios_reset_secondary_bus(struct pci_dev *dev);
 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
 void pci_update_resource(struct pci_dev *dev, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
@@ -1186,7 +1188,6 @@ int pci_msix_vec_count(struct pci_dev *dev);
 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 void pci_msix_shutdown(struct pci_dev *dev);
 void pci_disable_msix(struct pci_dev *dev);
-void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 void pci_restore_msi_state(struct pci_dev *dev);
 int pci_msi_enabled(void);
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
@@ -1217,7 +1218,6 @@ static inline int pci_enable_msix(struct pci_dev *dev,
 { return -ENOSYS; }
 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 static inline void pci_disable_msix(struct pci_dev *dev) { }
-static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { }
 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
 static inline int pci_msi_enabled(void) { return 0; }
 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7fa31731c854..6ed0bb73a864 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -6,6 +6,8 @@
6 * Do not add new entries to this file unless the definitions 6 * Do not add new entries to this file unless the definitions
7 * are shared between multiple drivers. 7 * are shared between multiple drivers.
8 */ 8 */
9#ifndef _LINUX_PCI_IDS_H
10#define _LINUX_PCI_IDS_H
9 11
10/* Device classes and subclasses */ 12/* Device classes and subclasses */
11 13
@@ -2968,3 +2970,5 @@
2968#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 2970#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001
2969 2971
2970#define PCI_VENDOR_ID_OCZ 0x1b85 2972#define PCI_VENDOR_ID_OCZ 0x1b85
2973
2974#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a5fc7d01aad6..cfd56046ecec 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -1,6 +1,40 @@
1/*
2 * linux/percpu-defs.h - basic definitions for percpu areas
3 *
4 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
5 *
6 * This file is separate from linux/percpu.h to avoid cyclic inclusion
7 * dependency from arch header files. Only to be included from
8 * asm/percpu.h.
9 *
10 * This file includes macros necessary to declare percpu sections and
11 * variables, and definitions of percpu accessors and operations. It
12 * should provide enough percpu features to arch header files even when
13 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
14 */
15
1#ifndef _LINUX_PERCPU_DEFS_H 16#ifndef _LINUX_PERCPU_DEFS_H
2#define _LINUX_PERCPU_DEFS_H 17#define _LINUX_PERCPU_DEFS_H
3 18
19#ifdef CONFIG_SMP
20
21#ifdef MODULE
22#define PER_CPU_SHARED_ALIGNED_SECTION ""
23#define PER_CPU_ALIGNED_SECTION ""
24#else
25#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
26#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
27#endif
28#define PER_CPU_FIRST_SECTION "..first"
29
30#else
31
32#define PER_CPU_SHARED_ALIGNED_SECTION ""
33#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
34#define PER_CPU_FIRST_SECTION ""
35
36#endif
37
4/* 38/*
5 * Base implementations of per-CPU variable declarations and definitions, where 39 * Base implementations of per-CPU variable declarations and definitions, where
6 * the section in which the variable is to be placed is provided by the 40 * the section in which the variable is to be placed is provided by the
@@ -19,19 +53,6 @@
19 __attribute__((section(".discard"), unused)) 53 __attribute__((section(".discard"), unused))
20 54
21/* 55/*
22 * Macro which verifies @ptr is a percpu pointer without evaluating
23 * @ptr. This is to be used in percpu accessors to verify that the
24 * input parameter is a percpu pointer.
25 *
26 * + 0 is required in order to convert the pointer type from a
27 * potential array type to a pointer to a single item of the array.
28 */
29#define __verify_pcpu_ptr(ptr) do { \
30 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
31 (void)__vpp_verify; \
32} while (0)
33
34/*
35 * s390 and alpha modules require percpu variables to be defined as 56 * s390 and alpha modules require percpu variables to be defined as
36 * weak to force the compiler to generate GOT based external 57 * weak to force the compiler to generate GOT based external
37 * references for them. This is necessary because percpu sections 58 * references for them. This is necessary because percpu sections
@@ -146,10 +167,10 @@
146 * Declaration/definition used for per-CPU variables that must be read mostly. 167 * Declaration/definition used for per-CPU variables that must be read mostly.
147 */ 168 */
148#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ 169#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
149 DECLARE_PER_CPU_SECTION(type, name, "..readmostly") 170 DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
150 171
151#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ 172#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
152 DEFINE_PER_CPU_SECTION(type, name, "..readmostly") 173 DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
153 174
154/* 175/*
155 * Intermodule exports for per-CPU variables. sparse forgets about 176 * Intermodule exports for per-CPU variables. sparse forgets about
@@ -164,4 +185,337 @@
164#define EXPORT_PER_CPU_SYMBOL_GPL(var) 185#define EXPORT_PER_CPU_SYMBOL_GPL(var)
165#endif 186#endif
166 187
188/*
189 * Accessors and operations.
190 */
191#ifndef __ASSEMBLY__
192
193/*
194 * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
195 * @ptr; it is invoked by all accessors and operations once before a
196 * percpu area is accessed. This is performed in the generic part of
197 * percpu and arch overrides don't need to worry about it; however, if an
198 * arch wants to implement an arch-specific percpu accessor or operation,
199 * it may use __verify_pcpu_ptr() to verify the parameters.
200 *
201 * + 0 is required in order to convert the pointer type from a
202 * potential array type to a pointer to a single item of the array.
203 */
204#define __verify_pcpu_ptr(ptr) \
205do { \
206 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
207 (void)__vpp_verify; \
208} while (0)
209
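The "+ 0" trick in the macro above is plain C: adding zero forces array-to-pointer decay, so the cast type is always a pointer even when @ptr names a percpu array. A standalone sketch using GCC's typeof, names made up:

int arr[4];

void demo(void)
{
	/* typeof(arr) is int[4]; an array type cannot be assigned NULL.  */
	/* typeof(arr + 0) is int *, because + 0 decays the array, which  */
	/* is exactly what the macro's cast relies on.                    */
	typeof(arr + 0) p = (void *)0;
	(void)p;
}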
210#ifdef CONFIG_SMP
211
212/*
213 * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
214 * to prevent the compiler from making incorrect assumptions about the
215 * pointer value. The weird cast keeps both GCC and sparse happy.
216 */
217#define SHIFT_PERCPU_PTR(__p, __offset) \
218 RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
219
220#define per_cpu_ptr(ptr, cpu) \
221({ \
222 __verify_pcpu_ptr(ptr); \
223 SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \
224})
225
226#define raw_cpu_ptr(ptr) \
227({ \
228 __verify_pcpu_ptr(ptr); \
229 arch_raw_cpu_ptr(ptr); \
230})
231
232#ifdef CONFIG_DEBUG_PREEMPT
233#define this_cpu_ptr(ptr) \
234({ \
235 __verify_pcpu_ptr(ptr); \
236 SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \
237})
238#else
239#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
240#endif
241
242#else /* CONFIG_SMP */
243
244#define VERIFY_PERCPU_PTR(__p) \
245({ \
246 __verify_pcpu_ptr(__p); \
247 (typeof(*(__p)) __kernel __force *)(__p); \
248})
249
250#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
251#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
252#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
253
254#endif /* CONFIG_SMP */
255
256#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
257#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
258#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
259
260/* keep until we have removed all uses of __this_cpu_ptr */
261#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
262
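A hedged usage sketch of the accessors above, assuming kernel context (<linux/percpu.h>, <linux/cpumask.h>); demo_counter is a hypothetical variable:

DEFINE_PER_CPU(int, demo_counter);

static void demo_accessors(void)
{
	int cpu;

	/* per_cpu() addresses any CPU's instance by index */
	for_each_possible_cpu(cpu)
		per_cpu(demo_counter, cpu) = 0;

	/* this_cpu_ptr() is only stable while preemption is off */
	preempt_disable();
	(*this_cpu_ptr(&demo_counter))++;
	preempt_enable();
}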
263/*
264 * Must be an lvalue. Since @var must be a simple identifier,
265 * we force a syntax error here if it isn't.
266 */
267#define get_cpu_var(var) \
268(*({ \
269 preempt_disable(); \
270 this_cpu_ptr(&var); \
271}))
272
273/*
274 * The weird & is necessary because sparse considers (void)(var) to be
275 * a direct dereference of percpu variable (var).
276 */
277#define put_cpu_var(var) \
278do { \
279 (void)&(var); \
280 preempt_enable(); \
281} while (0)
282
283#define get_cpu_ptr(var) \
284({ \
285 preempt_disable(); \
286 this_cpu_ptr(var); \
287})
288
289#define put_cpu_ptr(var) \
290do { \
291 (void)(var); \
292 preempt_enable(); \
293} while (0)
294
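The canonical pairing for the helpers above, as a hedged sketch with a hypothetical demo_stat: get_cpu_var() disables preemption and yields this CPU's instance, put_cpu_var() re-enables it.

DEFINE_PER_CPU(unsigned long, demo_stat);

static void demo_get_put(void)
{
	unsigned long *stat = &get_cpu_var(demo_stat);	/* preemption off */

	*stat += 1;			/* safe: the task stays on this CPU */

	put_cpu_var(demo_stat);		/* preemption back on */
}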
295/*
296 * Branching function to split up a function into a set of functions that
297 * are called for different scalar sizes of the objects handled.
298 */
299
300extern void __bad_size_call_parameter(void);
301
302#ifdef CONFIG_DEBUG_PREEMPT
303extern void __this_cpu_preempt_check(const char *op);
304#else
305static inline void __this_cpu_preempt_check(const char *op) { }
306#endif
307
308#define __pcpu_size_call_return(stem, variable) \
309({ \
310 typeof(variable) pscr_ret__; \
311 __verify_pcpu_ptr(&(variable)); \
312 switch(sizeof(variable)) { \
313 case 1: pscr_ret__ = stem##1(variable); break; \
314 case 2: pscr_ret__ = stem##2(variable); break; \
315 case 4: pscr_ret__ = stem##4(variable); break; \
316 case 8: pscr_ret__ = stem##8(variable); break; \
317 default: \
318 __bad_size_call_parameter(); break; \
319 } \
320 pscr_ret__; \
321})
322
323#define __pcpu_size_call_return2(stem, variable, ...) \
324({ \
325 typeof(variable) pscr2_ret__; \
326 __verify_pcpu_ptr(&(variable)); \
327 switch(sizeof(variable)) { \
328 case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
329 case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
330 case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
331 case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
332 default: \
333 __bad_size_call_parameter(); break; \
334 } \
335 pscr2_ret__; \
336})
337
338/*
339 * Special handling for cmpxchg_double. cmpxchg_double is passed two
340 * percpu variables. The first has to be aligned to a double word
341 * boundary and the second has to follow directly thereafter.
342 * We enforce this on all architectures even if they don't support
343 * a double cmpxchg instruction, since it's a cheap requirement, and it
344 * avoids breaking the requirement for architectures with the instruction.
345 */
346#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
347({ \
348 bool pdcrb_ret__; \
349 __verify_pcpu_ptr(&(pcp1)); \
350 BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
351 VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \
352 VM_BUG_ON((unsigned long)(&(pcp2)) != \
353 (unsigned long)(&(pcp1)) + sizeof(pcp1)); \
354 switch(sizeof(pcp1)) { \
355 case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
356 case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
357 case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
358 case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
359 default: \
360 __bad_size_call_parameter(); break; \
361 } \
362 pdcrb_ret__; \
363})
364
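Callers typically satisfy the alignment and adjacency checks above by packing the two scalars into one aligned struct. A sketch under that assumption (all names hypothetical; this_cpu_cmpxchg_double() is defined further down in this file):

struct demo_pair {
	unsigned long head;
	unsigned long tail;
} __aligned(2 * sizeof(unsigned long));	/* double-word alignment */

static DEFINE_PER_CPU(struct demo_pair, demo_pair);

/* atomically swap both words iff both still hold the expected values */
static bool demo_swap(unsigned long oh, unsigned long ot,
		      unsigned long nh, unsigned long nt)
{
	return this_cpu_cmpxchg_double(demo_pair.head, demo_pair.tail,
				       oh, ot, nh, nt);
}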
365#define __pcpu_size_call(stem, variable, ...) \
366do { \
367 __verify_pcpu_ptr(&(variable)); \
368 switch(sizeof(variable)) { \
369 case 1: stem##1(variable, __VA_ARGS__);break; \
370 case 2: stem##2(variable, __VA_ARGS__);break; \
371 case 4: stem##4(variable, __VA_ARGS__);break; \
372 case 8: stem##8(variable, __VA_ARGS__);break; \
373 default: \
374 __bad_size_call_parameter();break; \
375 } \
376} while (0)
377
378/*
379 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
380 *
381 * Optimized manipulation for memory allocated through the per cpu
382 * allocator or for addresses of per cpu variables.
383 *
384 * These operations guarantee exclusivity of access against other operations
385 * on the *same* processor. The assumption is that per cpu data is only
386 * accessed by a single processor instance (the current one).
387 *
388 * The arch code can provide optimized implementation by defining macros
389 * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per
390 * cpu atomic operations for 2 byte sized RMW actions. If arch code does
391 * not provide operations for a scalar size then the fallback in the
392 * generic code will be used.
393 *
394 * cmpxchg_double replaces two adjacent scalars at once. The first two
395 * parameters are per cpu variables which have to be of the same size. A
396 * truth value is returned to indicate success or failure (since a double
397 * register result is difficult to handle). There is very limited hardware
398 * support for these operations, so only certain sizes may work.
399 */
400
401/*
402 * Operations for contexts where we do not want to do any checks for
403 * preemptions. Unless strictly necessary, always use [__]this_cpu_*()
404 * instead.
405 *
406 * If there is no other protection through preempt disable and/or disabling
407 * interrupts, then one of these RMW operations can show unexpected behavior
408 * because the execution thread was rescheduled on another processor or an
409 * interrupt occurred and the same percpu variable was modified from the
410 * interrupt context.
411 */
412#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp)
413#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val)
414#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val)
415#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val)
416#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val)
417#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
418#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
419#define raw_cpu_cmpxchg(pcp, oval, nval) \
420 __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
421#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
422 __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
423
424#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val))
425#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1)
426#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1)
427#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val))
428#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1)
429#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1)
430
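A hedged sketch of the caveat above: raw_cpu_*() is only sound when the caller already provides protection, for example an explicit preempt-off region (demo_events is hypothetical):

DEFINE_PER_CPU(unsigned long, demo_events);

static void demo_raw_inc(void)
{
	/*
	 * Without this preempt_disable(), the task could migrate between
	 * the load and the store hidden in raw_cpu_inc() and corrupt
	 * another CPU's counter.
	 */
	preempt_disable();
	raw_cpu_inc(demo_events);
	preempt_enable();
}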
431/*
432 * Operations for contexts that are safe from preemption/interrupts. These
433 * operations verify that preemption is disabled.
434 */
435#define __this_cpu_read(pcp) \
436({ \
437 __this_cpu_preempt_check("read"); \
438 raw_cpu_read(pcp); \
439})
440
441#define __this_cpu_write(pcp, val) \
442({ \
443 __this_cpu_preempt_check("write"); \
444 raw_cpu_write(pcp, val); \
445})
446
447#define __this_cpu_add(pcp, val) \
448({ \
449 __this_cpu_preempt_check("add"); \
450 raw_cpu_add(pcp, val); \
451})
452
453#define __this_cpu_and(pcp, val) \
454({ \
455 __this_cpu_preempt_check("and"); \
456 raw_cpu_and(pcp, val); \
457})
458
459#define __this_cpu_or(pcp, val) \
460({ \
461 __this_cpu_preempt_check("or"); \
462 raw_cpu_or(pcp, val); \
463})
464
465#define __this_cpu_add_return(pcp, val) \
466({ \
467 __this_cpu_preempt_check("add_return"); \
468 raw_cpu_add_return(pcp, val); \
469})
470
471#define __this_cpu_xchg(pcp, nval) \
472({ \
473 __this_cpu_preempt_check("xchg"); \
474 raw_cpu_xchg(pcp, nval); \
475})
476
477#define __this_cpu_cmpxchg(pcp, oval, nval) \
478({ \
479 __this_cpu_preempt_check("cmpxchg"); \
480 raw_cpu_cmpxchg(pcp, oval, nval); \
481})
482
483#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
484({ __this_cpu_preempt_check("cmpxchg_double"); \
485 raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
486})
487
488#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val))
489#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
490#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
491#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
492#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
493#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
494
495/*
496 * Operations with implied preemption protection. These operations can be
497 * used without worrying about preemption. Note that interrupts may still
498 * occur while an operation is in progress and if the interrupt modifies
499 * the variable too then RMW actions may not be reliable.
500 */
501#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
502#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
503#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val)
504#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val)
505#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val)
506#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
507#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
508#define this_cpu_cmpxchg(pcp, oval, nval) \
509 __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
510#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
511 __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
512
513#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val))
514#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
515#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
516#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
517#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
518#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
519
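The this_cpu_*() family above folds the preemption protection into the operation itself, so a plain statistics bump needs no bracketing. A hedged example with a hypothetical counter:

DEFINE_PER_CPU(unsigned long, demo_rx_packets);

static void demo_count_rx(void)
{
	/*
	 * Safe against preemption as-is; but per the comment above, if an
	 * interrupt handler also modifies this counter, the RMW is not
	 * reliable and an irq-safe scheme is needed instead.
	 */
	this_cpu_inc(demo_rx_packets);
}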
520#endif /* __ASSEMBLY__ */
167#endif /* _LINUX_PERCPU_DEFS_H */ 521#endif /* _LINUX_PERCPU_DEFS_H */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 5d8920e23073..3dfbf237cd8f 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -57,11 +57,9 @@ struct percpu_ref {
57 atomic_t count; 57 atomic_t count;
58 /* 58 /*
59 * The low bit of the pointer indicates whether the ref is in percpu 59 * The low bit of the pointer indicates whether the ref is in percpu
60 * mode; if set, then get/put will manipulate the atomic_t (this is a 60 * mode; if set, then get/put will manipulate the atomic_t.
61 * hack because we need to keep the pointer around for
62 * percpu_ref_kill_rcu())
63 */ 61 */
64 unsigned __percpu *pcpu_count; 62 unsigned long pcpu_count_ptr;
65 percpu_ref_func_t *release; 63 percpu_ref_func_t *release;
66 percpu_ref_func_t *confirm_kill; 64 percpu_ref_func_t *confirm_kill;
67 struct rcu_head rcu; 65 struct rcu_head rcu;
@@ -69,7 +67,8 @@ struct percpu_ref {
69 67
70int __must_check percpu_ref_init(struct percpu_ref *ref, 68int __must_check percpu_ref_init(struct percpu_ref *ref,
71 percpu_ref_func_t *release); 69 percpu_ref_func_t *release);
72void percpu_ref_cancel_init(struct percpu_ref *ref); 70void percpu_ref_reinit(struct percpu_ref *ref);
71void percpu_ref_exit(struct percpu_ref *ref);
73void percpu_ref_kill_and_confirm(struct percpu_ref *ref, 72void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
74 percpu_ref_func_t *confirm_kill); 73 percpu_ref_func_t *confirm_kill);
75 74
@@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
88 return percpu_ref_kill_and_confirm(ref, NULL); 87 return percpu_ref_kill_and_confirm(ref, NULL);
89} 88}
90 89
91#define PCPU_STATUS_BITS 2
92#define PCPU_STATUS_MASK ((1 << PCPU_STATUS_BITS) - 1)
93#define PCPU_REF_PTR 0
94#define PCPU_REF_DEAD 1 90#define PCPU_REF_DEAD 1
95 91
96#define REF_STATUS(count) (((unsigned long) count) & PCPU_STATUS_MASK) 92/*
93 * Internal helper. Don't use outside percpu-refcount proper. The
94 * function doesn't return the pointer and leave the NULL test to the caller
95 * because doing so would force the compiler to generate two conditional
96 * branches, as it couldn't assume that @ref->pcpu_count is non-NULL.
97 */
98static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
99 unsigned __percpu **pcpu_countp)
100{
101 unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
102
103 /* paired with smp_store_release() in percpu_ref_reinit() */
104 smp_read_barrier_depends();
105
106 if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
107 return false;
108
109 *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
110 return true;
111}
97 112
98/** 113/**
99 * percpu_ref_get - increment a percpu refcount 114 * percpu_ref_get - increment a percpu refcount
@@ -107,9 +122,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
107 122
108 rcu_read_lock_sched(); 123 rcu_read_lock_sched();
109 124
110 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 125 if (__pcpu_ref_alive(ref, &pcpu_count))
111
112 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
113 this_cpu_inc(*pcpu_count); 126 this_cpu_inc(*pcpu_count);
114 else 127 else
115 atomic_inc(&ref->count); 128 atomic_inc(&ref->count);
@@ -133,9 +146,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
133 146
134 rcu_read_lock_sched(); 147 rcu_read_lock_sched();
135 148
136 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 149 if (__pcpu_ref_alive(ref, &pcpu_count)) {
137
138 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
139 this_cpu_inc(*pcpu_count); 150 this_cpu_inc(*pcpu_count);
140 ret = true; 151 ret = true;
141 } else { 152 } else {
@@ -168,9 +179,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
168 179
169 rcu_read_lock_sched(); 180 rcu_read_lock_sched();
170 181
171 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 182 if (__pcpu_ref_alive(ref, &pcpu_count)) {
172
173 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
174 this_cpu_inc(*pcpu_count); 183 this_cpu_inc(*pcpu_count);
175 ret = true; 184 ret = true;
176 } 185 }
@@ -193,9 +202,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
193 202
194 rcu_read_lock_sched(); 203 rcu_read_lock_sched();
195 204
196 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 205 if (__pcpu_ref_alive(ref, &pcpu_count))
197
198 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
199 this_cpu_dec(*pcpu_count); 206 this_cpu_dec(*pcpu_count);
200 else if (unlikely(atomic_dec_and_test(&ref->count))) 207 else if (unlikely(atomic_dec_and_test(&ref->count)))
201 ref->release(ref); 208 ref->release(ref);
@@ -203,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
203 rcu_read_unlock_sched(); 210 rcu_read_unlock_sched();
204} 211}
205 212
213/**
214 * percpu_ref_is_zero - test whether a percpu refcount reached zero
215 * @ref: percpu_ref to test
216 *
217 * Returns %true if @ref reached zero.
218 */
219static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
220{
221 unsigned __percpu *pcpu_count;
222
223 if (__pcpu_ref_alive(ref, &pcpu_count))
224 return false;
225 return !atomic_read(&ref->count);
226}
227
206#endif 228#endif
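Putting the reworked API together, a hedged lifecycle sketch (names hypothetical, error paths trimmed): percpu_ref_init() starts the ref in percpu mode, percpu_ref_kill() switches it to atomic mode and drops the base reference, and percpu_ref_exit() frees the percpu counter once the ref is finished.

static struct percpu_ref demo_ref;

static void demo_release(struct percpu_ref *ref)
{
	/* runs once the killed ref's count finally reaches zero */
}

static int demo_lifecycle(void)
{
	int ret = percpu_ref_init(&demo_ref, demo_release);

	if (ret)
		return ret;

	percpu_ref_get(&demo_ref);	/* percpu-mode fast path */
	percpu_ref_put(&demo_ref);

	percpu_ref_kill(&demo_ref);	/* atomic mode; release may run */
	/* ...once demo_release() has run (e.g. via a completion)... */
	percpu_ref_exit(&demo_ref);
	return 0;
}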
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8419053d0f2e..6f61b61b7996 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -23,32 +23,6 @@
23 PERCPU_MODULE_RESERVE) 23 PERCPU_MODULE_RESERVE)
24#endif 24#endif
25 25
26/*
27 * Must be an lvalue. Since @var must be a simple identifier,
28 * we force a syntax error here if it isn't.
29 */
30#define get_cpu_var(var) (*({ \
31 preempt_disable(); \
32 this_cpu_ptr(&var); }))
33
34/*
35 * The weird & is necessary because sparse considers (void)(var) to be
36 * a direct dereference of percpu variable (var).
37 */
38#define put_cpu_var(var) do { \
39 (void)&(var); \
40 preempt_enable(); \
41} while (0)
42
43#define get_cpu_ptr(var) ({ \
44 preempt_disable(); \
45 this_cpu_ptr(var); })
46
47#define put_cpu_ptr(var) do { \
48 (void)(var); \
49 preempt_enable(); \
50} while (0)
51
52/* minimum unit size, also is the maximum supported allocation size */ 26/* minimum unit size, also is the maximum supported allocation size */
53#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) 27#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
54 28
@@ -140,17 +114,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
140 pcpu_fc_populate_pte_fn_t populate_pte_fn); 114 pcpu_fc_populate_pte_fn_t populate_pte_fn);
141#endif 115#endif
142 116
143/*
144 * Use this to get to a cpu's version of the per-cpu object
145 * dynamically allocated. Non-atomic access to the current CPU's
146 * version should probably be combined with get_cpu()/put_cpu().
147 */
148#ifdef CONFIG_SMP
149#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
150#else
151#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
152#endif
153
154extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); 117extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
155extern bool is_kernel_percpu_address(unsigned long addr); 118extern bool is_kernel_percpu_address(unsigned long addr);
156 119
@@ -166,640 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
166#define alloc_percpu(type) \ 129#define alloc_percpu(type) \
167 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) 130 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
168 131
169/*
170 * Branching function to split up a function into a set of functions that
171 * are called for different scalar sizes of the objects handled.
172 */
173
174extern void __bad_size_call_parameter(void);
175
176#ifdef CONFIG_DEBUG_PREEMPT
177extern void __this_cpu_preempt_check(const char *op);
178#else
179static inline void __this_cpu_preempt_check(const char *op) { }
180#endif
181
182#define __pcpu_size_call_return(stem, variable) \
183({ typeof(variable) pscr_ret__; \
184 __verify_pcpu_ptr(&(variable)); \
185 switch(sizeof(variable)) { \
186 case 1: pscr_ret__ = stem##1(variable);break; \
187 case 2: pscr_ret__ = stem##2(variable);break; \
188 case 4: pscr_ret__ = stem##4(variable);break; \
189 case 8: pscr_ret__ = stem##8(variable);break; \
190 default: \
191 __bad_size_call_parameter();break; \
192 } \
193 pscr_ret__; \
194})
195
196#define __pcpu_size_call_return2(stem, variable, ...) \
197({ \
198 typeof(variable) pscr2_ret__; \
199 __verify_pcpu_ptr(&(variable)); \
200 switch(sizeof(variable)) { \
201 case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
202 case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
203 case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
204 case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
205 default: \
206 __bad_size_call_parameter(); break; \
207 } \
208 pscr2_ret__; \
209})
210
211/*
212 * Special handling for cmpxchg_double. cmpxchg_double is passed two
213 * percpu variables. The first has to be aligned to a double word
214 * boundary and the second has to follow directly thereafter.
215 * We enforce this on all architectures even if they don't support
216 * a double cmpxchg instruction, since it's a cheap requirement, and it
217 * avoids breaking the requirement for architectures with the instruction.
218 */
219#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
220({ \
221 bool pdcrb_ret__; \
222 __verify_pcpu_ptr(&pcp1); \
223 BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
224 VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \
225 VM_BUG_ON((unsigned long)(&pcp2) != \
226 (unsigned long)(&pcp1) + sizeof(pcp1)); \
227 switch(sizeof(pcp1)) { \
228 case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
229 case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
230 case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
231 case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
232 default: \
233 __bad_size_call_parameter(); break; \
234 } \
235 pdcrb_ret__; \
236})
237
238#define __pcpu_size_call(stem, variable, ...) \
239do { \
240 __verify_pcpu_ptr(&(variable)); \
241 switch(sizeof(variable)) { \
242 case 1: stem##1(variable, __VA_ARGS__);break; \
243 case 2: stem##2(variable, __VA_ARGS__);break; \
244 case 4: stem##4(variable, __VA_ARGS__);break; \
245 case 8: stem##8(variable, __VA_ARGS__);break; \
246 default: \
247 __bad_size_call_parameter();break; \
248 } \
249} while (0)
250
251/*
252 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
253 *
254 * Optimized manipulation for memory allocated through the per cpu
255 * allocator or for addresses of per cpu variables.
256 *
257 * These operations guarantee exclusivity of access against other operations
258 * on the *same* processor. The assumption is that per cpu data is only
259 * accessed by a single processor instance (the current one).
260 *
261 * The first group is used for accesses that must be done in a
262 * preemption safe way since we know that the context is not preempt
263 * safe. Interrupts may occur. If the interrupt modifies the variable
264 * too then RMW actions will not be reliable.
265 *
266 * The arch code can provide optimized functions in two ways:
267 *
268 * 1. Override the function completely. F.e. define this_cpu_add().
269 * The arch must then ensure that the various scalar format passed
270 * are handled correctly.
271 *
272 * 2. Provide functions for certain scalar sizes. F.e. provide
273 * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
274 * sized RMW actions. If arch code does not provide operations for
275 * a scalar size then the fallback in the generic code will be
276 * used.
277 */
278
279#define _this_cpu_generic_read(pcp) \
280({ typeof(pcp) ret__; \
281 preempt_disable(); \
282 ret__ = *this_cpu_ptr(&(pcp)); \
283 preempt_enable(); \
284 ret__; \
285})
286
287#ifndef this_cpu_read
288# ifndef this_cpu_read_1
289# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
290# endif
291# ifndef this_cpu_read_2
292# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
293# endif
294# ifndef this_cpu_read_4
295# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
296# endif
297# ifndef this_cpu_read_8
298# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
299# endif
300# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
301#endif
302
303#define _this_cpu_generic_to_op(pcp, val, op) \
304do { \
305 unsigned long flags; \
306 raw_local_irq_save(flags); \
307 *raw_cpu_ptr(&(pcp)) op val; \
308 raw_local_irq_restore(flags); \
309} while (0)
310
311#ifndef this_cpu_write
312# ifndef this_cpu_write_1
313# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
314# endif
315# ifndef this_cpu_write_2
316# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
317# endif
318# ifndef this_cpu_write_4
319# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
320# endif
321# ifndef this_cpu_write_8
322# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
323# endif
324# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
325#endif
326
327#ifndef this_cpu_add
328# ifndef this_cpu_add_1
329# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
330# endif
331# ifndef this_cpu_add_2
332# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
333# endif
334# ifndef this_cpu_add_4
335# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
336# endif
337# ifndef this_cpu_add_8
338# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
339# endif
340# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
341#endif
342
343#ifndef this_cpu_sub
344# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
345#endif
346
347#ifndef this_cpu_inc
348# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
349#endif
350
351#ifndef this_cpu_dec
352# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
353#endif
354
355#ifndef this_cpu_and
356# ifndef this_cpu_and_1
357# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
358# endif
359# ifndef this_cpu_and_2
360# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
361# endif
362# ifndef this_cpu_and_4
363# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
364# endif
365# ifndef this_cpu_and_8
366# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
367# endif
368# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
369#endif
370
371#ifndef this_cpu_or
372# ifndef this_cpu_or_1
373# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
374# endif
375# ifndef this_cpu_or_2
376# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
377# endif
378# ifndef this_cpu_or_4
379# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
380# endif
381# ifndef this_cpu_or_8
382# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
383# endif
384# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
385#endif
386
387#define _this_cpu_generic_add_return(pcp, val) \
388({ \
389 typeof(pcp) ret__; \
390 unsigned long flags; \
391 raw_local_irq_save(flags); \
392 raw_cpu_add(pcp, val); \
393 ret__ = raw_cpu_read(pcp); \
394 raw_local_irq_restore(flags); \
395 ret__; \
396})
397
398#ifndef this_cpu_add_return
399# ifndef this_cpu_add_return_1
400# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
401# endif
402# ifndef this_cpu_add_return_2
403# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
404# endif
405# ifndef this_cpu_add_return_4
406# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
407# endif
408# ifndef this_cpu_add_return_8
409# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
410# endif
411# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
412#endif
413
414#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
415#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
416#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
417
418#define _this_cpu_generic_xchg(pcp, nval) \
419({ typeof(pcp) ret__; \
420 unsigned long flags; \
421 raw_local_irq_save(flags); \
422 ret__ = raw_cpu_read(pcp); \
423 raw_cpu_write(pcp, nval); \
424 raw_local_irq_restore(flags); \
425 ret__; \
426})
427
428#ifndef this_cpu_xchg
429# ifndef this_cpu_xchg_1
430# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
431# endif
432# ifndef this_cpu_xchg_2
433# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
434# endif
435# ifndef this_cpu_xchg_4
436# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
437# endif
438# ifndef this_cpu_xchg_8
439# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
440# endif
441# define this_cpu_xchg(pcp, nval) \
442 __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
443#endif
444
445#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
446({ \
447 typeof(pcp) ret__; \
448 unsigned long flags; \
449 raw_local_irq_save(flags); \
450 ret__ = raw_cpu_read(pcp); \
451 if (ret__ == (oval)) \
452 raw_cpu_write(pcp, nval); \
453 raw_local_irq_restore(flags); \
454 ret__; \
455})
456
457#ifndef this_cpu_cmpxchg
458# ifndef this_cpu_cmpxchg_1
459# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
460# endif
461# ifndef this_cpu_cmpxchg_2
462# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
463# endif
464# ifndef this_cpu_cmpxchg_4
465# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
466# endif
467# ifndef this_cpu_cmpxchg_8
468# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
469# endif
470# define this_cpu_cmpxchg(pcp, oval, nval) \
471 __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
472#endif
473
474/*
475 * cmpxchg_double replaces two adjacent scalars at once. The first
476 * two parameters are per cpu variables which have to be of the same
477 * size. A truth value is returned to indicate success or failure
478 * (since a double register result is difficult to handle). There is
479 * very limited hardware support for these operations, so only certain
480 * sizes may work.
481 */
482#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
483({ \
484 int ret__; \
485 unsigned long flags; \
486 raw_local_irq_save(flags); \
487 ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
488 oval1, oval2, nval1, nval2); \
489 raw_local_irq_restore(flags); \
490 ret__; \
491})
492
493#ifndef this_cpu_cmpxchg_double
494# ifndef this_cpu_cmpxchg_double_1
495# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
496 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
497# endif
498# ifndef this_cpu_cmpxchg_double_2
499# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
500 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
501# endif
502# ifndef this_cpu_cmpxchg_double_4
503# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
504 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
505# endif
506# ifndef this_cpu_cmpxchg_double_8
507# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
508 _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
509# endif
510# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
511 __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
512#endif
513
514/*
515 * Generic percpu operations for contexts where we do not want to do
516 * any checks for preemptions.
517 *
518 * If there is no other protection through preempt disable and/or
519 * disabling interrupts then one of these RMW operations can show unexpected
520 * behavior because the execution thread was rescheduled on another processor
521 * or an interrupt occurred and the same percpu variable was modified from
522 * the interrupt context.
523 */
524#ifndef raw_cpu_read
525# ifndef raw_cpu_read_1
526# define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
527# endif
528# ifndef raw_cpu_read_2
529# define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
530# endif
531# ifndef raw_cpu_read_4
532# define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
533# endif
534# ifndef raw_cpu_read_8
535# define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
536# endif
537# define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp))
538#endif
539
540#define raw_cpu_generic_to_op(pcp, val, op) \
541do { \
542 *raw_cpu_ptr(&(pcp)) op val; \
543} while (0)
544
545
546#ifndef raw_cpu_write
547# ifndef raw_cpu_write_1
548# define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
549# endif
550# ifndef raw_cpu_write_2
551# define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
552# endif
553# ifndef raw_cpu_write_4
554# define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
555# endif
556# ifndef raw_cpu_write_8
557# define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), =)
558# endif
559# define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val))
560#endif
561
562#ifndef raw_cpu_add
563# ifndef raw_cpu_add_1
564# define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
565# endif
566# ifndef raw_cpu_add_2
567# define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
568# endif
569# ifndef raw_cpu_add_4
570# define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
571# endif
572# ifndef raw_cpu_add_8
573# define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=)
574# endif
575# define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val))
576#endif
577
578#ifndef raw_cpu_sub
579# define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val))
580#endif
581
582#ifndef raw_cpu_inc
583# define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1)
584#endif
585
586#ifndef raw_cpu_dec
587# define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1)
588#endif
589
590#ifndef raw_cpu_and
591# ifndef raw_cpu_and_1
592# define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
593# endif
594# ifndef raw_cpu_and_2
595# define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
596# endif
597# ifndef raw_cpu_and_4
598# define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
599# endif
600# ifndef raw_cpu_and_8
601# define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=)
602# endif
603# define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val))
604#endif
605
606#ifndef raw_cpu_or
607# ifndef raw_cpu_or_1
608# define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
609# endif
610# ifndef raw_cpu_or_2
611# define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
612# endif
613# ifndef raw_cpu_or_4
614# define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
615# endif
616# ifndef raw_cpu_or_8
617# define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=)
618# endif
619# define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val))
620#endif
621
622#define raw_cpu_generic_add_return(pcp, val) \
623({ \
624 raw_cpu_add(pcp, val); \
625 raw_cpu_read(pcp); \
626})
627
628#ifndef raw_cpu_add_return
629# ifndef raw_cpu_add_return_1
630# define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
631# endif
632# ifndef raw_cpu_add_return_2
633# define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
634# endif
635# ifndef raw_cpu_add_return_4
636# define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
637# endif
638# ifndef raw_cpu_add_return_8
639# define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
640# endif
641# define raw_cpu_add_return(pcp, val) \
642 __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
643#endif
644
645#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val))
646#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1)
647#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1)
648
649#define raw_cpu_generic_xchg(pcp, nval) \
650({ typeof(pcp) ret__; \
651 ret__ = raw_cpu_read(pcp); \
652 raw_cpu_write(pcp, nval); \
653 ret__; \
654})
655
656#ifndef raw_cpu_xchg
657# ifndef raw_cpu_xchg_1
658# define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
659# endif
660# ifndef raw_cpu_xchg_2
661# define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
662# endif
663# ifndef raw_cpu_xchg_4
664# define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
665# endif
666# ifndef raw_cpu_xchg_8
667# define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
668# endif
669# define raw_cpu_xchg(pcp, nval) \
670 __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
671#endif
672
673#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
674({ \
675 typeof(pcp) ret__; \
676 ret__ = raw_cpu_read(pcp); \
677 if (ret__ == (oval)) \
678 raw_cpu_write(pcp, nval); \
679 ret__; \
680})
681
682#ifndef raw_cpu_cmpxchg
683# ifndef raw_cpu_cmpxchg_1
684# define raw_cpu_cmpxchg_1(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
685# endif
686# ifndef raw_cpu_cmpxchg_2
687# define raw_cpu_cmpxchg_2(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
688# endif
689# ifndef raw_cpu_cmpxchg_4
690# define raw_cpu_cmpxchg_4(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
691# endif
692# ifndef raw_cpu_cmpxchg_8
693# define raw_cpu_cmpxchg_8(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval)
694# endif
695# define raw_cpu_cmpxchg(pcp, oval, nval) \
696 __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
697#endif
698
699#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
700({ \
701 int __ret = 0; \
702 if (raw_cpu_read(pcp1) == (oval1) && \
703 raw_cpu_read(pcp2) == (oval2)) { \
704 raw_cpu_write(pcp1, (nval1)); \
705 raw_cpu_write(pcp2, (nval2)); \
706 __ret = 1; \
707 } \
708 (__ret); \
709})
710
711#ifndef raw_cpu_cmpxchg_double
712# ifndef raw_cpu_cmpxchg_double_1
713# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
714 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
715# endif
716# ifndef raw_cpu_cmpxchg_double_2
717# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
718 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
719# endif
720# ifndef raw_cpu_cmpxchg_double_4
721# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
722 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
723# endif
724# ifndef raw_cpu_cmpxchg_double_8
725# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
726 raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
727# endif
728# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
729 __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
730#endif
731
732/*
733 * Generic percpu operations for contexts that are safe from preemption/interrupts.
734 */
735#ifndef __this_cpu_read
736# define __this_cpu_read(pcp) \
737 (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
738#endif
739
740#ifndef __this_cpu_write
741# define __this_cpu_write(pcp, val) \
742do { __this_cpu_preempt_check("write"); \
743 __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \
744} while (0)
745#endif
746
747#ifndef __this_cpu_add
748# define __this_cpu_add(pcp, val) \
749do { __this_cpu_preempt_check("add"); \
750 __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \
751} while (0)
752#endif
753
754#ifndef __this_cpu_sub
755# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
756#endif
757
758#ifndef __this_cpu_inc
759# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
760#endif
761
762#ifndef __this_cpu_dec
763# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
764#endif
765
766#ifndef __this_cpu_and
767# define __this_cpu_and(pcp, val) \
768do { __this_cpu_preempt_check("and"); \
769 __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \
770} while (0)
771
772#endif
773
774#ifndef __this_cpu_or
775# define __this_cpu_or(pcp, val) \
776do { __this_cpu_preempt_check("or"); \
777 __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \
778} while (0)
779#endif
780
781#ifndef __this_cpu_add_return
782# define __this_cpu_add_return(pcp, val) \
783 (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
784#endif
785
786#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
787#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
788#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
789
790#ifndef __this_cpu_xchg
791# define __this_cpu_xchg(pcp, nval) \
792 (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
793#endif
794
795#ifndef __this_cpu_cmpxchg
796# define __this_cpu_cmpxchg(pcp, oval, nval) \
797 (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
798#endif
799
800#ifndef __this_cpu_cmpxchg_double
801# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
802 (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
803#endif
804
805#endif /* __LINUX_PERCPU_H */ 132#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 864ddafad8cc..68041446c450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -536,6 +536,15 @@ struct phy_driver {
536 /* See set_wol, but for checking whether Wake on LAN is enabled. */ 536 /* See set_wol, but for checking whether Wake on LAN is enabled. */
537 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); 537 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
538 538
539 /*
540 * Called to inform a PHY device driver when the core is about to
541 * change the link state. This callback is supposed to be used as
542 * fixup hook for drivers that need to take action when the link
543 * state changes. Drivers are by no means allowed to mess with the
544 * PHY device structure in their implementations.
545 */
546 void (*link_change_notify)(struct phy_device *dev);
547
539 struct device_driver driver; 548 struct device_driver driver;
540}; 549};
541#define to_phy_driver(d) container_of(d, struct phy_driver, driver) 550#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
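A hypothetical driver hook matching the new callback; per the comment above it observes the link-state change without writing to the phy_device:

static void demo_link_change_notify(struct phy_device *dev)
{
	/* read-only peek at the state machine; no writes to *dev */
	if (dev->state == PHY_NOLINK)
		pr_debug("demo PHY %d: link lost\n", dev->addr);
}

static struct phy_driver demo_driver = {
	.link_change_notify	= demo_link_change_notify,
	/* ...other ops elided... */
};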
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h
index c2049e3d7444..748e71642c4a 100644
--- a/include/linux/platform_data/ata-samsung_cf.h
+++ b/include/linux/platform_data/ata-samsung_cf.h
@@ -29,7 +29,6 @@ extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata);
29 29
30/* architecture-specific IDE configuration */ 30/* architecture-specific IDE configuration */
31extern void s3c64xx_ide_setup_gpio(void); 31extern void s3c64xx_ide_setup_gpio(void);
32extern void s5pc100_ide_setup_gpio(void);
33extern void s5pv210_ide_setup_gpio(void); 32extern void s5pv210_ide_setup_gpio(void);
34 33
35#endif /*__ATA_SAMSUNG_CF_H */ 34#endif /*__ATA_SAMSUNG_CF_H */
diff --git a/include/linux/profile.h b/include/linux/profile.h
index aaad3861beb8..b537a25ffa17 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly;
44int profile_init(void); 44int profile_init(void);
45int profile_setup(char *str); 45int profile_setup(char *str);
46void profile_tick(int type); 46void profile_tick(int type);
47int setup_profiling_timer(unsigned int multiplier);
47 48
48/* 49/*
49 * Add multiple profiler hits to a given address: 50 * Add multiple profiler hits to a given address:
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 077904c8b70d..cc79eff4a1ad 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
334 * calling arch_ptrace_stop() when it would be superfluous. For example, 334 * calling arch_ptrace_stop() when it would be superfluous. For example,
335 * if the thread has not been back to user mode since the last stop, the 335 * if the thread has not been back to user mode since the last stop, the
336 * thread state might indicate that nothing needs to be done. 336 * thread state might indicate that nothing needs to be done.
337 *
338 * This is guaranteed to be invoked once before a task stops for ptrace and
339 * may include arch-specific operations necessary prior to a ptrace stop.
337 */ 340 */
338#define arch_ptrace_stop_needed(code, info) (0) 341#define arch_ptrace_stop_needed(code, info) (0)
339#endif 342#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..d231aa17b1d7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/percpu.h>
48#include <asm/barrier.h> 47#include <asm/barrier.h>
49 48
50extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
300#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
301 300
302/* 301/*
303 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
304 */
305
306#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
307DECLARE_PER_CPU(int, rcu_cond_resched_count);
308void rcu_resched(void);
309
310/*
311 * Is it time to report RCU quiescent states?
312 *
313 * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
314 * increment some random CPU's count, and possibly also load the result from
315 * yet another CPU's count. We might even clobber some other CPU's attempt
316 * to zero its counter. This is all OK because the goal is not precision,
317 * but rather reasonable amortization of rcu_note_context_switch() overhead
318 * and extremely high probability of avoiding RCU CPU stall warnings.
319 * Note that this function has to be preempted in just the wrong place,
320 * many thousands of times in a row, for anything bad to happen.
321 */
322static inline bool rcu_should_resched(void)
323{
324 return raw_cpu_inc_return(rcu_cond_resched_count) >=
325 RCU_COND_RESCHED_LIM;
326}
327
328/*
329 * Report quiescent states to RCU if it is time to do so.
330 */
331static inline void rcu_cond_resched(void)
332{
333 if (unlikely(rcu_should_resched()))
334 rcu_resched();
335}
336
337/*
338 * Infrastructure to implement the synchronize_() primitives in 302 * Infrastructure to implement the synchronize_() primitives in
339 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 303 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
340 */ 304 */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
358 * initialization. 322 * initialization.
359 */ 323 */
360#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 324#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
325void init_rcu_head(struct rcu_head *head);
326void destroy_rcu_head(struct rcu_head *head);
361void init_rcu_head_on_stack(struct rcu_head *head); 327void init_rcu_head_on_stack(struct rcu_head *head);
362void destroy_rcu_head_on_stack(struct rcu_head *head); 328void destroy_rcu_head_on_stack(struct rcu_head *head);
363#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 329#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
330static inline void init_rcu_head(struct rcu_head *head)
331{
332}
333
334static inline void destroy_rcu_head(struct rcu_head *head)
335{
336}
337
364static inline void init_rcu_head_on_stack(struct rcu_head *head) 338static inline void init_rcu_head_on_stack(struct rcu_head *head)
365{ 339{
366} 340}
@@ -852,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void)
852 * read-side critical section that would block in a !PREEMPT kernel. 826 * read-side critical section that would block in a !PREEMPT kernel.
853 * But if you want the full story, read on! 827 * But if you want the full story, read on!
854 * 828 *
855 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it 829 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
856 * is illegal to block while in an RCU read-side critical section. In 830 * it is illegal to block while in an RCU read-side critical section.
857 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) 831 * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
858 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may 832 * kernel builds, RCU read-side critical sections may be preempted,
859 * be preempted, but explicit blocking is illegal. Finally, in preemptible 833 * but explicit blocking is illegal. Finally, in preemptible RCU
860 * RCU implementations in real-time (with -rt patchset) kernel builds, 834 * implementations in real-time (with -rt patchset) kernel builds, RCU
861 * RCU read-side critical sections may be preempted and they may also 835 * read-side critical sections may be preempted and they may also block, but
862 * block, but only when acquiring spinlocks that are subject to priority 836 * only when acquiring spinlocks that are subject to priority inheritance.
863 * inheritance.
864 */ 837 */
865static inline void rcu_read_lock(void) 838static inline void rcu_read_lock(void)
866{ 839{
@@ -884,6 +857,34 @@ static inline void rcu_read_lock(void)
884/** 857/**
885 * rcu_read_unlock() - marks the end of an RCU read-side critical section. 858 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
886 * 859 *
860 * In most situations, rcu_read_unlock() is immune from deadlock.
861 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
862 * is responsible for deboosting, which it does via rt_mutex_unlock().
863 * Unfortunately, this function acquires the scheduler's runqueue and
864 * priority-inheritance spinlocks. This means that deadlock could result
865 * if the caller of rcu_read_unlock() already holds one of these locks or
866 * any lock that is ever acquired while holding them.
867 *
868 * That said, RCU readers are never priority boosted unless they were
869 * preempted. Therefore, one way to avoid deadlock is to make sure
870 * that preemption never happens within any RCU read-side critical
871 * section whose outermost rcu_read_unlock() is called with one of
872 * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
873 * a number of ways, for example, by invoking preempt_disable() before
874 * the critical section's outermost rcu_read_lock().
875 *
876 * Given that the set of locks acquired by rt_mutex_unlock() might change
877 * at any time, a somewhat more future-proofed approach is to make sure
 878 * that preemption never happens within any RCU read-side critical
879 * section whose outermost rcu_read_unlock() is called with irqs disabled.
880 * This approach relies on the fact that rt_mutex_unlock() currently only
881 * acquires irq-disabled locks.
882 *
 883 * The second of these two approaches is best in most situations;
884 * however, the first approach can also be useful, at least to those
885 * developers willing to keep abreast of the set of locks acquired by
886 * rt_mutex_unlock().
887 *
887 * See rcu_read_lock() for more information. 888 * See rcu_read_lock() for more information.
888 */ 889 */
889static inline void rcu_read_unlock(void) 890static inline void rcu_read_unlock(void)
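
To make the first of the two avoidance strategies above concrete, here is a minimal sketch. The structure and helper are hypothetical; only the preempt_disable() bracketing around the outermost read-side critical section is the point: a reader that is never preempted is never boosted, so its rcu_read_unlock() never calls rt_mutex_unlock().

#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct foo {			/* hypothetical RCU-protected object */
	int __rcu *ptr;
};

static int read_foo(struct foo *f)
{
	int val;

	preempt_disable();	/* reader can never be preempted here... */
	rcu_read_lock();
	val = *rcu_dereference(f->ptr);
	rcu_read_unlock();	/* ...so it was never boosted: no deboost,
				 * hence no rt_mutex_unlock(), no deadlock */
	preempt_enable();
	return val;
}

The second, irq-based approach constrains callers the same way, but keys off critical sections whose outermost rcu_read_unlock() runs with irqs disabled.
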
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index a2d9d81038d1..14ec18d5e18b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers,
395{ 395{
396} 396}
397 397
398static inline int regulator_can_change_voltage(struct regulator *regulator)
399{
400 return 0;
401}
402
398static inline int regulator_set_voltage(struct regulator *regulator, 403static inline int regulator_set_voltage(struct regulator *regulator,
399 int min_uV, int max_uV) 404 int min_uV, int max_uV)
400{ 405{
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 3aed8d737e1a..1abba5ce2a2f 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -90,11 +90,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
90extern void rt_mutex_destroy(struct rt_mutex *lock); 90extern void rt_mutex_destroy(struct rt_mutex *lock);
91 91
92extern void rt_mutex_lock(struct rt_mutex *lock); 92extern void rt_mutex_lock(struct rt_mutex *lock);
93extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, 93extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
94 int detect_deadlock);
95extern int rt_mutex_timed_lock(struct rt_mutex *lock, 94extern int rt_mutex_timed_lock(struct rt_mutex *lock,
96 struct hrtimer_sleeper *timeout, 95 struct hrtimer_sleeper *timeout);
97 int detect_deadlock);
98 96
99extern int rt_mutex_trylock(struct rt_mutex *lock); 97extern int rt_mutex_trylock(struct rt_mutex *lock);
100 98
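
A hedged before/after sketch of a caller adapting to the dropped detect_deadlock argument; the lock and function are hypothetical:

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(my_lock);	/* hypothetical lock */

static int grab_my_lock(void)
{
	/* before: return rt_mutex_lock_interruptible(&my_lock, 0); */
	return rt_mutex_lock_interruptible(&my_lock);	/* 0 or -EINTR */
}
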
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16/* 16/*
17 * the rw-semaphore definition 17 * the rw-semaphore definition
18 * - if activity is 0 then there are no active readers or writers 18 * - if count is 0 then there are no active readers or writers
19 * - if activity is +ve then that is the number of active readers 19 * - if count is +ve then that is the number of active readers
20 * - if activity is -1 then there is one active writer 20 * - if count is -1 then there is one active writer
21 * - if wait_list is not empty, then there are processes waiting for the semaphore 21 * - if wait_list is not empty, then there are processes waiting for the semaphore
22 */ 22 */
23struct rw_semaphore { 23struct rw_semaphore {
24 __s32 activity; 24 __s32 count;
25 raw_spinlock_t wait_lock; 25 raw_spinlock_t wait_lock;
26 struct list_head wait_list; 26 struct list_head wait_list;
27#ifdef CONFIG_DEBUG_LOCK_ALLOC 27#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16
17#include <linux/atomic.h> 16#include <linux/atomic.h>
17#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
18#include <linux/osq_lock.h>
19#endif
18 20
19struct optimistic_spin_queue;
20struct rw_semaphore; 21struct rw_semaphore;
21 22
22#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 23#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
25/* All arch specific implementations share the same struct */ 26/* All arch specific implementations share the same struct */
26struct rw_semaphore { 27struct rw_semaphore {
27 long count; 28 long count;
28 raw_spinlock_t wait_lock;
29 struct list_head wait_list; 29 struct list_head wait_list;
30#ifdef CONFIG_SMP 30 raw_spinlock_t wait_lock;
31#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
32 struct optimistic_spin_queue osq; /* spinner MCS lock */
31 /* 33 /*
32 * Write owner. Used as a speculative check to see 34 * Write owner. Used as a speculative check to see
33 * if the owner is running on the cpu. 35 * if the owner is running on the cpu.
34 */ 36 */
35 struct task_struct *owner; 37 struct task_struct *owner;
36 struct optimistic_spin_queue *osq; /* spinner MCS lock */
37#endif 38#endif
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 39#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map; 40 struct lockdep_map dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
64# define __RWSEM_DEP_MAP_INIT(lockname) 65# define __RWSEM_DEP_MAP_INIT(lockname)
65#endif 66#endif
66 67
67#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) 68#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
68#define __RWSEM_INITIALIZER(name) \ 69#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
69 { RWSEM_UNLOCKED_VALUE, \
70 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
71 LIST_HEAD_INIT((name).wait_list), \
72 NULL, /* owner */ \
73 NULL /* mcs lock */ \
74 __RWSEM_DEP_MAP_INIT(name) }
75#else 70#else
76#define __RWSEM_INITIALIZER(name) \ 71#define __RWSEM_OPT_INIT(lockname)
77 { RWSEM_UNLOCKED_VALUE, \
78 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
79 LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEP_MAP_INIT(name) }
81#endif 72#endif
82 73
74#define __RWSEM_INITIALIZER(name) \
75 { .count = RWSEM_UNLOCKED_VALUE, \
76 .wait_list = LIST_HEAD_INIT((name).wait_list), \
77 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
78 __RWSEM_OPT_INIT(name) \
79 __RWSEM_DEP_MAP_INIT(name) }
80
83#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85 83
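
For reference, a sketch of what DECLARE_RWSEM(foo) expands to with CONFIG_RWSEM_SPIN_ON_OWNER set, following the designated initializers above:

struct rw_semaphore foo = {
	.count     = RWSEM_UNLOCKED_VALUE,
	.wait_list = LIST_HEAD_INIT(foo.wait_list),
	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(foo.wait_lock),
	/* __RWSEM_OPT_INIT(foo): */
	.osq       = OSQ_LOCK_UNLOCKED,
	.owner     = NULL,
	/* plus __RWSEM_DEP_MAP_INIT(foo) under CONFIG_DEBUG_LOCK_ALLOC */
};
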
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..42cac4dc2157 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@ enum cpu_idle_type {
872#define SD_NUMA 0x4000 /* cross-node balancing */ 872#define SD_NUMA 0x4000 /* cross-node balancing */
873 873
874#ifdef CONFIG_SCHED_SMT 874#ifdef CONFIG_SCHED_SMT
875static inline const int cpu_smt_flags(void) 875static inline int cpu_smt_flags(void)
876{ 876{
877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; 877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
878} 878}
879#endif 879#endif
880 880
881#ifdef CONFIG_SCHED_MC 881#ifdef CONFIG_SCHED_MC
882static inline const int cpu_core_flags(void) 882static inline int cpu_core_flags(void)
883{ 883{
884 return SD_SHARE_PKG_RESOURCES; 884 return SD_SHARE_PKG_RESOURCES;
885} 885}
886#endif 886#endif
887 887
888#ifdef CONFIG_NUMA 888#ifdef CONFIG_NUMA
889static inline const int cpu_numa_flags(void) 889static inline int cpu_numa_flags(void)
890{ 890{
891 return SD_NUMA; 891 return SD_NUMA;
892} 892}
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
999bool cpus_share_cache(int this_cpu, int that_cpu); 999bool cpus_share_cache(int this_cpu, int that_cpu);
1000 1000
1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1002typedef const int (*sched_domain_flags_f)(void); 1002typedef int (*sched_domain_flags_f)(void);
1003 1003
1004#define SDTL_OVERLAP 0x01 1004#define SDTL_OVERLAP 0x01
1005 1005
@@ -1270,9 +1270,6 @@ struct task_struct {
1270#ifdef CONFIG_TREE_PREEMPT_RCU 1270#ifdef CONFIG_TREE_PREEMPT_RCU
1271 struct rcu_node *rcu_blocked_node; 1271 struct rcu_node *rcu_blocked_node;
1272#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1272#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1273#ifdef CONFIG_RCU_BOOST
1274 struct rt_mutex *rcu_boost_mutex;
1275#endif /* #ifdef CONFIG_RCU_BOOST */
1276 1273
1277#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1274#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1278 struct sched_info sched_info; 1275 struct sched_info sched_info;
@@ -1440,8 +1437,6 @@ struct task_struct {
1440 struct rb_node *pi_waiters_leftmost; 1437 struct rb_node *pi_waiters_leftmost;
1441 /* Deadlock detection and priority inheritance handling */ 1438 /* Deadlock detection and priority inheritance handling */
1442 struct rt_mutex_waiter *pi_blocked_on; 1439 struct rt_mutex_waiter *pi_blocked_on;
1443 /* Top pi_waiters task */
1444 struct task_struct *pi_top_task;
1445#endif 1440#endif
1446 1441
1447#ifdef CONFIG_DEBUG_MUTEXES 1442#ifdef CONFIG_DEBUG_MUTEXES
@@ -2009,9 +2004,6 @@ static inline void rcu_copy_process(struct task_struct *p)
2009#ifdef CONFIG_TREE_PREEMPT_RCU 2004#ifdef CONFIG_TREE_PREEMPT_RCU
2010 p->rcu_blocked_node = NULL; 2005 p->rcu_blocked_node = NULL;
2011#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 2006#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2012#ifdef CONFIG_RCU_BOOST
2013 p->rcu_boost_mutex = NULL;
2014#endif /* #ifdef CONFIG_RCU_BOOST */
2015 INIT_LIST_HEAD(&p->rcu_node_entry); 2007 INIT_LIST_HEAD(&p->rcu_node_entry);
2016} 2008}
2017 2009
@@ -2788,7 +2780,7 @@ static inline bool __must_check current_set_polling_and_test(void)
2788 2780
2789 /* 2781 /*
2790 * Polling state must be visible before we test NEED_RESCHED, 2782 * Polling state must be visible before we test NEED_RESCHED,
2791 * paired by resched_task() 2783 * paired by resched_curr()
2792 */ 2784 */
2793 smp_mb__after_atomic(); 2785 smp_mb__after_atomic();
2794 2786
@@ -2806,7 +2798,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
2806 2798
2807 /* 2799 /*
2808 * Polling state must be visible before we test NEED_RESCHED, 2800 * Polling state must be visible before we test NEED_RESCHED,
2809 * paired by resched_task() 2801 * paired by resched_curr()
2810 */ 2802 */
2811 smp_mb__after_atomic(); 2803 smp_mb__after_atomic();
2812 2804
@@ -2838,7 +2830,7 @@ static inline void current_clr_polling(void)
2838 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also 2830 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2839 * fold. 2831 * fold.
2840 */ 2832 */
2841 smp_mb(); /* paired with resched_task() */ 2833 smp_mb(); /* paired with resched_curr() */
2842 2834
2843 preempt_fold_need_resched(); 2835 preempt_fold_need_resched();
2844} 2836}
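
The un-const'd sched_domain_flags_f matters to topology tables; a hedged sketch of a custom level array, modeled on (not copied from) the default topology, installed via set_sched_topology():

static struct sched_domain_topology_level my_topology[] = {	/* hypothetical */
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },	/* no flags callback needed */
	{ NULL, },
};
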
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 535f158977b9..8cf350325dc6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -164,8 +164,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
164static inline unsigned raw_seqcount_begin(const seqcount_t *s) 164static inline unsigned raw_seqcount_begin(const seqcount_t *s)
165{ 165{
166 unsigned ret = ACCESS_ONCE(s->sequence); 166 unsigned ret = ACCESS_ONCE(s->sequence);
167
168 seqcount_lockdep_reader_access(s);
169 smp_rmb(); 167 smp_rmb();
170 return ret & ~1; 168 return ret & ~1;
171} 169}
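
raw_seqcount_begin() now skips the lockdep annotation but keeps its contract: the returned value is forced even, so the usual retry loop (hypothetical data below) always rereads if a writer was active:

#include <linux/seqlock.h>

static seqcount_t pair_seq = SEQCNT_ZERO(pair_seq);	/* hypothetical */
static int pair_a, pair_b;

static void read_pair(int *a, int *b)
{
	unsigned seq;

	do {
		seq = raw_seqcount_begin(&pair_seq);
		*a = pair_a;
		*b = pair_b;
	} while (read_seqcount_retry(&pair_seq, seq));
}
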
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8e98297f1388..ec538fc287a6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,8 +305,6 @@ struct ucred {
305/* IPX options */ 305/* IPX options */
306#define IPX_TYPE 1 306#define IPX_TYPE 1
307 307
308extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
309 int offset, int len);
310extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 308extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
311 struct iovec *iov, 309 struct iovec *iov,
312 int offset, 310 int offset,
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
315 unsigned long nr_segs); 313 unsigned long nr_segs);
316 314
317extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); 315extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
318extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
319 int offset, int len);
320extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 316extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
321extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 317extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
322 318
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ad7dbe2cfecd..1a8959944c5f 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -236,7 +236,7 @@ void * rpc_malloc(struct rpc_task *, size_t);
236void rpc_free(void *); 236void rpc_free(void *);
237int rpciod_up(void); 237int rpciod_up(void);
238void rpciod_down(void); 238void rpciod_down(void);
239int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *)); 239int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
240#ifdef RPC_DEBUG 240#ifdef RPC_DEBUG
241struct net; 241struct net;
242void rpc_show_tasks(struct net *); 242void rpc_show_tasks(struct net *);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index f76994b9396c..519064e0c943 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
327extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); 327extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
328extern int hibernate(void); 328extern int hibernate(void);
329extern bool system_entering_hibernation(void); 329extern bool system_entering_hibernation(void);
330extern bool hibernation_available(void);
330asmlinkage int swsusp_save(void); 331asmlinkage int swsusp_save(void);
331extern struct pbe *restore_pblist; 332extern struct pbe *restore_pblist;
332#else /* CONFIG_HIBERNATION */ 333#else /* CONFIG_HIBERNATION */
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {}
339static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} 340static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
340static inline int hibernate(void) { return -ENOSYS; } 341static inline int hibernate(void) { return -ENOSYS; }
341static inline bool system_entering_hibernation(void) { return false; } 342static inline bool system_entering_hibernation(void) { return false; }
343static inline bool hibernation_available(void) { return false; }
342#endif /* CONFIG_HIBERNATION */ 344#endif /* CONFIG_HIBERNATION */
343 345
344/* Hibernation and suspend events */ 346/* Hibernation and suspend events */
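
hibernation_available() gives trigger paths a single gate; a hedged sketch of how a caller might use it:

static int try_hibernate(void)
{
	if (!hibernation_available())	/* e.g. disabled or not configured */
		return -EPERM;
	return hibernate();
}
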
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773cb9f4c..059052306831 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -12,6 +12,7 @@
12#include <linux/hrtimer.h> 12#include <linux/hrtimer.h>
13#include <linux/context_tracking_state.h> 13#include <linux/context_tracking_state.h>
14#include <linux/cpumask.h> 14#include <linux/cpumask.h>
15#include <linux/sched.h>
15 16
16#ifdef CONFIG_GENERIC_CLOCKEVENTS 17#ifdef CONFIG_GENERIC_CLOCKEVENTS
17 18
@@ -162,6 +163,7 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
162#ifdef CONFIG_NO_HZ_FULL 163#ifdef CONFIG_NO_HZ_FULL
163extern bool tick_nohz_full_running; 164extern bool tick_nohz_full_running;
164extern cpumask_var_t tick_nohz_full_mask; 165extern cpumask_var_t tick_nohz_full_mask;
166extern cpumask_var_t housekeeping_mask;
165 167
166static inline bool tick_nohz_full_enabled(void) 168static inline bool tick_nohz_full_enabled(void)
167{ 169{
@@ -181,7 +183,13 @@ static inline bool tick_nohz_full_cpu(int cpu)
181 183
182extern void tick_nohz_init(void); 184extern void tick_nohz_init(void);
183extern void __tick_nohz_full_check(void); 185extern void __tick_nohz_full_check(void);
184extern void tick_nohz_full_kick(void); 186extern void tick_nohz_full_kick_cpu(int cpu);
187
188static inline void tick_nohz_full_kick(void)
189{
190 tick_nohz_full_kick_cpu(smp_processor_id());
191}
192
185extern void tick_nohz_full_kick_all(void); 193extern void tick_nohz_full_kick_all(void);
186extern void __tick_nohz_task_switch(struct task_struct *tsk); 194extern void __tick_nohz_task_switch(struct task_struct *tsk);
187#else 195#else
@@ -189,11 +197,30 @@ static inline void tick_nohz_init(void) { }
189static inline bool tick_nohz_full_enabled(void) { return false; } 197static inline bool tick_nohz_full_enabled(void) { return false; }
190static inline bool tick_nohz_full_cpu(int cpu) { return false; } 198static inline bool tick_nohz_full_cpu(int cpu) { return false; }
191static inline void __tick_nohz_full_check(void) { } 199static inline void __tick_nohz_full_check(void) { }
200static inline void tick_nohz_full_kick_cpu(int cpu) { }
192static inline void tick_nohz_full_kick(void) { } 201static inline void tick_nohz_full_kick(void) { }
193static inline void tick_nohz_full_kick_all(void) { } 202static inline void tick_nohz_full_kick_all(void) { }
194static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } 203static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
195#endif 204#endif
196 205
206static inline bool is_housekeeping_cpu(int cpu)
207{
208#ifdef CONFIG_NO_HZ_FULL
209 if (tick_nohz_full_enabled())
210 return cpumask_test_cpu(cpu, housekeeping_mask);
211#endif
212 return true;
213}
214
215static inline void housekeeping_affine(struct task_struct *t)
216{
217#ifdef CONFIG_NO_HZ_FULL
218 if (tick_nohz_full_enabled())
219 set_cpus_allowed_ptr(t, housekeeping_mask);
220
221#endif
222}
223
197static inline void tick_nohz_full_check(void) 224static inline void tick_nohz_full_check(void)
198{ 225{
199 if (tick_nohz_full_enabled()) 226 if (tick_nohz_full_enabled())
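
A hedged sketch of a kernel thread using the new helper to stay on housekeeping CPUs; the thread body is hypothetical:

#include <linux/kthread.h>
#include <linux/tick.h>

static int my_worker(void *unused)
{
	housekeeping_affine(current);	/* keep off nohz_full CPUs */

	while (!kthread_should_stop()) {
		/* periodic work that would otherwise perturb nohz_full CPUs */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
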
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 136116924d8d..ea6c9dea79e3 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -25,6 +25,21 @@ trace_seq_init(struct trace_seq *s)
25 s->full = 0; 25 s->full = 0;
26} 26}
27 27
28/**
29 * trace_seq_buffer_ptr - return pointer to next location in buffer
30 * @s: trace sequence descriptor
31 *
 32 * Returns the pointer to the location in the buffer where the
 33 * next write will happen. This is useful for saving the position
 34 * that is about to be written to, so that the result of that
 35 * write can be returned afterwards.
36 */
37static inline unsigned char *
38trace_seq_buffer_ptr(struct trace_seq *s)
39{
40 return s->buffer + s->len;
41}
42
28/* 43/*
29 * Currently only defined when tracing is enabled. 44 * Currently only defined when tracing is enabled.
30 */ 45 */
@@ -36,14 +51,13 @@ int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
36extern int 51extern int
37trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); 52trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
38extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); 53extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
39extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 54extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
40 size_t cnt); 55 int cnt);
41extern int trace_seq_puts(struct trace_seq *s, const char *str); 56extern int trace_seq_puts(struct trace_seq *s, const char *str);
42extern int trace_seq_putc(struct trace_seq *s, unsigned char c); 57extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
43extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); 58extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
44extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, 59extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
45 size_t len); 60 unsigned int len);
46extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
47extern int trace_seq_path(struct trace_seq *s, const struct path *path); 61extern int trace_seq_path(struct trace_seq *s, const struct path *path);
48 62
49extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, 63extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
@@ -71,8 +85,8 @@ static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
71{ 85{
72 return 0; 86 return 0;
73} 87}
74static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 88static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
75 size_t cnt) 89 int cnt)
76{ 90{
77 return 0; 91 return 0;
78} 92}
@@ -85,19 +99,15 @@ static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
85 return 0; 99 return 0;
86} 100}
87static inline int 101static inline int
88trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) 102trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
89{ 103{
90 return 0; 104 return 0;
91} 105}
92static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, 106static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
93 size_t len) 107 unsigned int len)
94{ 108{
95 return 0; 109 return 0;
96} 110}
97static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
98{
99 return NULL;
100}
101static inline int trace_seq_path(struct trace_seq *s, const struct path *path) 111static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
102{ 112{
103 return 0; 113 return 0;
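
A sketch of the save-then-return pattern trace_seq_buffer_ptr() is documented for; the event printer is hypothetical:

static const char *print_my_event(struct trace_seq *s, unsigned long val)
{
	unsigned char *start = trace_seq_buffer_ptr(s);	/* before writing */

	trace_seq_printf(s, "my_event: val=%lu\n", val);
	return (const char *)start;	/* points at just this record */
}
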
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e2231e47cec1..09a7cffc224e 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
94 return i->count; 94 return i->count;
95} 95}
96 96
97static inline void iov_iter_truncate(struct iov_iter *i, size_t count) 97/*
98 * Cap the iov_iter by given limit; note that the second argument is
 99 * *not* the new size - it's an upper limit for it. Passing it a value
100 * greater than the amount of data in iov_iter is fine - it'll just do
101 * nothing in that case.
102 */
103static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
98{ 104{
105 /*
106 * count doesn't have to fit in size_t - comparison extends both
107 * operands to u64 here and any value that would be truncated by
 108 * conversion in assignment is by definition greater than all
109 * values of size_t, including old i->count.
110 */
99 if (i->count > count) 111 if (i->count > count)
100 i->count = count; 112 i->count = count;
101} 113}
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
111 123
112int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); 124int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
113int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); 125int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
114 126int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
127 int offset, int len);
128int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
129 int offset, int len);
115 130
116#endif 131#endif
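
A hedged sketch of the truncate/reexpand pairing the u64 parameter enables; do_the_copy() is a hypothetical stand-in for the actual I/O:

static ssize_t copy_capped(struct iov_iter *iter, u64 limit)
{
	size_t old = iov_iter_count(iter);
	size_t capped, consumed;
	ssize_t ret;

	iov_iter_truncate(iter, limit);	/* safe even if limit > SIZE_MAX */
	capped = iov_iter_count(iter);

	ret = do_the_copy(iter);	/* hypothetical; advances the iter */

	consumed = capped - iov_iter_count(iter);
	iov_iter_reexpand(iter, old - consumed);	/* undo the cap */
	return ret;
}
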
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 1a64b26046ed..9b7de1b46437 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -70,7 +70,9 @@
70 US_FLAG(NEEDS_CAP16, 0x00400000) \ 70 US_FLAG(NEEDS_CAP16, 0x00400000) \
71 /* cannot handle READ_CAPACITY_10 */ \ 71 /* cannot handle READ_CAPACITY_10 */ \
72 US_FLAG(IGNORE_UAS, 0x00800000) \ 72 US_FLAG(IGNORE_UAS, 0x00800000) \
73 /* Device advertises UAS but it is broken */ 73 /* Device advertises UAS but it is broken */ \
74 US_FLAG(BROKEN_FUA, 0x01000000) \
75 /* Cannot handle FUA in WRITE or READ CDBs */ \
74 76
75#define US_FLAG(name, value) US_FL_##name = value , 77#define US_FLAG(name, value) US_FL_##name = value ,
76enum { US_DO_ALL_FLAGS }; 78enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/wait.h b/include/linux/wait.h
index bd68819f0815..6fb1ba5f9b2f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -25,6 +25,7 @@ struct wait_bit_key {
25 void *flags; 25 void *flags;
26 int bit_nr; 26 int bit_nr;
27#define WAIT_ATOMIC_T_BIT_NR -1 27#define WAIT_ATOMIC_T_BIT_NR -1
28 unsigned long private;
28}; 29};
29 30
30struct wait_bit_queue { 31struct wait_bit_queue {
@@ -141,18 +142,19 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
141 list_del(&old->task_list); 142 list_del(&old->task_list);
142} 143}
143 144
145typedef int wait_bit_action_f(struct wait_bit_key *);
144void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 146void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
145void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); 147void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
146void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 148void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
147void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); 149void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
148void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 150void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
149void __wake_up_bit(wait_queue_head_t *, void *, int); 151void __wake_up_bit(wait_queue_head_t *, void *, int);
150int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 152int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
151int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 153int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
152void wake_up_bit(void *, int); 154void wake_up_bit(void *, int);
153void wake_up_atomic_t(atomic_t *); 155void wake_up_atomic_t(atomic_t *);
154int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); 156int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
155int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); 157int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
156int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); 158int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
157wait_queue_head_t *bit_waitqueue(void *, int); 159wait_queue_head_t *bit_waitqueue(void *, int);
158 160
@@ -854,11 +856,14 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
854 (wait)->flags = 0; \ 856 (wait)->flags = 0; \
855 } while (0) 857 } while (0)
856 858
859
860extern int bit_wait(struct wait_bit_key *);
861extern int bit_wait_io(struct wait_bit_key *);
862
857/** 863/**
858 * wait_on_bit - wait for a bit to be cleared 864 * wait_on_bit - wait for a bit to be cleared
859 * @word: the word being waited on, a kernel virtual address 865 * @word: the word being waited on, a kernel virtual address
860 * @bit: the bit of the word being waited on 866 * @bit: the bit of the word being waited on
861 * @action: the function used to sleep, which may take special actions
862 * @mode: the task state to sleep in 867 * @mode: the task state to sleep in
863 * 868 *
864 * There is a standard hashed waitqueue table for generic use. This 869 * There is a standard hashed waitqueue table for generic use. This
@@ -867,9 +872,62 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
867 * call wait_on_bit() in threads waiting for the bit to clear. 872 * call wait_on_bit() in threads waiting for the bit to clear.
868 * One uses wait_on_bit() where one is waiting for the bit to clear, 873 * One uses wait_on_bit() where one is waiting for the bit to clear,
869 * but has no intention of setting it. 874 * but has no intention of setting it.
875 * Returned value will be zero if the bit was cleared, or non-zero
876 * if the process received a signal and the mode permitted wakeup
877 * on that signal.
878 */
879static inline int
880wait_on_bit(void *word, int bit, unsigned mode)
881{
882 if (!test_bit(bit, word))
883 return 0;
884 return out_of_line_wait_on_bit(word, bit,
885 bit_wait,
886 mode);
887}
888
889/**
890 * wait_on_bit_io - wait for a bit to be cleared
891 * @word: the word being waited on, a kernel virtual address
892 * @bit: the bit of the word being waited on
893 * @mode: the task state to sleep in
894 *
895 * Use the standard hashed waitqueue table to wait for a bit
896 * to be cleared. This is similar to wait_on_bit(), but calls
897 * io_schedule() instead of schedule() for the actual waiting.
898 *
899 * Returned value will be zero if the bit was cleared, or non-zero
900 * if the process received a signal and the mode permitted wakeup
901 * on that signal.
902 */
903static inline int
904wait_on_bit_io(void *word, int bit, unsigned mode)
905{
906 if (!test_bit(bit, word))
907 return 0;
908 return out_of_line_wait_on_bit(word, bit,
909 bit_wait_io,
910 mode);
911}
912
913/**
914 * wait_on_bit_action - wait for a bit to be cleared
915 * @word: the word being waited on, a kernel virtual address
916 * @bit: the bit of the word being waited on
917 * @action: the function used to sleep, which may take special actions
918 * @mode: the task state to sleep in
919 *
920 * Use the standard hashed waitqueue table to wait for a bit
921 * to be cleared, and allow the waiting action to be specified.
922 * This is like wait_on_bit() but allows fine control of how the waiting
923 * is done.
924 *
925 * Returned value will be zero if the bit was cleared, or non-zero
926 * if the process received a signal and the mode permitted wakeup
927 * on that signal.
870 */ 928 */
871static inline int 929static inline int
872wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) 930wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
873{ 931{
874 if (!test_bit(bit, word)) 932 if (!test_bit(bit, word))
875 return 0; 933 return 0;
@@ -880,7 +938,6 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
880 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it 938 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
881 * @word: the word being waited on, a kernel virtual address 939 * @word: the word being waited on, a kernel virtual address
882 * @bit: the bit of the word being waited on 940 * @bit: the bit of the word being waited on
883 * @action: the function used to sleep, which may take special actions
884 * @mode: the task state to sleep in 941 * @mode: the task state to sleep in
885 * 942 *
886 * There is a standard hashed waitqueue table for generic use. This 943 * There is a standard hashed waitqueue table for generic use. This
@@ -891,9 +948,61 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
891 * wait_on_bit() in threads waiting to be able to set the bit. 948 * wait_on_bit() in threads waiting to be able to set the bit.
892 * One uses wait_on_bit_lock() where one is waiting for the bit to 949 * One uses wait_on_bit_lock() where one is waiting for the bit to
893 * clear with the intention of setting it, and when done, clearing it. 950 * clear with the intention of setting it, and when done, clearing it.
951 *
952 * Returns zero if the bit was (eventually) found to be clear and was
953 * set. Returns non-zero if a signal was delivered to the process and
954 * the @mode allows that signal to wake the process.
955 */
956static inline int
957wait_on_bit_lock(void *word, int bit, unsigned mode)
958{
959 if (!test_and_set_bit(bit, word))
960 return 0;
961 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
962}
963
964/**
965 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
966 * @word: the word being waited on, a kernel virtual address
967 * @bit: the bit of the word being waited on
968 * @mode: the task state to sleep in
969 *
970 * Use the standard hashed waitqueue table to wait for a bit
971 * to be cleared and then to atomically set it. This is similar
972 * to wait_on_bit(), but calls io_schedule() instead of schedule()
973 * for the actual waiting.
974 *
975 * Returns zero if the bit was (eventually) found to be clear and was
976 * set. Returns non-zero if a signal was delivered to the process and
977 * the @mode allows that signal to wake the process.
978 */
979static inline int
980wait_on_bit_lock_io(void *word, int bit, unsigned mode)
981{
982 if (!test_and_set_bit(bit, word))
983 return 0;
984 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
985}
986
987/**
988 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
989 * @word: the word being waited on, a kernel virtual address
990 * @bit: the bit of the word being waited on
991 * @action: the function used to sleep, which may take special actions
992 * @mode: the task state to sleep in
993 *
994 * Use the standard hashed waitqueue table to wait for a bit
995 * to be cleared and then to set it, and allow the waiting action
996 * to be specified.
997 * This is like wait_on_bit() but allows fine control of how the waiting
998 * is done.
999 *
1000 * Returns zero if the bit was (eventually) found to be clear and was
1001 * set. Returns non-zero if a signal was delivered to the process and
1002 * the @mode allows that signal to wake the process.
894 */ 1003 */
895static inline int 1004static inline int
896wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode) 1005wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
897{ 1006{
898 if (!test_and_set_bit(bit, word)) 1007 if (!test_and_set_bit(bit, word))
899 return 0; 1008 return 0;
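
Converting a caller is mostly deletion; a hedged sketch with a hypothetical flags word and bit:

#define MY_BUSY_BIT 0			/* hypothetical bit */

static int wait_until_idle(unsigned long *flags)
{
	might_sleep();
	/* before: wait_on_bit(flags, MY_BUSY_BIT, my_action, TASK_KILLABLE); */
	return wait_on_bit(flags, MY_BUSY_BIT, TASK_KILLABLE);
}

/* Callers that need io_schedule() switch to wait_on_bit_io(); custom
 * sleep behaviour moves to wait_on_bit_action() with a wait_bit_action_f. */
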
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 5777c13849ba..a219be961c0a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -90,7 +90,6 @@ struct writeback_control {
90 * fs/fs-writeback.c 90 * fs/fs-writeback.c
91 */ 91 */
92struct bdi_writeback; 92struct bdi_writeback;
93int inode_wait(void *);
94void writeback_inodes_sb(struct super_block *, enum wb_reason reason); 93void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
95void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, 94void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
96 enum wb_reason reason); 95 enum wb_reason reason);
@@ -105,7 +104,7 @@ void inode_wait_for_writeback(struct inode *inode);
105static inline void wait_on_inode(struct inode *inode) 104static inline void wait_on_inode(struct inode *inode)
106{ 105{
107 might_sleep(); 106 might_sleep();
108 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); 107 wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
109} 108}
110 109
111/* 110/*