Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bio.h           8
-rw-r--r--  include/linux/blk_types.h    10
-rw-r--r--  include/linux/blkdev.h       20
-rw-r--r--  include/linux/drbd.h          6
-rw-r--r--  include/linux/drbd_limits.h   7
-rw-r--r--  include/linux/drbd_nl.h       5
-rw-r--r--  include/linux/elevator.h      8
-rw-r--r--  include/linux/iocontext.h    39
-rw-r--r--  include/linux/iommu.h        10
-rw-r--r--  include/linux/ioprio.h       22
10 files changed, 88 insertions, 47 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4d94eb8bcbcc..26435890dc87 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set
 extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else	/* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif	/* CONFIG_BLK_CGROUP */
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
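
Note: bio_associate_current() tags a bio with the issuing task's io_context and cgroup css so that IO submitted later (e.g. from a helper thread) is still attributed to the original task; per the blk_types.h hunk below, both references are put when the bio is released. A minimal, hypothetical submit-path sketch — only bio_associate_current()/bio_disassociate_task() come from this patch, the surrounding names are assumptions:

    /* Hypothetical submit helper: tag the bio with the current task's
     * ioc/css before it is punted elsewhere, so blk-cgroup charges the
     * original issuer.  With CONFIG_BLK_CGROUP=n the stub returns
     * -ENOENT and the bio is simply submitted untagged. */
    static void example_submit_tagged(struct bio *bio, int rw)
    {
    	bio_associate_current(bio);
    	submit_bio(rw, bio);	/* association dropped on bio release */
    }
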
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4053cbd4490e..0edb65dd8edd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,6 +14,8 @@ struct bio;
 struct bio_integrity_payload;
 struct page;
 struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *, int);
 typedef void (bio_destructor_t) (struct bio *);
 
@@ -66,6 +68,14 @@ struct bio {
 	bio_end_io_t		*bi_end_io;
 
 	void			*bi_private;
+#ifdef CONFIG_BLK_CGROUP
+	/*
+	 * Optional ioc and css associated with this bio.  Put on bio
+	 * release.  Read comment on top of bio_associate_current().
+	 */
+	struct io_context	*bi_ioc;
+	struct cgroup_subsys_state *bi_css;
+#endif
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d4ac24a263e..ba43f408baa3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -32,10 +32,17 @@ struct blk_trace;
 struct request;
 struct sg_io_hdr;
 struct bsg_job;
+struct blkcg_gq;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS		2
+
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
@@ -363,6 +370,11 @@ struct request_queue {
 	struct list_head	timeout_list;
 
 	struct list_head	icq_list;
+#ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+	struct blkcg_gq		*root_blkg;
+	struct list_head	blkg_list;
+#endif
 
 	struct queue_limits	limits;
 
@@ -390,12 +402,17 @@ struct request_queue {
 
 	struct mutex		sysfs_lock;
 
+	int			bypass_depth;
+
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
 	int			bsg_job_size;
 	struct bsg_class_device bsg_dev;
 #endif
 
+#ifdef CONFIG_BLK_CGROUP
+	struct list_head	all_q_node;
+#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;
@@ -407,7 +424,7 @@ struct request_queue {
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
@@ -491,6 +508,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
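
Note: QUEUE_FLAG_BYPASS generalizes the old ELVSWITCH flag; together with the new bypass_depth counter, a queue can be put into bypass mode (acting as a dumb FIFO) in a nestable way. A hedged sketch of the kind of test a hot path can now make — example_want_elevator() is an assumption, the two macros come from this patch:

    /* Hypothetical fast-path check: skip elevator work while the queue
     * is bypassed or being torn down. */
    static bool example_want_elevator(struct request_queue *q)
    {
    	return !blk_queue_bypass(q) && !blk_queue_dead(q);
    }
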
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9e5f5607eba3..47e3d4850584 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.11"
+#define REL_VERSION "8.3.13"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 96
@@ -112,8 +112,8 @@ enum drbd_ret_code {
 	ERR_OPEN_MD_DISK	= 105,
 	ERR_DISK_NOT_BDEV	= 107,
 	ERR_MD_NOT_BDEV		= 108,
-	ERR_DISK_TO_SMALL	= 111,
-	ERR_MD_DISK_TO_SMALL	= 112,
+	ERR_DISK_TOO_SMALL	= 111,
+	ERR_MD_DISK_TOO_SMALL	= 112,
 	ERR_BDCLAIM_DISK	= 114,
 	ERR_BDCLAIM_MD_DISK	= 115,
 	ERR_MD_IDX_INVALID	= 116,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 447c36752385..fb670bf603f7 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -48,6 +48,11 @@
 #define DRBD_TIMEOUT_MAX 600
 #define DRBD_TIMEOUT_DEF 60       /* 6 seconds */
 
+ /* If backing disk takes longer than disk_timeout, mark the disk as failed */
+#define DRBD_DISK_TIMEOUT_MIN 0    /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0    /* disabled */
+
  /* active connection retries when C_WF_CONNECTION */
 #define DRBD_CONNECT_INT_MIN 1
 #define DRBD_CONNECT_INT_MAX 120
@@ -60,7 +65,7 @@
 
  /* timeout for the ping packets.*/
 #define DRBD_PING_TIMEO_MIN  1
-#define DRBD_PING_TIMEO_MAX  100
+#define DRBD_PING_TIMEO_MAX  300
 #define DRBD_PING_TIMEO_DEF  5
 
  /* max number of write requests between write barriers */
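
Note: these limits appear to be in units of 0.1 seconds, consistent with the existing annotations — DRBD_TIMEOUT_DEF 60 is commented "6 seconds", and the new DRBD_DISK_TIMEOUT_MAX of 6000 works out to 6000 × 0.1 s = 600 s = 10 minutes, matching its "10 Minutes" comment.
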
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index ab6159e4fcf0..a8706f08ab36 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -31,9 +31,12 @@ NL_PACKET(disk_conf, 3,
 	NL_INTEGER(	56,	T_MAY_IGNORE,	max_bio_bvecs)
 	NL_BIT(		57,	T_MAY_IGNORE,	no_disk_barrier)
 	NL_BIT(		58,	T_MAY_IGNORE,	no_disk_drain)
+	NL_INTEGER(	89,	T_MAY_IGNORE,	disk_timeout)
 )
 
-NL_PACKET(detach, 4, )
+NL_PACKET(detach, 4,
+	NL_BIT(		88,	T_MANDATORY,	detach_force)
+)
 
 NL_PACKET(net_conf, 5,
 	NL_STRING(	8,	T_MANDATORY,	my_addr,	128)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7d4e0356f329..c03af7687bb4 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,12 +28,13 @@ typedef int (elevator_may_queue_fn) (struct request_queue *, int);
 
 typedef void (elevator_init_icq_fn) (struct io_cq *);
 typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
+				   struct bio *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef void *(elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *);
 typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
@@ -129,7 +130,8 @@ extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
 extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern int elv_set_request(struct request_queue *q, struct request *rq,
+			   struct bio *bio, gfp_t gfp_mask);
 extern void elv_put_request(struct request_queue *, struct request *);
 extern void elv_drain_elevator(struct request_queue *);
 
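
Note: two interface changes for elevators here — set_req now also receives the bio that triggered the request allocation (so a scheduler can consult, e.g., the bio's blkcg association from the blk_types.h hunk above), and init now returns 0/-errno instead of a pointer. A hedged sketch of callbacks conforming to the new typedefs; all example_* names are assumptions:

    /* Hypothetical elevator callbacks matching the revised signatures. */
    static int example_init_queue(struct request_queue *q)
    {
    	/* set up per-queue scheduler data; 0 on success, -errno on failure */
    	return 0;
    }
    
    static int example_set_request(struct request_queue *q, struct request *rq,
    			       struct bio *bio, gfp_t gfp_mask)
    {
    	/* per-request setup can now consult the originating bio */
    	return 0;
    }
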
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1a3018063034..df38db2ef45b 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,11 +6,7 @@
 #include <linux/workqueue.h>
 
 enum {
-	ICQ_IOPRIO_CHANGED	= 1 << 0,
-	ICQ_CGROUP_CHANGED	= 1 << 1,
 	ICQ_EXITED		= 1 << 2,
-
-	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -100,6 +96,7 @@ struct io_cq {
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,29 +117,37 @@ struct io_context {
 	struct work_struct release_work;
 };
 
-static inline struct io_context *ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs.  This function
+ * acquires an active reference on @ioc.  The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
-	/*
-	 * if ref count is zero, don't allow sharing (ioc is going away, it's
-	 * a race).
-	 */
-	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
-		return ioc;
-	}
+	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
 
-	return NULL;
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 					gfp_t gfp_flags, int node);
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
-void ioc_cgroup_changed(struct io_context *ioc);
-unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
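
Note: an io_context now carries two counts — refcount keeps the structure alive, while active_ref tracks users that may still issue IO. ioc_task_link() (above) bumps refcount, active_ref, and nr_tasks in one go. A hedged sketch of the pairing — example_adopt_ioc() and the field assignment are assumptions, the helpers are declared in this patch:

    /* Hypothetical fork-style sharing of a parent's io_context. */
    static void example_adopt_ioc(struct task_struct *child,
    			      struct io_context *ioc)
    {
    	ioc_task_link(ioc);	/* refcount++, active_ref++, nr_tasks++ */
    	child->io_context = ioc;
    	/* teardown side: exit_io_context(child) is expected to undo all
    	 * three, dropping the active side via put_io_context_active() */
    }
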
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d937580417ba..450293f6d68b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -35,12 +35,13 @@ struct iommu_domain;
 #define IOMMU_FAULT_WRITE	0x1
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
-			struct device *, unsigned long, int);
+			struct device *, unsigned long, int, void *);
 
 struct iommu_domain {
 	struct iommu_ops *ops;
 	void *priv;
 	iommu_fault_handler_t handler;
+	void *handler_token;
 };
 
 #define IOMMU_CAP_CACHE_COHERENCY	0x1
@@ -95,7 +96,7 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
 				unsigned long cap);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
-			iommu_fault_handler_t handler);
+			iommu_fault_handler_t handler, void *token);
 extern int iommu_device_group(struct device *dev, unsigned int *groupid);
 
 /**
@@ -132,7 +133,8 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
 	 * invoke it.
 	 */
 	if (domain->handler)
-		ret = domain->handler(domain, dev, iova, flags);
+		ret = domain->handler(domain, dev, iova, flags,
+						domain->handler_token);
 
 	return ret;
 }
@@ -191,7 +193,7 @@ static inline int domain_has_cap(struct iommu_domain *domain,
 }
 
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
-				iommu_fault_handler_t handler)
+				iommu_fault_handler_t handler, void *token)
 {
 }
 
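
Note: the new void *token is opaque driver context — it is stored in the domain at registration time and handed back verbatim as the handler's fifth argument by report_iommu_fault(). A hedged usage sketch; the my_* names are assumptions, the registration call and handler signature come from this patch:

    struct my_ctx { int unit; };	/* hypothetical per-domain context */
    
    static int my_fault_handler(struct iommu_domain *domain,
    			    struct device *dev, unsigned long iova,
    			    int flags, void *token)
    {
    	struct my_ctx *ctx = token;	/* same pointer passed at setup */
    
    	dev_err(dev, "iommu fault on unit %d at iova 0x%lx\n",
    		ctx->unit, iova);
    	return -ENOSYS;	/* unhandled; report_iommu_fault() returns this */
    }
    
    static void my_setup(struct iommu_domain *domain, struct my_ctx *ctx)
    {
    	iommu_set_fault_handler(domain, my_fault_handler, ctx);
    }
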
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 76dad4808847..beb9ce1c2c23 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -42,26 +42,14 @@ enum {
 };
 
 /*
- * if process has set io priority explicitly, use that. if not, convert
- * the cpu scheduler nice value to an io priority
+ * Fallback BE priority
  */
 #define IOPRIO_NORM	(4)
-static inline int task_ioprio(struct io_context *ioc)
-{
-	if (ioprio_valid(ioc->ioprio))
-		return IOPRIO_PRIO_DATA(ioc->ioprio);
-
-	return IOPRIO_NORM;
-}
-
-static inline int task_ioprio_class(struct io_context *ioc)
-{
-	if (ioprio_valid(ioc->ioprio))
-		return IOPRIO_PRIO_CLASS(ioc->ioprio);
-
-	return IOPRIO_CLASS_BE;
-}
 
+/*
+ * if process has set io priority explicitly, use that. if not, convert
+ * the cpu scheduler nice value to an io priority
+ */
 static inline int task_nice_ioprio(struct task_struct *task)
 {
 	return (task_nice(task) + 20) / 5;
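
Note: with task_ioprio()/task_ioprio_class() removed, callers fall back on the arithmetic above: (task_nice(task) + 20) / 5 maps the nice range -20..19 onto ioprio levels 0..7 — nice -20 gives (0)/5 = 0 (highest), nice 0 gives 20/5 = 4 (IOPRIO_NORM), and nice 19 gives 39/5 = 7 (lowest).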