Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blkdev.h        125
-rw-r--r--  include/linux/blktrace_api.h   12
-rw-r--r--  include/linux/init_task.h       1
-rw-r--r--  include/linux/iocontext.h      95
-rw-r--r--  include/linux/ioprio.h         13
-rw-r--r--  include/linux/mv643xx.h        10
-rw-r--r--  include/linux/mv643xx_i2c.h    22
-rw-r--r--  include/linux/scatterlist.h   126
-rw-r--r--  include/linux/sched.h           2
9 files changed, 193 insertions(+), 213 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49b7a4c31a6d..71e7a847dffc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,83 +34,10 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic); /* destructor */
-	void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued; /* queued reads & sync writes */
-	atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
-struct cfq_queue;
-struct cfq_io_context {
-	struct rb_node rb_node;
-	void *key;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	unsigned long last_end_request;
-	sector_t last_request_pos;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-
-	unsigned int seek_samples;
-	u64 seek_total;
-	sector_t seek_mean;
-
-	struct list_head queue_list;
-
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
-};
-
-/*
- * This is the per-process I/O subsystem state. It is refcounted and
- * kmalloc'ed. Currently all fields are modified in process io context
- * (apart from the atomic refcount), so require no locking.
- */
-struct io_context {
-	atomic_t refcount;
-	struct task_struct *task;
-
-	unsigned int ioprio_changed;
-
-	/*
-	 * For request batching
-	 */
-	unsigned long last_waited; /* Time last woken after wait for request */
-	int nr_batch_requests;	/* Number of requests left in the batch */
-
-	struct as_io_context *aic;
-	struct rb_root cic_root;
-	void *ioc_data;
-};
-
-void put_io_context(struct io_context *ioc);
+int put_io_context(struct io_context *ioc);
 void exit_io_context(void);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
@@ -429,6 +356,8 @@ struct request_queue
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
+	void			*dma_drain_buffer;
+	unsigned int		dma_drain_size;
 	unsigned int		dma_alignment;
 
 	struct blk_queue_tag	*queue_tags;
@@ -537,6 +466,8 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -716,29 +647,32 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }
 
 /*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
+ * blk_end_request() and friends.
+ * __blk_end_request() and end_request() must be called with
+ * the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
  */
-extern int end_that_request_first(struct request *, int, int);
-extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *, int);
+extern int blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
+				int bidi_bytes);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
 extern void end_dequeued_request(struct request *, int);
+extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
 
 /*
- * end_that_request_first/chunk() takes an uptodate argument. we account
- * any value <= as an io error. 0 means -EIO for compatability reasons,
- * any other < 0 value is the direct error type. An uptodate value of
- * 1 indicates successful io completion
+ * blk_end_request() takes bytes instead of sectors as a complete size.
+ * blk_rq_bytes() returns bytes left to complete in the entire request.
+ * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
  */
-#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))
+extern unsigned int blk_rq_bytes(struct request *rq);
+extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
@@ -760,6 +694,8 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+			       unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
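The blk_queue_dma_drain() hook added above lets a driver attach a drain buffer that the block layer appends as an extra scatterlist element, for hardware (such as certain ATAPI devices) that insists on transferring more data than the request carries. A minimal setup sketch; the device structure, its field names, and the 256-byte size are illustrative, not from this patch:

	/* illustrative: reserve a small drain area when setting up the queue */
	dev->drain_buf = kmalloc(256, GFP_KERNEL);
	if (dev->drain_buf)
		blk_queue_dma_drain(q, dev->drain_buf, 256);
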
@@ -836,12 +772,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 
 static inline int queue_dma_alignment(struct request_queue *q)
 {
-	int retval = 511;
-
-	if (q && q->dma_alignment)
-		retval = q->dma_alignment;
-
-	return retval;
+	return q ? q->dma_alignment : 511;
 }
 
 /* assumes size > 256 */
@@ -894,6 +825,12 @@ static inline void exit_io_context(void)
 {
 }
 
+static inline int put_io_context(struct io_context *ioc)
+{
+	return 1;
+}
+
+
 #endif /* CONFIG_BLOCK */
 
 #endif
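The completion interface above replaces the sector-based end_that_request_first()/end_that_request_last() pair with byte-based blk_end_request(). A sketch of a converted driver completion path, assuming a hypothetical my_dev driver; only the blk_end_request() call and its return convention come from this patch:

	/* hypothetical driver: finish nr_bytes of the in-flight request */
	static void mydrv_complete(struct my_dev *dev, struct request *rq,
				   int error, unsigned int nr_bytes)
	{
		/*
		 * blk_end_request() takes the queue lock itself and returns
		 * 0 once the whole request is done, non-zero while bytes
		 * remain; __blk_end_request() is the variant for callers
		 * that already hold the lock.
		 */
		if (!blk_end_request(rq, error, nr_bytes))
			dev->in_flight = NULL;	/* request fully completed */
	}
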
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7e11d23ac36a..06dadba349ac 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -148,7 +148,7 @@ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
 extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 extern int do_blk_trace_setup(struct request_queue *q,
-	struct block_device *bdev, struct blk_user_trace_setup *buts);
+	char *name, dev_t dev, struct blk_user_trace_setup *buts);
 
 
 /**
@@ -282,6 +282,11 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
+extern int blk_trace_setup(request_queue_t *q, char *name, dev_t dev,
+			   char __user *arg);
+extern int blk_trace_startstop(request_queue_t *q, int start);
+extern int blk_trace_remove(request_queue_t *q);
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
@@ -290,7 +295,10 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 #define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
 #define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
 #define blk_add_trace_remap(q, bio, dev, f, t)	do {} while (0)
-#define do_blk_trace_setup(q, bdev, buts)	(-ENOTTY)
+#define do_blk_trace_setup(q, name, dev, buts)	(-ENOTTY)
+#define blk_trace_setup(q, name, dev, arg)	(-ENOTTY)
+#define blk_trace_startstop(q, start)		(-ENOTTY)
+#define blk_trace_remove(q)			(-ENOTTY)
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 #endif /* __KERNEL__ */
 #endif
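The newly exported blk_trace_setup()/blk_trace_startstop()/blk_trace_remove() let callers outside __blk_add_trace drive blktrace directly. A hedged dispatch sketch; mydrv_trace_ioctl is illustrative, the BLKTRACE* ioctl numbers are the existing ones from linux/fs.h, and request_queue_t is the old typedef for struct request_queue:

	/* illustrative: route the blktrace ioctls through the new helpers */
	static int mydrv_trace_ioctl(struct request_queue *q, char *name,
				     dev_t dev, unsigned cmd, char __user *arg)
	{
		switch (cmd) {
		case BLKTRACESETUP:
			return blk_trace_setup(q, name, dev, arg);
		case BLKTRACESTART:
			return blk_trace_startstop(q, 1);
		case BLKTRACESTOP:
			return blk_trace_startstop(q, 0);
		case BLKTRACETEARDOWN:
			return blk_trace_remove(q);
		}
		return -ENOTTY;
	}
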
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 796019b22b6f..e6b3f7080679 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -137,7 +137,6 @@ extern struct group_info init_groups;
 		.time_slice	= HZ,					\
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
-	.ioprio		= 0,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
new file mode 100644
index 000000000000..593b222d9dcc
--- /dev/null
+++ b/include/linux/iocontext.h
@@ -0,0 +1,95 @@
+#ifndef IOCONTEXT_H
+#define IOCONTEXT_H
+
+#include <linux/radix-tree.h>
+
+/*
+ * This is the per-process anticipatory I/O scheduler state.
+ */
+struct as_io_context {
+	spinlock_t lock;
+
+	void (*dtor)(struct as_io_context *aic); /* destructor */
+	void (*exit)(struct as_io_context *aic); /* called on task exit */
+
+	unsigned long state;
+	atomic_t nr_queued; /* queued reads & sync writes */
+	atomic_t nr_dispatched; /* number of requests gone to the drivers */
+
+	/* IO History tracking */
+	/* Thinktime */
+	unsigned long last_end_request;
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+	/* Layout pattern */
+	unsigned int seek_samples;
+	sector_t last_request_pos;
+	u64 seek_total;
+	sector_t seek_mean;
+};
+
+struct cfq_queue;
+struct cfq_io_context {
+	void *key;
+	unsigned long dead_key;
+
+	struct cfq_queue *cfqq[2];
+
+	struct io_context *ioc;
+
+	unsigned long last_end_request;
+	sector_t last_request_pos;
+
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	unsigned int seek_samples;
+	u64 seek_total;
+	sector_t seek_mean;
+
+	struct list_head queue_list;
+
+	void (*dtor)(struct io_context *); /* destructor */
+	void (*exit)(struct io_context *); /* called on task exit */
+};
+
+/*
+ * I/O subsystem state of the associated processes.  It is refcounted
+ * and kmalloc'ed. These could be shared between processes.
+ */
+struct io_context {
+	atomic_t refcount;
+	atomic_t nr_tasks;
+
+	/* all the fields below are protected by this lock */
+	spinlock_t lock;
+
+	unsigned short ioprio;
+	unsigned short ioprio_changed;
+
+	/*
+	 * For request batching
+	 */
+	unsigned long last_waited; /* Time last woken after wait for request */
+	int nr_batch_requests;     /* Number of requests left in the batch */
+
+	struct as_io_context *aic;
+	struct radix_tree_root radix_root;
+	void *ioc_data;
+};
+
+static inline struct io_context *ioc_task_link(struct io_context *ioc)
+{
+	/*
+	 * if ref count is zero, don't allow sharing (ioc is going away, it's
+	 * a race).
+	 */
+	if (ioc && atomic_inc_not_zero(&ioc->refcount))
+		return ioc;
+
+	return NULL;
+}
+
+#endif
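The refcount/nr_tasks split and ioc_task_link() exist so an io_context can be shared across tasks at fork time. A simplified sketch of the intended use in the fork path (this is an approximation for illustration, not the kernel's exact copy_io()):

	static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
	{
		struct io_context *ioc = current->io_context;

		if (!ioc)
			return 0;

		if (clone_flags & CLONE_IO) {
			/* NULL means the refcount already hit zero (ioc is dying) */
			tsk->io_context = ioc_task_link(ioc);
			if (!tsk->io_context)
				return -ENOMEM;
		}
		return 0;
	}
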
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index baf29387cab4..2a3bb1bb7433 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -2,6 +2,7 @@
 #define IOPRIO_H
 
 #include <linux/sched.h>
+#include <linux/iocontext.h>
 
 /*
  * Gives us 8 prio classes with 13-bits of data for each class
@@ -45,18 +46,18 @@ enum {
  * the cpu scheduler nice value to an io priority
  */
 #define IOPRIO_NORM	(4)
-static inline int task_ioprio(struct task_struct *task)
+static inline int task_ioprio(struct io_context *ioc)
 {
-	if (ioprio_valid(task->ioprio))
-		return IOPRIO_PRIO_DATA(task->ioprio);
+	if (ioprio_valid(ioc->ioprio))
+		return IOPRIO_PRIO_DATA(ioc->ioprio);
 
 	return IOPRIO_NORM;
 }
 
-static inline int task_ioprio_class(struct task_struct *task)
+static inline int task_ioprio_class(struct io_context *ioc)
 {
-	if (ioprio_valid(task->ioprio))
-		return IOPRIO_PRIO_CLASS(task->ioprio);
+	if (ioprio_valid(ioc->ioprio))
+		return IOPRIO_PRIO_CLASS(ioc->ioprio);
 
 	return IOPRIO_CLASS_BE;
 }
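Since io priority now lives on the io_context rather than the task, callers switch from passing a task_struct to passing its io_context. A before/after sketch (tsk stands for any task pointer; its io_context must be non-NULL for the new form):

	/* before this patch: priority was read off the task */
	class = task_ioprio_class(tsk);

	/* after: it is read off the task's (possibly shared) io_context */
	class = task_ioprio_class(tsk->io_context);
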
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index d2ae6185f03b..69327b7b4ce4 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -15,6 +15,7 @@
 
 #include <asm/types.h>
 #include <linux/mv643xx_eth.h>
+#include <linux/mv643xx_i2c.h>
 
 /****************************************/
 /* Processor Address Space              */
@@ -863,7 +864,6 @@
 /* I2C Registers                        */
 /****************************************/
 
-#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c"
 #define MV64XXX_I2C_OFFSET	0xc000
 #define MV64XXX_I2C_REG_BLOCK_SIZE	0x0020
 
@@ -968,14 +968,6 @@ struct mpsc_pdata {
 	u32	brg_clk_freq;
 };
 
-/* i2c Platform Device, Driver Data */
-struct mv64xxx_i2c_pdata {
-	u32	freq_m;
-	u32	freq_n;
-	u32	timeout;	/* In milliseconds */
-	u32	retries;
-};
-
 /* Watchdog Platform Device, Driver Data */
 #define MV64x60_WDT_NAME	"mv64x60_wdt"
 
diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h
new file mode 100644
index 000000000000..5db5152e9de5
--- /dev/null
+++ b/include/linux/mv643xx_i2c.h
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _MV64XXX_I2C_H_
+#define _MV64XXX_I2C_H_
+
+#include <linux/types.h>
+
+#define MV64XXX_I2C_CTLR_NAME	"mv64xxx_i2c"
+
+/* i2c Platform Device, Driver Data */
+struct mv64xxx_i2c_pdata {
+	u32	freq_m;
+	u32	freq_n;
+	u32	timeout;	/* In milliseconds */
+};
+
+#endif /*_MV64XXX_I2C_H_*/
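A board file would now include the relocated header and hand the pdata to the platform device (note the struct no longer has a retries field). A hedged sketch; the divider and timeout values are placeholders, not taken from any real board:

	#include <linux/platform_device.h>
	#include <linux/mv643xx_i2c.h>

	static struct mv64xxx_i2c_pdata board_i2c_pdata = {
		.freq_m		= 8,	/* placeholder clock divider */
		.freq_n		= 3,	/* placeholder clock divider */
		.timeout	= 1000,	/* ms */
	};

	static struct platform_device board_i2c_dev = {
		.name		= MV64XXX_I2C_CTLR_NAME,
		.id		= 0,
		.dev		= {
			.platform_data = &board_i2c_pdata,
		},
	};
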
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index e3ff21dbac53..a3d567a974e8 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -7,6 +7,12 @@
 #include <linux/string.h>
 #include <asm/io.h>
 
+struct sg_table {
+	struct scatterlist *sgl;	/* the list */
+	unsigned int nents;		/* number of mapped entries */
+	unsigned int orig_nents;	/* original size of list */
+};
+
 /*
  * Notes on SG table design.
  *
@@ -106,31 +112,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
 	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
 }
 
-/**
- * sg_next - return the next scatterlist entry in a list
- * @sg:		The current sg entry
- *
- * Description:
- *   Usually the next entry will be @sg@ + 1, but if this sg element is part
- *   of a chained scatterlist, it could jump to the start of a new
- *   scatterlist array.
- *
- **/
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
-{
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
-	if (sg_is_last(sg))
-		return NULL;
-
-	sg++;
-	if (unlikely(sg_is_chain(sg)))
-		sg = sg_chain_ptr(sg);
-
-	return sg;
-}
-
 /*
  * Loop over each sg element, following the pointer to a new list if necessary
  */
@@ -138,40 +119,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
 /**
- * sg_last - return the last scatterlist entry in a list
- * @sgl:	First entry in the scatterlist
- * @nents:	Number of entries in the scatterlist
- *
- * Description:
- *   Should only be used casually, it (currently) scan the entire list
- *   to get the last entry.
- *
- *   Note that the @sgl@ pointer passed in need not be the first one,
- *   the important bit is that @nents@ denotes the number of entries that
- *   exist from @sgl@.
- *
- **/
-static inline struct scatterlist *sg_last(struct scatterlist *sgl,
-					  unsigned int nents)
-{
-#ifndef ARCH_HAS_SG_CHAIN
-	struct scatterlist *ret = &sgl[nents - 1];
-#else
-	struct scatterlist *sg, *ret = NULL;
-	unsigned int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		ret = sg;
-
-#endif
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
-	BUG_ON(!sg_is_last(ret));
-#endif
-	return ret;
-}
-
-/**
  * sg_chain - Chain two sglists together
  * @prv:	First scatterlist
  * @prv_nents:	Number of entries in prv
@@ -223,47 +170,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
 }
 
 /**
- * sg_init_table - Initialize SG table
- * @sgl:	   The SG table
- * @nents:	   Number of entries in table
- *
- * Notes:
- *   If this is part of a chained sg table, sg_mark_end() should be
- *   used only on the last table part.
- *
- **/
-static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
-{
-	memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-	{
-		unsigned int i;
-		for (i = 0; i < nents; i++)
-			sgl[i].sg_magic = SG_MAGIC;
-	}
-#endif
-	sg_mark_end(&sgl[nents - 1]);
-}
-
-/**
- * sg_init_one - Initialize a single entry sg list
- * @sg:		 SG entry
- * @buf:	 Virtual address for IO
- * @buflen:	 IO length
- *
- * Notes:
- *   This should not be used on a single entry that is part of a larger
- *   table. Use sg_init_table() for that.
- *
- **/
-static inline void sg_init_one(struct scatterlist *sg, const void *buf,
-			       unsigned int buflen)
-{
-	sg_init_table(sg, 1);
-	sg_set_buf(sg, buf, buflen);
-}
-
-/**
  * sg_phys - Return physical address of an sg entry
  * @sg:	     SG entry
  *
@@ -293,4 +199,24 @@ static inline void *sg_virt(struct scatterlist *sg)
 	return page_address(sg_page(sg)) + sg->offset;
 }
 
+struct scatterlist *sg_next(struct scatterlist *);
+struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
+void sg_init_table(struct scatterlist *, unsigned int);
+void sg_init_one(struct scatterlist *, const void *, unsigned int);
+
+typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
+typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
+
+void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
+void sg_free_table(struct sg_table *);
+int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
+		     sg_alloc_fn *);
+int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+
+/*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+ */
+#define SG_MAX_SINGLE_ALLOC		(PAGE_SIZE / sizeof(struct scatterlist))
+
 #endif /* _LINUX_SCATTERLIST_H */
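sg_alloc_table() allocates the scatterlist behind an sg_table, chaining extra pages once nents exceeds SG_MAX_SINGLE_ALLOC, and sg_free_table() tears it down. A minimal usage sketch; map_pages and the pages array are illustrative, not part of this patch:

	/* illustrative: build an sg_table over an array of pages */
	static int map_pages(struct sg_table *table, struct page **pages,
			     unsigned int nents)
	{
		struct scatterlist *sg;
		unsigned int i;
		int ret;

		ret = sg_alloc_table(table, nents, GFP_KERNEL);
		if (ret)
			return ret;

		for_each_sg(table->sgl, sg, table->orig_nents, i)
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);

		return 0;	/* undo later with sg_free_table(table) */
	}
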
diff --git a/include/linux/sched.h b/include/linux/sched.h
index df5b24ee80b3..2d0546e884ea 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
 #define CLONE_NEWPID		0x20000000	/* New pid namespace */
 #define CLONE_NEWNET		0x40000000	/* New network namespace */
+#define CLONE_IO		0x80000000	/* Clone io context */
 
 /*
  * Scheduling policies
@@ -975,7 +976,6 @@ struct task_struct {
 	struct hlist_head preempt_notifiers;
 #endif
 
-	unsigned short ioprio;
 	/*
 	 * fpu_counter contains the number of consecutive context switches
 	 * that the FPU is used. If this is over a threshold, the lazy fpu
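CLONE_IO is a userspace-visible clone() flag: a child created with it shares the parent's io_context, and hence its io priority. An illustrative userspace sketch, assuming a libc whose headers do not define the flag yet:

	#define _GNU_SOURCE
	#include <sched.h>

	#ifndef CLONE_IO
	#define CLONE_IO 0x80000000	/* value introduced by this patch */
	#endif

	static char stack[65536];

	static int worker(void *arg)
	{
		return 0;	/* I/O issued here is billed to the shared context */
	}

	int start_worker(void)
	{
		/* CLONE_IO makes parent and child share one io_context */
		return clone(worker, stack + sizeof(stack),
			     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_IO | SIGCHLD,
			     NULL);
	}
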