Diffstat (limited to 'include')
-rw-r--r--  include/linux/async_tx.h | 9
-rw-r--r--  include/linux/cgroup.h | 5
-rw-r--r--  include/linux/device-mapper.h | 3
-rw-r--r--  include/linux/dm-dirty-log.h | 13
-rw-r--r--  include/linux/dma_remapping.h | 1
-rw-r--r--  include/linux/dmaengine.h | 30
-rw-r--r--  include/linux/dw_dmac.h | 19
-rw-r--r--  include/linux/ext3_fs.h | 1
-rw-r--r--  include/linux/fscache-cache.h | 505
-rw-r--r--  include/linux/fscache.h | 618
-rw-r--r--  include/linux/hdreg.h | 66
-rw-r--r--  include/linux/hid.h | 23
-rw-r--r--  include/linux/highmem.h | 29
-rw-r--r--  include/linux/i2c/twl4030.h | 47
-rw-r--r--  include/linux/intel-iommu.h | 2
-rw-r--r--  include/linux/iommu.h | 13
-rw-r--r--  include/linux/irqflags.h | 8
-rw-r--r--  include/linux/jbd.h | 7
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/linux/mmc/host.h | 5
-rw-r--r--  include/linux/nfs_fs.h | 13
-rw-r--r--  include/linux/nfs_fs_sb.h | 11
-rw-r--r--  include/linux/nfs_iostat.h | 12
-rw-r--r--  include/linux/page-flags.h | 40
-rw-r--r--  include/linux/pagemap.h | 5
-rw-r--r--  include/linux/pwm.h | 2
-rw-r--r--  include/linux/raid/bitmap.h | 288
-rw-r--r--  include/linux/raid/linear.h | 31
-rw-r--r--  include/linux/raid/md.h | 81
-rw-r--r--  include/linux/raid/md_k.h | 402
-rw-r--r--  include/linux/raid/md_u.h | 35
-rw-r--r--  include/linux/raid/multipath.h | 42
-rw-r--r--  include/linux/raid/pq.h | 132
-rw-r--r--  include/linux/raid/raid0.h | 30
-rw-r--r--  include/linux/raid/raid1.h | 134
-rw-r--r--  include/linux/raid/raid10.h | 123
-rw-r--r--  include/linux/raid/raid5.h | 402
-rw-r--r--  include/linux/raid/xor.h | 2
-rw-r--r--  include/linux/regulator/bq24022.h | 3
-rw-r--r--  include/linux/regulator/consumer.h | 6
-rw-r--r--  include/linux/regulator/driver.h | 81
-rw-r--r--  include/linux/regulator/fixed.h | 3
-rw-r--r--  include/linux/regulator/machine.h | 12
-rw-r--r--  include/linux/slow-work.h | 95
-rw-r--r--  include/linux/smp.h | 7
-rw-r--r--  include/linux/timeriomem-rng.h | 2
-rw-r--r--  include/linux/usb/wusb.h | 3
47 files changed, 1739 insertions, 1664 deletions
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
 /**
  * dma_chan_ref - object used to manage dma channels received from the
  * dmaengine core.
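The new __async_inline annotation exists so that async_tx-style helpers collapse into plain inline synchronous code on CONFIG_HAS_DMA=n builds. A minimal sketch of the intended pattern; my_async_fill() is illustrative and not part of this patch:

#include <linux/async_tx.h>
#include <linux/string.h>

/*
 * With CONFIG_HAS_DMA=n, __async_inline expands to __always_inline and
 * this function folds into its callers as a plain memset; with
 * CONFIG_HAS_DMA=y it stays out of line, so a real offload path could
 * be substituted without changing callers.
 */
static __async_inline struct dma_async_tx_descriptor *
my_async_fill(void *dest, int val, size_t len)
{
        memset(dest, val, len);
        return NULL;    /* NULL tells the caller the op already completed */
}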
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4316a546beb5..665fa70e4094 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -365,7 +365,10 @@ int cgroup_task_count(const struct cgroup *cgrp);
 /* Return true if cgrp is a descendant of the task's cgroup */
 int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
 
-/* Control Group subsystem type. See Documentation/cgroups.txt for details */
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
 
 struct cgroup_subsys {
         struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8209e08969f9..66ec05a57955 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -139,6 +139,9 @@ struct target_type {
         dm_ioctl_fn ioctl;
         dm_merge_fn merge;
         dm_busy_fn busy;
+
+        /* For internal device-mapper use. */
+        struct list_head list;
 };
 
 struct io_restrictions {
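The new list member is threaded by dm_register_target() itself, so a target type now only fills in its ops and leaves .list alone. A sketch modelled on the in-tree dm-zero target; the "zero_*" names here are illustrative:

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/bio.h>

static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        return 0;
}

static void zero_dtr(struct dm_target *ti)
{
}

static int zero_map(struct dm_target *ti, struct bio *bio,
                    union map_info *map_context)
{
        zero_fill_bio(bio);
        bio_endio(bio, 0);
        return DM_MAPIO_SUBMITTED;
}

static struct target_type zero_target = {
        .name    = "example-zero",
        .version = {1, 0, 0},
        .module  = THIS_MODULE,
        .ctr     = zero_ctr,
        .dtr     = zero_dtr,
        .map     = zero_map,
        /* .list is for internal device-mapper use; leave it unset */
};

static int __init dm_zero_example_init(void)
{
        return dm_register_target(&zero_target);
}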
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 600c5fb2daad..5e8b11d88f6f 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
         const char *name;
         struct module *module;
 
+        /* For internal device-mapper use */
+        struct list_head list;
+
         int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned argc, char **argv);
         void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
          */
         int (*status)(struct dm_dirty_log *log, status_type_t status_type,
                       char *result, unsigned maxlen);
+
+        /*
+         * is_remote_recovering is necessary for cluster mirroring. It provides
+         * a way to detect recovery on another node, so we aren't writing
+         * concurrently. This function is likely to block (when a cluster log
+         * is used).
+         *
+         * Returns: 0, 1
+         */
+        int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
 };
 
 int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
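A sketch of how a cluster-aware mirror might consult the new hook before letting a write proceed. The hook may block when a cluster log is in use; write_is_safe() is an illustrative helper, not part of the patch:

#include <linux/dm-dirty-log.h>

static int write_is_safe(struct dm_dirty_log *log, region_t region)
{
        /* optional op: a single-node log never sees remote recovery */
        if (log->type->is_remote_recovering &&
            log->type->is_remote_recovering(log, region))
                return 0;       /* another node owns recovery; hold the write */
        return 1;
}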
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674b..1a455f1f86d7 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
 
 #define DMA_PTE_READ (1)
 #define DMA_PTE_WRITE (2)
+#define DMA_PTE_SNP (1 << 11)
 
 struct intel_iommu;
 struct dmar_domain;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d32..2e2aa3df170c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/kref.h>
-#include <linux/completion.h>
-#include <linux/rcupdate.h>
 #include <linux/dma-mapping.h>
 
 /**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
 struct dma_device {
 
         unsigned int chancnt;
+        unsigned int privatecnt;
         struct list_head channels;
         struct list_head global_node;
         dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
 }
 #endif
 
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get()   dmaengine_get()
+#define async_dmaengine_put()   dmaengine_put()
+#define async_dma_find_channel(type) dma_find_channel(type)
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+        return NULL;
+}
+#endif
+
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
         void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
         set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+        clear_bit(tx_type, dstp->bits);
+}
+
 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
 {
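A sketch of how the async_tx layer is expected to use the new hooks: with CONFIG_ASYNC_TX_DMA=n they compile to no-ops/NULL and the caller silently falls back to a synchronous path. Both helpers below are illustrative:

#include <linux/dmaengine.h>

static struct dma_chan *pick_memcpy_channel(void)
{
        async_dmaengine_get();                  /* take a dmaengine reference */
        return async_dma_find_channel(DMA_MEMCPY);  /* NULL if none/disabled */
}

static void filter_caps_example(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        dma_cap_set(DMA_XOR, mask);
        dma_cap_clear(DMA_XOR, mask);   /* new helper: drop one capability */
}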
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f7..c8aad713a046 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL     (1 << 18)       /* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL     (1 << 19)       /* src handshake active low */
 
+/* DMA API extensions */
+struct dw_cyclic_desc {
+        struct dw_desc  **desc;
+        unsigned long   periods;
+        void            (*period_callback)(void *param);
+        void            *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+                enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
 #endif /* DW_DMAC_H */
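A sketch of the intended call sequence for the cyclic extension, e.g. feeding a continuous audio ring buffer. This assumes the channel was already obtained from dmaengine and that dw_dma_cyclic_prep() uses ERR_PTR-style returns, as the driver implementation does; start_ring()/stop_ring() are illustrative:

#include <linux/dw_dmac.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static int start_ring(struct dma_chan *chan, dma_addr_t buf,
                      size_t buf_len, size_t period_len)
{
        struct dw_cyclic_desc *cdesc;

        /* carve buf into buf_len/period_len periods, looping forever */
        cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                                   DMA_TO_DEVICE);
        if (IS_ERR(cdesc))
                return PTR_ERR(cdesc);

        return dw_dma_cyclic_start(chan);
}

static void stop_ring(struct dma_chan *chan)
{
        dw_dma_cyclic_stop(chan);
        dw_dma_cyclic_free(chan);
}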
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e263acaa405b..634a5e5aba3e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
 #define EXT3_STATE_JDATA                0x00000001 /* journaled data exists */
 #define EXT3_STATE_NEW                  0x00000002 /* inode is newly created */
 #define EXT3_STATE_XATTR                0x00000004 /* has in-inode xattrs */
+#define EXT3_STATE_FLUSH_ON_CLOSE       0x00000008
 
 /* Used to pass group descriptor data when online resize is done */
 struct ext3_new_group_input {
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 000000000000..84d3532dd3ea
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,505 @@
+/* General filesystem caching backing cache interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ *      Documentation/filesystems/caching/backend-api.txt
+ *
+ * for a description of the cache backend interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_CACHE_H
+#define _LINUX_FSCACHE_CACHE_H
+
+#include <linux/fscache.h>
+#include <linux/sched.h>
+#include <linux/slow-work.h>
+
+#define NR_MAXCACHES BITS_PER_LONG
+
+struct fscache_cache;
+struct fscache_cache_ops;
+struct fscache_object;
+struct fscache_operation;
+
+/*
+ * cache tag definition
+ */
+struct fscache_cache_tag {
+        struct list_head        link;
+        struct fscache_cache    *cache;         /* cache referred to by this tag */
+        unsigned long           flags;
+#define FSCACHE_TAG_RESERVED    0               /* T if tag is reserved for a cache */
+        atomic_t                usage;
+        char                    name[0];        /* tag name */
+};
+
+/*
+ * cache definition
+ */
+struct fscache_cache {
+        const struct fscache_cache_ops *ops;
+        struct fscache_cache_tag *tag;          /* tag representing this cache */
+        struct kobject          *kobj;          /* system representation of this cache */
+        struct list_head        link;           /* link in list of caches */
+        size_t                  max_index_size; /* maximum size of index data */
+        char                    identifier[36]; /* cache label */
+
+        /* node management */
+        struct work_struct      op_gc;          /* operation garbage collector */
+        struct list_head        object_list;    /* list of data/index objects */
+        struct list_head        op_gc_list;     /* list of ops to be deleted */
+        spinlock_t              object_list_lock;
+        spinlock_t              op_gc_list_lock;
+        atomic_t                object_count;   /* no. of live objects in this cache */
+        struct fscache_object   *fsdef;         /* object for the fsdef index */
+        unsigned long           flags;
+#define FSCACHE_IOERROR         0       /* cache stopped on I/O error */
+#define FSCACHE_CACHE_WITHDRAWN 1       /* cache has been withdrawn */
+};
+
+extern wait_queue_head_t fscache_cache_cleared_wq;
+
+/*
+ * operation to be applied to a cache object
+ * - retrieval initiation operations are done in the context of the process
+ * that issued them, and not in an async thread pool
+ */
+typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+
+struct fscache_operation {
+        union {
+                struct work_struct fast_work;   /* record for fast ops */
+                struct slow_work slow_work;     /* record for (very) slow ops */
+        };
+        struct list_head        pend_link;      /* link in object->pending_ops */
+        struct fscache_object   *object;        /* object to be operated upon */
+
+        unsigned long           flags;
+#define FSCACHE_OP_TYPE         0x000f  /* operation type */
+#define FSCACHE_OP_FAST         0x0001  /* - fast op, processor may not sleep for disk */
+#define FSCACHE_OP_SLOW         0x0002  /* - (very) slow op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD     0x0003  /* - processing is done be issuing thread, not pool */
+#define FSCACHE_OP_WAITING      4       /* cleared when op is woken */
+#define FSCACHE_OP_EXCLUSIVE    5       /* exclusive op, other ops must wait */
+#define FSCACHE_OP_DEAD         6       /* op is now dead */
+
+        atomic_t                usage;
+        unsigned                debug_id;       /* debugging ID */
+
+        /* operation processor callback
+         * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
+         * the op in a non-pool thread */
+        fscache_operation_processor_t processor;
+
+        /* operation releaser */
+        fscache_operation_release_t release;
+};
+
+extern atomic_t fscache_op_debug_id;
+extern const struct slow_work_ops fscache_op_slow_work_ops;
+
+extern void fscache_enqueue_operation(struct fscache_operation *);
+extern void fscache_put_operation(struct fscache_operation *);
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation. The caller must still set flags,
+ * object, either fast_work or slow_work if necessary, and processor if needed.
+ */
+static inline void fscache_operation_init(struct fscache_operation *op,
+                                          fscache_operation_release_t release)
+{
+        atomic_set(&op->usage, 1);
+        op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+        op->release = release;
+        INIT_LIST_HEAD(&op->pend_link);
+}
+
+/**
+ * fscache_operation_init_slow - Do additional initialisation of a slow op
+ * @op: The operation to initialise
+ * @processor: The processor function to assign
+ *
+ * Do additional initialisation of an operation as required for slow work.
+ */
+static inline
+void fscache_operation_init_slow(struct fscache_operation *op,
+                                 fscache_operation_processor_t processor)
+{
+        op->processor = processor;
+        slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
+}
+
+/*
+ * data read operation
+ */
+struct fscache_retrieval {
+        struct fscache_operation op;
+        struct address_space    *mapping;       /* netfs pages */
+        fscache_rw_complete_t   end_io_func;    /* function to call on I/O completion */
+        void                    *context;       /* netfs read context (pinned) */
+        struct list_head        to_do;          /* list of things to be done by the backend */
+        unsigned long           start_time;     /* time at which retrieval started */
+};
+
+typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
+                                             struct page *page,
+                                             gfp_t gfp);
+
+typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
+                                              struct list_head *pages,
+                                              unsigned *nr_pages,
+                                              gfp_t gfp);
+
+/**
+ * fscache_get_retrieval - Get an extra reference on a retrieval operation
+ * @op: The retrieval operation to get a reference on
+ *
+ * Get an extra reference on a retrieval operation.
+ */
+static inline
+struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
+{
+        atomic_inc(&op->op.usage);
+        return op;
+}
+
+/**
+ * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
+ * @op: The retrieval operation affected
+ *
+ * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
+ */
+static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
+{
+        fscache_enqueue_operation(&op->op);
+}
+
+/**
+ * fscache_put_retrieval - Drop a reference to a retrieval operation
+ * @op: The retrieval operation affected
+ *
+ * Drop a reference to a retrieval operation.
+ */
+static inline void fscache_put_retrieval(struct fscache_retrieval *op)
+{
+        fscache_put_operation(&op->op);
+}
+
+/*
+ * cached page storage work item
+ * - used to do three things:
+ * - batch writes to the cache
+ * - do cache writes asynchronously
+ * - defer writes until cache object lookup completion
+ */
+struct fscache_storage {
+        struct fscache_operation op;
+        pgoff_t                 store_limit;    /* don't write more than this */
+};
+
+/*
+ * cache operations
+ */
+struct fscache_cache_ops {
+        /* name of cache provider */
+        const char *name;
+
+        /* allocate an object record for a cookie */
+        struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
+                                               struct fscache_cookie *cookie);
+
+        /* look up the object for a cookie */
+        void (*lookup_object)(struct fscache_object *object);
+
+        /* finished looking up */
+        void (*lookup_complete)(struct fscache_object *object);
+
+        /* increment the usage count on this object (may fail if unmounting) */
+        struct fscache_object *(*grab_object)(struct fscache_object *object);
+
+        /* pin an object in the cache */
+        int (*pin_object)(struct fscache_object *object);
+
+        /* unpin an object in the cache */
+        void (*unpin_object)(struct fscache_object *object);
+
+        /* store the updated auxilliary data on an object */
+        void (*update_object)(struct fscache_object *object);
+
+        /* discard the resources pinned by an object and effect retirement if
+         * necessary */
+        void (*drop_object)(struct fscache_object *object);
+
+        /* dispose of a reference to an object */
+        void (*put_object)(struct fscache_object *object);
+
+        /* sync a cache */
+        void (*sync_cache)(struct fscache_cache *cache);
+
+        /* notification that the attributes of a non-index object (such as
+         * i_size) have changed */
+        int (*attr_changed)(struct fscache_object *object);
+
+        /* reserve space for an object's data and associated metadata */
+        int (*reserve_space)(struct fscache_object *object, loff_t i_size);
+
+        /* request a backing block for a page be read or allocated in the
+         * cache */
+        fscache_page_retrieval_func_t read_or_alloc_page;
+
+        /* request backing blocks for a list of pages be read or allocated in
+         * the cache */
+        fscache_pages_retrieval_func_t read_or_alloc_pages;
+
+        /* request a backing block for a page be allocated in the cache so that
+         * it can be written directly */
+        fscache_page_retrieval_func_t allocate_page;
+
+        /* request backing blocks for pages be allocated in the cache so that
+         * they can be written directly */
+        fscache_pages_retrieval_func_t allocate_pages;
+
+        /* write a page to its backing block in the cache */
+        int (*write_page)(struct fscache_storage *op, struct page *page);
+
+        /* detach backing block from a page (optional)
+         * - must release the cookie lock before returning
+         * - may sleep
+         */
+        void (*uncache_page)(struct fscache_object *object,
+                             struct page *page);
+
+        /* dissociate a cache from all the pages it was backing */
+        void (*dissociate_pages)(struct fscache_cache *cache);
+};
+
+/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ * constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+        atomic_t                usage;          /* number of users of this cookie */
+        atomic_t                n_children;     /* number of children of this cookie */
+        spinlock_t              lock;
+        struct hlist_head       backing_objects; /* object(s) backing this file/index */
+        const struct fscache_cookie_def *def;   /* definition */
+        struct fscache_cookie   *parent;        /* parent of this entry */
+        void                    *netfs_data;    /* back pointer to netfs */
+        struct radix_tree_root  stores;         /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG      0       /* pages tag: pending write to cache */
+
+        unsigned long           flags;
+#define FSCACHE_COOKIE_LOOKING_UP       0       /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_CREATING         1       /* T if non-index object being created still */
+#define FSCACHE_COOKIE_NO_DATA_YET      2       /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_PENDING_FILL     3       /* T if pending initial fill on object */
+#define FSCACHE_COOKIE_FILLING          4       /* T if filling object incrementally */
+#define FSCACHE_COOKIE_UNAVAILABLE      5       /* T if cookie is unavailable (error, etc) */
+};
+
+extern struct fscache_cookie fscache_fsdef_index;
+
+/*
+ * on-disk cache file or index handle
+ */
+struct fscache_object {
+        enum fscache_object_state {
+                FSCACHE_OBJECT_INIT,            /* object in initial unbound state */
+                FSCACHE_OBJECT_LOOKING_UP,      /* looking up object */
+                FSCACHE_OBJECT_CREATING,        /* creating object */
+
+                /* active states */
+                FSCACHE_OBJECT_AVAILABLE,       /* cleaning up object after creation */
+                FSCACHE_OBJECT_ACTIVE,          /* object is usable */
+                FSCACHE_OBJECT_UPDATING,        /* object is updating */
+
+                /* terminal states */
+                FSCACHE_OBJECT_DYING,           /* object waiting for accessors to finish */
+                FSCACHE_OBJECT_LC_DYING,        /* object cleaning up after lookup/create */
+                FSCACHE_OBJECT_ABORT_INIT,      /* abort the init state */
+                FSCACHE_OBJECT_RELEASING,       /* releasing object */
+                FSCACHE_OBJECT_RECYCLING,       /* retiring object */
+                FSCACHE_OBJECT_WITHDRAWING,     /* withdrawing object */
+                FSCACHE_OBJECT_DEAD,            /* object is now dead */
+        } state;
+
+        int                     debug_id;       /* debugging ID */
+        int                     n_children;     /* number of child objects */
+        int                     n_ops;          /* number of ops outstanding on object */
+        int                     n_obj_ops;      /* number of object ops outstanding on object */
+        int                     n_in_progress;  /* number of ops in progress */
+        int                     n_exclusive;    /* number of exclusive ops queued */
+        spinlock_t              lock;           /* state and operations lock */
+
+        unsigned long           lookup_jif;     /* time at which lookup started */
+        unsigned long           event_mask;     /* events this object is interested in */
+        unsigned long           events;         /* events to be processed by this object
+                                                 * (order is important - using fls) */
+#define FSCACHE_OBJECT_EV_REQUEUE       0       /* T if object should be requeued */
+#define FSCACHE_OBJECT_EV_UPDATE        1       /* T if object should be updated */
+#define FSCACHE_OBJECT_EV_CLEARED       2       /* T if accessors all gone */
+#define FSCACHE_OBJECT_EV_ERROR         3       /* T if fatal error occurred during processing */
+#define FSCACHE_OBJECT_EV_RELEASE       4       /* T if netfs requested object release */
+#define FSCACHE_OBJECT_EV_RETIRE        5       /* T if netfs requested object retirement */
+#define FSCACHE_OBJECT_EV_WITHDRAW      6       /* T if cache requested object withdrawal */
+
+        unsigned long           flags;
+#define FSCACHE_OBJECT_LOCK             0       /* T if object is busy being processed */
+#define FSCACHE_OBJECT_PENDING_WRITE    1       /* T if object has pending write */
+#define FSCACHE_OBJECT_WAITING          2       /* T if object is waiting on its parent */
+
+        struct list_head        cache_link;     /* link in cache->object_list */
+        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
+        struct fscache_cache    *cache;         /* cache that supplied this object */
+        struct fscache_cookie   *cookie;        /* netfs's file/index object */
+        struct fscache_object   *parent;        /* parent object */
+        struct slow_work        work;           /* attention scheduling record */
+        struct list_head        dependents;     /* FIFO of dependent objects */
+        struct list_head        dep_link;       /* link in parent's dependents list */
+        struct list_head        pending_ops;    /* unstarted operations on this object */
+        pgoff_t                 store_limit;    /* current storage limit */
+};
+
+extern const char *fscache_object_states[];
+
+#define fscache_object_is_active(obj) \
+        (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
+         (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
+         (obj)->state < FSCACHE_OBJECT_DYING)
+
+extern const struct slow_work_ops fscache_object_slow_work_ops;
+
+/**
+ * fscache_object_init - Initialise a cache object description
+ * @object: Object description
+ *
+ * Initialise a cache object description to its basic values.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_object_init(struct fscache_object *object,
+                         struct fscache_cookie *cookie,
+                         struct fscache_cache *cache)
+{
+        atomic_inc(&cache->object_count);
+
+        object->state = FSCACHE_OBJECT_INIT;
+        spin_lock_init(&object->lock);
+        INIT_LIST_HEAD(&object->cache_link);
+        INIT_HLIST_NODE(&object->cookie_link);
+        vslow_work_init(&object->work, &fscache_object_slow_work_ops);
+        INIT_LIST_HEAD(&object->dependents);
+        INIT_LIST_HEAD(&object->dep_link);
+        INIT_LIST_HEAD(&object->pending_ops);
+        object->n_children = 0;
+        object->n_ops = object->n_in_progress = object->n_exclusive = 0;
+        object->events = object->event_mask = 0;
+        object->flags = 0;
+        object->store_limit = 0;
+        object->cache = cache;
+        object->cookie = cookie;
+        object->parent = NULL;
+}
+
+extern void fscache_object_lookup_negative(struct fscache_object *object);
+extern void fscache_obtained_object(struct fscache_object *object);
+
+/**
+ * fscache_object_destroyed - Note destruction of an object in a cache
+ * @cache: The cache from which the object came
+ *
+ * Note the destruction and deallocation of an object record in a cache.
+ */
+static inline void fscache_object_destroyed(struct fscache_cache *cache)
+{
+        if (atomic_dec_and_test(&cache->object_count))
+                wake_up_all(&fscache_cache_cleared_wq);
+}
+
+/**
+ * fscache_object_lookup_error - Note an object encountered an error
+ * @object: The object on which the error was encountered
+ *
+ * Note that an object encountered a fatal error (usually an I/O error) and
+ * that it should be withdrawn as soon as possible.
+ */
+static inline void fscache_object_lookup_error(struct fscache_object *object)
+{
+        set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+}
+
+/**
+ * fscache_set_store_limit - Set the maximum size to be stored in an object
+ * @object: The object to set the maximum on
+ * @i_size: The limit to set in bytes
+ *
+ * Set the maximum size an object is permitted to reach, implying the highest
+ * byte that may be written. Intended to be called by the attr_changed() op.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+{
+        object->store_limit = i_size >> PAGE_SHIFT;
+        if (i_size & ~PAGE_MASK)
+                object->store_limit++;
+}
+
+/**
+ * fscache_end_io - End a retrieval operation on a page
+ * @op: The FS-Cache operation covering the retrieval
+ * @page: The page that was to be fetched
+ * @error: The error code (0 if successful)
+ *
+ * Note the end of an operation to retrieve a page, as covered by a particular
+ * operation record.
+ */
+static inline void fscache_end_io(struct fscache_retrieval *op,
+                                  struct page *page, int error)
+{
+        op->end_io_func(page, op->context, error);
+}
+
+/*
+ * out-of-line cache backend functions
+ */
+extern void fscache_init_cache(struct fscache_cache *cache,
+                               const struct fscache_cache_ops *ops,
+                               const char *idfmt,
+                               ...) __attribute__ ((format (printf, 3, 4)));
+
+extern int fscache_add_cache(struct fscache_cache *cache,
+                             struct fscache_object *fsdef,
+                             const char *tagname);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+
+extern void fscache_io_error(struct fscache_cache *cache);
+
+extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
+                                      struct pagevec *pagevec);
+
+extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+                                               const void *data,
+                                               uint16_t datalen);
+
+#endif /* _LINUX_FSCACHE_CACHE_H */
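A sketch of the shape of a cache backend bringing a cache online with the out-of-line helpers declared above (CacheFiles is the intended in-tree user). Everything named "mycache_*" here is illustrative, and the ops table is left empty where a real backend would fill it in:

#include <linux/fscache-cache.h>

struct mycache {
        struct fscache_cache    cache;
        struct fscache_object   *fsdef;
        const char              *id;
        const char              *tag_name;
};

/* a real backend populates every mandatory op; left empty in this sketch */
static const struct fscache_cache_ops mycache_ops;

/* backend-specific allocation of the root ("fsdef") index object */
extern struct fscache_object *mycache_alloc_fsdef(struct mycache *c);

static int mycache_bring_online(struct mycache *c)
{
        fscache_init_cache(&c->cache, &mycache_ops, "mycache-%s", c->id);

        c->fsdef = mycache_alloc_fsdef(c);
        if (!c->fsdef)
                return -ENOMEM;

        /* makes the cache live and binds it to the named tag */
        return fscache_add_cache(&c->cache, c->fsdef, c->tag_name);
}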
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
new file mode 100644
index 000000000000..6d8ee466e0a0
--- /dev/null
+++ b/include/linux/fscache.h
@@ -0,0 +1,618 @@
+/* General filesystem caching interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ *      Documentation/filesystems/caching/netfs-api.txt
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_H
+#define _LINUX_FSCACHE_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+
+#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define fscache_available() (1)
+#define fscache_cookie_valid(cookie) (cookie)
+#else
+#define fscache_available() (0)
+#define fscache_cookie_valid(cookie) (0)
+#endif
+
+
+/*
+ * overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define PageFsCache(page)               PagePrivate2((page))
+#define SetPageFsCache(page)            SetPagePrivate2((page))
+#define ClearPageFsCache(page)          ClearPagePrivate2((page))
+#define TestSetPageFsCache(page)        TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page)      TestClearPagePrivate2((page))
+
+/* pattern used to fill dead space in an index entry */
+#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
+
+struct pagevec;
+struct fscache_cache_tag;
+struct fscache_cookie;
+struct fscache_netfs;
+
+typedef void (*fscache_rw_complete_t)(struct page *page,
+                                      void *context,
+                                      int error);
+
+/* result of index entry consultation */
+enum fscache_checkaux {
+        FSCACHE_CHECKAUX_OKAY,          /* entry okay as is */
+        FSCACHE_CHECKAUX_NEEDS_UPDATE,  /* entry requires update */
+        FSCACHE_CHECKAUX_OBSOLETE,      /* entry requires deletion */
+};
+
+/*
+ * fscache cookie definition
+ */
+struct fscache_cookie_def {
+        /* name of cookie type */
+        char name[16];
+
+        /* cookie type */
+        uint8_t type;
+#define FSCACHE_COOKIE_TYPE_INDEX       0
+#define FSCACHE_COOKIE_TYPE_DATAFILE    1
+
+        /* select the cache into which to insert an entry in this index
+         * - optional
+         * - should return a cache identifier or NULL to cause the cache to be
+         * inherited from the parent if possible or the first cache picked
+         * for a non-index file if not
+         */
+        struct fscache_cache_tag *(*select_cache)(
+                const void *parent_netfs_data,
+                const void *cookie_netfs_data);
+
+        /* get an index key
+         * - should store the key data in the buffer
+         * - should return the amount of amount stored
+         * - not permitted to return an error
+         * - the netfs data from the cookie being used as the source is
+         * presented
+         */
+        uint16_t (*get_key)(const void *cookie_netfs_data,
+                            void *buffer,
+                            uint16_t bufmax);
+
+        /* get certain file attributes from the netfs data
+         * - this function can be absent for an index
+         * - not permitted to return an error
+         * - the netfs data from the cookie being used as the source is
+         * presented
+         */
+        void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
+
+        /* get the auxilliary data from netfs data
+         * - this function can be absent if the index carries no state data
+         * - should store the auxilliary data in the buffer
+         * - should return the amount of amount stored
+         * - not permitted to return an error
+         * - the netfs data from the cookie being used as the source is
+         * presented
+         */
+        uint16_t (*get_aux)(const void *cookie_netfs_data,
+                            void *buffer,
+                            uint16_t bufmax);
+
+        /* consult the netfs about the state of an object
+         * - this function can be absent if the index carries no state data
+         * - the netfs data from the cookie being used as the target is
+         * presented, as is the auxilliary data
+         */
+        enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
+                                           const void *data,
+                                           uint16_t datalen);
+
+        /* get an extra reference on a read context
+         * - this function can be absent if the completion function doesn't
+         * require a context
+         */
+        void (*get_context)(void *cookie_netfs_data, void *context);
+
+        /* release an extra reference on a read context
+         * - this function can be absent if the completion function doesn't
+         * require a context
+         */
+        void (*put_context)(void *cookie_netfs_data, void *context);
+
+        /* indicate pages that now have cache metadata retained
+         * - this function should mark the specified pages as now being cached
+         * - the pages will have been marked with PG_fscache before this is
+         * called, so this is optional
+         */
+        void (*mark_pages_cached)(void *cookie_netfs_data,
+                                  struct address_space *mapping,
+                                  struct pagevec *cached_pvec);
+
+        /* indicate the cookie is no longer cached
+         * - this function is called when the backing store currently caching
+         * a cookie is removed
+         * - the netfs should use this to clean up any markers indicating
+         * cached pages
+         * - this is mandatory for any object that may have data
+         */
+        void (*now_uncached)(void *cookie_netfs_data);
+};
+
+/*
+ * fscache cached network filesystem type
+ * - name, version and ops must be filled in before registration
+ * - all other fields will be set during registration
+ */
+struct fscache_netfs {
+        uint32_t                version;        /* indexing version */
+        const char              *name;          /* filesystem name */
+        struct fscache_cookie   *primary_index;
+        struct list_head        link;           /* internal link */
+};
+
+/*
+ * slow-path functions for when there is actually caching available, and the
+ * netfs does actually have a valid token
+ * - these are not to be called directly
+ * - these are undefined symbols when FS-Cache is not configured and the
+ * optimiser takes care of not using them
+ */
+extern int __fscache_register_netfs(struct fscache_netfs *);
+extern void __fscache_unregister_netfs(struct fscache_netfs *);
+extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
+extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+
+extern struct fscache_cookie *__fscache_acquire_cookie(
+        struct fscache_cookie *,
+        const struct fscache_cookie_def *,
+        void *);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+extern void __fscache_update_cookie(struct fscache_cookie *);
+extern int __fscache_attr_changed(struct fscache_cookie *);
+extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
+                                        struct page *,
+                                        fscache_rw_complete_t,
+                                        void *,
+                                        gfp_t);
+extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
+                                         struct address_space *,
+                                         struct list_head *,
+                                         unsigned *,
+                                         fscache_rw_complete_t,
+                                         void *,
+                                         gfp_t);
+extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
+extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
+extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
+extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
+extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
+
+/**
+ * fscache_register_netfs - Register a filesystem as desiring caching services
+ * @netfs: The description of the filesystem
+ *
+ * Register a filesystem as desiring caching services if they're available.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_register_netfs(struct fscache_netfs *netfs)
+{
+        if (fscache_available())
+                return __fscache_register_netfs(netfs);
+        else
+                return 0;
+}
+
+/**
+ * fscache_unregister_netfs - Indicate that a filesystem no longer desires
+ * caching services
+ * @netfs: The description of the filesystem
+ *
+ * Indicate that a filesystem no longer desires caching services for the
+ * moment.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_unregister_netfs(struct fscache_netfs *netfs)
+{
+        if (fscache_available())
+                __fscache_unregister_netfs(netfs);
+}
+
+/**
+ * fscache_lookup_cache_tag - Look up a cache tag
+ * @name: The name of the tag to search for
+ *
+ * Acquire a specific cache referral tag that can be used to select a specific
+ * cache in which to cache an index.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+{
+        if (fscache_available())
+                return __fscache_lookup_cache_tag(name);
+        else
+                return NULL;
+}
+
+/**
+ * fscache_release_cache_tag - Release a cache tag
+ * @tag: The tag to release
+ *
+ * Release a reference to a cache referral tag previously looked up.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+{
+        if (fscache_available())
+                __fscache_release_cache_tag(tag);
+}
+
+/**
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @parent: The cookie that's to be the parent of this one
+ * @def: A description of the cache object, including callback operations
+ * @netfs_data: An arbitrary piece of data to be kept in the cookie to
+ * represent the cache object to the netfs
+ *
+ * This function is used to inform FS-Cache about part of an index hierarchy
+ * that can be used to locate files. This is done by requesting a cookie for
+ * each index in the path to the file.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+struct fscache_cookie *fscache_acquire_cookie(
+        struct fscache_cookie *parent,
+        const struct fscache_cookie_def *def,
+        void *netfs_data)
+{
+        if (fscache_cookie_valid(parent))
+                return __fscache_acquire_cookie(parent, def, netfs_data);
+        else
+                return NULL;
+}
+
+/**
+ * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
+ * it
+ * @cookie: The cookie being returned
+ * @retire: True if the cache object the cookie represents is to be discarded
+ *
+ * This function returns a cookie to the cache, forcibly discarding the
+ * associated cache object if retire is set to true.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+{
+        if (fscache_cookie_valid(cookie))
+                __fscache_relinquish_cookie(cookie, retire);
+}
+
+/**
+ * fscache_update_cookie - Request that a cache object be updated
+ * @cookie: The cookie representing the cache object
+ *
+ * Request an update of the index data for the cache object associated with the
+ * cookie.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_update_cookie(struct fscache_cookie *cookie)
+{
+        if (fscache_cookie_valid(cookie))
+                __fscache_update_cookie(cookie);
+}
+
+/**
+ * fscache_pin_cookie - Pin a data-storage cache object in its cache
+ * @cookie: The cookie representing the cache object
+ *
+ * Permit data-storage cache objects to be pinned in the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_pin_cookie(struct fscache_cookie *cookie)
+{
+        return -ENOBUFS;
+}
+
+/**
+ * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * @cookie: The cookie representing the cache object
+ *
+ * Permit data-storage cache objects to be unpinned from the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_unpin_cookie(struct fscache_cookie *cookie)
+{
+}
+
+/**
+ * fscache_attr_changed - Notify cache that an object's attributes changed
+ * @cookie: The cookie representing the cache object
+ *
+ * Send a notification to the cache indicating that an object's attributes have
+ * changed. This includes the data size. These attributes will be obtained
+ * through the get_attr() cookie definition op.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_attr_changed(struct fscache_cookie *cookie)
+{
+        if (fscache_cookie_valid(cookie))
+                return __fscache_attr_changed(cookie);
+        else
+                return -ENOBUFS;
+}
+
+/**
+ * fscache_reserve_space - Reserve data space for a cached object
+ * @cookie: The cookie representing the cache object
+ * @i_size: The amount of space to be reserved
+ *
+ * Reserve an amount of space in the cache for the cache object attached to a
+ * cookie so that a write to that object within the space can always be
+ * honoured.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+{
+        return -ENOBUFS;
+}
+
+/**
+ * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
+ * in which to store it
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to fill if possible
+ * @end_io_func: The callback to invoke when and if the page is filled
+ * @context: An arbitrary piece of data to pass on to end_io_func()
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Read a page from the cache, or if that's not possible make a potential
+ * one-block reservation in the cache into which the page may be stored once
+ * fetched from the server.
+ *
+ * If the page is not backed by the cache object, or if it there's some reason
+ * it can't be, -ENOBUFS will be returned and nothing more will be done for
+ * that page.
+ *
+ * Else, if that page is backed by the cache, a read will be initiated directly
+ * to the netfs's page and 0 will be returned by this function. The
+ * end_io_func() callback will be invoked when the operation terminates on a
+ * completion or failure. Note that the callback may be invoked before the
+ * return.
+ *
+ * Else, if the page is unbacked, -ENODATA is returned and a block may have
+ * been allocated in the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+                               struct page *page,
+                               fscache_rw_complete_t end_io_func,
+                               void *context,
+                               gfp_t gfp)
+{
+        if (fscache_cookie_valid(cookie))
+                return __fscache_read_or_alloc_page(cookie, page, end_io_func,
+                                                    context, gfp);
+        else
+                return -ENOBUFS;
+}
+
+/**
+ * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
+ * blocks in which to store them
+ * @cookie: The cookie representing the cache object
+ * @mapping: The netfs inode mapping to which the pages will be attached
+ * @pages: A list of potential netfs pages to be filled
+ * @end_io_func: The callback to invoke when and if each page is filled
+ * @context: An arbitrary piece of data to pass on to end_io_func()
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Read a set of pages from the cache, or if that's not possible, attempt to
+ * make a potential one-block reservation for each page in the cache into which
+ * that page may be stored once fetched from the server.
+ *
+ * If some pages are not backed by the cache object, or if it there's some
+ * reason they can't be, -ENOBUFS will be returned and nothing more will be
+ * done for that pages.
+ *
+ * Else, if some of the pages are backed by the cache, a read will be initiated
+ * directly to the netfs's page and 0 will be returned by this function. The
+ * end_io_func() callback will be invoked when the operation terminates on a
+ * completion or failure. Note that the callback may be invoked before the
+ * return.
+ *
+ * Else, if a page is unbacked, -ENODATA is returned and a block may have
+ * been allocated in the cache.
+ *
+ * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
+ * regard to different pages, the return values are prioritised in that order.
+ * Any pages submitted for reading are removed from the pages list.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+                                struct address_space *mapping,
+                                struct list_head *pages,
+                                unsigned *nr_pages,
+                                fscache_rw_complete_t end_io_func,
+                                void *context,
+                                gfp_t gfp)
+{
+        if (fscache_cookie_valid(cookie))
+                return __fscache_read_or_alloc_pages(cookie, mapping, pages,
+                                                     nr_pages, end_io_func,
+                                                     context, gfp);
+        else
+                return -ENOBUFS;
+}
+
+/**
+ * fscache_alloc_page - Allocate a block in which to store a page
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to allocate a page for
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Request Allocation a block in the cache in which to store a netfs page
+ * without retrieving any contents from the cache.
+ *
+ * If the page is not backed by a file then -ENOBUFS will be returned and
+ * nothing more will be done, and no reservation will be made.
+ *
+ * Else, a block will be allocated if one wasn't already, and 0 will be
+ * returned
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_alloc_page(struct fscache_cookie *cookie,
+                       struct page *page,
+                       gfp_t gfp)
+{
+        if (fscache_cookie_valid(cookie))
+                return __fscache_alloc_page(cookie, page, gfp);
+        else
+                return -ENOBUFS;
+}
+
+/**
+ * fscache_write_page - Request storage of a page in the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to store
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Request the contents of the netfs page be written into the cache. This
+ * request may be ignored if no cache block is currently allocated, in which
+ * case it will return -ENOBUFS.
+ *
+ * If a cache block was already allocated, a write will be initiated and 0 will
+ * be returned. The PG_fscache_write page bit is set immediately and will then
+ * be cleared at the completion of the write to indicate the success or failure
+ * of the operation. Note that the completion may happen before the return.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_write_page(struct fscache_cookie *cookie,
+                       struct page *page,
+                       gfp_t gfp)
+{
+        if (fscache_cookie_valid(cookie))
+                return __fscache_write_page(cookie, page, gfp);
+        else
+                return -ENOBUFS;
+}
+
+/**
+ * fscache_uncache_page - Indicate that caching is no longer required on a page
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that was being cached.
+ *
+ * Tell the cache that we no longer want a page to be cached and that it should
+ * remove any knowledge of the netfs page it may have.
+ *
+ * Note that this cannot cancel any outstanding I/O operations between this
+ * page and the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_uncache_page(struct fscache_cookie *cookie,
+                          struct page *page)
+{
+        if (fscache_cookie_valid(cookie))
+                __fscache_uncache_page(cookie, page);
+}
+
+/**
+ * fscache_check_page_write - Ask if a page is being writing to the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ *
+ * Ask the cache if a page is being written to the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+bool fscache_check_page_write(struct fscache_cookie *cookie,
+                              struct page *page)
+{
+        if (fscache_cookie_valid(cookie))
+                return __fscache_check_page_write(cookie, page);
+        return false;
+}
+
+/**
+ * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ *
+ * Ask the cache to wake us up when a page is no longer being written to the
+ * cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_wait_on_page_write(struct fscache_cookie *cookie,
+                                struct page *page)
+{
+        if (fscache_cookie_valid(cookie))
+                __fscache_wait_on_page_write(cookie, page);
+}
+
+#endif /* _LINUX_FSCACHE_H */
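A sketch of the netfs-facing calls, loosely following the netfs-api.txt flow. The cookie-definition callbacks are omitted for brevity; a real filesystem (AFS and NFS were the first users) must supply get_key and friends. All "exfs_*" names are illustrative:

#include <linux/fscache.h>
#include <linux/init.h>

static struct fscache_netfs exfs_netfs = {
        .name           = "exfs",
        .version        = 0,
};

static const struct fscache_cookie_def exfs_file_def = {
        .name           = "exfs.file",
        .type           = FSCACHE_COOKIE_TYPE_DATAFILE,
        /* .get_key/.get_attr/.check_aux etc. would be filled in here */
};

static int __init exfs_init(void)
{
        /* no-op returning 0 when FS-Cache is not configured */
        return fscache_register_netfs(&exfs_netfs);
}

static struct fscache_cookie *exfs_cache_open(void *inode_data)
{
        /* hang the file cookie off the netfs's primary index */
        return fscache_acquire_cookie(exfs_netfs.primary_index,
                                      &exfs_file_def, inode_data);
}

static void exfs_cache_close(struct fscache_cookie *cookie)
{
        fscache_relinquish_cookie(cookie, 0);   /* 0: keep the cached data */
}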
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index ed21bd3dbd25..29ee2873f4a8 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -1,68 +1,6 @@
 #ifndef _LINUX_HDREG_H
 #define _LINUX_HDREG_H
 
-#ifdef __KERNEL__
-#include <linux/ata.h>
-
-/*
- * This file contains some defines for the AT-hd-controller.
- * Various sources.
- */
-
-/* ide.c has its own port definitions in "ide.h" */
-
-#define HD_IRQ          14
-
-/* Hd controller regs. Ref: IBM AT Bios-listing */
-#define HD_DATA         0x1f0           /* _CTL when writing */
-#define HD_ERROR        0x1f1           /* see err-bits */
-#define HD_NSECTOR      0x1f2           /* nr of sectors to read/write */
-#define HD_SECTOR       0x1f3           /* starting sector */
-#define HD_LCYL         0x1f4           /* starting cylinder */
-#define HD_HCYL         0x1f5           /* high byte of starting cyl */
-#define HD_CURRENT      0x1f6           /* 101dhhhh , d=drive, hhhh=head */
-#define HD_STATUS       0x1f7           /* see status-bits */
-#define HD_FEATURE      HD_ERROR        /* same io address, read=error, write=feature */
-#define HD_PRECOMP      HD_FEATURE      /* obsolete use of this port - predates IDE */
-#define HD_COMMAND      HD_STATUS       /* same io address, read=status, write=cmd */
-
-#define HD_CMD          0x3f6           /* used for resets */
-#define HD_ALTSTATUS    0x3f6           /* same as HD_STATUS but doesn't clear irq */
-
-/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
-
-/* Bits of HD_STATUS */
-#define ERR_STAT        0x01
-#define INDEX_STAT      0x02
-#define ECC_STAT        0x04    /* Corrected error */
-#define DRQ_STAT        0x08
-#define SEEK_STAT       0x10
-#define SRV_STAT        0x10
-#define WRERR_STAT      0x20
-#define READY_STAT      0x40
-#define BUSY_STAT       0x80
-
-/* Bits for HD_ERROR */
-#define MARK_ERR        0x01    /* Bad address mark */
-#define ILI_ERR         0x01    /* Illegal Length Indication (ATAPI) */
-#define TRK0_ERR        0x02    /* couldn't find track 0 */
-#define EOM_ERR         0x02    /* End Of Media (ATAPI) */
-#define ABRT_ERR        0x04    /* Command aborted */
-#define MCR_ERR         0x08    /* media change request */
-#define ID_ERR          0x10    /* ID field not found */
-#define MC_ERR          0x20    /* media changed */
-#define ECC_ERR         0x40    /* Uncorrectable ECC error */
-#define BBD_ERR         0x80    /* pre-EIDE meaning: block marked bad */
-#define ICRC_ERR        0x80    /* new meaning: CRC error during transfer */
-#define LFS_ERR         0xf0    /* Last Failed Sense (ATAPI) */
-
-/* Bits of HD_NSECTOR */
-#define CD              0x01
-#define IO              0x02
-#define REL             0x04
-#define TAG_MASK        0xf8
-#endif /* __KERNEL__ */
-
 #include <linux/types.h>
 
 /*
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr {
 #define TASKFILE_INVALID                0x7fff
 #endif
 
+#ifndef __KERNEL__
 /* ATA/ATAPI Commands pre T13 Spec */
 #define WIN_NOP                         0x00
 /*
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr {
 #define SECURITY_ERASE_UNIT             0xBD
 #define SECURITY_FREEZE_LOCK            0xBE
 #define SECURITY_DISABLE_PASSWORD       0xBF
+#endif /* __KERNEL__ */
 
 struct hd_geometry {
         unsigned char heads;
@@ -448,6 +388,7 @@ enum {
 
 #define __NEW_HD_DRIVE_ID
 
+#ifndef __KERNEL__
 /*
  * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec.
  *
@@ -699,6 +640,7 @@ struct hd_driveid {
          * 7:0 Signature
          */
 };
+#endif /* __KERNEL__ */
 
 /*
  * IDE "nice" flags. These are used on a per drive basis to determine
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fa8ee9cef7be..a72876e43589 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -270,6 +270,7 @@ struct hid_item {
 
 #define HID_QUIRK_INVERT                0x00000001
 #define HID_QUIRK_NOTOUCH               0x00000002
+#define HID_QUIRK_IGNORE                0x00000004
 #define HID_QUIRK_NOGET                 0x00000008
 #define HID_QUIRK_BADPAD                0x00000020
 #define HID_QUIRK_MULTI_INPUT           0x00000040
@@ -603,12 +604,17 @@ struct hid_ll_driver {
         int (*open)(struct hid_device *hdev);
         void (*close)(struct hid_device *hdev);
 
+        int (*power)(struct hid_device *hdev, int level);
+
         int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
                         unsigned int code, int value);
 
         int (*parse)(struct hid_device *hdev);
 };
 
+#define PM_HINT_FULLON  1<<5
+#define PM_HINT_NORMAL  1<<1
+
 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */
 /* We ignore a few input applications that are not widely used */
 #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
641void hid_output_report(struct hid_report *report, __u8 *data); 647void hid_output_report(struct hid_report *report, __u8 *data);
642struct hid_device *hid_allocate_device(void); 648struct hid_device *hid_allocate_device(void);
643int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 649int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
650int hid_check_keys_pressed(struct hid_device *hid);
644int hid_connect(struct hid_device *hid, unsigned int connect_mask); 651int hid_connect(struct hid_device *hid, unsigned int connect_mask);
645 652
646/** 653/**
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...)
791 __FILE__ , ## arg) 798 __FILE__ , ## arg)
792#endif /* HID_FF */ 799#endif /* HID_FF */
793 800
794#ifdef __KERNEL__
795#ifdef CONFIG_HID_COMPAT
796#define HID_COMPAT_LOAD_DRIVER(name) \
797/* prototype to avoid sparse warning */ \
798extern void hid_compat_##name(void); \
799void hid_compat_##name(void) { } \
800EXPORT_SYMBOL(hid_compat_##name)
801#else
802#define HID_COMPAT_LOAD_DRIVER(name)
803#endif /* HID_COMPAT */
804#define HID_COMPAT_CALL_DRIVER(name) do { \
805 extern void hid_compat_##name(void); \
806 hid_compat_##name(); \
807} while (0)
808#endif /* __KERNEL__ */
809
810#endif 801#endif
811 802
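
The new power() hook, together with the PM_HINT_* values, lets the HID core ask the transport driver to change the device's power level. A loose sketch of what a low-level driver might plug in; usbhid_set_power() is a made-up transport-specific helper, not part of this patch.

static int usbhid_power(struct hid_device *hdev, int level)
{
	switch (level) {
	case PM_HINT_FULLON:
		return usbhid_set_power(hdev, 1);	/* full power for heavy use */
	case PM_HINT_NORMAL:
		return usbhid_set_power(hdev, 0);	/* back to normal operation */
	default:
		return -EINVAL;
	}
}
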
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7ff5c55f9b55..1fcb7126a01f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page)
19} 19}
20#endif 20#endif
21 21
22#ifdef CONFIG_HIGHMEM 22#include <asm/kmap_types.h>
23
24#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
25
26void debug_kmap_atomic(enum km_type type);
27
28#else
23 29
30static inline void debug_kmap_atomic(enum km_type type)
31{
32}
33
34#endif
35
36#ifdef CONFIG_HIGHMEM
24#include <asm/highmem.h> 37#include <asm/highmem.h>
25 38
26/* declarations for linux/mm/highmem.c */ 39/* declarations for linux/mm/highmem.c */
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page)
44 57
45#define kunmap(page) do { (void) (page); } while (0) 58#define kunmap(page) do { (void) (page); } while (0)
46 59
47#include <asm/kmap_types.h>
48
49static inline void *kmap_atomic(struct page *page, enum km_type idx) 60static inline void *kmap_atomic(struct page *page, enum km_type idx)
50{ 61{
51 pagefault_disable(); 62 pagefault_disable();
@@ -187,16 +198,4 @@ static inline void copy_highpage(struct page *to, struct page *from)
187 kunmap_atomic(vto, KM_USER1); 198 kunmap_atomic(vto, KM_USER1);
188} 199}
189 200
190#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
191
192void debug_kmap_atomic(enum km_type type);
193
194#else
195
196static inline void debug_kmap_atomic(enum km_type type)
197{
198}
199
200#endif
201
202#endif /* _LINUX_HIGHMEM_H */ 201#endif /* _LINUX_HIGHMEM_H */
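
Moving the asm/kmap_types.h include and the debug_kmap_atomic() declaration out of the CONFIG_HIGHMEM block means every kmap_atomic() call site can be slot-checked, not only highmem builds. For context, a generic sketch of the atomic-kmap pattern the check guards (essentially what copy_highpage() above does):

static void copy_one_page(struct page *dst, struct page *src)
{
	/* Pagefaults are disabled between kmap_atomic() and kunmap_atomic();
	 * each nesting level must use a distinct km_type slot.
	 */
	char *vsrc = kmap_atomic(src, KM_USER0);
	char *vdst = kmap_atomic(dst, KM_USER1);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vdst, KM_USER1);
	kunmap_atomic(vsrc, KM_USER0);
}
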
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
index 8137f660a5cc..0dc80ef24975 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl4030.h
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
218 218
219/*----------------------------------------------------------------------*/ 219/*----------------------------------------------------------------------*/
220 220
221/* Power bus message definitions */
222
223#define DEV_GRP_NULL 0x0
224#define DEV_GRP_P1 0x1
225#define DEV_GRP_P2 0x2
226#define DEV_GRP_P3 0x4
227
228#define RES_GRP_RES 0x0
229#define RES_GRP_PP 0x1
230#define RES_GRP_RC 0x2
231#define RES_GRP_PP_RC 0x3
232#define RES_GRP_PR 0x4
233#define RES_GRP_PP_PR 0x5
234#define RES_GRP_RC_PR 0x6
235#define RES_GRP_ALL 0x7
236
237#define RES_TYPE2_R0 0x0
238
239#define RES_TYPE_ALL 0x7
240
241#define RES_STATE_WRST 0xF
242#define RES_STATE_ACTIVE 0xE
243#define RES_STATE_SLEEP 0x8
244#define RES_STATE_OFF 0x0
245
246/*
247 * Power Bus Message Format ... these can be sent individually by Linux,
248 * but are usually part of downloaded scripts that are run when various
249 * power events are triggered.
250 *
251 * Broadcast Message (16 Bits):
252 * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
253 * RES_STATE[3:0]
254 *
255 * Singular Message (16 Bits):
256 * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
257 */
258
259#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
260 ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
261 | (type) << 4 | (state))
262
263#define MSG_SINGULAR(devgrp, id, state) \
264 ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
265
266/*----------------------------------------------------------------------*/
267
221struct twl4030_bci_platform_data { 268struct twl4030_bci_platform_data {
222 int *battery_tmp_tbl; 269 int *battery_tmp_tbl;
223 unsigned int tblsize; 270 unsigned int tblsize;
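
To make the message layout concrete: a singular message packs the device group into bits 15:13, a message-type bit of 0 into bit 12, the resource id into bits 11:4 and the target state into bits 3:0. A small worked example (the resource id 7 is illustrative only, not a real TWL4030 resource number):

	u16 msg = MSG_SINGULAR(DEV_GRP_P1, 7, RES_STATE_SLEEP);
	/* = (0x1 << 13) | (0 << 12) | (7 << 4) | 0x8 = 0x2078 */
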
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1d6c71d96ede..77214ead1a36 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
123#define ecap_eim_support(e) ((e >> 4) & 0x1) 123#define ecap_eim_support(e) ((e >> 4) & 0x1)
124#define ecap_ir_support(e) ((e >> 3) & 0x1) 124#define ecap_ir_support(e) ((e >> 3) & 0x1)
125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) 125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
126 126#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
127 127
128/* IOTLB_REG */ 128/* IOTLB_REG */
129#define DMA_TLB_FLUSH_GRANU_OFFSET 60 129#define DMA_TLB_FLUSH_GRANU_OFFSET 60
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a7bfb1b6ca0..3af4ffd591b9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
21 21
22#define IOMMU_READ (1) 22#define IOMMU_READ (1)
23#define IOMMU_WRITE (2) 23#define IOMMU_WRITE (2)
24#define IOMMU_CACHE (4) /* DMA cache coherency */
24 25
25struct device; 26struct device;
26 27
@@ -28,6 +29,8 @@ struct iommu_domain {
28 void *priv; 29 void *priv;
29}; 30};
30 31
32#define IOMMU_CAP_CACHE_COHERENCY 0x1
33
31struct iommu_ops { 34struct iommu_ops {
32 int (*domain_init)(struct iommu_domain *domain); 35 int (*domain_init)(struct iommu_domain *domain);
33 void (*domain_destroy)(struct iommu_domain *domain); 36 void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
39 size_t size); 42 size_t size);
40 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, 43 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
41 unsigned long iova); 44 unsigned long iova);
45 int (*domain_has_cap)(struct iommu_domain *domain,
46 unsigned long cap);
42}; 47};
43 48
44#ifdef CONFIG_IOMMU_API 49#ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
57 size_t size); 62 size_t size);
58extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 63extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
59 unsigned long iova); 64 unsigned long iova);
65extern int iommu_domain_has_cap(struct iommu_domain *domain,
66 unsigned long cap);
60 67
61#else /* CONFIG_IOMMU_API */ 68#else /* CONFIG_IOMMU_API */
62 69
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
107 return 0; 114 return 0;
108} 115}
109 116
117static inline int domain_has_cap(struct iommu_domain *domain,
118 unsigned long cap)
119{
120 return 0;
121}
122
110#endif /* CONFIG_IOMMU_API */ 123#endif /* CONFIG_IOMMU_API */
111 124
112#endif /* __LINUX_IOMMU_H */ 125#endif /* __LINUX_IOMMU_H */
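
The capability query gives consumers such as KVM device assignment a way to decide whether the IOMMU can enforce snooping on its own. Roughly, a caller might do the following (a sketch, assuming the domain was allocated and attached elsewhere):

	int prot = IOMMU_READ | IOMMU_WRITE;

	/* Only request snooped (cache-coherent) mappings when the hardware
	 * supports it, e.g. VT-d Snoop Control per ecap_sc_support() above.
	 */
	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;
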
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 74bde13224c9..b02a3f1d46a0 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -24,8 +24,8 @@
24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) 24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) 25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) 26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
27# define trace_softirq_enter() do { current->softirq_context++; } while (0) 27# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
28# define trace_softirq_exit() do { current->softirq_context--; } while (0) 28# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
30#else 30#else
31# define trace_hardirqs_on() do { } while (0) 31# define trace_hardirqs_on() do { } while (0)
@@ -38,8 +38,8 @@
38# define trace_softirqs_enabled(p) 0 38# define trace_softirqs_enabled(p) 0
39# define trace_hardirq_enter() do { } while (0) 39# define trace_hardirq_enter() do { } while (0)
40# define trace_hardirq_exit() do { } while (0) 40# define trace_hardirq_exit() do { } while (0)
41# define trace_softirq_enter() do { } while (0) 41# define lockdep_softirq_enter() do { } while (0)
42# define trace_softirq_exit() do { } while (0) 42# define lockdep_softirq_exit() do { } while (0)
43# define INIT_TRACE_IRQFLAGS 43# define INIT_TRACE_IRQFLAGS
44#endif 44#endif
45 45
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 64246dce5663..53ae4399da2d 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -35,7 +35,7 @@
35#define journal_oom_retry 1 35#define journal_oom_retry 1
36 36
37/* 37/*
38 * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds 38 * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
39 * certain classes of error which can occur due to failed IOs. Under 39 * certain classes of error which can occur due to failed IOs. Under
40 * normal use we want ext3 to continue after such errors, because 40 * normal use we want ext3 to continue after such errors, because
41 * hardware _can_ fail, but for debugging purposes when running tests on 41 * hardware _can_ fail, but for debugging purposes when running tests on
@@ -552,6 +552,11 @@ struct transaction_s
552 */ 552 */
553 int t_handle_count; 553 int t_handle_count;
554 554
555 /*
556 * This transaction is being forced and some process is
557 * waiting for it to finish.
558 */
559 int t_synchronous_commit:1;
555}; 560};
556 561
557/** 562/**
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 76262d83656b..b450a2628855 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -379,7 +379,7 @@ enum {
379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ 379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands 380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
381 not multiple of 16 bytes */ 381 not multiple of 16 bytes */
382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ 382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
384 384
385 /* DMA mask for user DMA control: User visible values; DO NOT 385 /* DMA mask for user DMA control: User visible values; DO NOT
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4e457256bd33..3e7615e9087e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
192 wake_up_process(host->sdio_irq_thread); 192 wake_up_process(host->sdio_irq_thread);
193} 193}
194 194
195struct regulator;
196
197int mmc_regulator_get_ocrmask(struct regulator *supply);
198int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
199
195#endif 200#endif
196 201
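
These helpers translate between a regulator's supported voltage ranges and the MMC OCR bit mask. In a host driver they might be wired up roughly like this (a sketch with error handling trimmed; the supply name "vmmc" is an assumption):

	/* At probe time, derive ocr_avail from the board's MMC supply. */
	struct regulator *supply = regulator_get(dev, "vmmc");
	int mask = mmc_regulator_get_ocrmask(supply);

	if (mask > 0)
		host->ocr_avail = mask;

	/* In set_ios(), program the supply for the selected voltage bit. */
	mmc_regulator_set_ocr(supply, ios->vdd);
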
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index bde2557c2a9c..fdffb413b192 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,6 +185,9 @@ struct nfs_inode {
185 fmode_t delegation_state; 185 fmode_t delegation_state;
186 struct rw_semaphore rwsem; 186 struct rw_semaphore rwsem;
187#endif /* CONFIG_NFS_V4*/ 187#endif /* CONFIG_NFS_V4*/
188#ifdef CONFIG_NFS_FSCACHE
189 struct fscache_cookie *fscache;
190#endif
188 struct inode vfs_inode; 191 struct inode vfs_inode;
189}; 192};
190 193
@@ -207,6 +210,8 @@ struct nfs_inode {
207#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ 210#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
208#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ 211#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
209#define NFS_INO_FLUSHING (4) /* inode is flushing out data */ 212#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
213#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
214#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
210 215
211static inline struct nfs_inode *NFS_I(const struct inode *inode) 216static inline struct nfs_inode *NFS_I(const struct inode *inode)
212{ 217{
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode)
260 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 265 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
261} 266}
262 267
268static inline int NFS_FSCACHE(const struct inode *inode)
269{
270 return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
271}
272
263static inline __u64 NFS_FILEID(const struct inode *inode) 273static inline __u64 NFS_FILEID(const struct inode *inode)
264{ 274{
265 return NFS_I(inode)->fileid; 275 return NFS_I(inode)->fileid;
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *,
506 struct list_head *, unsigned); 516 struct list_head *, unsigned);
507extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); 517extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
508extern void nfs_readdata_release(void *data); 518extern void nfs_readdata_release(void *data);
519extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
520 struct page *);
509 521
510/* 522/*
511 * Allocate nfs_read_data structures 523 * Allocate nfs_read_data structures
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void);
583#define NFSDBG_CALLBACK 0x0100 595#define NFSDBG_CALLBACK 0x0100
584#define NFSDBG_CLIENT 0x0200 596#define NFSDBG_CLIENT 0x0200
585#define NFSDBG_MOUNT 0x0400 597#define NFSDBG_MOUNT 0x0400
598#define NFSDBG_FSCACHE 0x0800
586#define NFSDBG_ALL 0xFFFF 599#define NFSDBG_ALL 0xFFFF
587 600
588#ifdef __KERNEL__ 601#ifdef __KERNEL__
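
The NFS_INO_FSCACHE bit gates all cache I/O on an inode. A read path would typically consult it before touching the cache, along these lines (a sketch; nfs_readpage_from_fscache() stands in for the cache-read helper added elsewhere in this series and is assumed here):

	int error;

	/* Try the local cache first; fall back to the wire on a miss. */
	if (NFS_FSCACHE(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			return 0;	/* satisfied from the cache */
	}
	error = nfs_readpage_async(ctx, inode, page);
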
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 29b1e40dce99..6ad75948cbf7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -64,6 +64,10 @@ struct nfs_client {
64 char cl_ipaddr[48]; 64 char cl_ipaddr[48];
65 unsigned char cl_id_uniquifier; 65 unsigned char cl_id_uniquifier;
66#endif 66#endif
67
68#ifdef CONFIG_NFS_FSCACHE
69 struct fscache_cookie *fscache; /* client index cache cookie */
70#endif
67}; 71};
68 72
69/* 73/*
@@ -96,12 +100,19 @@ struct nfs_server {
96 unsigned int acdirmin; 100 unsigned int acdirmin;
97 unsigned int acdirmax; 101 unsigned int acdirmax;
98 unsigned int namelen; 102 unsigned int namelen;
103 unsigned int options; /* extra options enabled by mount */
104#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
99 105
100 struct nfs_fsid fsid; 106 struct nfs_fsid fsid;
101 __u64 maxfilesize; /* maximum file size */ 107 __u64 maxfilesize; /* maximum file size */
102 unsigned long mount_time; /* when this fs was mounted */ 108 unsigned long mount_time; /* when this fs was mounted */
103 dev_t s_dev; /* superblock dev numbers */ 109 dev_t s_dev; /* superblock dev numbers */
104 110
111#ifdef CONFIG_NFS_FSCACHE
112 struct nfs_fscache_key *fscache_key; /* unique key for superblock */
113 struct fscache_cookie *fscache; /* superblock cookie */
114#endif
115
105#ifdef CONFIG_NFS_V4 116#ifdef CONFIG_NFS_V4
106 u32 attr_bitmask[2];/* V4 bitmask representing the set 117 u32 attr_bitmask[2];/* V4 bitmask representing the set
107 of attributes supported on this 118 of attributes supported on this
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 1cb9a3fed2b3..68b10f5f8907 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters {
116 __NFSIOS_COUNTSMAX, 116 __NFSIOS_COUNTSMAX,
117}; 117};
118 118
119/*
120 * NFS local caching servicing counters
121 */
122enum nfs_stat_fscachecounters {
123 NFSIOS_FSCACHE_PAGES_READ_OK,
124 NFSIOS_FSCACHE_PAGES_READ_FAIL,
125 NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
126 NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
127 NFSIOS_FSCACHE_PAGES_UNCACHED,
128 __NFSIOS_FSCACHEMAX,
129};
130
119#endif /* _LINUX_NFS_IOSTAT */ 131#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 61df1779b2a5..62214c7d2d93 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,6 +82,7 @@ enum pageflags {
82 PG_arch_1, 82 PG_arch_1,
83 PG_reserved, 83 PG_reserved,
84 PG_private, /* If pagecache, has fs-private data */ 84 PG_private, /* If pagecache, has fs-private data */
85 PG_private_2, /* If pagecache, has fs aux data */
85 PG_writeback, /* Page is under writeback */ 86 PG_writeback, /* Page is under writeback */
86#ifdef CONFIG_PAGEFLAGS_EXTENDED 87#ifdef CONFIG_PAGEFLAGS_EXTENDED
87 PG_head, /* A head page */ 88 PG_head, /* A head page */
@@ -108,6 +109,12 @@ enum pageflags {
108 /* Filesystems */ 109 /* Filesystems */
109 PG_checked = PG_owner_priv_1, 110 PG_checked = PG_owner_priv_1,
110 111
112 /* Two page bits are conscripted by FS-Cache to maintain local caching
113 * state. These bits are set on pages belonging to the netfs's inodes
114 * when those inodes are being locally cached.
115 */
116 PG_fscache = PG_private_2, /* page backed by cache */
117
111 /* XEN */ 118 /* XEN */
112 PG_pinned = PG_owner_priv_1, 119 PG_pinned = PG_owner_priv_1,
113 PG_savepinned = PG_dirty, 120 PG_savepinned = PG_dirty,
@@ -182,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
182 189
183struct page; /* forward declaration */ 190struct page; /* forward declaration */
184 191
185TESTPAGEFLAG(Locked, locked) 192TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
186PAGEFLAG(Error, error) 193PAGEFLAG(Error, error)
187PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) 194PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
188PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) 195PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
@@ -194,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */
194PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 201PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
195PAGEFLAG(SavePinned, savepinned); /* Xen */ 202PAGEFLAG(SavePinned, savepinned); /* Xen */
196PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 203PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
197PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
198 __SETPAGEFLAG(Private, private)
199PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 204PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
200 205
201__PAGEFLAG(SlobPage, slob_page) 206__PAGEFLAG(SlobPage, slob_page)
@@ -205,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen)
205__PAGEFLAG(SlubDebug, slub_debug) 210__PAGEFLAG(SlubDebug, slub_debug)
206 211
207/* 212/*
213 * Private page markings that may be used by the filesystem that owns the page
214 * for its own purposes.
215 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
216 */
217PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
218 __CLEARPAGEFLAG(Private, private)
219PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
220PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
221
222/*
208 * Only test-and-set exist for PG_writeback. The unconditional operators are 223 * Only test-and-set exist for PG_writeback. The unconditional operators are
209 * risky: they bypass page accounting. 224 * risky: they bypass page accounting.
210 */ 225 */
@@ -384,9 +399,10 @@ static inline void __ClearPageTail(struct page *page)
384 * these flags set. If they are, there is a problem. 399 * these flags set. If they are, there is a problem.
385 */ 400 */
386#define PAGE_FLAGS_CHECK_AT_FREE \ 401#define PAGE_FLAGS_CHECK_AT_FREE \
387 (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ 402 (1 << PG_lru | 1 << PG_locked | \
388 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 403 1 << PG_private | 1 << PG_private_2 | \
389 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 404 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
405 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
390 __PG_UNEVICTABLE | __PG_MLOCKED) 406 __PG_UNEVICTABLE | __PG_MLOCKED)
391 407
392/* 408/*
@@ -397,4 +413,16 @@ static inline void __ClearPageTail(struct page *page)
397#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 413#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
398 414
399#endif /* !__GENERATING_BOUNDS_H */ 415#endif /* !__GENERATING_BOUNDS_H */
416
417/**
418 * page_has_private - Determine if page has private stuff
419 * @page: The page to be checked
420 *
421 * Determine if a page has private stuff, indicating that release routines
422 * should be invoked upon it.
423 */
424#define page_has_private(page) \
425 ((page)->flags & ((1 << PG_private) | \
426 (1 << PG_private_2)))
427
400#endif /* PAGE_FLAGS_H */ 428#endif /* PAGE_FLAGS_H */
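
page_has_private() exists because PagePrivate() alone no longer captures every page with filesystem state: PG_private_2 pages (fs aux data, e.g. FS-Cache) must also be offered back to the filesystem before reclaim. A generic sketch of its use in a release path:

	/* Give the owning filesystem a chance to drop its private state;
	 * both PG_private and PG_private_2 now count as "has private stuff".
	 */
	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;	/* the filesystem refused; keep the page */
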
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 076a7dc67c2b..34da5230faab 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -384,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page)
384extern void end_page_writeback(struct page *page); 384extern void end_page_writeback(struct page *page);
385 385
386/* 386/*
387 * Add an arbitrary waiter to a page's wait queue
388 */
389extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
390
391/*
387 * Fault a userspace page into pagetables. Return non-zero on a fault. 392 * Fault a userspace page into pagetables. Return non-zero on a fault.
388 * 393 *
389 * This assumes that two userspace pages are always sufficient. That's 394 * This assumes that two userspace pages are always sufficient. That's
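
add_page_wait_queue() lets a caller such as FS-Cache be notified of page wakeups without sleeping in lock_page() itself. A minimal sketch (the real user installs a custom wake function; this version just parks the current task on the queue):

	wait_queue_t waiter;

	init_waitqueue_entry(&waiter, current);
	add_page_wait_queue(page, &waiter);
	/* the waiter now receives unlock_page()/end_page_writeback() wakeups */
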
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 3945f803d514..7c775751392c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm);
28 */ 28 */
29void pwm_disable(struct pwm_device *pwm); 29void pwm_disable(struct pwm_device *pwm);
30 30
31#endif /* __ASM_ARCH_PWM_H */ 31#endif /* __LINUX_PWM_H */
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
deleted file mode 100644
index e98900671ca9..000000000000
--- a/include/linux/raid/bitmap.h
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3 *
4 * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
5 */
6#ifndef BITMAP_H
7#define BITMAP_H 1
8
9#define BITMAP_MAJOR_LO 3
10/* version 4 insists the bitmap is in little-endian order
11 * with version 3, it is host-endian which is non-portable
12 */
13#define BITMAP_MAJOR_HI 4
14#define BITMAP_MAJOR_HOSTENDIAN 3
15
16#define BITMAP_MINOR 39
17
18/*
19 * in-memory bitmap:
20 *
21 * Use 16 bit block counters to track pending writes to each "chunk".
22 * The 2 high order bits are special-purpose, the first is a flag indicating
23 * whether a resync is needed. The second is a flag indicating whether a
24 * resync is active.
25 * This means that the counter is actually 14 bits:
26 *
27 * +--------+--------+------------------------------------------------+
28 * | resync | resync | counter |
29 * | needed | active | |
30 * | (0-1) | (0-1) | (0-16383) |
31 * +--------+--------+------------------------------------------------+
32 *
33 * The "resync needed" bit is set when:
34 * a '1' bit is read from storage at startup.
35 * a write request fails on some drives
36 * a resync is aborted on a chunk with 'resync active' set
37 * It is cleared (and resync-active set) when a resync starts across all drives
38 * of the chunk.
39 *
40 *
41 * The "resync active" bit is set when:
42 * a resync is started on all drives, and resync_needed is set.
43 * resync_needed will be cleared (as long as resync_active wasn't already set).
44 * It is cleared when a resync completes.
45 *
46 * The counter counts pending write requests, plus the on-disk bit.
47 * When the counter is '1' and the resync bits are clear, the on-disk
48 * bit can be cleared as well, thus setting the counter to 0.
49 * When we set a bit, or in the counter (to start a write), if the fields is
50 * 0, we first set the disk bit and set the counter to 1.
51 *
52 * If the counter is 0, the on-disk bit is clear and the stripe is clean
53 * Anything that dirties the stripe pushes the counter to 2 (at least)
54 * and sets the on-disk bit (lazily).
55 * If a periodic sweep finds the counter at 2, it is decremented to 1.
56 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
57 * counter goes to zero.
58 *
59 * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
60 * counters as a fallback when "page" memory cannot be allocated:
61 *
62 * Normal case (page memory allocated):
63 *
64 * page pointer (32-bit)
65 *
66 * [ ] ------+
67 * |
68 * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
69 * c1 c2 c2048
70 *
71 * Hijacked case (page memory allocation failed):
72 *
73 * hijacked page pointer (32-bit)
74 *
75 * [ ][ ] (no page memory allocated)
76 * counter #1 (16-bit) counter #2 (16-bit)
77 *
78 */
79
80#ifdef __KERNEL__
81
82#define PAGE_BITS (PAGE_SIZE << 3)
83#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
84
85typedef __u16 bitmap_counter_t;
86#define COUNTER_BITS 16
87#define COUNTER_BIT_SHIFT 4
88#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
89#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
90
91#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
92#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
93#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
94#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
95#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
96#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
97
98/* how many counters per page? */
99#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
100/* same, except a shift value for more efficient bitops */
101#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
102/* same, except a mask value for more efficient bitops */
103#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
104
105#define BITMAP_BLOCK_SIZE 512
106#define BITMAP_BLOCK_SHIFT 9
107
108/* how many blocks per chunk? (this is variable) */
109#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
110#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
111#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
112
113/* when hijacked, the counters and bits represent even larger "chunks" */
114/* there will be 1024 chunks represented by each counter in the page pointers */
115#define PAGEPTR_BLOCK_RATIO(bitmap) \
116 (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1)
117#define PAGEPTR_BLOCK_SHIFT(bitmap) \
118 (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
119#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
120
121/*
122 * on-disk bitmap:
123 *
124 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
125 * file a page at a time. There's a superblock at the start of the file.
126 */
127
128/* map chunks (bits) to file pages - offset by the size of the superblock */
129#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
130
131#endif
132
133/*
134 * bitmap structures:
135 */
136
137#define BITMAP_MAGIC 0x6d746962
138
139/* use these for bitmap->flags and bitmap->sb->state bit-fields */
140enum bitmap_state {
141 BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
142 BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
143 BITMAP_HOSTENDIAN = 0x8000,
144};
145
146/* the superblock at the front of the bitmap file -- little endian */
147typedef struct bitmap_super_s {
148 __le32 magic; /* 0 BITMAP_MAGIC */
149 __le32 version; /* 4 the bitmap major for now, could change... */
150 __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */
151 __le64 events; /* 24 event counter for the bitmap (1)*/
152 __le64 events_cleared;/*32 event counter when last bit cleared (2) */
153 __le64 sync_size; /* 40 the size of the md device's sync range(3) */
154 __le32 state; /* 48 bitmap state information */
155 __le32 chunksize; /* 52 the bitmap chunk size in bytes */
156 __le32 daemon_sleep; /* 56 seconds between disk flushes */
157 __le32 write_behind; /* 60 number of outstanding write-behind writes */
158
159 __u8 pad[256 - 64]; /* set to zero */
160} bitmap_super_t;
161
162/* notes:
163 * (1) This event counter is updated before the event counter in the md superblock.
164 * When a bitmap is loaded, it is only accepted if this event counter is equal
165 * to, or one greater than, the event counter in the superblock.
166 * (2) This event counter is updated when the other one is, *if* and *only if* the
167 * array is not degraded. As bits are not cleared when the array is degraded,
168 * this represents the last time that any bits were cleared.
169 * If a device is being added that has an event count with this value or
170 * higher, it is accepted as conforming to the bitmap.
171 * (3) This is the number of sectors represented by the bitmap, and is the range that
172 * resync happens across. For raid1 and raid5/6 it is the size of individual
173 * devices. For raid10 it is the size of the array.
174 */
175
176#ifdef __KERNEL__
177
178/* the in-memory bitmap is represented by bitmap_pages */
179struct bitmap_page {
180 /*
181 * map points to the actual memory page
182 */
183 char *map;
184 /*
185 * in emergencies (when map cannot be alloced), hijack the map
186 * pointer and use it as two counters itself
187 */
188 unsigned int hijacked:1;
189 /*
190 * count of dirty bits on the page
191 */
192 unsigned int count:31;
193};
194
195/* keep track of bitmap file pages that have pending writes on them */
196struct page_list {
197 struct list_head list;
198 struct page *page;
199};
200
201/* the main bitmap structure - one per mddev */
202struct bitmap {
203 struct bitmap_page *bp;
204 unsigned long pages; /* total number of pages in the bitmap */
205 unsigned long missing_pages; /* number of pages not yet allocated */
206
207 mddev_t *mddev; /* the md device that the bitmap is for */
208
209 int counter_bits; /* how many bits per block counter */
210
211 /* bitmap chunksize -- how much data does each bit represent? */
212 unsigned long chunksize;
213 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
214 unsigned long chunks; /* total number of data chunks for the array */
215
216 /* We hold a count on the chunk currently being synced, and drop
217 * it when the last block is started. If the resync is aborted
218 * midway, we need to be able to drop that count, so we remember
219 * the counted chunk..
220 */
221 unsigned long syncchunk;
222
223 __u64 events_cleared;
224 int need_sync;
225
226 /* bitmap spinlock */
227 spinlock_t lock;
228
229 long offset; /* offset from superblock if file is NULL */
230 struct file *file; /* backing disk file */
231 struct page *sb_page; /* cached copy of the bitmap file superblock */
232 struct page **filemap; /* list of cache pages for the file */
233 unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
234 unsigned long file_pages; /* number of pages in the file */
235 int last_page_size; /* bytes in the last page */
236
237 unsigned long flags;
238
239 int allclean;
240
241 unsigned long max_write_behind; /* write-behind mode */
242 atomic_t behind_writes;
243
244 /*
245 * the bitmap daemon - periodically wakes up and sweeps the bitmap
246 * file, cleaning up bits and flushing out pages to disk as necessary
247 */
248 unsigned long daemon_lastrun; /* jiffies of last run */
249 unsigned long daemon_sleep; /* how many seconds between updates? */
250 unsigned long last_end_sync; /* when we lasted called end_sync to
251 * update bitmap with resync progress */
252
253 atomic_t pending_writes; /* pending writes to the bitmap file */
254 wait_queue_head_t write_wait;
255 wait_queue_head_t overflow_wait;
256
257};
258
259/* the bitmap API */
260
261/* these are used only by md/bitmap */
262int bitmap_create(mddev_t *mddev);
263void bitmap_flush(mddev_t *mddev);
264void bitmap_destroy(mddev_t *mddev);
265
266void bitmap_print_sb(struct bitmap *bitmap);
267void bitmap_update_sb(struct bitmap *bitmap);
268
269int bitmap_setallbits(struct bitmap *bitmap);
270void bitmap_write_all(struct bitmap *bitmap);
271
272void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
273
274/* these are exported */
275int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
276 unsigned long sectors, int behind);
277void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
278 unsigned long sectors, int success, int behind);
279int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
280void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
281void bitmap_close_sync(struct bitmap *bitmap);
282void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
283
284void bitmap_unplug(struct bitmap *bitmap);
285void bitmap_daemon_work(struct bitmap *bitmap);
286#endif
287
288#endif
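
For reference, decoding one of the 16-bit block counters described at the top of this (now deleted) header works out as follows, using the NEEDED/RESYNC/COUNTER macros; the value 0x8003 is just an example:

	bitmap_counter_t c = 0x8003;

	int needs_resync  = !!NEEDED(c);	/* 1: bit 15 set */
	int resync_active = !!RESYNC(c);	/* 0: bit 14 clear */
	int pending       = COUNTER(c);		/* 3: low 14 bits */
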
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
deleted file mode 100644
index f38b9c586afb..000000000000
--- a/include/linux/raid/linear.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _LINEAR_H
2#define _LINEAR_H
3
4#include <linux/raid/md.h>
5
6struct dev_info {
7 mdk_rdev_t *rdev;
8 sector_t num_sectors;
9 sector_t start_sector;
10};
11
12typedef struct dev_info dev_info_t;
13
14struct linear_private_data
15{
16 struct linear_private_data *prev; /* earlier version */
17 dev_info_t **hash_table;
18 sector_t spacing;
19 sector_t array_sectors;
20 int sector_shift; /* shift before dividing
21 * by spacing
22 */
23 dev_info_t disks[0];
24};
25
26
27typedef struct linear_private_data linear_conf_t;
28
29#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
30
31#endif
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
deleted file mode 100644
index 82bea14cae1a..000000000000
--- a/include/linux/raid/md.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 md.h : Multiple Devices driver for Linux
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4 Copyright (C) 1994-96 Marc ZYNGIER
5 <zyngier@ufr-info-p7.ibp.fr> or
6 <maz@gloups.fdn.fr>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 You should have received a copy of the GNU General Public License
14 (for example /usr/src/linux/COPYING); if not, write to the Free
15 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16*/
17
18#ifndef _MD_H
19#define _MD_H
20
21#include <linux/blkdev.h>
22#include <linux/seq_file.h>
23
24/*
25 * 'md_p.h' holds the 'physical' layout of RAID devices
26 * 'md_u.h' holds the user <=> kernel API
27 *
28 * 'md_k.h' holds kernel internal definitions
29 */
30
31#include <linux/raid/md_p.h>
32#include <linux/raid/md_u.h>
33#include <linux/raid/md_k.h>
34
35#ifdef CONFIG_MD
36
37/*
38 * Different major versions are not compatible.
39 * Different minor versions are only downward compatible.
40 * Different patchlevel versions are downward and upward compatible.
41 */
42#define MD_MAJOR_VERSION 0
43#define MD_MINOR_VERSION 90
44/*
45 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
46 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
47 * and major_version/minor_version accordingly
48 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
49 * in the super status byte
50 * >=3 means that bitmap superblock version 4 is supported, which uses
51 * little-endian representation rather than host-endian
52 */
53#define MD_PATCHLEVEL_VERSION 3
54
55extern int mdp_major;
56
57extern int register_md_personality(struct mdk_personality *p);
58extern int unregister_md_personality(struct mdk_personality *p);
59extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
60 mddev_t *mddev, const char *name);
61extern void md_unregister_thread(mdk_thread_t *thread);
62extern void md_wakeup_thread(mdk_thread_t *thread);
63extern void md_check_recovery(mddev_t *mddev);
64extern void md_write_start(mddev_t *mddev, struct bio *bi);
65extern void md_write_end(mddev_t *mddev);
66extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
67extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
68
69extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
70 sector_t sector, int size, struct page *page);
71extern void md_super_wait(mddev_t *mddev);
72extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
73 struct page *page, int rw);
74extern void md_do_sync(mddev_t *mddev);
75extern void md_new_event(mddev_t *mddev);
76extern int md_allow_write(mddev_t *mddev);
77extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
78
79#endif /* CONFIG_MD */
80#endif
81
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
deleted file mode 100644
index 9743e4dbc918..000000000000
--- a/include/linux/raid/md_k.h
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 md_k.h : kernel internal structure of the Linux MD driver
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 You should have received a copy of the GNU General Public License
11 (for example /usr/src/linux/COPYING); if not, write to the Free
12 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
13*/
14
15#ifndef _MD_K_H
16#define _MD_K_H
17
18/* and dm-bio-list.h is not under include/linux because.... ??? */
19#include "../../../drivers/md/dm-bio-list.h"
20
21#ifdef CONFIG_BLOCK
22
23#define LEVEL_MULTIPATH (-4)
24#define LEVEL_LINEAR (-1)
25#define LEVEL_FAULTY (-5)
26
27/* we need a value for 'no level specified' and 0
28 * means 'raid0', so we need something else. This is
29 * for internal use only
30 */
31#define LEVEL_NONE (-1000000)
32
33#define MaxSector (~(sector_t)0)
34
35typedef struct mddev_s mddev_t;
36typedef struct mdk_rdev_s mdk_rdev_t;
37
38/*
39 * options passed in raidrun:
40 */
41
42/* Currently this must fit in an 'int' */
43#define MAX_CHUNK_SIZE (1<<30)
44
45/*
46 * MD's 'extended' device
47 */
48struct mdk_rdev_s
49{
50 struct list_head same_set; /* RAID devices within the same set */
51
52 sector_t size; /* Device size (in blocks) */
53 mddev_t *mddev; /* RAID array if running */
54 long last_events; /* IO event timestamp */
55
56 struct block_device *bdev; /* block device handle */
57
58 struct page *sb_page;
59 int sb_loaded;
60 __u64 sb_events;
61 sector_t data_offset; /* start of data in array */
62 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
63 int sb_size; /* bytes in the superblock */
64 int preferred_minor; /* autorun support */
65
66 struct kobject kobj;
67
68 /* A device can be in one of three states based on two flags:
69 * Not working: faulty==1 in_sync==0
70 * Fully working: faulty==0 in_sync==1
71 * Working, but not
72 * in sync with array
73 * faulty==0 in_sync==0
74 *
75 * It can never have faulty==1, in_sync==1
76 * This reduces the burden of testing multiple flags in many cases
77 */
78
79 unsigned long flags;
80#define Faulty 1 /* device is known to have a fault */
81#define In_sync 2 /* device is in_sync with rest of array */
82#define WriteMostly 4 /* Avoid reading if at all possible */
83#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
84#define AllReserved 6 /* If whole device is reserved for
85 * one array */
86#define AutoDetected 7 /* added by auto-detect */
87#define Blocked 8 /* An error occurred on an externally
88 * managed array, don't allow writes
89 * until it is cleared */
90#define StateChanged 9 /* Faulty or Blocked has changed during
91 * interrupt, so it needs to be
92 * notified by the thread */
93 wait_queue_head_t blocked_wait;
94
95 int desc_nr; /* descriptor index in the superblock */
96 int raid_disk; /* role of device in array */
97 int saved_raid_disk; /* role that device used to have in the
98 * array and could again if we did a partial
99 * resync from the bitmap
100 */
101 sector_t recovery_offset;/* If this device has been partially
102 * recovered, this is where we were
103 * up to.
104 */
105
106 atomic_t nr_pending; /* number of pending requests.
107 * only maintained for arrays that
108 * support hot removal
109 */
110 atomic_t read_errors; /* number of consecutive read errors that
111 * we have tried to ignore.
112 */
113 atomic_t corrected_errors; /* number of corrected read errors,
114 * for reporting to userspace and storing
115 * in superblock.
116 */
117 struct work_struct del_work; /* used for delayed sysfs removal */
118
119 struct sysfs_dirent *sysfs_state; /* handle for 'state'
120 * sysfs entry */
121};
122
123struct mddev_s
124{
125 void *private;
126 struct mdk_personality *pers;
127 dev_t unit;
128 int md_minor;
129 struct list_head disks;
130 unsigned long flags;
131#define MD_CHANGE_DEVS 0 /* Some device status has changed */
132#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
133#define MD_CHANGE_PENDING 2 /* superblock update in progress */
134
135 int ro;
136
137 struct gendisk *gendisk;
138
139 struct kobject kobj;
140 int hold_active;
141#define UNTIL_IOCTL 1
142#define UNTIL_STOP 2
143
144 /* Superblock information */
145 int major_version,
146 minor_version,
147 patch_version;
148 int persistent;
149 int external; /* metadata is
150 * managed externally */
151 char metadata_type[17]; /* externally set*/
152 int chunk_size;
153 time_t ctime, utime;
154 int level, layout;
155 char clevel[16];
156 int raid_disks;
157 int max_disks;
158 sector_t size; /* used size of component devices */
159 sector_t array_sectors; /* exported array size */
160 __u64 events;
161
162 char uuid[16];
163
164 /* If the array is being reshaped, we need to record the
165 * new shape and an indication of where we are up to.
166 * This is written to the superblock.
167 * If reshape_position is MaxSector, then no reshape is happening (yet).
168 */
169 sector_t reshape_position;
170 int delta_disks, new_level, new_layout, new_chunk;
171
172 struct mdk_thread_s *thread; /* management thread */
173 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
174 sector_t curr_resync; /* last block scheduled */
175 unsigned long resync_mark; /* a recent timestamp */
176 sector_t resync_mark_cnt;/* blocks written at resync_mark */
177 sector_t curr_mark_cnt; /* blocks scheduled now */
178
179 sector_t resync_max_sectors; /* may be set by personality */
180
181 sector_t resync_mismatches; /* count of sectors where
182 * parity/replica mismatch found
183 */
184
185 /* allow user-space to request suspension of IO to regions of the array */
186 sector_t suspend_lo;
187 sector_t suspend_hi;
188 /* if zero, use the system-wide default */
189 int sync_speed_min;
190 int sync_speed_max;
191
192 /* resync even though the same disks are shared among md-devices */
193 int parallel_resync;
194
195 int ok_start_degraded;
196 /* recovery/resync flags
197 * NEEDED: we might need to start a resync/recover
198 * RUNNING: a thread is running, or about to be started
199 * SYNC: actually doing a resync, not a recovery
200 * RECOVER: doing recovery, or need to try it.
201 * INTR: resync needs to be aborted for some reason
202 * DONE: thread is done and is waiting to be reaped
203 * REQUEST: user-space has requested a sync (used with SYNC)
204 * CHECK: user-space request for check-only, no repair
205 * RESHAPE: A reshape is happening
206 *
207 * If neither SYNC nor RESHAPE is set, then it is a recovery.
208 */
209#define MD_RECOVERY_RUNNING 0
210#define MD_RECOVERY_SYNC 1
211#define MD_RECOVERY_RECOVER 2
212#define MD_RECOVERY_INTR 3
213#define MD_RECOVERY_DONE 4
214#define MD_RECOVERY_NEEDED 5
215#define MD_RECOVERY_REQUESTED 6
216#define MD_RECOVERY_CHECK 7
217#define MD_RECOVERY_RESHAPE 8
218#define MD_RECOVERY_FROZEN 9
219
220 unsigned long recovery;
221 int recovery_disabled; /* if we detect that recovery
222 * will always fail, set this
223 * so we don't loop trying */
224
225 int in_sync; /* know to not need resync */
226 struct mutex reconfig_mutex;
227 atomic_t active; /* general refcount */
228 atomic_t openers; /* number of active opens */
229
230 int changed; /* true if we might need to reread partition info */
231 int degraded; /* whether md should consider
232 * adding a spare
233 */
234 int barriers_work; /* initialised to true, cleared as soon
235 * as a barrier request to slave
236 * fails. Only supported
237 */
238 struct bio *biolist; /* bios that need to be retried
239 * because BIO_RW_BARRIER is not supported
240 */
241
242 atomic_t recovery_active; /* blocks scheduled, but not written */
243 wait_queue_head_t recovery_wait;
244 sector_t recovery_cp;
245 sector_t resync_min; /* user requested sync
246 * starts here */
247 sector_t resync_max; /* resync should pause
248 * when it gets here */
249
250 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
251 * file in sysfs.
252 */
253 struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
254
255 struct work_struct del_work; /* used for delayed sysfs removal */
256
257 spinlock_t write_lock;
258 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
259 atomic_t pending_writes; /* number of active superblock writes */
260
261 unsigned int safemode; /* if set, update "clean" superblock
262 * when no writes pending.
263 */
264 unsigned int safemode_delay;
265 struct timer_list safemode_timer;
266 atomic_t writes_pending;
267 struct request_queue *queue; /* for plugging ... */
268
269 atomic_t write_behind; /* outstanding async IO */
270 unsigned int max_write_behind; /* 0 = sync */
271
272 struct bitmap *bitmap; /* the bitmap for the device */
273 struct file *bitmap_file; /* the bitmap file */
274 long bitmap_offset; /* offset from superblock of
275 * start of bitmap. May be
276 * negative, but not '0'
277 */
278 long default_bitmap_offset; /* this is the offset to use when
279 * hot-adding a bitmap. It should
280 * eventually be settable by sysfs.
281 */
282
283 struct list_head all_mddevs;
284};
285
286
287static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
288{
289 int faulty = test_bit(Faulty, &rdev->flags);
290 if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
291 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
292}
293
294static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
295{
296 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
297}
298
299struct mdk_personality
300{
301 char *name;
302 int level;
303 struct list_head list;
304 struct module *owner;
305 int (*make_request)(struct request_queue *q, struct bio *bio);
306 int (*run)(mddev_t *mddev);
307 int (*stop)(mddev_t *mddev);
308 void (*status)(struct seq_file *seq, mddev_t *mddev);
309 /* error_handler must set ->faulty and clear ->in_sync
310 * if appropriate, and should abort recovery if needed
311 */
312 void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
313 int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
314 int (*hot_remove_disk) (mddev_t *mddev, int number);
315 int (*spare_active) (mddev_t *mddev);
316 sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
317 int (*resize) (mddev_t *mddev, sector_t sectors);
318 int (*check_reshape) (mddev_t *mddev);
319 int (*start_reshape) (mddev_t *mddev);
320 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
321 /* quiesce moves between quiescence states
322 * 0 - fully active
323 * 1 - no new requests allowed
324 * others - reserved
325 */
326 void (*quiesce) (mddev_t *mddev, int state);
327};
328
329
330struct md_sysfs_entry {
331 struct attribute attr;
332 ssize_t (*show)(mddev_t *, char *);
333 ssize_t (*store)(mddev_t *, const char *, size_t);
334};
335
336
337static inline char * mdname (mddev_t * mddev)
338{
339 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
340}
341
342/*
343 * iterates through some rdev ringlist. It's safe to remove the
344 * current 'rdev'. Don't touch 'tmp' though.
345 */
346#define rdev_for_each_list(rdev, tmp, head) \
347 list_for_each_entry_safe(rdev, tmp, head, same_set)
348
349/*
350 * iterates through the 'same array disks' ringlist
351 */
352#define rdev_for_each(rdev, tmp, mddev) \
353 list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
354
355#define rdev_for_each_rcu(rdev, mddev) \
356 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
357
358typedef struct mdk_thread_s {
359 void (*run) (mddev_t *mddev);
360 mddev_t *mddev;
361 wait_queue_head_t wqueue;
362 unsigned long flags;
363 struct task_struct *tsk;
364 unsigned long timeout;
365} mdk_thread_t;
366
367#define THREAD_WAKEUP 0
368
369#define __wait_event_lock_irq(wq, condition, lock, cmd) \
370do { \
371 wait_queue_t __wait; \
372 init_waitqueue_entry(&__wait, current); \
373 \
374 add_wait_queue(&wq, &__wait); \
375 for (;;) { \
376 set_current_state(TASK_UNINTERRUPTIBLE); \
377 if (condition) \
378 break; \
379 spin_unlock_irq(&lock); \
380 cmd; \
381 schedule(); \
382 spin_lock_irq(&lock); \
383 } \
384 current->state = TASK_RUNNING; \
385 remove_wait_queue(&wq, &__wait); \
386} while (0)
387
388#define wait_event_lock_irq(wq, condition, lock, cmd) \
389do { \
390 if (condition) \
391 break; \
392 __wait_event_lock_irq(wq, condition, lock, cmd); \
393} while (0)
394
395static inline void safe_put_page(struct page *p)
396{
397 if (p) put_page(p);
398}
399
400#endif /* CONFIG_BLOCK */
401#endif
402
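
The wait_event_lock_irq() helper deleted here sleeps on a condition while temporarily dropping a spinlock and re-running a command before each sleep. Typical md usage had this shape (a sketch built from the struct mddev_s fields above, not a quote of the md code):

	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    atomic_read(&mddev->pending_writes) == 0,
			    mddev->write_lock,
			    md_wakeup_thread(mddev->thread));
	spin_unlock_irq(&mddev->write_lock);
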
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
index 7192035fc4b0..fb1abb3367e9 100644
--- a/include/linux/raid/md_u.h
+++ b/include/linux/raid/md_u.h
@@ -15,6 +15,24 @@
15#ifndef _MD_U_H 15#ifndef _MD_U_H
16#define _MD_U_H 16#define _MD_U_H
17 17
18/*
19 * Different major versions are not compatible.
20 * Different minor versions are only downward compatible.
21 * Different patchlevel versions are downward and upward compatible.
22 */
23#define MD_MAJOR_VERSION 0
24#define MD_MINOR_VERSION 90
25/*
26 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
27 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
28 * and major_version/minor_version accordingly
29 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
30 * in the super status byte
31 * >=3 means that bitmap superblock version 4 is supported, which uses
32 * little-endian representation rather than host-endian
33 */
34#define MD_PATCHLEVEL_VERSION 3
35
18/* ioctls */ 36/* ioctls */
19 37
20/* status */ 38/* status */
@@ -46,6 +64,12 @@
46#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) 64#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
47#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) 65#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
48 66
67/* 63 partitions with the alternate major number (mdp) */
68#define MdpMinorShift 6
69#ifdef __KERNEL__
70extern int mdp_major;
71#endif
72
49typedef struct mdu_version_s { 73typedef struct mdu_version_s {
50 int major; 74 int major;
51 int minor; 75 int minor;
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s {
85 109
86} mdu_array_info_t; 110} mdu_array_info_t;
87 111
112/* non-obvious values for 'level' */
113#define LEVEL_MULTIPATH (-4)
114#define LEVEL_LINEAR (-1)
115#define LEVEL_FAULTY (-5)
116
117/* we need a value for 'no level specified' and 0
118 * means 'raid0', so we need something else. This is
119 * for internal use only
120 */
121#define LEVEL_NONE (-1000000)
122
88typedef struct mdu_disk_info_s { 123typedef struct mdu_disk_info_s {
89 /* 124 /*
90 * configuration/status of one particular disk 125 * configuration/status of one particular disk
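
With the LEVEL_* constants now visible to userspace next to mdu_array_info_t, a tool can interpret the level field directly. A sketch (GET_ARRAY_INFO is the status ioctl defined earlier in this header; error handling omitted):

	mdu_array_info_t info;

	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0) {
		if (info.level == LEVEL_LINEAR)
			printf("linear array\n");
		else if (info.level == LEVEL_MULTIPATH)
			printf("multipath\n");
		else if (info.level >= 0)
			printf("raid%d\n", info.level);
	}
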
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h
deleted file mode 100644
index 6f53fc177a47..000000000000
--- a/include/linux/raid/multipath.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _MULTIPATH_H
2#define _MULTIPATH_H
3
4#include <linux/raid/md.h>
5
6struct multipath_info {
7 mdk_rdev_t *rdev;
8};
9
10struct multipath_private_data {
11 mddev_t *mddev;
12 struct multipath_info *multipaths;
13 int raid_disks;
14 int working_disks;
15 spinlock_t device_lock;
16 struct list_head retry_list;
17
18 mempool_t *pool;
19};
20
21typedef struct multipath_private_data multipath_conf_t;
22
23/*
24 * this is the only point in the RAID code where we violate
25 * C type safety. mddev->private is an 'opaque' pointer.
26 */
27#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
28
29/*
30 * this is our 'private' 'collective' MULTIPATH buffer head.
31 * it contains information about what kind of IO operations were started
32 * for this MULTIPATH operation, and about their status:
33 */
34
35struct multipath_bh {
36 mddev_t *mddev;
37 struct bio *master_bio;
38 struct bio bio;
39 int path;
40 struct list_head retry_list;
41};
42#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
new file mode 100644
index 000000000000..d92480f8285c
--- /dev/null
+++ b/include/linux/raid/pq.h
@@ -0,0 +1,132 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2003 H. Peter Anvin - All Rights Reserved
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
8 * Boston MA 02111-1307, USA; either version 2 of the License, or
9 * (at your option) any later version; incorporated herein by reference.
10 *
11 * ----------------------------------------------------------------------- */
12
13#ifndef LINUX_RAID_RAID6_H
14#define LINUX_RAID_RAID6_H
15
16#ifdef __KERNEL__
17
18/* Set to 1 to use kernel-wide empty_zero_page */
19#define RAID6_USE_EMPTY_ZERO_PAGE 0
20#include <linux/blkdev.h>
21
22/* We need a pre-zeroed page... if we don't want to use the kernel-provided
23 one, define it here */
24#if RAID6_USE_EMPTY_ZERO_PAGE
25# define raid6_empty_zero_page empty_zero_page
26#else
27extern const char raid6_empty_zero_page[PAGE_SIZE];
28#endif
29
30#else /* ! __KERNEL__ */
31/* Used for testing in user space */
32
33#include <errno.h>
34#include <inttypes.h>
35#include <limits.h>
36#include <stddef.h>
37#include <sys/mman.h>
38#include <sys/types.h>
39
40/* Not standard, but glibc defines it */
41#define BITS_PER_LONG __WORDSIZE
42
43typedef uint8_t u8;
44typedef uint16_t u16;
45typedef uint32_t u32;
46typedef uint64_t u64;
47
48#ifndef PAGE_SIZE
49# define PAGE_SIZE 4096
50#endif
51extern const char raid6_empty_zero_page[PAGE_SIZE];
52
53#define __init
54#define __exit
55#define __attribute_const__ __attribute__((const))
56#define noinline __attribute__((noinline))
57
58#define preempt_enable()
59#define preempt_disable()
60#define cpu_has_feature(x) 1
61#define enable_kernel_altivec()
62#define disable_kernel_altivec()
63
64#define EXPORT_SYMBOL(sym)
65#define MODULE_LICENSE(licence)
66#define subsys_initcall(x)
67#define module_exit(x)
68#endif /* __KERNEL__ */
69
70/* Routine choices */
71struct raid6_calls {
72 void (*gen_syndrome)(int, size_t, void **);
73 int (*valid)(void); /* Returns 1 if this routine set is usable */
74 const char *name; /* Name of this routine set */
75 int prefer; /* Has special performance attribute */
76};
77
78/* Selected algorithm */
79extern struct raid6_calls raid6_call;
80
81/* Algorithm list */
82extern const struct raid6_calls * const raid6_algos[];
83int raid6_select_algo(void);
84
85/* Return values from chk_syndrome */
86#define RAID6_OK 0
87#define RAID6_P_BAD 1
88#define RAID6_Q_BAD 2
89#define RAID6_PQ_BAD 3
90
91/* Galois field tables */
92extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
93extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
94extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
95extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
96
97/* Recovery routines */
98void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
99 void **ptrs);
100void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs);
101void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
102 void **ptrs);
103
104/* Some definitions to allow code to be compiled for testing in userspace */
105#ifndef __KERNEL__
106
107# define jiffies raid6_jiffies()
108# define printk printf
109# define GFP_KERNEL 0
110# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
111 PROT_READ|PROT_WRITE, \
112 MAP_PRIVATE|MAP_ANONYMOUS,\
113 0, 0))
114# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE)
115
116static inline void cpu_relax(void)
117{
118 /* Nothing */
119}
120
121#undef HZ
122#define HZ 1000
123static inline uint32_t raid6_jiffies(void)
124{
125 struct timeval tv;
126 gettimeofday(&tv, NULL);
127 return tv.tv_sec*1000 + tv.tv_usec/1000;
128}
129
130#endif /* ! __KERNEL__ */
131
132#endif /* LINUX_RAID_RAID6_H */
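
A minimal sketch of driving the syndrome interface declared above, assuming
ptrs[] holds disks pre-allocated blocks with P and Q in the last two slots
(pq_sketch and its arguments are illustrative, not part of this header):

	#include <linux/raid/pq.h>

	/* disks = number of data blocks + 2 (P and Q); each block is 'bytes' long */
	static void pq_sketch(int disks, size_t bytes, void **ptrs)
	{
		/* raid6_select_algo() picked raid6_call at init time */
		raid6_call.gen_syndrome(disks, bytes, ptrs);

		/* if data blocks 0 and 1 are later lost, point ptrs[0] and
		 * ptrs[1] at scratch pages, then rebuild them from the
		 * survivors plus P and Q:
		 */
		raid6_2data_recov(disks, bytes, 0, 1, ptrs);
	}
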
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
deleted file mode 100644
index fd42aa87c391..000000000000
--- a/include/linux/raid/raid0.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _RAID0_H
2#define _RAID0_H
3
4#include <linux/raid/md.h>
5
6struct strip_zone
7{
8 sector_t zone_start; /* Zone offset in md_dev (in sectors) */
9 sector_t dev_start; /* Zone offset in real dev (in sectors) */
10 sector_t sectors; /* Zone size in sectors */
11 int nb_dev; /* # of devices attached to the zone */
12 mdk_rdev_t **dev; /* Devices attached to the zone */
13};
14
15struct raid0_private_data
16{
17 struct strip_zone **hash_table; /* Table of indexes into strip_zone */
18 struct strip_zone *strip_zone;
19 mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
20 int nr_strip_zones;
21
22 sector_t spacing;
23 int sector_shift; /* shift this before divide by spacing */
24};
25
26typedef struct raid0_private_data raid0_conf_t;
27
28#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
29
30#endif
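
A hedged sketch of the lookup implied by the "shift this before divide by
spacing" comment; find_zone is an illustrative name, the real raid0 request
path does the equivalent inline:

	static struct strip_zone *find_zone(raid0_conf_t *conf, sector_t sector)
	{
		sector_t x = sector >> conf->sector_shift;

		sector_div(x, (u32)conf->spacing);
		/* the hash gives a starting zone; callers still compare
		 * against zone_start/sectors to land in the right zone
		 */
		return conf->hash_table[x];
	}
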
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
deleted file mode 100644
index 0a9ba7c3302e..000000000000
--- a/include/linux/raid/raid1.h
+++ /dev/null
@@ -1,134 +0,0 @@
1#ifndef _RAID1_H
2#define _RAID1_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13/*
14 * memory pools need a pointer to the mddev, so they can force an unplug
15 * when memory is tight, and a count of the number of drives that the
16 * pool was allocated for, so they know how much to allocate and free.
17 * mddev->raid_disks cannot be used, as it can change while a pool is active
18 * These two values are stored in a kmalloced struct.
19 */
20
21struct pool_info {
22 mddev_t *mddev;
23 int raid_disks;
24};
25
26
27typedef struct r1bio_s r1bio_t;
28
29struct r1_private_data_s {
30 mddev_t *mddev;
31 mirror_info_t *mirrors;
32 int raid_disks;
33 int last_used;
34 sector_t next_seq_sect;
35 spinlock_t device_lock;
36
37 struct list_head retry_list;
38 /* queue pending writes and submit them on unplug */
39 struct bio_list pending_bio_list;
40 /* queue of writes that have been unplugged */
41 struct bio_list flushing_bio_list;
42
43 /* for use when syncing mirrors: */
44
45 spinlock_t resync_lock;
46 int nr_pending;
47 int nr_waiting;
48 int nr_queued;
49 int barrier;
50 sector_t next_resync;
51 int fullsync; /* set to 1 if a full sync is needed,
52 * (fresh device added).
53 * Cleared when a sync completes.
54 */
55
56 wait_queue_head_t wait_barrier;
57
58 struct pool_info *poolinfo;
59
60 struct page *tmppage;
61
62 mempool_t *r1bio_pool;
63 mempool_t *r1buf_pool;
64};
65
66typedef struct r1_private_data_s conf_t;
67
68/*
69 * this is the only point in the RAID code where we violate
70 * C type safety. mddev->private is an 'opaque' pointer.
71 */
72#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
73
74/*
75 * this is our 'private' RAID1 bio.
76 *
77 * it contains information about what kind of IO operations were started
78 * for this RAID1 operation, and about their status:
79 */
80
81struct r1bio_s {
82 atomic_t remaining; /* 'have we finished' count,
83 * used from IRQ handlers
84 */
85 atomic_t behind_remaining; /* number of write-behind ios remaining
86 * in this BehindIO request
87 */
88 sector_t sector;
89 int sectors;
90 unsigned long state;
91 mddev_t *mddev;
92 /*
93 * original bio going to /dev/mdx
94 */
95 struct bio *master_bio;
96 /*
97 * if the IO is in READ direction, then this is where we read
98 */
99 int read_disk;
100
101 struct list_head retry_list;
102 struct bitmap_update *bitmap_update;
103 /*
104 * if the IO is in WRITE direction, then multiple bios are used.
105 * We choose the number when they are allocated.
106 */
107 struct bio *bios[0];
108 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r1bio.state */
119#define R1BIO_Uptodate 0
120#define R1BIO_IsSync 1
121#define R1BIO_Degraded 2
122#define R1BIO_BehindIO 3
123#define R1BIO_Barrier 4
124#define R1BIO_BarrierRetry 5
125/* For write-behind requests, we call bi_end_io when
126 * the last non-write-behind device completes, provided
127 * any write was successful. Otherwise we call when
128 * any write-behind write succeeds; if none succeed, we call
129 * with failure when the last write completes (and all failed).
130 * Record that bi_end_io was called with this flag...
131 */
132#define R1BIO_Returned 6
133
134#endif
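
The "DO NOT PUT ANY NEW FIELDS HERE" warning exists because every r1bio is
allocated together with its trailing bios[] array in one chunk, sized from
pool_info. A sketch of the mempool allocator that relies on this layout
(modelled on raid1.c, shown here as an assumption):

	static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
	{
		struct pool_info *pi = data;
		/* one trailing bio pointer per mirror, right after the struct */
		int size = offsetof(struct r1bio_s, bios[pi->raid_disks]);

		return kzalloc(size, gfp_flags);
	}
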
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
deleted file mode 100644
index e9091cfeb286..000000000000
--- a/include/linux/raid/raid10.h
+++ /dev/null
@@ -1,123 +0,0 @@
1#ifndef _RAID10_H
2#define _RAID10_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13typedef struct r10bio_s r10bio_t;
14
15struct r10_private_data_s {
16 mddev_t *mddev;
17 mirror_info_t *mirrors;
18 int raid_disks;
19 spinlock_t device_lock;
20
21 /* geometry */
22 int near_copies; /* number of copies laid out raid0 style */
23 int far_copies; /* number of copies laid out
24 * at large strides across drives
25 */
26 int far_offset; /* far_copies are offset by 1 stripe
27 * instead of many
28 */
29 int copies; /* near_copies * far_copies.
30 * must be <= raid_disks
31 */
32 sector_t stride; /* distance between far copies.
33 * This is size / far_copies unless
34 * far_offset, in which case it is
35 * 1 stripe.
36 */
37
38 int chunk_shift; /* shift from chunks to sectors */
39 sector_t chunk_mask;
40
41 struct list_head retry_list;
42 /* queue pending writes and submit them on unplug */
43 struct bio_list pending_bio_list;
44
45
46 spinlock_t resync_lock;
47 int nr_pending;
48 int nr_waiting;
49 int nr_queued;
50 int barrier;
51 sector_t next_resync;
52 int fullsync; /* set to 1 if a full sync is needed,
53 * (fresh device added).
54 * Cleared when a sync completes.
55 */
56
57 wait_queue_head_t wait_barrier;
58
59 mempool_t *r10bio_pool;
60 mempool_t *r10buf_pool;
61 struct page *tmppage;
62};
63
64typedef struct r10_private_data_s conf_t;
65
66/*
67 * this is the only point in the RAID code where we violate
68 * C type safety. mddev->private is an 'opaque' pointer.
69 */
70#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
71
72/*
73 * this is our 'private' RAID10 bio.
74 *
75 * it contains information about what kind of IO operations were started
76 * for this RAID10 operation, and about their status:
77 */
78
79struct r10bio_s {
80 atomic_t remaining; /* 'have we finished' count,
81 * used from IRQ handlers
82 */
83 sector_t sector; /* virtual sector number */
84 int sectors;
85 unsigned long state;
86 mddev_t *mddev;
87 /*
88 * original bio going to /dev/mdx
89 */
90 struct bio *master_bio;
91 /*
92 * if the IO is in READ direction, then this is where we read
93 */
94 int read_slot;
95
96 struct list_head retry_list;
97 /*
98 * if the IO is in WRITE direction, then multiple bios are used,
99 * one for each copy.
100 * When resyncing we also use one for each copy.
101 * When reconstructing, we use 2 bios, one for read, one for write.
102 * We choose the number when they are allocated.
103 */
104 struct {
105 struct bio *bio;
106 sector_t addr;
107 int devnum;
108 } devs[0];
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r10bio.state */
119#define R10BIO_Uptodate 0
120#define R10BIO_IsSync 1
121#define R10BIO_IsRecover 2
122#define R10BIO_Degraded 3
123#endif
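
As a worked example of the geometry fields above (numbers illustrative): with
raid_disks = 4, near_copies = 2 and far_copies = 1, copies = 2 and each chunk
lands on two adjacent drives raid0-style, much like raid1+0. With
near_copies = 1 and far_copies = 2, each chunk is written once near the front
of one drive and again at a large stride on another, with stride =
size / far_copies, or a single stripe when far_offset is set.
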
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
deleted file mode 100644
index 3b2672792457..000000000000
--- a/include/linux/raid/raid5.h
+++ /dev/null
@@ -1,402 +0,0 @@
1#ifndef _RAID5_H
2#define _RAID5_H
3
4#include <linux/raid/md.h>
5#include <linux/raid/xor.h>
6
7/*
8 *
9 * Each stripe contains one buffer per disc. Each buffer can be in
10 * one of a number of states stored in "flags". Changes between
11 * these states happen *almost* exclusively under a per-stripe
12 * spinlock. Some very specific changes can happen in bi_end_io, and
13 * these are not protected by the spin lock.
14 *
15 * The flag bits that are used to represent these states are:
16 * R5_UPTODATE and R5_LOCKED
17 *
18 * State Empty == !UPTODATE, !LOCK
19 * We have no data, and there is no active request
20 * State Want == !UPTODATE, LOCK
21 * A read request is being submitted for this block
22 * State Dirty == UPTODATE, LOCK
23 * Some new data is in this buffer, and it is being written out
24 * State Clean == UPTODATE, !LOCK
25 * We have valid data which is the same as on disc
26 *
27 * The possible state transitions are:
28 *
29 * Empty -> Want - on read or write to get old data for parity calc
30 * Empty -> Dirty - on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE)
31 * Empty -> Clean - on compute_block when computing a block for failed drive
32 * Want -> Empty - on failed read
33 * Want -> Clean - on successful completion of read request
34 * Dirty -> Clean - on successful completion of write request
35 * Dirty -> Clean - on failed write
36 * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
37 *
38 * The Want->Empty, Want->Clean and Dirty->Clean transitions
39 * all happen in b_end_io at interrupt time.
40 * Each sets the Uptodate bit before releasing the Lock bit.
41 * This leaves one multi-stage transition:
42 * Want->Dirty->Clean
43 * This is safe because thinking that a Clean buffer is actually dirty
44 * will at worst delay some action, and the stripe will be scheduled
45 * for attention after the transition is complete.
46 *
47 * There is one possibility that is not covered by these states. That
48 * is if one drive has failed and there is a spare being rebuilt. We
49 * can't distinguish between a clean block that has been generated
50 * from parity calculations, and a clean block that has been
51 * successfully written to the spare (or to parity when resyncing).
52 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
53 * is set whenever a write is scheduled to the spare, or to the parity
54 * disc if there is no spare. A sync request clears this bit, and
55 * when we find it set with no buffers locked, we know the sync is
56 * complete.
57 *
58 * Buffers for the md device that arrive via make_request are attached
59 * to the appropriate stripe in one of two lists linked on b_reqnext.
60 * One list (bh_read) for read requests, one (bh_write) for write.
61 * There should never be more than one buffer on the two lists
62 * together, but we have no guarantee of that, so we allow for more.
63 *
64 * If a buffer is on the read list when the associated cache buffer is
65 * Uptodate, the data is copied into the read buffer and its b_end_io
66 * routine is called. This may happen in the end_request routine only
67 * if the buffer has just successfully been read. end_request should
68 * remove the buffers from the list and then set the Uptodate bit on
69 * the buffer. Other threads may do this only if they first check
70 * that the Uptodate bit is set. Once they have checked that they may
71 * take buffers off the read queue.
72 *
73 * When a buffer on the write list is committed for write it is copied
74 * into the cache buffer, which is then marked dirty, and moved onto a
75 * third list, the written list (bh_written). Once both the parity
76 * block and the cached buffer are successfully written, any buffer on
77 * a written list can be returned with b_end_io.
78 *
79 * The write list and read list both act as fifos. The read list is
80 * protected by the device_lock. The write and written lists are
81 * protected by the stripe lock. The device_lock, which can be
83 * claimed while the stripe lock is held, is only for list
83 * manipulations and will only be held for a very short time. It can
84 * be claimed from interrupts.
85 *
86 *
87 * Stripes in the stripe cache can be on one of two lists (or on
88 * neither). The "inactive_list" contains stripes which are not
89 * currently being used for any request. They can freely be reused
90 * for another stripe. The "handle_list" contains stripes that need
91 * to be handled in some way. Both of these are fifo queues. Each
92 * stripe is also (potentially) linked to a hash bucket in the hash
93 * table so that it can be found by sector number. Stripes that are
94 * not hashed must be on the inactive_list, and will normally be at
95 * the front. All stripes start life this way.
96 *
97 * The inactive_list, handle_list and hash bucket lists are all protected by the
98 * device_lock.
99 * - stripes on the inactive_list never have their stripe_lock held.
100 * - stripes have a reference counter. If count==0, they are on a list.
101 * - If a stripe might need handling, STRIPE_HANDLE is set.
102 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
103 * handle_list else inactive_list
104 *
105 * This, combined with the fact that STRIPE_HANDLE is only ever
106 * cleared while a stripe has a non-zero count means that if the
107 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
108 * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
109 * the stripe is on inactive_list.
110 *
111 * The possible transitions are:
112 * activate an unhashed/inactive stripe (get_active_stripe())
113 * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
114 * activate a hashed, possibly active stripe (get_active_stripe())
115 * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
116 * attach a request to an active stripe (add_stripe_bh())
117 * lockdev attach-buffer unlockdev
118 * handle a stripe (handle_stripe())
119 * lockstripe clrSTRIPE_HANDLE ...
120 * (lockdev check-buffers unlockdev) ..
121 * change-state ..
122 * record io/ops needed unlockstripe schedule io/ops
123 * release an active stripe (release_stripe())
124 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
125 *
126 * The refcount counts each thread that has activated the stripe,
127 * plus raid5d if it is handling it, plus one for each active request
128 * on a cached buffer, and plus one if the stripe is undergoing stripe
129 * operations.
130 *
131 * Stripe operations are performed outside the stripe lock,
132 * the stripe operations are:
133 * -copying data between the stripe cache and user application buffers
134 * -computing blocks to save a disk access, or to recover a missing block
135 * -updating the parity on a write operation (reconstruct write and
136 * read-modify-write)
137 * -checking parity correctness
138 * -running i/o to disk
139 * These operations are carried out by raid5_run_ops which uses the async_tx
140 * api to (optionally) offload operations to dedicated hardware engines.
141 * When requesting an operation handle_stripe sets the pending bit for the
142 * operation and increments the count. raid5_run_ops is then run whenever
143 * the count is non-zero.
144 * There are some critical dependencies between the operations that prevent some
145 * from being requested while another is in flight.
146 * 1/ Parity check operations destroy the in cache version of the parity block,
147 * so we prevent parity dependent operations like writes and compute_blocks
148 * from starting while a check is in progress. Some dma engines can perform
149 * the check without damaging the parity block, in these cases the parity
150 * block is re-marked up to date (assuming the check was successful) and is
151 * not re-read from disk.
152 * 2/ When a write operation is requested we immediately lock the affected
153 * blocks, and mark them as not up to date. This causes new read requests
154 * to be held off, as well as parity checks and compute block operations.
155 * 3/ Once a compute block operation has been requested handle_stripe treats
156 * that block as if it is up to date. raid5_run_ops guarantees that any
157 * operation that is dependent on the compute block result is initiated after
158 * the compute block completes.
159 */
160
161/*
162 * Operations state - intermediate states that are visible outside of sh->lock
163 * In general _idle indicates nothing is running, _run indicates a data
164 * processing operation is active, and _result means the data processing result
165 * is stable and can be acted upon. For simple operations like biofill and
166 * compute that only have an _idle and _run state, they are indicated with
167 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
168 */
169/**
170 * enum check_states - handles syncing / repairing a stripe
171 * @check_state_idle - check operations are quiesced
172 * @check_state_run - check operation is running
173 * @check_state_result - set outside lock when check result is valid
174 * @check_state_compute_run - check failed and we are repairing
175 * @check_state_compute_result - set outside lock when compute result is valid
176 */
177enum check_states {
178 check_state_idle = 0,
179 check_state_run, /* parity check */
180 check_state_check_result,
181 check_state_compute_run, /* parity repair */
182 check_state_compute_result,
183};
184
185/**
186 * enum reconstruct_states - handles writing or expanding a stripe
187 */
188enum reconstruct_states {
189 reconstruct_state_idle = 0,
190 reconstruct_state_prexor_drain_run, /* prexor-write */
191 reconstruct_state_drain_run, /* write */
192 reconstruct_state_run, /* expand */
193 reconstruct_state_prexor_drain_result,
194 reconstruct_state_drain_result,
195 reconstruct_state_result,
196};
197
198struct stripe_head {
199 struct hlist_node hash;
200 struct list_head lru; /* inactive_list or handle_list */
201 struct raid5_private_data *raid_conf;
202 sector_t sector; /* sector of this row */
203 int pd_idx; /* parity disk index */
204 unsigned long state; /* state flags */
205 atomic_t count; /* nr of active thread/requests */
206 spinlock_t lock;
207 int bm_seq; /* sequence number for bitmap flushes */
208 int disks; /* disks in stripe */
209 enum check_states check_state;
210 enum reconstruct_states reconstruct_state;
211 /* stripe_operations
212 * @target - STRIPE_OP_COMPUTE_BLK target
213 */
214 struct stripe_operations {
215 int target;
216 u32 zero_sum_result;
217 } ops;
218 struct r5dev {
219 struct bio req;
220 struct bio_vec vec;
221 struct page *page;
222 struct bio *toread, *read, *towrite, *written;
223 sector_t sector; /* sector of this page */
224 unsigned long flags;
225 } dev[1]; /* allocated with extra space depending on RAID geometry */
226};
227
228/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
229 * for handle_stripe. It is only valid under spin_lock(sh->lock);
230 */
231struct stripe_head_state {
232 int syncing, expanding, expanded;
233 int locked, uptodate, to_read, to_write, failed, written;
234 int to_fill, compute, req_compute, non_overwrite;
235 int failed_num;
236 unsigned long ops_request;
237};
238
239/* r6_state - extra state data only relevant to r6 */
240struct r6_state {
241 int p_failed, q_failed, qd_idx, failed_num[2];
242};
243
244/* Flags */
245#define R5_UPTODATE 0 /* page contains current data */
246#define R5_LOCKED 1 /* IO has been submitted on "req" */
247#define R5_OVERWRITE 2 /* towrite covers whole page */
248/* and some that are internal to handle_stripe */
249#define R5_Insync 3 /* rdev && rdev->in_sync at start */
250#define R5_Wantread 4 /* want to schedule a read */
251#define R5_Wantwrite 5
252#define R5_Overlap 7 /* There is a pending overlapping request on this block */
253#define R5_ReadError 8 /* seen a read error here recently */
254#define R5_ReWrite 9 /* have tried to over-write the readerror */
255
256#define R5_Expanded 10 /* This block now has post-expand data */
257#define R5_Wantcompute 11 /* compute_block in progress treat as
258 * uptodate
259 */
260#define R5_Wantfill 12 /* dev->toread contains a bio that needs
261 * filling
262 */
263#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
264/*
265 * Write method
266 */
267#define RECONSTRUCT_WRITE 1
268#define READ_MODIFY_WRITE 2
269/* not a write method, but a compute_parity mode */
270#define CHECK_PARITY 3
271
272/*
273 * Stripe state
274 */
275#define STRIPE_HANDLE 2
276#define STRIPE_SYNCING 3
277#define STRIPE_INSYNC 4
278#define STRIPE_PREREAD_ACTIVE 5
279#define STRIPE_DELAYED 6
280#define STRIPE_DEGRADED 7
281#define STRIPE_BIT_DELAY 8
282#define STRIPE_EXPANDING 9
283#define STRIPE_EXPAND_SOURCE 10
284#define STRIPE_EXPAND_READY 11
285#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
286#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
287#define STRIPE_BIOFILL_RUN 14
288#define STRIPE_COMPUTE_RUN 15
289/*
290 * Operation request flags
291 */
292#define STRIPE_OP_BIOFILL 0
293#define STRIPE_OP_COMPUTE_BLK 1
294#define STRIPE_OP_PREXOR 2
295#define STRIPE_OP_BIODRAIN 3
296#define STRIPE_OP_POSTXOR 4
297#define STRIPE_OP_CHECK 5
298
299/*
300 * Plugging:
301 *
302 * To improve write throughput, we need to delay the handling of some
303 * stripes until there has been a chance that several write requests
304 * for the one stripe have all been collected.
305 * In particular, any write request that would require pre-reading
306 * is put on a "delayed" queue until there are no stripes currently
307 * in a pre-read phase. Further, if the "delayed" queue is empty when
308 * a stripe is put on it then we "plug" the queue and do not process it
309 * until an unplug call is made (i.e. the unplug_io_fn() is called).
310 *
311 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
312 * it to the count of prereading stripes.
313 * When write is initiated, or the stripe refcnt == 0 (just in case) we
314 * clear the PREREAD_ACTIVE flag and decrement the count.
315 * Whenever the 'handle' queue is empty and the device is not plugged, we
316 * move any stripes from delayed to handle and clear the DELAYED flag and set
317 * PREREAD_ACTIVE.
318 * In stripe_handle, if we find pre-reading is necessary, we do it if
319 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
320 * HANDLE gets cleared if stripe_handle leaves nothing locked.
321 */
322
323
324struct disk_info {
325 mdk_rdev_t *rdev;
326};
327
328struct raid5_private_data {
329 struct hlist_head *stripe_hashtbl;
330 mddev_t *mddev;
331 struct disk_info *spare;
332 int chunk_size, level, algorithm;
333 int max_degraded;
334 int raid_disks;
335 int max_nr_stripes;
336
337 /* used during an expand */
338 sector_t expand_progress; /* MaxSector when no expand happening */
339 sector_t expand_lo; /* from here up to expand_progress is out-of-bounds
340 * as we haven't flushed the metadata yet
341 */
342 int previous_raid_disks;
343
344 struct list_head handle_list; /* stripes needing handling */
345 struct list_head hold_list; /* preread ready stripes */
346 struct list_head delayed_list; /* stripes that have plugged requests */
347 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
348 struct bio *retry_read_aligned; /* currently retrying aligned bios */
349 struct bio *retry_read_aligned_list; /* aligned bios retry list */
350 atomic_t preread_active_stripes; /* stripes with scheduled io */
351 atomic_t active_aligned_reads;
352 atomic_t pending_full_writes; /* full write backlog */
353 int bypass_count; /* bypassed prereads */
354 int bypass_threshold; /* preread nice */
355 struct list_head *last_hold; /* detect hold_list promotions */
356
357 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
358 /* unfortunately we need two cache names as we temporarily have
359 * two caches.
360 */
361 int active_name;
362 char cache_name[2][20];
363 struct kmem_cache *slab_cache; /* for allocating stripes */
364
365 int seq_flush, seq_write;
366 int quiesce;
367
368 int fullsync; /* set to 1 if a full sync is needed,
369 * (fresh device added).
370 * Cleared when a sync completes.
371 */
372
373 struct page *spare_page; /* Used when checking P/Q in raid6 */
374
375 /*
376 * Free stripes pool
377 */
378 atomic_t active_stripes;
379 struct list_head inactive_list;
380 wait_queue_head_t wait_for_stripe;
381 wait_queue_head_t wait_for_overlap;
382 int inactive_blocked; /* release of inactive stripes blocked,
383 * waiting for 25% to be free
384 */
385 int pool_size; /* number of disks in stripeheads in pool */
386 spinlock_t device_lock;
387 struct disk_info *disks;
388};
389
390typedef struct raid5_private_data raid5_conf_t;
391
392#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
393
394/*
395 * Our supported algorithms
396 */
397#define ALGORITHM_LEFT_ASYMMETRIC 0
398#define ALGORITHM_RIGHT_ASYMMETRIC 1
399#define ALGORITHM_LEFT_SYMMETRIC 2
400#define ALGORITHM_RIGHT_SYMMETRIC 3
401
402#endif
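
A condensed sketch of the release rule spelled out above ("when refcount
reaches zero, then if STRIPE_HANDLE it is put on handle_list else
inactive_list"); bitmap and expansion details are omitted and the wakeups are
assumptions based on the wait queues in raid5_private_data:

	/* caller holds conf->device_lock */
	static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
	{
		if (atomic_dec_and_test(&sh->count)) {
			if (test_bit(STRIPE_HANDLE, &sh->state)) {
				list_add_tail(&sh->lru, &conf->handle_list);
				md_wakeup_thread(conf->mddev->thread);
			} else {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
			}
		}
	}
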
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 3e120587eada..5a210959e3f8 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -1,8 +1,6 @@
1#ifndef _XOR_H 1#ifndef _XOR_H
2#define _XOR_H 2#define _XOR_H
3 3
4#include <linux/raid/md.h>
5
6#define MAX_XOR_BLOCKS 4 4#define MAX_XOR_BLOCKS 4
7 5
8extern void xor_blocks(unsigned int count, unsigned int bytes, 6extern void xor_blocks(unsigned int count, unsigned int bytes,
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h
index e84b0a9feda5..a6d014005d49 100644
--- a/include/linux/regulator/bq24022.h
+++ b/include/linux/regulator/bq24022.h
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13struct regulator_init_data;
14
13/** 15/**
14 * bq24022_mach_info - platform data for bq24022 16 * bq24022_mach_info - platform data for bq24022
15 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging 17 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging
@@ -18,4 +20,5 @@
18struct bq24022_mach_info { 20struct bq24022_mach_info {
19 int gpio_nce; 21 int gpio_nce;
20 int gpio_iset2; 22 int gpio_iset2;
23 struct regulator_init_data *init_data;
21}; 24};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 801bf77ff4e2..277f4b964df5 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -88,6 +88,7 @@
88 * FAIL Regulator output has failed. 88 * FAIL Regulator output has failed.
89 * OVER_TEMP Regulator over temp. 89 * OVER_TEMP Regulator over temp.
90 * FORCE_DISABLE Regulator shut down by software. 90 * FORCE_DISABLE Regulator shut down by software.
91 * VOLTAGE_CHANGE Regulator voltage changed.
91 * 92 *
92 * NOTE: These events can be OR'ed together when passed into handler. 93 * NOTE: These events can be OR'ed together when passed into handler.
93 */ 94 */
@@ -98,6 +99,7 @@
98#define REGULATOR_EVENT_FAIL 0x08 99#define REGULATOR_EVENT_FAIL 0x08
99#define REGULATOR_EVENT_OVER_TEMP 0x10 100#define REGULATOR_EVENT_OVER_TEMP 0x10
100#define REGULATOR_EVENT_FORCE_DISABLE 0x20 101#define REGULATOR_EVENT_FORCE_DISABLE 0x20
102#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
101 103
102struct regulator; 104struct regulator;
103 105
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers,
140void regulator_bulk_free(int num_consumers, 142void regulator_bulk_free(int num_consumers,
141 struct regulator_bulk_data *consumers); 143 struct regulator_bulk_data *consumers);
142 144
145int regulator_count_voltages(struct regulator *regulator);
146int regulator_list_voltage(struct regulator *regulator, unsigned selector);
143int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); 147int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
144int regulator_get_voltage(struct regulator *regulator); 148int regulator_get_voltage(struct regulator *regulator);
145int regulator_set_current_limit(struct regulator *regulator, 149int regulator_set_current_limit(struct regulator *regulator,
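
A consumer-side sketch of the two calls added above; the "vcc" supply name
and the enumerating helper are illustrative:

	static void list_supply_voltages(struct device *dev)
	{
		struct regulator *vcc = regulator_get(dev, "vcc");
		int i, n;

		if (IS_ERR(vcc))
			return;
		n = regulator_count_voltages(vcc);
		for (i = 0; i < n; i++) {
			int uV = regulator_list_voltage(vcc, i);

			if (uV > 0)	/* 0: selector unusable on this system */
				dev_info(dev, "selector %d: %d uV\n", i, uV);
		}
		regulator_put(vcc);
	}
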
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 2dae05705f13..4848d8dacd90 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -21,25 +21,38 @@
21struct regulator_dev; 21struct regulator_dev;
22struct regulator_init_data; 22struct regulator_init_data;
23 23
24enum regulator_status {
25 REGULATOR_STATUS_OFF,
26 REGULATOR_STATUS_ON,
27 REGULATOR_STATUS_ERROR,
28 /* fast/normal/idle/standby are flavors of "on" */
29 REGULATOR_STATUS_FAST,
30 REGULATOR_STATUS_NORMAL,
31 REGULATOR_STATUS_IDLE,
32 REGULATOR_STATUS_STANDBY,
33};
34
24/** 35/**
25 * struct regulator_ops - regulator operations. 36 * struct regulator_ops - regulator operations.
26 * 37 *
27 * This struct describes regulator operations which can be implemented by 38 * @enable: Configure the regulator as enabled.
28 * regulator chip drivers. 39 * @disable: Configure the regulator as disabled.
29 *
30 * @enable: Enable the regulator.
31 * @disable: Disable the regulator.
32 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. 40 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
33 * 41 *
34 * @set_voltage: Set the voltage for the regulator within the range specified. 42 * @set_voltage: Set the voltage for the regulator within the range specified.
35 * The driver should select the voltage closest to min_uV. 43 * The driver should select the voltage closest to min_uV.
36 * @get_voltage: Return the currently configured voltage for the regulator. 44 * @get_voltage: Return the currently configured voltage for the regulator.
45 * @list_voltage: Return one of the supported voltages, in microvolts; zero
46 * if the selector indicates a voltage that is unusable on this system;
47 * or negative errno. Selectors range from zero to one less than
48 * regulator_desc.n_voltages. Voltages may be reported in any order.
37 * 49 *
38 * @set_current_limit: Configure a limit for a current-limited regulator. 50 * @set_current_limit: Configure a limit for a current-limited regulator.
39 * @get_current_limit: Get the limit for a current-limited regulator. 51 * @get_current_limit: Get the configured limit for a current-limited regulator.
40 * 52 *
41 * @set_mode: Set the operating mode for the regulator. 53 * @get_mode: Get the configured operating mode for the regulator.
42 * @get_mode: Get the current operating mode for the regulator. 54 * @get_status: Return actual (not as-configured) status of regulator, as a
55 * REGULATOR_STATUS value (or negative errno)
43 * @get_optimum_mode: Get the most efficient operating mode for the regulator 56 * @get_optimum_mode: Get the most efficient operating mode for the regulator
44 * when running with the specified parameters. 57 * when running with the specified parameters.
45 * 58 *
@@ -51,9 +64,15 @@ struct regulator_init_data;
51 * suspended. 64 * suspended.
52 * @set_suspend_mode: Set the operating mode for the regulator when the 65 * @set_suspend_mode: Set the operating mode for the regulator when the
53 * system is suspended. 66 * system is suspended.
67 *
68 * This struct describes regulator operations which can be implemented by
69 * regulator chip drivers.
54 */ 70 */
55struct regulator_ops { 71struct regulator_ops {
56 72
73 /* enumerate supported voltages */
74 int (*list_voltage) (struct regulator_dev *, unsigned selector);
75
57 /* get/set regulator voltage */ 76 /* get/set regulator voltage */
58 int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); 77 int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
59 int (*get_voltage) (struct regulator_dev *); 78 int (*get_voltage) (struct regulator_dev *);
@@ -72,6 +91,13 @@ struct regulator_ops {
72 int (*set_mode) (struct regulator_dev *, unsigned int mode); 91 int (*set_mode) (struct regulator_dev *, unsigned int mode);
73 unsigned int (*get_mode) (struct regulator_dev *); 92 unsigned int (*get_mode) (struct regulator_dev *);
74 93
94 /* report regulator status ... most other accessors report
95 * control inputs; this reports results of combining inputs
96 * from Linux (and other sources) with the actual load.
97 * returns REGULATOR_STATUS_* or negative errno.
98 */
99 int (*get_status)(struct regulator_dev *);
100
75 /* get most efficient regulator operating mode for load */ 101 /* get most efficient regulator operating mode for load */
76 unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, 102 unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
77 int output_uV, int load_uA); 103 int output_uV, int load_uA);
@@ -106,6 +132,7 @@ enum regulator_type {
106 * 132 *
107 * @name: Identifying name for the regulator. 133 * @name: Identifying name for the regulator.
108 * @id: Numerical identifier for the regulator. 134 * @id: Numerical identifier for the regulator.
135 * @n_voltages: Number of selectors available for ops.list_voltage().
109 * @ops: Regulator operations table. 136 * @ops: Regulator operations table.
110 * @irq: Interrupt number for the regulator. 137 * @irq: Interrupt number for the regulator.
111 * @type: Indicates if the regulator is a voltage or current regulator. 138 * @type: Indicates if the regulator is a voltage or current regulator.
@@ -114,14 +141,48 @@ enum regulator_type {
114struct regulator_desc { 141struct regulator_desc {
115 const char *name; 142 const char *name;
116 int id; 143 int id;
144 unsigned n_voltages;
117 struct regulator_ops *ops; 145 struct regulator_ops *ops;
118 int irq; 146 int irq;
119 enum regulator_type type; 147 enum regulator_type type;
120 struct module *owner; 148 struct module *owner;
121}; 149};
122 150
151/*
152 * struct regulator_dev
153 *
154 * Voltage / Current regulator class device. One for each
155 * regulator.
156 *
157 * This should *not* be used directly by anything except the regulator
158 * core and notification injection (which should take the mutex and do
159 * no other direct access).
160 */
161struct regulator_dev {
162 struct regulator_desc *desc;
163 int use_count;
164
165 /* lists we belong to */
166 struct list_head list; /* list of all regulators */
167 struct list_head slist; /* list of supplied regulators */
168
169 /* lists we own */
170 struct list_head consumer_list; /* consumers we supply */
171 struct list_head supply_list; /* regulators we supply */
172
173 struct blocking_notifier_head notifier;
174 struct mutex mutex; /* consumer lock */
175 struct module *owner;
176 struct device dev;
177 struct regulation_constraints *constraints;
178 struct regulator_dev *supply; /* for tree */
179
180 void *reg_data; /* regulator_dev data */
181};
182
123struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, 183struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
124 struct device *dev, void *driver_data); 184 struct device *dev, struct regulator_init_data *init_data,
185 void *driver_data);
125void regulator_unregister(struct regulator_dev *rdev); 186void regulator_unregister(struct regulator_dev *rdev);
126 187
127int regulator_notifier_call_chain(struct regulator_dev *rdev, 188int regulator_notifier_call_chain(struct regulator_dev *rdev,
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 1387a5d2190e..91b4da31f1b5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -14,9 +14,12 @@
14#ifndef __REGULATOR_FIXED_H 14#ifndef __REGULATOR_FIXED_H
15#define __REGULATOR_FIXED_H 15#define __REGULATOR_FIXED_H
16 16
17struct regulator_init_data;
18
17struct fixed_voltage_config { 19struct fixed_voltage_config {
18 const char *supply_name; 20 const char *supply_name;
19 int microvolts; 21 int microvolts;
22 struct regulator_init_data *init_data;
20}; 23};
21 24
22#endif 25#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3794773b23d2..bac64fa390f2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -73,7 +73,9 @@ struct regulator_state {
73 * 73 *
74 * @always_on: Set if the regulator should never be disabled. 74 * @always_on: Set if the regulator should never be disabled.
75 * @boot_on: Set if the regulator is enabled when the system is initially 75 * @boot_on: Set if the regulator is enabled when the system is initially
76 * started. 76 * started. If the regulator is not enabled by the hardware or
77 * bootloader then it will be enabled when the constraints are
78 * applied.
77 * @apply_uV: Apply the voltage constraint when initialising. 79 * @apply_uV: Apply the voltage constraint when initialising.
78 * 80 *
79 * @input_uV: Input voltage for regulator when supplied by another regulator. 81 * @input_uV: Input voltage for regulator when supplied by another regulator.
@@ -83,6 +85,7 @@ struct regulator_state {
83 * @state_standby: State for regulator when system is suspended in standby 85 * @state_standby: State for regulator when system is suspended in standby
84 * mode. 86 * mode.
85 * @initial_state: Suspend state to set by default. 87 * @initial_state: Suspend state to set by default.
88 * @initial_mode: Mode to set at startup.
86 */ 89 */
87struct regulation_constraints { 90struct regulation_constraints {
88 91
@@ -111,6 +114,9 @@ struct regulation_constraints {
111 struct regulator_state state_standby; 114 struct regulator_state state_standby;
112 suspend_state_t initial_state; /* suspend state to set at init */ 115 suspend_state_t initial_state; /* suspend state to set at init */
113 116
117 /* mode to set on startup */
118 unsigned int initial_mode;
119
114 /* constraint flags */ 120 /* constraint flags */
115 unsigned always_on:1; /* regulator never off when system is on */ 121 unsigned always_on:1; /* regulator never off when system is on */
116 unsigned boot_on:1; /* bootloader/firmware enabled regulator */ 122 unsigned boot_on:1; /* bootloader/firmware enabled regulator */
@@ -160,4 +166,6 @@ struct regulator_init_data {
160 166
161int regulator_suspend_prepare(suspend_state_t state); 167int regulator_suspend_prepare(suspend_state_t state);
162 168
169void regulator_has_full_constraints(void);
170
163#endif 171#endif
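
A board-file sketch of the constraints documented above, including the new
initial_mode field and the regulator_has_full_constraints() declaration
(values are illustrative):

	static struct regulator_init_data demo_init_data = {
		.constraints = {
			.min_uV       = 3300000,
			.max_uV       = 3300000,
			.apply_uV     = 1,	/* force 3.3V at init */
			.boot_on      = 1,	/* enable if bootloader didn't */
			.initial_mode = REGULATOR_MODE_NORMAL,
		},
	};

	/* after all regulators are registered: no undeclared supplies exist,
	 * so the core may power down anything left unclaimed.
	 */
	regulator_has_full_constraints();
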
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
new file mode 100644
index 000000000000..85958277f83d
--- /dev/null
+++ b/include/linux/slow-work.h
@@ -0,0 +1,95 @@
1/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 * See Documentation/slow-work.txt
12 */
13
14#ifndef _LINUX_SLOW_WORK_H
15#define _LINUX_SLOW_WORK_H
16
17#ifdef CONFIG_SLOW_WORK
18
19#include <linux/sysctl.h>
20
21struct slow_work;
22
23/*
24 * The operations used to support slow work items
25 */
26struct slow_work_ops {
27 /* get a ref on a work item
28 * - return 0 if successful, -ve if not
29 */
30 int (*get_ref)(struct slow_work *work);
31
32 /* discard a ref to a work item */
33 void (*put_ref)(struct slow_work *work);
34
35 /* execute a work item */
36 void (*execute)(struct slow_work *work);
37};
38
39/*
40 * A slow work item
41 * - A reference is held on the parent object by the thread pool when it is
42 * queued
43 */
44struct slow_work {
45 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
50 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */
52};
53
54/**
55 * slow_work_init - Initialise a slow work item
56 * @work: The work item to initialise
57 * @ops: The operations to use to handle the slow work item
58 *
59 * Initialise a slow work item.
60 */
61static inline void slow_work_init(struct slow_work *work,
62 const struct slow_work_ops *ops)
63{
64 work->flags = 0;
65 work->ops = ops;
66 INIT_LIST_HEAD(&work->link);
67}
68
69/**
70 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item
73 *
74 * Initialise a very slow work item. This item will be restricted such that
75 * only a certain number of the pool threads will be able to execute items of
76 * this type.
77 */
78static inline void vslow_work_init(struct slow_work *work,
79 const struct slow_work_ops *ops)
80{
81 work->flags = 1 << SLOW_WORK_VERY_SLOW;
82 work->ops = ops;
83 INIT_LIST_HEAD(&work->link);
84}
85
86extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void);
88extern void slow_work_unregister_user(void);
89
90#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[];
92#endif
93
94#endif /* CONFIG_SLOW_WORK */
95#endif /* _LINUX_SLOW_WORK_H */
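
A usage sketch of the API above: embed a slow_work in your own object,
supply the three ops and enqueue it (the "demo" names are illustrative):

	struct demo_item {
		struct slow_work work;
		/* ... caller-owned state ... */
	};

	static int demo_get_ref(struct slow_work *work)
	{
		return 0;	/* pin the parent object here */
	}

	static void demo_put_ref(struct slow_work *work)
	{
		/* drop the reference taken in demo_get_ref() */
	}

	static void demo_execute(struct slow_work *work)
	{
		struct demo_item *item =
			container_of(work, struct demo_item, work);
		/* the slow operation (lookup, mkdir, ...) runs here */
		(void)item;
	}

	static const struct slow_work_ops demo_ops = {
		.get_ref = demo_get_ref,
		.put_ref = demo_put_ref,
		.execute = demo_execute,
	};

	/* caller: */
	slow_work_register_user();
	slow_work_init(&item->work, &demo_ops);
	slow_work_enqueue(&item->work);
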
diff --git a/include/linux/smp.h b/include/linux/smp.h
index bbacb7baa446..a69db820eed6 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
38/* 38/*
39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. 39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
40 * (defined in asm header): 40 * (defined in asm header):
41 */ 41 */
42 42
43/* 43/*
44 * stops all CPUs but the current one: 44 * stops all CPUs but the current one:
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
82 return 0; 82 return 0;
83} 83}
84 84
85void __smp_call_function_single(int cpuid, struct call_single_data *data); 85void __smp_call_function_single(int cpuid, struct call_single_data *data,
86 int wait);
86 87
87/* 88/*
88 * Generic and arch helpers 89 * Generic and arch helpers
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus;
121 122
122#else /* !SMP */ 123#else /* !SMP */
123 124
125static inline void smp_send_stop(void) { }
126
124/* 127/*
125 * These macros fold the SMP functionality into a single CPU system 128 * These macros fold the SMP functionality into a single CPU system
126 */ 129 */
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
index dd253177f65f..3e08a1c86830 100644
--- a/include/linux/timeriomem-rng.h
+++ b/include/linux/timeriomem-rng.h
@@ -14,7 +14,7 @@ struct timeriomem_rng_data {
14 struct completion completion; 14 struct completion completion;
15 unsigned int present:1; 15 unsigned int present:1;
16 16
17 u32 __iomem *address; 17 void __iomem *address;
18 18
19 /* measures in usecs */ 19 /* measures in usecs */
20 unsigned int period; 20 unsigned int period;
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 5f401b644ed5..429c631d2aad 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -80,8 +80,7 @@ struct wusb_ckhdid {
80 u8 data[16]; 80 u8 data[16];
81} __attribute__((packed)); 81} __attribute__((packed));
82 82
83const static 83static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
84struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
85 84
86#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) 85#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
87 86