Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/Kbuild             |   3
-rw-r--r--  include/linux/bio.h              |  19
-rw-r--r--  include/linux/bitops.h           |   6
-rw-r--r--  include/linux/blkdev.h           |   8
-rw-r--r--  include/linux/buffer_head.h      |  26
-rw-r--r--  include/linux/connector.h        |   7
-rw-r--r--  include/linux/console.h          |   3
-rw-r--r--  include/linux/console_struct.h   |   1
-rw-r--r--  include/linux/cpuset.h           |   2
-rw-r--r--  include/linux/dca.h              |  47
-rw-r--r--  include/linux/dma-mapping.h      |   7
-rw-r--r--  include/linux/fb.h               |   3
-rw-r--r--  include/linux/firewire-cdev.h    |  15
-rw-r--r--  include/linux/fs.h               |  78
-rw-r--r--  include/linux/gfp.h              |  62
-rw-r--r--  include/linux/hugetlb.h          |   1
-rw-r--r--  include/linux/i2c-id.h           |   1
-rw-r--r--  include/linux/i2o.h              |   3
-rw-r--r--  include/linux/ide.h              |  18
-rw-r--r--  include/linux/init.h             |   2
-rw-r--r--  include/linux/interrupt.h        |   9
-rw-r--r--  include/linux/ioport.h           |   3
-rw-r--r--  include/linux/isdn.h             |   3
-rw-r--r--  include/linux/jbd.h              |   1
-rw-r--r--  include/linux/jiffies.h          |   2
-rw-r--r--  include/linux/kernel.h           |   1
-rw-r--r--  include/linux/kprobes.h          |   6
-rw-r--r--  include/linux/libata.h           |  16
-rw-r--r--  include/linux/memory_hotplug.h   |  18
-rw-r--r--  include/linux/mempolicy.h        |   7
-rw-r--r--  include/linux/mm.h               |  97
-rw-r--r--  include/linux/mm_types.h         | 158
-rw-r--r--  include/linux/mmzone.h           |  68
-rw-r--r--  include/linux/nfsd/export.h      |  11
-rw-r--r--  include/linux/nodemask.h         |  94
-rw-r--r--  include/linux/of_device.h        |   5
-rw-r--r--  include/linux/page-isolation.h   |  37
-rw-r--r--  include/linux/pageblock-flags.h  |  75
-rw-r--r--  include/linux/pagemap.h          |  36
-rw-r--r--  include/linux/pci_ids.h          |   4
-rw-r--r--  include/linux/radix-tree.h       |  40
-rw-r--r--  include/linux/scatterlist.h      |  84
-rw-r--r--  include/linux/sched.h            |  77
-rw-r--r--  include/linux/security.h         |  18
-rw-r--r--  include/linux/selection.h        |   1
-rw-r--r--  include/linux/serial_core.h      |   3
-rw-r--r--  include/linux/slab.h             |   4
-rw-r--r--  include/linux/slub_def.h         |  71
-rw-r--r--  include/linux/sm501-regs.h       |  18
-rw-r--r--  include/linux/spi/at73c213.h     |  25
-rw-r--r--  include/linux/spi/spi.h          |  12
-rw-r--r--  include/linux/usb/gadget.h       |   4
52 files changed, 962 insertions(+), 358 deletions(-)
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 818cc3a50e6b..025af706d114 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -78,7 +78,6 @@ header-y += if_arcnet.h
 header-y += if_bonding.h
 header-y += if_cablemodem.h
 header-y += if_fc.h
-header-y += if_fddi.h
 header-y += if.h
 header-y += if_hippi.h
 header-y += if_infiniband.h
@@ -121,7 +120,6 @@ header-y += nl80211.h
 header-y += oom.h
 header-y += param.h
 header-y += pci_regs.h
-header-y += personality.h
 header-y += pfkeyv2.h
 header-y += pg.h
 header-y += phantom.h
@@ -159,7 +157,6 @@ header-y += video_decoder.h
 header-y += video_encoder.h
 header-y += videotext.h
 header-y += vt.h
-header-y += wireless.h
 header-y += x25.h
 
 unifdef-y += acct.h
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 089a8bc55dd4..4da441337d6e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -176,13 +176,28 @@ struct bio {
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
 #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
 #define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
+#define bio_empty_barrier(bio)	(bio_barrier(bio) && !(bio)->bi_size)
+
+static inline unsigned int bio_cur_sectors(struct bio *bio)
+{
+	if (bio->bi_vcnt)
+		return bio_iovec(bio)->bv_len >> 9;
+
+	return 0;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+	if (bio->bi_vcnt)
+		return page_address(bio_page(bio)) + bio_offset(bio);
+
+	return NULL;
+}
 
 /*
  * will die
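The macro-to-inline conversion above ties into the new empty-barrier support: an empty barrier bio has bi_vcnt == 0, so blindly dereferencing bio_iovec() would read a nonexistent segment. A minimal sketch of how a block driver might use the new helpers (example_handle_bio() and do_transfer() are hypothetical, not part of this patch):

static void example_handle_bio(struct bio *bio)
{
	/* an empty barrier carries no payload; only its ordering matters */
	if (bio_empty_barrier(bio)) {
		/* flush the device cache here, then complete the bio */
		return;
	}

	/* bi_vcnt is non-zero here, so bio_data()/bio_cur_sectors() are safe */
	do_transfer(bio_data(bio), bio_cur_sectors(bio));
}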
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 638165f571da..b9fb8ee3308b 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -8,6 +8,12 @@
  */
 #include <asm/bitops.h>
 
+#define for_each_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size)); \
+	     (bit) < (size); \
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+
 static __inline__ int get_bitmask_order(unsigned int count)
 {
 	int order;
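for_each_bit() is convenience sugar over find_first_bit()/find_next_bit(): it visits every set bit below size. A small usage sketch (the bitmap contents and printk are illustrative only):

{
	DECLARE_BITMAP(mask, 64);
	unsigned int bit;

	bitmap_zero(mask, 64);
	set_bit(3, mask);
	set_bit(42, mask);

	for_each_bit(bit, mask, 64)	/* iterates bit = 3, then bit = 42 */
		printk(KERN_DEBUG "bit %u is set\n", bit);
}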
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5ed888b04b29..bbf906a0b419 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -330,7 +330,6 @@ typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
@@ -368,7 +367,6 @@ struct request_queue
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
-	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
 
@@ -540,6 +538,7 @@ enum {
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
+#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -729,7 +728,9 @@ static inline void blk_run_address_space(struct address_space *mapping)
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
-extern void end_request(struct request *req, int uptodate);
+extern void end_request(struct request *, int);
+extern void end_queued_request(struct request *, int);
+extern void end_dequeued_request(struct request *, int);
 extern void blk_complete_request(struct request *);
 
 /*
@@ -767,7 +768,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 35cadad84b14..da0d83fbadc0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -203,10 +203,20 @@ void block_invalidatepage(struct page *page, unsigned long offset);
 int block_write_full_page(struct page *page, get_block_t *get_block,
 				struct writeback_control *wbc);
 int block_read_full_page(struct page*, get_block_t*);
+int block_write_begin(struct file *, struct address_space *,
+				loff_t, unsigned, unsigned,
+				struct page **, void **, get_block_t*);
+int block_write_end(struct file *, struct address_space *,
+				loff_t, unsigned, unsigned,
+				struct page *, void *);
+int generic_write_end(struct file *, struct address_space *,
+				loff_t, unsigned, unsigned,
+				struct page *, void *);
+void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
 int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
-int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
-				loff_t *);
-int generic_cont_expand(struct inode *inode, loff_t size);
+int cont_write_begin(struct file *, struct address_space *, loff_t,
+			unsigned, unsigned, struct page **, void **,
+			get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
 int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
@@ -216,9 +226,13 @@ sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
 int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 int file_fsync(struct file *, struct dentry *, int);
-int nobh_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
-int nobh_commit_write(struct file *, struct page *, unsigned, unsigned);
-int nobh_truncate_page(struct address_space *, loff_t);
+int nobh_write_begin(struct file *, struct address_space *,
+				loff_t, unsigned, unsigned,
+				struct page **, void **, get_block_t*);
+int nobh_write_end(struct file *, struct address_space *,
+				loff_t, unsigned, unsigned,
+				struct page *, void *);
+int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
 int nobh_writepage(struct page *page, get_block_t *get_block,
 			struct writeback_control *wbc);
 
diff --git a/include/linux/connector.h b/include/linux/connector.h
index b62f823e90cf..13fc4541bf23 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -36,14 +36,15 @@
 #define CN_VAL_CIFS			0x1
 #define CN_W1_IDX			0x3	/* w1 communication */
 #define CN_W1_VAL			0x1
+#define CN_IDX_V86D			0x4
+#define CN_VAL_V86D_UVESAFB		0x1
 
-
-#define CN_NETLINK_USERS		4
+#define CN_NETLINK_USERS		5
 
 /*
  * Maximum connector's message size.
  */
-#define CONNECTOR_MAX_MSG_SIZE	1024
+#define CONNECTOR_MAX_MSG_SIZE	16384
 
 /*
  * idx and val are unique identifiers which
diff --git a/include/linux/console.h b/include/linux/console.h
index 56a7bcda49cb..0a4542ddb73d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -45,7 +45,8 @@ struct consw {
 	int	(*con_font_get)(struct vc_data *, struct console_font *);
 	int	(*con_font_default)(struct vc_data *, struct console_font *, char *);
 	int	(*con_font_copy)(struct vc_data *, int);
-	int	(*con_resize)(struct vc_data *, unsigned int, unsigned int);
+	int	(*con_resize)(struct vc_data *, unsigned int, unsigned int,
+			      unsigned int);
 	int	(*con_set_palette)(struct vc_data *, unsigned char *);
 	int	(*con_scrolldelta)(struct vc_data *, int);
 	int	(*con_set_origin)(struct vc_data *);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index dc77fed7b285..d71f7c0f931b 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -100,6 +100,7 @@ struct vc_data {
 	unsigned char	vc_G1_charset;
 	unsigned char	vc_saved_G0;
 	unsigned char	vc_saved_G1;
+	unsigned int	vc_resize_user;		/* resize request from user */
 	unsigned int	vc_bell_pitch;		/* Console bell pitch */
 	unsigned int	vc_bell_duration;	/* Console bell duration */
 	struct vc_data **vc_display_fg;		/* [!] Ptr to var holding fg console for this display */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 826b15e914e2..9e633ea103ce 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -93,7 +93,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 	return node_possible_map;
 }
 
-#define cpuset_current_mems_allowed (node_online_map)
+#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 #define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/dca.h b/include/linux/dca.h
new file mode 100644
index 000000000000..83eaecc6f8ab
--- /dev/null
+++ b/include/linux/dca.h
@@ -0,0 +1,47 @@
+#ifndef DCA_H
+#define DCA_H
+/* DCA Provider API */
+
+/* DCA Notifier Interface */
+void dca_register_notify(struct notifier_block *nb);
+void dca_unregister_notify(struct notifier_block *nb);
+
+#define DCA_PROVIDER_ADD     0x0001
+#define DCA_PROVIDER_REMOVE  0x0002
+
+struct dca_provider {
+	struct dca_ops		*ops;
+	struct class_device	*cd;
+	int			 id;
+};
+
+struct dca_ops {
+	int	(*add_requester)    (struct dca_provider *, struct device *);
+	int	(*remove_requester) (struct dca_provider *, struct device *);
+	u8	(*get_tag)	    (struct dca_provider *, int cpu);
+};
+
+struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
+void free_dca_provider(struct dca_provider *dca);
+int register_dca_provider(struct dca_provider *dca, struct device *dev);
+void unregister_dca_provider(struct dca_provider *dca);
+
+static inline void *dca_priv(struct dca_provider *dca)
+{
+	return (void *)dca + sizeof(struct dca_provider);
+}
+
+/* Requester API */
+int dca_add_requester(struct device *dev);
+int dca_remove_requester(struct device *dev);
+u8 dca_get_tag(int cpu);
+
+/* internal stuff */
+int __init dca_sysfs_init(void);
+void __exit dca_sysfs_exit(void);
+int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev);
+void dca_sysfs_remove_provider(struct dca_provider *dca);
+int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot);
+void dca_sysfs_remove_req(struct dca_provider *dca, int slot);
+
+#endif /* DCA_H */
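A provider allocates its descriptor with alloc_dca_provider(), which reserves priv_size extra bytes directly behind struct dca_provider; that is what the pointer arithmetic in dca_priv() relies on. A rough registration sketch; every example_* name and the private-data layout are assumptions, not part of this header:

static struct dca_ops example_dca_ops = {
	.add_requester		= example_add_requester,
	.remove_requester	= example_remove_requester,
	.get_tag		= example_get_tag,
};

static int example_register(struct device *dev)
{
	struct dca_provider *dca;
	int err;

	dca = alloc_dca_provider(&example_dca_ops,
				 sizeof(struct example_dca_priv));
	if (!dca)
		return -ENOMEM;

	err = register_dca_provider(dca, dev);
	if (err)
		free_dca_provider(dca);	/* registration failed; release it */
	return err;
}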
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2dc21cbeb304..0ebfafbd338c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -24,6 +24,8 @@ enum dma_data_direction {
 #define DMA_28BIT_MASK	0x000000000fffffffULL
 #define DMA_24BIT_MASK	0x0000000000ffffffULL
 
+#define DMA_MASK_NONE	0x0ULL
+
 static inline int valid_dma_direction(int dma_direction)
 {
 	return ((dma_direction == DMA_BIDIRECTIONAL) ||
@@ -31,6 +33,11 @@ static inline int valid_dma_direction(int dma_direction)
 		(dma_direction == DMA_FROM_DEVICE));
 }
 
+static inline int is_device_dma_capable(struct device *dev)
+{
+	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
+}
+
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 #else
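is_device_dma_capable() folds two checks into one: the device must have a DMA mask pointer at all, and that mask must not be the new DMA_MASK_NONE sentinel. A hedged probe-time sketch (example_probe() is hypothetical):

static int example_probe(struct device *dev)
{
	if (!is_device_dma_capable(dev))
		return -EIO;	/* no DMA possible; fall back to PIO or fail */

	/* safe to set up DMA mappings for dev from here on */
	return 0;
}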
diff --git a/include/linux/fb.h b/include/linux/fb.h
index cec54106aa87..58c57a33e5dd 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -180,6 +180,7 @@ struct fb_bitfield {
 };
 
 #define FB_NONSTD_HAM		1	/* Hold-And-Modify (HAM)        */
+#define FB_NONSTD_REV_PIX_IN_B	2	/* order of pixels in each byte is reversed */
 
 #define FB_ACTIVATE_NOW		0	/* set values immediately (or vbl)*/
 #define FB_ACTIVATE_NXTOPEN	1	/* activate on next open	*/
@@ -206,6 +207,7 @@ struct fb_bitfield {
 #define FB_VMODE_NONINTERLACED  0	/* non interlaced */
 #define FB_VMODE_INTERLACED	1	/* interlaced	*/
 #define FB_VMODE_DOUBLE		2	/* double scan */
+#define FB_VMODE_ODD_FLD_FIRST	4	/* interlaced: top line first */
 #define FB_VMODE_MASK		255
 
 #define FB_VMODE_YWRAP		256	/* ywrap instead of panning     */
@@ -1054,6 +1056,7 @@ struct fb_videomode {
 	u32 flag;
 };
 
+extern const char *fb_mode_option;
 extern const struct fb_videomode vesa_modes[];
 
 struct fb_modelist {
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 1a45d6f41b09..0f0e271f97fa 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -178,6 +178,7 @@ union fw_cdev_event {
 #define FW_CDEV_IOC_QUEUE_ISO		_IOWR('#', 0x09, struct fw_cdev_queue_iso)
 #define FW_CDEV_IOC_START_ISO		_IOW('#', 0x0a, struct fw_cdev_start_iso)
 #define FW_CDEV_IOC_STOP_ISO		_IOW('#', 0x0b, struct fw_cdev_stop_iso)
+#define FW_CDEV_IOC_GET_CYCLE_TIMER	_IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
 
 /* FW_CDEV_VERSION History
  *
@@ -459,4 +460,18 @@ struct fw_cdev_stop_iso {
 	__u32 handle;
 };
 
+/**
+ * struct fw_cdev_get_cycle_timer - read cycle timer register
+ * @local_time:   system time, in microseconds since the Epoch
+ * @cycle_timer:  isochronous cycle timer, as per OHCI 1.1 clause 5.13
+ *
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
+ * and also the system clock.  This allows to express the receive time of an
+ * isochronous packet as a system time with microsecond accuracy.
+ */
+struct fw_cdev_get_cycle_timer {
+	__u64 local_time;
+	__u32 cycle_timer;
+};
+
 #endif /* _LINUX_FIREWIRE_CDEV_H */
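From user space, the new ioctl returns the OHCI cycle timer together with a microsecond system timestamp in a single call. A minimal sketch, assuming fd is an already-open /dev/fw* character device and error handling is elided:

struct fw_cdev_get_cycle_timer ct;

if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ct) == 0)
	printf("cycle timer %08x at %llu us since the Epoch\n",
	       ct.cycle_timer, (unsigned long long)ct.local_time);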
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4a6a21077bae..f70d52c46617 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -381,7 +381,7 @@ struct iattr {
  *			trying again.  The aop will be taking reasonable
  *			precautions not to livelock.  If the caller held a page
  *			reference, it should drop it before retrying.  Returned
- *			by readpage(), prepare_write(), and commit_write().
+ *			by readpage().
  *
  * address_space_operation functions return these large constants to indicate
  * special semantics to the caller.  These are much larger than the bytes in a
@@ -394,6 +394,9 @@ enum positive_aop_returns {
 	AOP_TRUNCATED_PAGE	= 0x80001,
 };
 
+#define AOP_FLAG_UNINTERRUPTIBLE	0x0001 /* will not do a short write */
+#define AOP_FLAG_CONT_EXPAND		0x0002 /* called from cont_expand */
+
 /*
  * oh the beauties of C type declarations.
  */
@@ -401,6 +404,39 @@ struct page;
 struct address_space;
 struct writeback_control;
 
+struct iov_iter {
+	const struct iovec *iov;
+	unsigned long nr_segs;
+	size_t iov_offset;
+	size_t count;
+};
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t iov_iter_copy_from_user(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(struct iov_iter *i);
+
+static inline void iov_iter_init(struct iov_iter *i,
+			const struct iovec *iov, unsigned long nr_segs,
+			size_t count, size_t written)
+{
+	i->iov = iov;
+	i->nr_segs = nr_segs;
+	i->iov_offset = 0;
+	i->count = count + written;
+
+	iov_iter_advance(i, written);
+}
+
+static inline size_t iov_iter_count(struct iov_iter *i)
+{
+	return i->count;
+}
+
+
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
 	int (*readpage)(struct file *, struct page *);
@@ -421,6 +457,14 @@ struct address_space_operations {
 	 */
 	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
 	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
+
+	int (*write_begin)(struct file *, struct address_space *mapping,
+				loff_t pos, unsigned len, unsigned flags,
+				struct page **pagep, void **fsdata);
+	int (*write_end)(struct file *, struct address_space *mapping,
+				loff_t pos, unsigned len, unsigned copied,
+				struct page *page, void *fsdata);
+
 	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
 	sector_t (*bmap)(struct address_space *, sector_t);
 	void (*invalidatepage) (struct page *, unsigned long);
@@ -435,6 +479,18 @@ struct address_space_operations {
 	int (*launder_page) (struct page *);
 };
 
+/*
+ * pagecache_write_begin/pagecache_write_end must be used by general code
+ * to write into the pagecache.
+ */
+int pagecache_write_begin(struct file *, struct address_space *mapping,
+				loff_t pos, unsigned len, unsigned flags,
+				struct page **pagep, void **fsdata);
+
+int pagecache_write_end(struct file *, struct address_space *mapping,
+				loff_t pos, unsigned len, unsigned copied,
+				struct page *page, void *fsdata);
+
 struct backing_dev_info;
 struct address_space {
 	struct inode *host;	/* owner: inode, block_device */
@@ -697,16 +753,14 @@ struct fown_struct {
  * Track a single file's readahead state
  */
 struct file_ra_state {
 	pgoff_t start;			/* where readahead started */
-	unsigned long size;		/* # of readahead pages */
-	unsigned long async_size;	/* do asynchronous readahead when
+	unsigned int size;		/* # of readahead pages */
+	unsigned int async_size;	/* do asynchronous readahead when
 					   there are only # of pages ahead */
 
-	unsigned long ra_pages;		/* Maximum readahead window */
-	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
-	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
-	unsigned long prev_index;	/* Cache last read() position */
-	unsigned int prev_offset;	/* Offset where last read() ended in a page */
+	unsigned int ra_pages;		/* Maximum readahead window */
+	int mmap_miss;			/* Cache miss stat for mmap accesses */
+	loff_t prev_pos;		/* Cache last read() position */
 };
 
 /*
@@ -1835,6 +1889,12 @@ extern int simple_prepare_write(struct file *file, struct page *page,
 						unsigned offset, unsigned to);
 extern int simple_commit_write(struct file *file, struct page *page,
 				unsigned offset, unsigned to);
+extern int simple_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata);
+extern int simple_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata);
 
 extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
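write_begin/write_end replace prepare_write/commit_write as the page-cache write protocol: write_begin locks and hands back the target page (plus optional fsdata), while write_end unlocks, dirties, and reports how many bytes were actually copied, which is what makes short copies of user data safe to handle. A sketch of how a simple block-backed filesystem might adopt the pair; the exfs_* names are hypothetical and exfs_get_block is assumed to exist:

static int exfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, exfs_get_block);
}

static const struct address_space_operations exfs_aops = {
	.readpage	= exfs_readpage,
	.writepage	= exfs_writepage,
	.write_begin	= exfs_write_begin,
	.write_end	= generic_write_end,	/* the common buffer-head case */
};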
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bc68dd9a6d41..7e93a9ae7064 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -48,18 +48,12 @@ struct vm_area_struct;
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_MOVABLE	((__force gfp_t)0x80000u) /* Page is movable */
+#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+#define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
 
-#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
-/* if you forget to add the bitmask here kernel will crash, period */
-#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
-			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-			__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
-			__GFP_MOVABLE)
-
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
 /* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
@@ -67,6 +61,8 @@ struct vm_area_struct;
 #define GFP_NOIO	(__GFP_WAIT)
 #define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
 #define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+			 __GFP_RECLAIMABLE)
 #define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
 			 __GFP_HIGHMEM)
@@ -86,6 +82,19 @@ struct vm_area_struct;
 #define GFP_THISNODE	((__force gfp_t)0)
 #endif
 
+/* This mask makes up all the page movable related flags */
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+
+/* Control page allocator reclaim behavior */
+#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
+			__GFP_NORETRY|__GFP_NOMEMALLOC)
+
+/* Control allocation constraints */
+#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
+
+/* Do not use these with a slab allocator */
+#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
@@ -95,25 +104,50 @@ struct vm_area_struct;
 /* 4GB DMA on some platforms */
 #define GFP_DMA32	__GFP_DMA32
 
+/* Convert GFP flags to their corresponding migrate type */
+static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+{
+	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+	if (unlikely(page_group_by_mobility_disabled))
+		return MIGRATE_UNMOVABLE;
+
+	/* Group based on mobility */
+	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+		((gfp_flags & __GFP_RECLAIMABLE) != 0);
+}
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
+	int base = 0;
+
+#ifdef CONFIG_NUMA
+	if (flags & __GFP_THISNODE)
+		base = MAX_NR_ZONES;
+#endif
+
 #ifdef CONFIG_ZONE_DMA
 	if (flags & __GFP_DMA)
-		return ZONE_DMA;
+		return base + ZONE_DMA;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	if (flags & __GFP_DMA32)
-		return ZONE_DMA32;
+		return base + ZONE_DMA32;
 #endif
 	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
 			(__GFP_HIGHMEM | __GFP_MOVABLE))
-		return ZONE_MOVABLE;
+		return base + ZONE_MOVABLE;
 #ifdef CONFIG_HIGHMEM
 	if (flags & __GFP_HIGHMEM)
-		return ZONE_HIGHMEM;
+		return base + ZONE_HIGHMEM;
 #endif
-	return ZONE_NORMAL;
+	return base + ZONE_NORMAL;
+}
+
+static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
+{
+	BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+	return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
 }
 
 /*
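allocflags_to_migratetype() simply packs the two new bits: __GFP_MOVABLE lands in bit 1 and __GFP_RECLAIMABLE in bit 0 of the result. Worked out for the common flag combinations (assuming the migrate types enumerate unmovable, reclaimable, movable as 0, 1, 2):

/* GFP_KERNEL:           neither bit set   -> (0 << 1) | 0 == 0 (unmovable)   */
/* GFP_TEMPORARY:         __GFP_RECLAIMABLE -> (0 << 1) | 1 == 1 (reclaimable) */
/* GFP_HIGHUSER_MOVABLE:  __GFP_MOVABLE     -> (1 << 1) | 0 == 2 (movable)     */

Setting both bits at once is meaningless, which is exactly what the WARN_ON in allocflags_to_migratetype() and the BUG_ON in set_migrateflags() guard against.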
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3a19b032c0eb..ea0f50bfbe03 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,6 +33,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 
 extern unsigned long max_huge_pages;
 extern unsigned long hugepages_treat_as_movable;
+extern int hugetlb_dynamic_pool;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
 extern int sysctl_hugetlb_shm_group;
 
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index a271b67a8e2d..88c81403eb3f 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -120,6 +120,7 @@
 #define I2C_DRIVERID_WM8753	91	/* Wolfson WM8753 audio codec */
 #define I2C_DRIVERID_LM4857	92	/* LM4857 Audio Amplifier */
 #define I2C_DRIVERID_VP27SMPX	93	/* Panasonic VP27s tuner internal MPX */
+#define I2C_DRIVERID_CS4270	94	/* Cirrus Logic 4270 audio codec */
 
 #define I2C_DRIVERID_I2CDEV	900
 #define I2C_DRIVERID_ARP	902	/* SMBus ARP Client */
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 9752307d16ba..7da5b98d90e6 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -32,6 +32,7 @@
 #include <linux/workqueue.h>	/* work_struct */
 #include <linux/mempool.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>	/* Needed for MUTEX init macros */
@@ -837,7 +838,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
 		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
 			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
 #endif
-		sg++;
+		sg = sg_next(sg);
 	}
 	*sg_ptr = mptr;
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 02a27e8cbad2..e39ee2fa2607 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -576,7 +576,6 @@ typedef struct ide_drive_s {
 	select_t	select;		/* basic drive/head select reg value */
 
 	u8	keep_settings;		/* restore settings after drive reset */
-	u8	autodma;		/* device can safely use dma on host */
 	u8	using_dma;		/* disk is using dma for read/write */
 	u8	retry_pio;		/* retrying dma capable host in pio */
 	u8	state;			/* retry state */
@@ -600,6 +599,7 @@ typedef struct ide_drive_s {
 	unsigned nice0		: 1;	/* give obvious excess bandwidth */
 	unsigned nice2		: 1;	/* give a share in our own bandwidth */
 	unsigned doorlocking	: 1;	/* for removable only: door lock/unlock works */
+	unsigned nodma		: 1;	/* disallow DMA */
 	unsigned autotune	: 2;	/* 0=default, 1=autotune, 2=noautotune */
 	unsigned remap_0_to_1	: 1;	/* 0=noremap, 1=remap 0->1 (for EZDrive) */
 	unsigned blocked	: 1;	/* 1=powermanagment told us not to do anything, so sleep nicely */
@@ -736,7 +736,6 @@ typedef struct hwif_s {
 	void (*dma_exec_cmd)(ide_drive_t *, u8);
 	void (*dma_start)(ide_drive_t *);
 	int (*ide_dma_end)(ide_drive_t *drive);
-	int (*ide_dma_check)(ide_drive_t *drive);
 	int (*ide_dma_on)(ide_drive_t *drive);
 	void (*dma_off_quietly)(ide_drive_t *drive);
 	int (*ide_dma_test_irq)(ide_drive_t *drive);
@@ -772,7 +771,7 @@ typedef struct hwif_s {
 
 	unsigned int nsect;
 	unsigned int nleft;
-	unsigned int cursg;
+	struct scatterlist *cursg;
 	unsigned int cursg_ofs;
 
 	int	rqsize;		/* max sectors per request */
@@ -798,7 +797,6 @@ typedef struct hwif_s {
 	unsigned	serialized : 1;	/* serialized all channel operation */
 	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	reset      : 1;	/* reset after probe */
-	unsigned	autodma    : 1;	/* auto-attempt using DMA at boot */
 	unsigned	no_lba48   : 1;	/* 1 = cannot do LBA48 */
 	unsigned	no_lba48_dma : 1; /* 1 = cannot do LBA48 DMA */
 	unsigned	auto_poll  : 1;	/* supports nop auto-poll */
@@ -1093,11 +1091,6 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *);
 extern void ide_init_drive_cmd (struct request *rq);
 
 /*
- * this function returns error location sector offset in case of a write error
- */
-extern u64 ide_get_error_location(ide_drive_t *, char *);
-
-/*
  * "action" parameter type for ide_do_drive_cmd() below.
  */
 typedef enum {
@@ -1261,6 +1254,10 @@ enum {
 	IDE_HFLAG_POST_SET_MODE		= (1 << 8),
 	/* don't program host/device for the transfer mode ("smart" hosts) */
 	IDE_HFLAG_NO_SET_MODE		= (1 << 9),
+	/* trust BIOS for programming chipset/device for DMA */
+	IDE_HFLAG_TRUST_BIOS_FOR_DMA	= (1 << 10),
+	/* host uses VDMA */
+	IDE_HFLAG_VDMA			= (1 << 11),
 };
 
 typedef struct ide_pci_device_s {
@@ -1308,7 +1305,6 @@ static inline u8 ide_max_dma_mode(ide_drive_t *drive)
 	return ide_find_dma_mode(drive, XFER_UDMA_6);
 }
 
-int ide_tune_dma(ide_drive_t *);
 void ide_dma_off(ide_drive_t *);
 void ide_dma_verbose(ide_drive_t *);
 int ide_set_dma(ide_drive_t *);
@@ -1335,7 +1331,6 @@ extern void ide_dma_timeout(ide_drive_t *);
 #else
 static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
 static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline int ide_tune_dma(ide_drive_t *drive) { return 0; }
 static inline void ide_dma_off(ide_drive_t *drive) { ; }
 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
@@ -1385,7 +1380,6 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
 extern char *ide_xfer_verbose(u8 xfer_rate);
 extern void ide_toggle_bounce(ide_drive_t *drive, int on);
 extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
-int ide_use_fast_pio(ide_drive_t *);
 
 static inline int ide_dev_has_iordy(struct hd_driveid *id)
 {
diff --git a/include/linux/init.h b/include/linux/init.h
index f8d9d0b5cffc..9b7a2ba8237e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -67,8 +67,10 @@
 
 /* For assembly routines */
 #define __INIT		.section	".init.text","ax"
+#define __INIT_REFOK	.section	".text.init.refok","ax"
 #define __FINIT		.previous
 #define __INITDATA	.section	".init.data","aw"
+#define __INITDATA_REFOK .section	".data.init.refok","aw"
 
 #ifndef __ASSEMBLY__
 /*
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5523f19d88d2..8e5f289052a2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -205,6 +205,15 @@ static inline int disable_irq_wake(unsigned int irq)
 	enable_irq(irq)
 # endif
 
+static inline int enable_irq_wake(unsigned int irq)
+{
+	return 0;
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+	return 0;
+}
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 71ea92319241..6187a8567bc7 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -110,9 +110,6 @@ extern int allocate_resource(struct resource *root, struct resource *new,
 int adjust_resource(struct resource *res, resource_size_t start,
 		    resource_size_t size);
 
-/* get registered SYSTEM_RAM resources in specified area */
-extern int find_next_system_ram(struct resource *res);
-
 /* Convenience shorthand with allocation */
 #define request_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name))
 #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index d5dda4b643ac..d0ecc8eebfbf 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -167,6 +167,7 @@ typedef struct {
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/tcp.h>
+#include <linux/mutex.h>
 
 #define ISDN_TTY_MAJOR    43
 #define ISDN_TTYAUX_MAJOR 44
@@ -616,7 +617,7 @@ typedef struct isdn_devt {
 	int               v110emu[ISDN_MAX_CHANNELS];	/* V.110 emulator-mode 0=none */
 	atomic_t          v110use[ISDN_MAX_CHANNELS];	/* Usage-Semaphore for stream */
 	isdn_v110_stream  *v110[ISDN_MAX_CHANNELS];	/* V.110 private data */
-	struct semaphore  sem;				/* serialize list access*/
+	struct mutex      mtx;				/* serialize list access*/
 	unsigned long     global_features;
 } isdn_dev;
 
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 700a93b79189..72f522372924 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -372,6 +372,7 @@ struct jbd_revoke_table_s;
  * @h_sync: flag for sync-on-close
  * @h_jdata: flag to force data journaling
  * @h_aborted: flag indicating fatal error on handle
+ * @h_lockdep_map: lockdep info for debugging lock problems
  **/
 
 /* Docbook can't yet cope with the bit fields, but will leave the documentation
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index e757a74b9d17..8b080024bbc1 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -148,6 +148,8 @@ static inline u64 get_jiffies_64(void)
  */
 #define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
 
+extern unsigned long preset_lpj;
+
 /*
  * We want to do realistic conversions of time so we need to use the same
  * values the update wall clock code uses as the jiffies size.  This value
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d9725a28a265..5fdbc814c2eb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,6 +35,7 @@ extern const char linux_proc_banner[];
 #define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
 #define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))
 #define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x,a)		(((x) % ((typeof(x))(a))) == 0)
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 51464d12a4e5..81891581e89b 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -166,6 +166,12 @@ struct kretprobe_instance {
 	struct task_struct *task;
 };
 
+struct kretprobe_blackpoint {
+	const char *name;
+	void *addr;
+};
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
+
 static inline void kretprobe_assert(struct kretprobe_instance *ri,
 	unsigned long orig_ret_address, unsigned long trampoline_address)
 {
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 229a9ff9f924..377e6d4d9be3 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -29,7 +29,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <linux/ata.h>
 #include <linux/workqueue.h>
@@ -416,6 +416,7 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		n_iter;
 	unsigned int		orig_n_elem;
 
 	int			dma_dir;
@@ -426,7 +427,7 @@ struct ata_queued_cmd {
 	unsigned int		nbytes;
 	unsigned int		curbytes;
 
-	unsigned int		cursg;
+	struct scatterlist	*cursg;
 	unsigned int		cursg_ofs;
 
 	struct scatterlist	sgent;
@@ -1043,7 +1044,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 		return 1;
 	if (qc->pad_len)
 		return 0;
-	if (((sg - qc->__sg) + 1) == qc->n_elem)
+	if (qc->n_iter == qc->n_elem)
 		return 1;
 	return 0;
 }
@@ -1051,6 +1052,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 static inline struct scatterlist *
 ata_qc_first_sg(struct ata_queued_cmd *qc)
 {
+	qc->n_iter = 0;
 	if (qc->n_elem)
 		return qc->__sg;
 	if (qc->pad_len)
@@ -1063,8 +1065,8 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
 {
 	if (sg == &qc->pad_sgent)
 		return NULL;
-	if (++sg - qc->__sg < qc->n_elem)
-		return sg;
+	if (++qc->n_iter < qc->n_elem)
+		return sg_next(sg);
 	if (qc->pad_len)
 		return &qc->pad_sgent;
 	return NULL;
@@ -1309,9 +1311,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
 	qc->dma_dir = DMA_NONE;
 	qc->__sg = NULL;
 	qc->flags = 0;
-	qc->cursg = qc->cursg_ofs = 0;
+	qc->cursg = NULL;
+	qc->cursg_ofs = 0;
 	qc->nbytes = qc->curbytes = 0;
 	qc->n_elem = 0;
+	qc->n_iter = 0;
 	qc->err_mask = 0;
 	qc->pad_len = 0;
 	qc->sect_size = ATA_SECT_SIZE;
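The cursg and n_iter changes are part of the chained-scatterlist conversion: an sg table may now be linked across page boundaries, so stepping through it with plain sg++ pointer arithmetic is no longer valid and every advance must go through sg_next(). The generic walking idiom looks like this (handle_segment() is a placeholder, not libata code):

struct scatterlist *sg;
unsigned int i;

/* sgl/nents describe a mapped sg table; sg_next() follows chain links */
for (i = 0, sg = sgl; i < nents; i++, sg = sg_next(sg))
	handle_segment(sg_dma_address(sg), sg_dma_len(sg));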
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 7b54666cea8e..8fee7a45736b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -3,7 +3,6 @@
 
 #include <linux/mmzone.h>
 #include <linux/spinlock.h>
-#include <linux/mmzone.h>
 #include <linux/notifier.h>
 
 struct page;
@@ -59,11 +58,21 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 extern void online_page(struct page *page);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long);
+extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern int offline_pages(unsigned long, unsigned long, unsigned long);
 
 /* reasonably generic interface to expand the physical pages in a zone  */
 extern int __add_pages(struct zone *zone, unsigned long start_pfn,
 	unsigned long nr_pages);
 
+/*
+ * Walk through all memory which is registered as resource.
+ * arg is (start_pfn, nr_pages, private_arg_pointer)
+ */
+extern int walk_memory_resource(unsigned long start_pfn,
+			unsigned long nr_pages, void *arg,
+			int (*func)(unsigned long, unsigned long, void *));
+
 #ifdef CONFIG_NUMA
 extern int memory_add_physaddr_to_nid(u64 start);
 #else
@@ -161,13 +170,6 @@ static inline int mhp_notimplemented(const char *func)
 }
 
 #endif /* ! CONFIG_MEMORY_HOTPLUG */
-static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
-	unsigned long nr_pages)
-{
-	printk(KERN_WARNING "%s() called, not yet supported\n", __FUNCTION__);
-	dump_stack();
-	return -ENOSYS;
-}
 
 extern int add_memory(int nid, u64 start, u64 size);
 extern int arch_add_memory(int nid, u64 start, u64 size);
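walk_memory_resource() drives a caller-supplied callback over each registered SYSTEM_RAM range, replacing the removed find_next_system_ram() loop from ioport.h. A hedged sketch of a caller that just tallies pages (example_count, base_pfn, and span_pages are illustrative names, not from the patch):

static int example_count(unsigned long start_pfn, unsigned long nr_pages,
			 void *arg)
{
	/* accumulate the page count passed back for each RAM range */
	*(unsigned long *)arg += nr_pages;
	return 0;
}

unsigned long total = 0;

walk_memory_resource(base_pfn, span_pages, &total, example_count);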
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index a020eb2d4e2a..38c04d61ee06 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -19,6 +19,7 @@
 /* Flags for get_mem_policy */
 #define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
 #define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
+#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
@@ -143,7 +144,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
-extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
 extern void mpol_rebind_task(struct task_struct *tsk,
 					const nodemask_t *new);
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
@@ -235,11 +235,6 @@ static inline void numa_default_policy(void)
235{ 235{
236} 236}
237 237
238static inline void mpol_rebind_policy(struct mempolicy *pol,
239 const nodemask_t *new)
240{
241}
242
243static inline void mpol_rebind_task(struct task_struct *tsk, 238static inline void mpol_rebind_task(struct task_struct *tsk,
244 const nodemask_t *new) 239 const nodemask_t *new)
245{ 240{
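The new MPOL_F_MEMS_ALLOWED flag extends get_mempolicy(2): rather than returning the policy, the call fills the nodemask with the set of nodes the calling task may allocate from. A userspace sketch, assuming the libnuma <numaif.h> wrapper:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long mask[16] = { 0 };	/* room for up to 1024 node bits */

	/* mode and addr are unused with MPOL_F_MEMS_ALLOWED; the kernel
	 * writes the task's allowed node set into mask. */
	if (get_mempolicy(NULL, mask, sizeof(mask) * 8, NULL,
			  MPOL_F_MEMS_ALLOWED) == 0)
		printf("allowed nodes, first word: 0x%lx\n", mask[0]);
	return 0;
}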
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1692dd6cb915..7e87e1b1662e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -50,69 +50,6 @@ extern int sysctl_legacy_va_layout;
50 * mmap() functions). 50 * mmap() functions).
51 */ 51 */
52 52
53/*
 54 * This struct defines a virtual memory area. There is one of these
55 * per VM-area/task. A VM area is any part of the process virtual memory
56 * space that has a special rule for the page-fault handlers (ie a shared
57 * library, the executable area etc).
58 */
59struct vm_area_struct {
60 struct mm_struct * vm_mm; /* The address space we belong to. */
61 unsigned long vm_start; /* Our start address within vm_mm. */
62 unsigned long vm_end; /* The first byte after our end address
63 within vm_mm. */
64
65 /* linked list of VM areas per task, sorted by address */
66 struct vm_area_struct *vm_next;
67
68 pgprot_t vm_page_prot; /* Access permissions of this VMA. */
69 unsigned long vm_flags; /* Flags, listed below. */
70
71 struct rb_node vm_rb;
72
73 /*
74 * For areas with an address space and backing store,
75 * linkage into the address_space->i_mmap prio tree, or
76 * linkage to the list of like vmas hanging off its node, or
77 * linkage of vma in the address_space->i_mmap_nonlinear list.
78 */
79 union {
80 struct {
81 struct list_head list;
82 void *parent; /* aligns with prio_tree_node parent */
83 struct vm_area_struct *head;
84 } vm_set;
85
86 struct raw_prio_tree_node prio_tree_node;
87 } shared;
88
89 /*
90 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
91 * list, after a COW of one of the file pages. A MAP_SHARED vma
92 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
93 * or brk vma (with NULL file) can only be in an anon_vma list.
94 */
95 struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
96 struct anon_vma *anon_vma; /* Serialized by page_table_lock */
97
98 /* Function pointers to deal with this struct. */
99 struct vm_operations_struct * vm_ops;
100
101 /* Information about our backing store: */
102 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
103 units, *not* PAGE_CACHE_SIZE */
104 struct file * vm_file; /* File we map to (can be NULL). */
105 void * vm_private_data; /* was vm_pte (shared mem) */
106 unsigned long vm_truncate_count;/* truncate_count or restart_addr */
107
108#ifndef CONFIG_MMU
109 atomic_t vm_usage; /* refcount (VMAs shared if !MMU) */
110#endif
111#ifdef CONFIG_NUMA
112 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
113#endif
114};
115
116extern struct kmem_cache *vm_area_cachep; 53extern struct kmem_cache *vm_area_cachep;
117 54
118/* 55/*
@@ -631,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
631 VM_BUG_ON(PageSlab(page)); 568 VM_BUG_ON(PageSlab(page));
632 if (unlikely(PageSwapCache(page))) 569 if (unlikely(PageSwapCache(page)))
633 mapping = &swapper_space; 570 mapping = &swapper_space;
634#ifdef CONFIG_SLUB
635 else if (unlikely(PageSlab(page)))
636 mapping = NULL;
637#endif
638 else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) 571 else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
639 mapping = NULL; 572 mapping = NULL;
640 return mapping; 573 return mapping;
@@ -715,9 +648,6 @@ static inline int page_mapped(struct page *page)
715extern void show_free_areas(void); 648extern void show_free_areas(void);
716 649
717#ifdef CONFIG_SHMEM 650#ifdef CONFIG_SHMEM
718int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
719struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
720 unsigned long addr);
721int shmem_lock(struct file *file, int lock, struct user_struct *user); 651int shmem_lock(struct file *file, int lock, struct user_struct *user);
722#else 652#else
723static inline int shmem_lock(struct file *file, int lock, 653static inline int shmem_lock(struct file *file, int lock,
@@ -725,18 +655,6 @@ static inline int shmem_lock(struct file *file, int lock,
725{ 655{
726 return 0; 656 return 0;
727} 657}
728
729static inline int shmem_set_policy(struct vm_area_struct *vma,
730 struct mempolicy *new)
731{
732 return 0;
733}
734
735static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
736 unsigned long addr)
737{
738 return NULL;
739}
740#endif 658#endif
741struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); 659struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
742 660
@@ -779,8 +697,6 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
779 unsigned long floor, unsigned long ceiling); 697 unsigned long floor, unsigned long ceiling);
780int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 698int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
781 struct vm_area_struct *vma); 699 struct vm_area_struct *vma);
782int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
783 unsigned long size, pgprot_t prot);
784void unmap_mapping_range(struct address_space *mapping, 700void unmap_mapping_range(struct address_space *mapping,
785 loff_t const holebegin, loff_t const holelen, int even_cows); 701 loff_t const holebegin, loff_t const holelen, int even_cows);
786 702
@@ -1106,8 +1022,6 @@ int write_one_page(struct page *page, int wait);
1106/* readahead.c */ 1022/* readahead.c */
1107#define VM_MAX_READAHEAD 128 /* kbytes */ 1023#define VM_MAX_READAHEAD 128 /* kbytes */
1108#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ 1024#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
1109#define VM_MAX_CACHE_HIT 256 /* max pages in a row in cache before
1110 * turning readahead off */
1111 1025
1112int do_page_cache_readahead(struct address_space *mapping, struct file *filp, 1026int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
1113 pgoff_t offset, unsigned long nr_to_read); 1027 pgoff_t offset, unsigned long nr_to_read);
@@ -1218,5 +1132,16 @@ extern int randomize_va_space;
1218 1132
1219const char * arch_vma_name(struct vm_area_struct *vma); 1133const char * arch_vma_name(struct vm_area_struct *vma);
1220 1134
1135struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
1136pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
1137pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
1138pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
1139pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
1140void *vmemmap_alloc_block(unsigned long size, int node);
1141void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
1142int vmemmap_populate_basepages(struct page *start_page,
1143 unsigned long pages, int node);
1144int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
1145
1221#endif /* __KERNEL__ */ 1146#endif /* __KERNEL__ */
1222#endif /* _LINUX_MM_H */ 1147#endif /* _LINUX_MM_H */
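The vmemmap_* declarations added above form a layered helper set: an architecture can populate the virtual memmap with its own large mappings or fall back to base pages. As a sketch of the assumed intended use (not a required implementation), an architecture without special mappings could wire the top-level hook straight to the generic base-page path:

#include <linux/mm.h>

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	/* Back [start_page, start_page + nr_pages) of the virtual
	 * memmap with individual base pages allocated on this node. */
	return vmemmap_populate_basepages(start_page, nr_pages, node);
}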
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d5bb1796e12b..877667918452 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1,13 +1,26 @@
1#ifndef _LINUX_MM_TYPES_H 1#ifndef _LINUX_MM_TYPES_H
2#define _LINUX_MM_TYPES_H 2#define _LINUX_MM_TYPES_H
3 3
4#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/threads.h> 6#include <linux/threads.h>
6#include <linux/list.h> 7#include <linux/list.h>
7#include <linux/spinlock.h> 8#include <linux/spinlock.h>
9#include <linux/prio_tree.h>
10#include <linux/rbtree.h>
11#include <linux/rwsem.h>
12#include <linux/completion.h>
13#include <asm/page.h>
14#include <asm/mmu.h>
8 15
9struct address_space; 16struct address_space;
10 17
18#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
19typedef atomic_long_t mm_counter_t;
20#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
21typedef unsigned long mm_counter_t;
22#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
23
11/* 24/*
12 * Each physical page in the system has a struct page associated with 25 * Each physical page in the system has a struct page associated with
13 * it to keep track of whatever it is we are using the page for at the 26 * it to keep track of whatever it is we are using the page for at the
@@ -24,10 +37,7 @@ struct page {
24 * to show when page is mapped 37 * to show when page is mapped
25 * & limit reverse map searches. 38 * & limit reverse map searches.
26 */ 39 */
27 struct { /* SLUB uses */ 40 unsigned int inuse; /* SLUB: Nr of objects */
28 short unsigned int inuse;
29 short unsigned int offset;
30 };
31 }; 41 };
32 union { 42 union {
33 struct { 43 struct {
@@ -49,13 +59,8 @@ struct page {
49#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS 59#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
50 spinlock_t ptl; 60 spinlock_t ptl;
51#endif 61#endif
52 struct { /* SLUB uses */ 62 struct kmem_cache *slab; /* SLUB: Pointer to slab */
53 void **lockless_freelist; 63 struct page *first_page; /* Compound tail pages */
54 struct kmem_cache *slab; /* Pointer to slab */
55 };
56 struct {
57 struct page *first_page; /* Compound pages */
58 };
59 }; 64 };
60 union { 65 union {
61 pgoff_t index; /* Our offset within mapping. */ 66 pgoff_t index; /* Our offset within mapping. */
@@ -80,4 +85,135 @@ struct page {
80#endif /* WANT_PAGE_VIRTUAL */ 85#endif /* WANT_PAGE_VIRTUAL */
81}; 86};
82 87
88/*
 89 * This struct defines a virtual memory area. There is one of these
90 * per VM-area/task. A VM area is any part of the process virtual memory
91 * space that has a special rule for the page-fault handlers (ie a shared
92 * library, the executable area etc).
93 */
94struct vm_area_struct {
95 struct mm_struct * vm_mm; /* The address space we belong to. */
96 unsigned long vm_start; /* Our start address within vm_mm. */
97 unsigned long vm_end; /* The first byte after our end address
98 within vm_mm. */
99
100 /* linked list of VM areas per task, sorted by address */
101 struct vm_area_struct *vm_next;
102
103 pgprot_t vm_page_prot; /* Access permissions of this VMA. */
104 unsigned long vm_flags; /* Flags, listed below. */
105
106 struct rb_node vm_rb;
107
108 /*
109 * For areas with an address space and backing store,
110 * linkage into the address_space->i_mmap prio tree, or
111 * linkage to the list of like vmas hanging off its node, or
112 * linkage of vma in the address_space->i_mmap_nonlinear list.
113 */
114 union {
115 struct {
116 struct list_head list;
117 void *parent; /* aligns with prio_tree_node parent */
118 struct vm_area_struct *head;
119 } vm_set;
120
121 struct raw_prio_tree_node prio_tree_node;
122 } shared;
123
124 /*
125 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
126 * list, after a COW of one of the file pages. A MAP_SHARED vma
127 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
128 * or brk vma (with NULL file) can only be in an anon_vma list.
129 */
130 struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
131 struct anon_vma *anon_vma; /* Serialized by page_table_lock */
132
133 /* Function pointers to deal with this struct. */
134 struct vm_operations_struct * vm_ops;
135
136 /* Information about our backing store: */
137 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
138 units, *not* PAGE_CACHE_SIZE */
139 struct file * vm_file; /* File we map to (can be NULL). */
140 void * vm_private_data; /* was vm_pte (shared mem) */
141 unsigned long vm_truncate_count;/* truncate_count or restart_addr */
142
143#ifndef CONFIG_MMU
144 atomic_t vm_usage; /* refcount (VMAs shared if !MMU) */
145#endif
146#ifdef CONFIG_NUMA
147 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
148#endif
149};
150
151struct mm_struct {
152 struct vm_area_struct * mmap; /* list of VMAs */
153 struct rb_root mm_rb;
154 struct vm_area_struct * mmap_cache; /* last find_vma result */
155 unsigned long (*get_unmapped_area) (struct file *filp,
156 unsigned long addr, unsigned long len,
157 unsigned long pgoff, unsigned long flags);
158 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
159 unsigned long mmap_base; /* base of mmap area */
160 unsigned long task_size; /* size of task vm space */
161 unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
162 unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
163 pgd_t * pgd;
164 atomic_t mm_users; /* How many users with user space? */
165 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
166 int map_count; /* number of VMAs */
167 struct rw_semaphore mmap_sem;
168 spinlock_t page_table_lock; /* Protects page tables and some counters */
169
170 struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
171 * together off init_mm.mmlist, and are protected
172 * by mmlist_lock
173 */
174
175 /* Special counters, in some configurations protected by the
176 * page_table_lock, in other configurations by being atomic.
177 */
178 mm_counter_t _file_rss;
179 mm_counter_t _anon_rss;
180
181 unsigned long hiwater_rss; /* High-watermark of RSS usage */
182 unsigned long hiwater_vm; /* High-water virtual memory usage */
183
184 unsigned long total_vm, locked_vm, shared_vm, exec_vm;
185 unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
186 unsigned long start_code, end_code, start_data, end_data;
187 unsigned long start_brk, brk, start_stack;
188 unsigned long arg_start, arg_end, env_start, env_end;
189
190 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
191
192 cpumask_t cpu_vm_mask;
193
194 /* Architecture-specific MM context */
195 mm_context_t context;
196
197 /* Swap token stuff */
198 /*
199 * Last value of global fault stamp as seen by this process.
200 * In other words, this value gives an indication of how long
201 * it has been since this task got the token.
202 * Look at mm/thrash.c
203 */
204 unsigned int faultstamp;
205 unsigned int token_priority;
206 unsigned int last_interval;
207
208 unsigned long flags; /* Must use atomic bitops to access the bits */
209
210 /* coredumping support */
211 int core_waiters;
212 struct completion *core_startup_done, core_done;
213
214 /* aio bits */
215 rwlock_t ioctx_list_lock;
216 struct kioctx *ioctx_list;
217};
218
83#endif /* _LINUX_MM_TYPES_H */ 219#endif /* _LINUX_MM_TYPES_H */
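Note that mm_counter_t's definition now keys off CONFIG_SPLIT_PTLOCK_CPUS here, while the accessor macros remain in sched.h (see below); callers never touch _file_rss/_anon_rss directly. A short sketch (the helper name is hypothetical):

/* Callers update the RSS counters only through the sched.h macros,
 * which expand to atomic_long_* or plain arithmetic to match the
 * mm_counter_t chosen above. */
static void account_anon_page(struct mm_struct *mm)
{
	inc_mm_counter(mm, anon_rss);	/* updates mm->_anon_rss */
}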
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4e5627379b09..f4bfe824834f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/seqlock.h> 14#include <linux/seqlock.h>
15#include <linux/nodemask.h> 15#include <linux/nodemask.h>
16#include <linux/pageblock-flags.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17#include <asm/page.h> 18#include <asm/page.h>
18 19
@@ -32,8 +33,29 @@
32 */ 33 */
33#define PAGE_ALLOC_COSTLY_ORDER 3 34#define PAGE_ALLOC_COSTLY_ORDER 3
34 35
36#define MIGRATE_UNMOVABLE 0
37#define MIGRATE_RECLAIMABLE 1
38#define MIGRATE_MOVABLE 2
39#define MIGRATE_RESERVE 3
40#define MIGRATE_ISOLATE 4 /* can't allocate from here */
41#define MIGRATE_TYPES 5
42
43#define for_each_migratetype_order(order, type) \
44 for (order = 0; order < MAX_ORDER; order++) \
45 for (type = 0; type < MIGRATE_TYPES; type++)
46
47extern int page_group_by_mobility_disabled;
48
49static inline int get_pageblock_migratetype(struct page *page)
50{
51 if (unlikely(page_group_by_mobility_disabled))
52 return MIGRATE_UNMOVABLE;
53
54 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
55}
56
35struct free_area { 57struct free_area {
36 struct list_head free_list; 58 struct list_head free_list[MIGRATE_TYPES];
37 unsigned long nr_free; 59 unsigned long nr_free;
38}; 60};
39 61
@@ -222,6 +244,14 @@ struct zone {
222#endif 244#endif
223 struct free_area free_area[MAX_ORDER]; 245 struct free_area free_area[MAX_ORDER];
224 246
247#ifndef CONFIG_SPARSEMEM
248 /*
249 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
250 * In SPARSEMEM, this map is stored in struct mem_section
251 */
252 unsigned long *pageblock_flags;
253#endif /* CONFIG_SPARSEMEM */
254
225 255
226 ZONE_PADDING(_pad1_) 256 ZONE_PADDING(_pad1_)
227 257
@@ -324,6 +354,17 @@ struct zone {
324#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) 354#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
325 355
326#ifdef CONFIG_NUMA 356#ifdef CONFIG_NUMA
357
358/*
 359 * The NUMA zonelists are doubled because we need zonelists that restrict the
360 * allocations to a single node for GFP_THISNODE.
361 *
362 * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback
 363 * [MAX_NR_ZONES ... MAX_ZONELISTS -1] : No fallback (GFP_THISNODE)
364 */
365#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
366
367
327/* 368/*
328 * We cache key information from each zonelist for smaller cache 369 * We cache key information from each zonelist for smaller cache
329 * footprint when scanning for free pages in get_page_from_freelist(). 370 * footprint when scanning for free pages in get_page_from_freelist().
@@ -389,6 +430,7 @@ struct zonelist_cache {
389 unsigned long last_full_zap; /* when last zap'd (jiffies) */ 430 unsigned long last_full_zap; /* when last zap'd (jiffies) */
390}; 431};
391#else 432#else
433#define MAX_ZONELISTS MAX_NR_ZONES
392struct zonelist_cache; 434struct zonelist_cache;
393#endif 435#endif
394 436
@@ -455,7 +497,7 @@ extern struct page *mem_map;
455struct bootmem_data; 497struct bootmem_data;
456typedef struct pglist_data { 498typedef struct pglist_data {
457 struct zone node_zones[MAX_NR_ZONES]; 499 struct zone node_zones[MAX_NR_ZONES];
458 struct zonelist node_zonelists[MAX_NR_ZONES]; 500 struct zonelist node_zonelists[MAX_ZONELISTS];
459 int nr_zones; 501 int nr_zones;
460#ifdef CONFIG_FLAT_NODE_MEM_MAP 502#ifdef CONFIG_FLAT_NODE_MEM_MAP
461 struct page *node_mem_map; 503 struct page *node_mem_map;
@@ -708,6 +750,9 @@ extern struct zone *next_zone(struct zone *zone);
708#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) 750#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
709#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) 751#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
710 752
753#define SECTION_BLOCKFLAGS_BITS \
754 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
755
711#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS 756#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
712#error Allocator MAX_ORDER exceeds SECTION_SIZE 757#error Allocator MAX_ORDER exceeds SECTION_SIZE
713#endif 758#endif
@@ -727,6 +772,9 @@ struct mem_section {
727 * before using it wrong. 772 * before using it wrong.
728 */ 773 */
729 unsigned long section_mem_map; 774 unsigned long section_mem_map;
775
776 /* See declaration of similar field in struct zone */
777 unsigned long *pageblock_flags;
730}; 778};
731 779
732#ifdef CONFIG_SPARSEMEM_EXTREME 780#ifdef CONFIG_SPARSEMEM_EXTREME
@@ -771,12 +819,17 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
771 return (struct page *)map; 819 return (struct page *)map;
772} 820}
773 821
774static inline int valid_section(struct mem_section *section) 822static inline int present_section(struct mem_section *section)
775{ 823{
776 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); 824 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
777} 825}
778 826
779static inline int section_has_mem_map(struct mem_section *section) 827static inline int present_section_nr(unsigned long nr)
828{
829 return present_section(__nr_to_section(nr));
830}
831
832static inline int valid_section(struct mem_section *section)
780{ 833{
781 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); 834 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
782} 835}
@@ -798,6 +851,13 @@ static inline int pfn_valid(unsigned long pfn)
798 return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); 851 return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
799} 852}
800 853
854static inline int pfn_present(unsigned long pfn)
855{
856 if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
857 return 0;
858 return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
859}
860
801/* 861/*
802 * These are _only_ used during initialisation, therefore they 862 * These are _only_ used during initialisation, therefore they
803 * can use __initdata ... They could have names to indicate 863 * can use __initdata ... They could have names to indicate
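With free_area.free_list now split per migrate type, a walk over a zone's free memory must visit every (order, type) pair; the new for_each_migratetype_order() helper expresses exactly that nesting. A sketch (the function name is hypothetical; the caller is assumed to hold zone->lock so the free lists are stable, and counts[] is assumed pre-zeroed):

#include <linux/mmzone.h>
#include <linux/list.h>

static unsigned long zone_free_pages_by_type(struct zone *zone,
				unsigned long counts[MIGRATE_TYPES])
{
	unsigned long total = 0;
	struct list_head *entry;
	unsigned int order;
	int type;

	for_each_migratetype_order(order, type)
		list_for_each(entry, &zone->free_area[order].free_list[type]) {
			counts[type] += 1UL << order;
			total += 1UL << order;
		}

	return total;
}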
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 5cd192469096..bcb7abafbca9 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -127,17 +127,9 @@ void nfsd_export_shutdown(void);
127void nfsd_export_flush(void); 127void nfsd_export_flush(void);
128void exp_readlock(void); 128void exp_readlock(void);
129void exp_readunlock(void); 129void exp_readunlock(void);
130struct svc_export * exp_get_by_name(struct auth_domain *clp,
131 struct vfsmount *mnt,
132 struct dentry *dentry,
133 struct cache_req *reqp);
134struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, 130struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
135 struct vfsmount *, 131 struct vfsmount *,
136 struct dentry *); 132 struct dentry *);
137struct svc_export * exp_parent(struct auth_domain *clp,
138 struct vfsmount *mnt,
139 struct dentry *dentry,
140 struct cache_req *reqp);
141struct svc_export * rqst_exp_parent(struct svc_rqst *, 133struct svc_export * rqst_exp_parent(struct svc_rqst *,
142 struct vfsmount *mnt, 134 struct vfsmount *mnt,
143 struct dentry *dentry); 135 struct dentry *dentry);
@@ -157,9 +149,6 @@ static inline void exp_get(struct svc_export *exp)
157{ 149{
158 cache_get(&exp->h); 150 cache_get(&exp->h);
159} 151}
160extern struct svc_export *
161exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
162 struct cache_req *reqp);
163struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *); 152struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
164 153
165#endif /* __KERNEL__ */ 154#endif /* __KERNEL__ */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 52c54a5720f3..905e18f4b412 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -338,31 +338,88 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
338#endif /* MAX_NUMNODES */ 338#endif /* MAX_NUMNODES */
339 339
340/* 340/*
341 * Bitmasks that are kept for all the nodes.
342 */
343enum node_states {
344 N_POSSIBLE, /* The node could become online at some point */
345 N_ONLINE, /* The node is online */
346 N_NORMAL_MEMORY, /* The node has regular memory */
347#ifdef CONFIG_HIGHMEM
348 N_HIGH_MEMORY, /* The node has regular or high memory */
349#else
350 N_HIGH_MEMORY = N_NORMAL_MEMORY,
351#endif
352 N_CPU, /* The node has one or more cpus */
353 NR_NODE_STATES
354};
355
356/*
341 * The following particular system nodemasks and operations 357 * The following particular system nodemasks and operations
342 * on them manage all possible and online nodes. 358 * on them manage all possible and online nodes.
343 */ 359 */
344 360
345extern nodemask_t node_online_map; 361extern nodemask_t node_states[NR_NODE_STATES];
346extern nodemask_t node_possible_map;
347 362
348#if MAX_NUMNODES > 1 363#if MAX_NUMNODES > 1
349#define num_online_nodes() nodes_weight(node_online_map) 364static inline int node_state(int node, enum node_states state)
350#define num_possible_nodes() nodes_weight(node_possible_map) 365{
351#define node_online(node) node_isset((node), node_online_map) 366 return node_isset(node, node_states[state]);
352#define node_possible(node) node_isset((node), node_possible_map) 367}
353#define first_online_node first_node(node_online_map) 368
354#define next_online_node(nid) next_node((nid), node_online_map) 369static inline void node_set_state(int node, enum node_states state)
370{
371 __node_set(node, &node_states[state]);
372}
373
374static inline void node_clear_state(int node, enum node_states state)
375{
376 __node_clear(node, &node_states[state]);
377}
378
379static inline int num_node_state(enum node_states state)
380{
381 return nodes_weight(node_states[state]);
382}
383
384#define for_each_node_state(__node, __state) \
385 for_each_node_mask((__node), node_states[__state])
386
387#define first_online_node first_node(node_states[N_ONLINE])
388#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
389
355extern int nr_node_ids; 390extern int nr_node_ids;
356#else 391#else
357#define num_online_nodes() 1 392
358#define num_possible_nodes() 1 393static inline int node_state(int node, enum node_states state)
359#define node_online(node) ((node) == 0) 394{
360#define node_possible(node) ((node) == 0) 395 return node == 0;
396}
397
398static inline void node_set_state(int node, enum node_states state)
399{
400}
401
402static inline void node_clear_state(int node, enum node_states state)
403{
404}
405
406static inline int num_node_state(enum node_states state)
407{
408 return 1;
409}
410
411#define for_each_node_state(node, __state) \
412 for ( (node) = 0; (node) == 0; (node) = 1)
413
361#define first_online_node 0 414#define first_online_node 0
362#define next_online_node(nid) (MAX_NUMNODES) 415#define next_online_node(nid) (MAX_NUMNODES)
363#define nr_node_ids 1 416#define nr_node_ids 1
417
364#endif 418#endif
365 419
420#define node_online_map node_states[N_ONLINE]
421#define node_possible_map node_states[N_POSSIBLE]
422
366#define any_online_node(mask) \ 423#define any_online_node(mask) \
367({ \ 424({ \
368 int node; \ 425 int node; \
@@ -372,10 +429,15 @@ extern int nr_node_ids;
372 node; \ 429 node; \
373}) 430})
374 431
375#define node_set_online(node) set_bit((node), node_online_map.bits) 432#define num_online_nodes() num_node_state(N_ONLINE)
376#define node_set_offline(node) clear_bit((node), node_online_map.bits) 433#define num_possible_nodes() num_node_state(N_POSSIBLE)
434#define node_online(node) node_state((node), N_ONLINE)
435#define node_possible(node) node_state((node), N_POSSIBLE)
436
437#define node_set_online(node) node_set_state((node), N_ONLINE)
438#define node_set_offline(node) node_clear_state((node), N_ONLINE)
377 439
378#define for_each_node(node) for_each_node_mask((node), node_possible_map) 440#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
379#define for_each_online_node(node) for_each_node_mask((node), node_online_map) 441#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
380 442
381#endif /* __LINUX_NODEMASK_H */ 443#endif /* __LINUX_NODEMASK_H */
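The node_states[] array replaces the separate node_online_map/node_possible_map globals with one mask per state, so "which nodes have X" queries all go through a single helper set. A short sketch:

#include <linux/nodemask.h>
#include <linux/kernel.h>

static void report_nodes(void)
{
	int nid;

	/* Nodes with regular (lowmem) memory; on !HIGHMEM kernels
	 * N_HIGH_MEMORY aliases N_NORMAL_MEMORY. */
	for_each_node_state(nid, N_NORMAL_MEMORY)
		printk(KERN_DEBUG "node %d: has normal memory\n", nid);

	printk(KERN_DEBUG "%d online of %d possible nodes\n",
	       num_node_state(N_ONLINE), num_node_state(N_POSSIBLE));
}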
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 91bf84b9d144..212bffb2b174 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -22,5 +22,10 @@ extern int of_device_register(struct of_device *ofdev);
22extern void of_device_unregister(struct of_device *ofdev); 22extern void of_device_unregister(struct of_device *ofdev);
23extern void of_release_dev(struct device *dev); 23extern void of_release_dev(struct device *dev);
24 24
25static inline void of_device_free(struct of_device *dev)
26{
27 of_release_dev(&dev->dev);
28}
29
25#endif /* __KERNEL__ */ 30#endif /* __KERNEL__ */
26#endif /* _LINUX_OF_DEVICE_H */ 31#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
new file mode 100644
index 000000000000..051c1b1ede4e
--- /dev/null
+++ b/include/linux/page-isolation.h
@@ -0,0 +1,37 @@
1#ifndef __LINUX_PAGEISOLATION_H
2#define __LINUX_PAGEISOLATION_H
3
4/*
5 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
6 * If specified range includes migrate types other than MOVABLE,
7 * this will fail with -EBUSY.
8 *
 9 * To complete the isolation of all pages in the range, the caller has to
10 * free all pages in the range. test_pages_isolated() can be used to
11 * verify this.
12 */
13extern int
14start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
15
16/*
17 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
18 * target range is [start_pfn, end_pfn)
19 */
20extern int
21undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
22
23/*
24 * Test whether all pages in [start_pfn, end_pfn) are isolated.
25 */
26extern int
27test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
28
29/*
30 * Internal functions. These change a pageblock's migrate type.
31 * Please use make_pagetype_isolated()/make_pagetype_movable().
32 */
33extern int set_migratetype_isolate(struct page *page);
34extern void unset_migratetype_isolate(struct page *page);
35
36
37#endif
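Taken together, the three exported calls define the isolation protocol used by memory unplug: isolate the range, empty it, verify, and roll back on failure. A sketch of that sequence, assuming the conventional 0-on-success return from test_pages_isolated() (the wrapper name is hypothetical):

#include <linux/page-isolation.h>
#include <linux/errno.h>

static int try_offline_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;	/* a non-MOVABLE pageblock was in the way */

	/* ... migrate or free every page in [start_pfn, end_pfn) ... */

	ret = test_pages_isolated(start_pfn, end_pfn);
	if (ret) {
		/* some pages are still in use: restore MIGRATE_MOVABLE */
		undo_isolate_page_range(start_pfn, end_pfn);
		return ret;
	}
	return 0;	/* range is free and isolated */
}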
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
new file mode 100644
index 000000000000..e875905f7b12
--- /dev/null
+++ b/include/linux/pageblock-flags.h
@@ -0,0 +1,75 @@
1/*
2 * Macros for manipulating and testing flags related to a
3 * pageblock_nr_pages number of pages.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Original author, Mel Gorman
21 * Major cleanups and reduction of bit operations, Andy Whitcroft
22 */
23#ifndef PAGEBLOCK_FLAGS_H
24#define PAGEBLOCK_FLAGS_H
25
26#include <linux/types.h>
27
28/* Macro to aid the definition of ranges of bits */
29#define PB_range(name, required_bits) \
30 name, name ## _end = (name + required_bits) - 1
31
32/* Bit indices that affect a whole block of pages */
33enum pageblock_bits {
34 PB_range(PB_migrate, 3), /* 3 bits required for migrate types */
35 NR_PAGEBLOCK_BITS
36};
37
38#ifdef CONFIG_HUGETLB_PAGE
39
40#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
41
42/* Huge page sizes are variable */
43extern int pageblock_order;
44
45#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
46
47/* Huge pages are a constant size */
48#define pageblock_order HUGETLB_PAGE_ORDER
49
50#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
51
52#else /* CONFIG_HUGETLB_PAGE */
53
54/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
55#define pageblock_order (MAX_ORDER-1)
56
57#endif /* CONFIG_HUGETLB_PAGE */
58
59#define pageblock_nr_pages (1UL << pageblock_order)
60
61/* Forward declaration */
62struct page;
63
64/* Declarations for getting and setting flags. See mm/page_alloc.c */
65unsigned long get_pageblock_flags_group(struct page *page,
66 int start_bitidx, int end_bitidx);
67void set_pageblock_flags_group(struct page *page, unsigned long flags,
68 int start_bitidx, int end_bitidx);
69
70#define get_pageblock_flags(page) \
71 get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
72#define set_pageblock_flags(page) \
73 set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
74
75#endif /* PAGEBLOCK_FLAGS_H */
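The getter half of this API appears in the mmzone.h hunk above (get_pageblock_migratetype()); its setter counterpart is, in sketch form, the mirror image:

/* Sketch of the matching setter, patterned on the getter shown in
 * mmzone.h: store the migrate type in the PB_migrate bit range. */
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
				  PB_migrate, PB_migrate_end);
}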
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8a83537d6978..db8a410ae9e1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -84,11 +84,11 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
84typedef int filler_t(void *, struct page *); 84typedef int filler_t(void *, struct page *);
85 85
86extern struct page * find_get_page(struct address_space *mapping, 86extern struct page * find_get_page(struct address_space *mapping,
87 unsigned long index); 87 pgoff_t index);
88extern struct page * find_lock_page(struct address_space *mapping, 88extern struct page * find_lock_page(struct address_space *mapping,
89 unsigned long index); 89 pgoff_t index);
90extern struct page * find_or_create_page(struct address_space *mapping, 90extern struct page * find_or_create_page(struct address_space *mapping,
91 unsigned long index, gfp_t gfp_mask); 91 pgoff_t index, gfp_t gfp_mask);
92unsigned find_get_pages(struct address_space *mapping, pgoff_t start, 92unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
93 unsigned int nr_pages, struct page **pages); 93 unsigned int nr_pages, struct page **pages);
94unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, 94unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
@@ -96,44 +96,47 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
96unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, 96unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
97 int tag, unsigned int nr_pages, struct page **pages); 97 int tag, unsigned int nr_pages, struct page **pages);
98 98
99struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);
100
99/* 101/*
100 * Returns locked page at given index in given cache, creating it if needed. 102 * Returns locked page at given index in given cache, creating it if needed.
101 */ 103 */
102static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index) 104static inline struct page *grab_cache_page(struct address_space *mapping,
105 pgoff_t index)
103{ 106{
104 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); 107 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
105} 108}
106 109
107extern struct page * grab_cache_page_nowait(struct address_space *mapping, 110extern struct page * grab_cache_page_nowait(struct address_space *mapping,
108 unsigned long index); 111 pgoff_t index);
109extern struct page * read_cache_page_async(struct address_space *mapping, 112extern struct page * read_cache_page_async(struct address_space *mapping,
110 unsigned long index, filler_t *filler, 113 pgoff_t index, filler_t *filler,
111 void *data); 114 void *data);
112extern struct page * read_cache_page(struct address_space *mapping, 115extern struct page * read_cache_page(struct address_space *mapping,
113 unsigned long index, filler_t *filler, 116 pgoff_t index, filler_t *filler,
114 void *data); 117 void *data);
115extern int read_cache_pages(struct address_space *mapping, 118extern int read_cache_pages(struct address_space *mapping,
116 struct list_head *pages, filler_t *filler, void *data); 119 struct list_head *pages, filler_t *filler, void *data);
117 120
118static inline struct page *read_mapping_page_async( 121static inline struct page *read_mapping_page_async(
119 struct address_space *mapping, 122 struct address_space *mapping,
120 unsigned long index, void *data) 123 pgoff_t index, void *data)
121{ 124{
122 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 125 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
123 return read_cache_page_async(mapping, index, filler, data); 126 return read_cache_page_async(mapping, index, filler, data);
124} 127}
125 128
126static inline struct page *read_mapping_page(struct address_space *mapping, 129static inline struct page *read_mapping_page(struct address_space *mapping,
127 unsigned long index, void *data) 130 pgoff_t index, void *data)
128{ 131{
129 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 132 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
130 return read_cache_page(mapping, index, filler, data); 133 return read_cache_page(mapping, index, filler, data);
131} 134}
132 135
133int add_to_page_cache(struct page *page, struct address_space *mapping, 136int add_to_page_cache(struct page *page, struct address_space *mapping,
134 unsigned long index, gfp_t gfp_mask); 137 pgoff_t index, gfp_t gfp_mask);
135int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 138int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
136 unsigned long index, gfp_t gfp_mask); 139 pgoff_t index, gfp_t gfp_mask);
137extern void remove_from_page_cache(struct page *page); 140extern void remove_from_page_cache(struct page *page);
138extern void __remove_from_page_cache(struct page *page); 141extern void __remove_from_page_cache(struct page *page);
139 142
@@ -218,6 +221,9 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
218{ 221{
219 int ret; 222 int ret;
220 223
224 if (unlikely(size == 0))
225 return 0;
226
221 /* 227 /*
222 * Writing zeroes into userspace here is OK, because we know that if 228 * Writing zeroes into userspace here is OK, because we know that if
223 * the zero gets there, we'll be overwriting it. 229 * the zero gets there, we'll be overwriting it.
@@ -237,19 +243,23 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
237 return ret; 243 return ret;
238} 244}
239 245
240static inline void fault_in_pages_readable(const char __user *uaddr, int size) 246static inline int fault_in_pages_readable(const char __user *uaddr, int size)
241{ 247{
242 volatile char c; 248 volatile char c;
243 int ret; 249 int ret;
244 250
251 if (unlikely(size == 0))
252 return 0;
253
245 ret = __get_user(c, uaddr); 254 ret = __get_user(c, uaddr);
246 if (ret == 0) { 255 if (ret == 0) {
247 const char __user *end = uaddr + size - 1; 256 const char __user *end = uaddr + size - 1;
248 257
249 if (((unsigned long)uaddr & PAGE_MASK) != 258 if (((unsigned long)uaddr & PAGE_MASK) !=
250 ((unsigned long)end & PAGE_MASK)) 259 ((unsigned long)end & PAGE_MASK))
251 __get_user(c, end); 260 ret = __get_user(c, end);
252 } 261 }
262 return ret;
253} 263}
254 264
255#endif /* _LINUX_PAGEMAP_H */ 265#endif /* _LINUX_PAGEMAP_H */
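With fault_in_pages_readable() now returning an error code, a write path can fail fast on an unreadable buffer instead of discovering the fault mid-copy. A sketch of the intended pattern (the helper name is hypothetical):

#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>

static ssize_t copy_one_page(struct page *page, const char __user *buf,
			     size_t bytes)
{
	char *kaddr;
	size_t left;

	/* Pre-fault the whole user range; bail out before doing any
	 * real work if it is not readable at all. */
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;

	kaddr = kmap(page);
	left = __copy_from_user(kaddr, buf, bytes);
	kunmap(page);

	return bytes - left;	/* bytes actually copied */
}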
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2c49561f9b45..df948b44edad 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1995,6 +1995,8 @@
1995#define PCI_VENDOR_ID_TOPIC 0x151f 1995#define PCI_VENDOR_ID_TOPIC 0x151f
1996#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 1996#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
1997 1997
1998#define PCI_VENDOR_ID_MAINPINE 0x1522
1999#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100
1998#define PCI_VENDOR_ID_ENE 0x1524 2000#define PCI_VENDOR_ID_ENE 0x1524
1999#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 2001#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
2000#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 2002#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
@@ -2324,6 +2326,8 @@
2324#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 2326#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
2325#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a 2327#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
2326#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e 2328#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
2329#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
2330#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
2327#define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031 2331#define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031
2328#define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032 2332#define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032
2329#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 2333#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f9e77d2ee320..b6116b4445c7 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -26,28 +26,31 @@
26#include <linux/rcupdate.h> 26#include <linux/rcupdate.h>
27 27
28/* 28/*
29 * A direct pointer (root->rnode pointing directly to a data item, 29 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
30 * rather than another radix_tree_node) is signalled by the low bit 30 * than a data item) is signalled by the low bit set in the root->rnode
31 * set in the root->rnode pointer. 31 * pointer.
32 * 32 *
33 * In this case root->height is also NULL, but the direct pointer tests are 33 * In this case root->height is > 0, but the indirect pointer tests are
34 * needed for RCU lookups when root->height is unreliable. 34 * needed for RCU lookups (because root->height is unreliable). The only
35 * time callers need worry about this is when doing a lookup_slot under
36 * RCU.
35 */ 37 */
36#define RADIX_TREE_DIRECT_PTR 1 38#define RADIX_TREE_INDIRECT_PTR 1
39#define RADIX_TREE_RETRY ((void *)-1UL)
37 40
38static inline void *radix_tree_ptr_to_direct(void *ptr) 41static inline void *radix_tree_ptr_to_indirect(void *ptr)
39{ 42{
40 return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR); 43 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
41} 44}
42 45
43static inline void *radix_tree_direct_to_ptr(void *ptr) 46static inline void *radix_tree_indirect_to_ptr(void *ptr)
44{ 47{
45 return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR); 48 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
46} 49}
47 50
48static inline int radix_tree_is_direct_ptr(void *ptr) 51static inline int radix_tree_is_indirect_ptr(void *ptr)
49{ 52{
50 return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR); 53 return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
51} 54}
52 55
53/*** radix-tree API starts here ***/ 56/*** radix-tree API starts here ***/
@@ -130,7 +133,10 @@ do { \
130 */ 133 */
131static inline void *radix_tree_deref_slot(void **pslot) 134static inline void *radix_tree_deref_slot(void **pslot)
132{ 135{
133 return radix_tree_direct_to_ptr(*pslot); 136 void *ret = *pslot;
137 if (unlikely(radix_tree_is_indirect_ptr(ret)))
138 ret = RADIX_TREE_RETRY;
139 return ret;
134} 140}
135/** 141/**
136 * radix_tree_replace_slot - replace item in a slot 142 * radix_tree_replace_slot - replace item in a slot
@@ -142,10 +148,8 @@ static inline void *radix_tree_deref_slot(void **pslot)
142 */ 148 */
143static inline void radix_tree_replace_slot(void **pslot, void *item) 149static inline void radix_tree_replace_slot(void **pslot, void *item)
144{ 150{
145 BUG_ON(radix_tree_is_direct_ptr(item)); 151 BUG_ON(radix_tree_is_indirect_ptr(item));
146 rcu_assign_pointer(*pslot, 152 rcu_assign_pointer(*pslot, item);
147 (void *)((unsigned long)item |
148 ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
149} 153}
150 154
151int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); 155int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
@@ -155,6 +159,8 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
155unsigned int 159unsigned int
156radix_tree_gang_lookup(struct radix_tree_root *root, void **results, 160radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
157 unsigned long first_index, unsigned int max_items); 161 unsigned long first_index, unsigned int max_items);
162unsigned long radix_tree_next_hole(struct radix_tree_root *root,
163 unsigned long index, unsigned long max_scan);
158int radix_tree_preload(gfp_t gfp_mask); 164int radix_tree_preload(gfp_t gfp_mask);
159void radix_tree_init(void); 165void radix_tree_init(void);
160void *radix_tree_tag_set(struct radix_tree_root *root, 166void *radix_tree_tag_set(struct radix_tree_root *root,
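The retry protocol implied by RADIX_TREE_RETRY: an RCU reader that catches the tree mid-expansion sees an indirect pointer in its slot and must restart the lookup. A sketch of a lockless lookup (the function name is hypothetical):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void *lookup_item_rcu(struct radix_tree_root *root, unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(root, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		/* Slot held an indirect pointer: the tree grew under
		 * us, so redo the lookup. */
		if (unlikely(item == RADIX_TREE_RETRY))
			goto repeat;
	}
	rcu_read_unlock();
	return item;
}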
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4efbd9c445f5..2dc7464cce52 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -20,4 +20,88 @@ static inline void sg_init_one(struct scatterlist *sg, const void *buf,
20 sg_set_buf(sg, buf, buflen); 20 sg_set_buf(sg, buf, buflen);
21} 21}
22 22
23/*
24 * We overload the LSB of the page pointer to indicate whether it's
25 * a valid sg entry, or whether it points to the start of a new scatterlist.
26 * Those low bits are there for everyone! (thanks mason :-)
27 */
28#define sg_is_chain(sg) ((unsigned long) (sg)->page & 0x01)
29#define sg_chain_ptr(sg) \
30 ((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01))
31
32/**
33 * sg_next - return the next scatterlist entry in a list
34 * @sg: The current sg entry
35 *
36 * Usually the next entry will be @sg@ + 1, but if this sg element is part
37 * of a chained scatterlist, it could jump to the start of a new
38 * scatterlist array.
39 *
 40 * Note that the caller must ensure that there are further entries after
 41 * the current entry; this function will NOT return NULL for the end of a list.
42 *
43 */
44static inline struct scatterlist *sg_next(struct scatterlist *sg)
45{
46 sg++;
47
48 if (unlikely(sg_is_chain(sg)))
49 sg = sg_chain_ptr(sg);
50
51 return sg;
52}
53
54/*
55 * Loop over each sg element, following the pointer to a new list if necessary
56 */
57#define for_each_sg(sglist, sg, nr, __i) \
58 for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
59
60/**
61 * sg_last - return the last scatterlist entry in a list
62 * @sgl: First entry in the scatterlist
63 * @nents: Number of entries in the scatterlist
64 *
 65 * Should only be used sparingly, as it (currently) scans the entire list
66 * to get the last entry.
67 *
68 * Note that the @sgl@ pointer passed in need not be the first one,
69 * the important bit is that @nents@ denotes the number of entries that
70 * exist from @sgl@.
71 *
72 */
73static inline struct scatterlist *sg_last(struct scatterlist *sgl,
74 unsigned int nents)
75{
76#ifndef ARCH_HAS_SG_CHAIN
77 struct scatterlist *ret = &sgl[nents - 1];
78#else
79 struct scatterlist *sg, *ret = NULL;
80 int i;
81
82 for_each_sg(sgl, sg, nents, i)
83 ret = sg;
84
85#endif
86 return ret;
87}
88
89/**
90 * sg_chain - Chain two sglists together
91 * @prv: First scatterlist
92 * @prv_nents: Number of entries in prv
93 * @sgl: Second scatterlist
94 *
95 * Links @prv@ and @sgl@ together, to form a longer scatterlist.
96 *
97 */
98static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
99 struct scatterlist *sgl)
100{
101#ifndef ARCH_HAS_SG_CHAIN
102 BUG();
103#endif
104 prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01);
105}
106
23#endif /* _LINUX_SCATTERLIST_H */ 107#endif /* _LINUX_SCATTERLIST_H */
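A sketch of chaining in use, assuming an architecture that defines ARCH_HAS_SG_CHAIN (the function name is hypothetical). Note that the chain entry consumes the last slot of the first array, so only prv_nents - 1 of its entries carry data:

#include <linux/scatterlist.h>

static unsigned int chained_total_length(struct scatterlist *first,
					 unsigned int n1,
					 struct scatterlist *second,
					 unsigned int n2)
{
	struct scatterlist *sg;
	unsigned int total = 0, i;

	/* first[n1 - 1] becomes a chain entry pointing at second[] */
	sg_chain(first, n1, second);

	/* (n1 - 1) data entries in first, then n2 in second; sg_next()
	 * transparently hops across the chain entry. */
	for_each_sg(first, sg, (n1 - 1) + n2, i)
		total += sg->length;

	return total;
}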
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 228e0a8ce248..592e3a55f818 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,8 +1,6 @@
1#ifndef _LINUX_SCHED_H 1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H 2#define _LINUX_SCHED_H
3 3
4#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
5
6/* 4/*
7 * cloning flags: 5 * cloning flags:
8 */ 6 */
@@ -58,12 +56,12 @@ struct sched_param {
58#include <linux/cpumask.h> 56#include <linux/cpumask.h>
59#include <linux/errno.h> 57#include <linux/errno.h>
60#include <linux/nodemask.h> 58#include <linux/nodemask.h>
59#include <linux/mm_types.h>
61 60
62#include <asm/system.h> 61#include <asm/system.h>
63#include <asm/semaphore.h> 62#include <asm/semaphore.h>
64#include <asm/page.h> 63#include <asm/page.h>
65#include <asm/ptrace.h> 64#include <asm/ptrace.h>
66#include <asm/mmu.h>
67#include <asm/cputime.h> 65#include <asm/cputime.h>
68 66
69#include <linux/smp.h> 67#include <linux/smp.h>
@@ -319,7 +317,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
319#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member) 317#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
320#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) 318#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
321#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) 319#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
322typedef atomic_long_t mm_counter_t;
323 320
324#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ 321#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
325/* 322/*
@@ -331,7 +328,6 @@ typedef atomic_long_t mm_counter_t;
331#define add_mm_counter(mm, member, value) (mm)->_##member += (value) 328#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
332#define inc_mm_counter(mm, member) (mm)->_##member++ 329#define inc_mm_counter(mm, member) (mm)->_##member++
333#define dec_mm_counter(mm, member) (mm)->_##member-- 330#define dec_mm_counter(mm, member) (mm)->_##member--
334typedef unsigned long mm_counter_t;
335 331
336#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ 332#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
337 333
@@ -368,74 +364,6 @@ extern int get_dumpable(struct mm_struct *mm);
368#define MMF_DUMP_FILTER_DEFAULT \ 364#define MMF_DUMP_FILTER_DEFAULT \
369 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED)) 365 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
370 366
371struct mm_struct {
372 struct vm_area_struct * mmap; /* list of VMAs */
373 struct rb_root mm_rb;
374 struct vm_area_struct * mmap_cache; /* last find_vma result */
375 unsigned long (*get_unmapped_area) (struct file *filp,
376 unsigned long addr, unsigned long len,
377 unsigned long pgoff, unsigned long flags);
378 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
379 unsigned long mmap_base; /* base of mmap area */
380 unsigned long task_size; /* size of task vm space */
381 unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
382 unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
383 pgd_t * pgd;
384 atomic_t mm_users; /* How many users with user space? */
385 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
386 int map_count; /* number of VMAs */
387 struct rw_semaphore mmap_sem;
388 spinlock_t page_table_lock; /* Protects page tables and some counters */
389
390 struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
391 * together off init_mm.mmlist, and are protected
392 * by mmlist_lock
393 */
394
395 /* Special counters, in some configurations protected by the
396 * page_table_lock, in other configurations by being atomic.
397 */
398 mm_counter_t _file_rss;
399 mm_counter_t _anon_rss;
400
401 unsigned long hiwater_rss; /* High-watermark of RSS usage */
402 unsigned long hiwater_vm; /* High-water virtual memory usage */
403
404 unsigned long total_vm, locked_vm, shared_vm, exec_vm;
405 unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
406 unsigned long start_code, end_code, start_data, end_data;
407 unsigned long start_brk, brk, start_stack;
408 unsigned long arg_start, arg_end, env_start, env_end;
409
410 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
411
412 cpumask_t cpu_vm_mask;
413
414 /* Architecture-specific MM context */
415 mm_context_t context;
416
417 /* Swap token stuff */
418 /*
419 * Last value of global fault stamp as seen by this process.
420 * In other words, this value gives an indication of how long
421 * it has been since this task got the token.
422 * Look at mm/thrash.c
423 */
424 unsigned int faultstamp;
425 unsigned int token_priority;
426 unsigned int last_interval;
427
428 unsigned long flags; /* Must use atomic bitops to access the bits */
429
430 /* coredumping support */
431 int core_waiters;
432 struct completion *core_startup_done, core_done;
433
434 /* aio bits */
435 rwlock_t ioctx_list_lock;
436 struct kioctx *ioctx_list;
437};
438
439struct sighand_struct { 367struct sighand_struct {
440 atomic_t count; 368 atomic_t count;
441 struct k_sigaction action[_NSIG]; 369 struct k_sigaction action[_NSIG];
@@ -801,9 +729,6 @@ struct sched_domain {
801#endif 729#endif
802}; 730};
803 731
804extern int partition_sched_domains(cpumask_t *partition1,
805 cpumask_t *partition2);
806
807#endif /* CONFIG_SMP */ 732#endif /* CONFIG_SMP */
808 733
809/* 734/*
diff --git a/include/linux/security.h b/include/linux/security.h
index 1a15526e9f67..928d4793c6f4 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -504,6 +504,13 @@ struct request_sock;
504 * @file contains the file structure being received. 504 * @file contains the file structure being received.
505 * Return 0 if permission is granted. 505 * Return 0 if permission is granted.
506 * 506 *
507 * Security hook for dentry
508 *
509 * @dentry_open
510 * Save open-time permission checking state for later use upon
511 * file_permission, and recheck access if anything has changed
512 * since inode_permission.
513 *
507 * Security hooks for task operations. 514 * Security hooks for task operations.
508 * 515 *
509 * @task_create: 516 * @task_create:
@@ -1256,6 +1263,7 @@ struct security_operations {
1256 int (*file_send_sigiotask) (struct task_struct * tsk, 1263 int (*file_send_sigiotask) (struct task_struct * tsk,
1257 struct fown_struct * fown, int sig); 1264 struct fown_struct * fown, int sig);
1258 int (*file_receive) (struct file * file); 1265 int (*file_receive) (struct file * file);
1266 int (*dentry_open) (struct file *file);
1259 1267
1260 int (*task_create) (unsigned long clone_flags); 1268 int (*task_create) (unsigned long clone_flags);
1261 int (*task_alloc_security) (struct task_struct * p); 1269 int (*task_alloc_security) (struct task_struct * p);
@@ -1864,6 +1872,11 @@ static inline int security_file_receive (struct file *file)
1864 return security_ops->file_receive (file); 1872 return security_ops->file_receive (file);
1865} 1873}
1866 1874
1875static inline int security_dentry_open (struct file *file)
1876{
1877 return security_ops->dentry_open (file);
1878}
1879
1867static inline int security_task_create (unsigned long clone_flags) 1880static inline int security_task_create (unsigned long clone_flags)
1868{ 1881{
1869 return security_ops->task_create (clone_flags); 1882 return security_ops->task_create (clone_flags);
@@ -2546,6 +2559,11 @@ static inline int security_file_receive (struct file *file)
2546 return 0; 2559 return 0;
2547} 2560}
2548 2561
2562static inline int security_dentry_open (struct file *file)
2563{
2564 return 0;
2565}
2566
2549static inline int security_task_create (unsigned long clone_flags) 2567static inline int security_task_create (unsigned long clone_flags)
2550{ 2568{
2551 return 0; 2569 return 0;
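On the LSM side the new hook is one more function pointer in security_operations; a minimal no-op module would wire it up as follows (sketch only; "example_ops" and the hook body are hypothetical):

#include <linux/security.h>

/* A permissive implementation of the new hook; a real module would
 * recheck the access decision saved at open time here. */
static int example_dentry_open(struct file *file)
{
	return 0;
}

static struct security_operations example_ops = {
	.dentry_open = example_dentry_open,
	/* ... remaining hooks ... */
};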
diff --git a/include/linux/selection.h b/include/linux/selection.h
index f9457861937c..8cdaa1151d2e 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -13,6 +13,7 @@
13struct tty_struct; 13struct tty_struct;
14 14
15extern struct vc_data *sel_cons; 15extern struct vc_data *sel_cons;
16struct tty_struct;
16 17
17extern void clear_selection(void); 18extern void clear_selection(void);
18extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty); 19extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 09d17b06bf02..4db77249281c 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -291,7 +291,8 @@ struct uart_port {
291 resource_size_t mapbase; /* for ioremap */ 291 resource_size_t mapbase; /* for ioremap */
292 struct device *dev; /* parent device */ 292 struct device *dev; /* parent device */
293 unsigned char hub6; /* this should be in the 8250 driver */ 293 unsigned char hub6; /* this should be in the 8250 driver */
294 unsigned char unused[3]; 294 unsigned char suspended;
295 unsigned char unused[2];
295 void *private_data; /* generic platform data pointer */ 296 void *private_data; /* generic platform data pointer */
296}; 297};
297 298
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d859354b9e51..3a5bad3ad126 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,12 +24,14 @@
24#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ 24#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
25#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ 25#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
26#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ 26#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
27#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
28#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ 27#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
29#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 28#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
30#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 29#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
31#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 30#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
32 31
32/* The following flags affect the page allocator grouping pages by mobility */
33#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
34#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
33/* 35/*
34 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 36 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
35 * 37 *
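SLAB_TEMPORARY is an alias for SLAB_RECLAIM_ACCOUNT, so caches of short-lived objects end up grouped on reclaimable pageblocks by the mobility-aware page allocator. A hedged usage sketch (the cache and its object type are made up):

	/* Hypothetical cache of short-lived request objects. */
	struct example_req {
		struct list_head list;
		int id;
	};

	static struct kmem_cache *example_req_cache;

	static int __init example_req_init(void)
	{
		example_req_cache = kmem_cache_create("example_req",
					sizeof(struct example_req), 0,
					SLAB_TEMPORARY, NULL);
		return example_req_cache ? 0 : -ENOMEM;
	}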
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 74962077f632..d65159d1d4f5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,6 +11,14 @@
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13 13
14struct kmem_cache_cpu {
15 void **freelist;
16 struct page *page;
17 int node;
18 unsigned int offset;
19 unsigned int objsize;
20};
21
14struct kmem_cache_node { 22struct kmem_cache_node {
15 spinlock_t list_lock; /* Protect partial list and nr_partial */ 23 spinlock_t list_lock; /* Protect partial list and nr_partial */
16 unsigned long nr_partial; 24 unsigned long nr_partial;
@@ -54,7 +62,11 @@ struct kmem_cache {
54 int defrag_ratio; 62 int defrag_ratio;
55 struct kmem_cache_node *node[MAX_NUMNODES]; 63 struct kmem_cache_node *node[MAX_NUMNODES];
56#endif 64#endif
57 struct page *cpu_slab[NR_CPUS]; 65#ifdef CONFIG_SMP
66 struct kmem_cache_cpu *cpu_slab[NR_CPUS];
67#else
68 struct kmem_cache_cpu cpu_slab;
69#endif
58}; 70};
59 71
60/* 72/*
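Per-CPU state grows from a bare struct page pointer into its own kmem_cache_cpu, caching the freelist plus the free-pointer offset and object size so the hot path never touches struct kmem_cache. Roughly the lockless fast path this enables, as a simplified sketch (the real slab_alloc() also disables interrupts and handles NUMA placement and debug cases):

	static void *example_fast_alloc(struct kmem_cache *s,
					struct kmem_cache_cpu *c)
	{
		void **object = c->freelist;

		if (unlikely(!object))
			return NULL;	/* real code takes the slow path */
		c->freelist = object[c->offset];  /* offset is in words */
		return object;
	}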
@@ -72,7 +84,7 @@ struct kmem_cache {
72 * We keep the general caches in an array of slab caches that are used for 84 * We keep the general caches in an array of slab caches that are used for
73 * 2^x bytes of allocations. 85 * 2^x bytes of allocations.
74 */ 86 */
75extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; 87extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
76 88
77/* 89/*
78 * Sorry that the following has to be that ugly but some versions of GCC 90 * Sorry that the following has to be that ugly but some versions of GCC
@@ -83,9 +95,6 @@ static __always_inline int kmalloc_index(size_t size)
83 if (!size) 95 if (!size)
84 return 0; 96 return 0;
85 97
86 if (size > KMALLOC_MAX_SIZE)
87 return -1;
88
89 if (size <= KMALLOC_MIN_SIZE) 98 if (size <= KMALLOC_MIN_SIZE)
90 return KMALLOC_SHIFT_LOW; 99 return KMALLOC_SHIFT_LOW;
91 100
@@ -102,6 +111,10 @@ static __always_inline int kmalloc_index(size_t size)
102 if (size <= 512) return 9; 111 if (size <= 512) return 9;
103 if (size <= 1024) return 10; 112 if (size <= 1024) return 10;
104 if (size <= 2 * 1024) return 11; 113 if (size <= 2 * 1024) return 11;
114/*
115 * The following is only needed to support architectures with a larger page
116 * size than 4k.
117 */
105 if (size <= 4 * 1024) return 12; 118 if (size <= 4 * 1024) return 12;
106 if (size <= 8 * 1024) return 13; 119 if (size <= 8 * 1024) return 13;
107 if (size <= 16 * 1024) return 14; 120 if (size <= 16 * 1024) return 14;
@@ -109,13 +122,9 @@ static __always_inline int kmalloc_index(size_t size)
109 if (size <= 64 * 1024) return 16; 122 if (size <= 64 * 1024) return 16;
110 if (size <= 128 * 1024) return 17; 123 if (size <= 128 * 1024) return 17;
111 if (size <= 256 * 1024) return 18; 124 if (size <= 256 * 1024) return 18;
112 if (size <= 512 * 1024) return 19; 125 if (size <= 512 * 1024) return 19;
113 if (size <= 1024 * 1024) return 20; 126 if (size <= 1024 * 1024) return 20;
114 if (size <= 2 * 1024 * 1024) return 21; 127 if (size <= 2 * 1024 * 1024) return 21;
115 if (size <= 4 * 1024 * 1024) return 22;
116 if (size <= 8 * 1024 * 1024) return 23;
117 if (size <= 16 * 1024 * 1024) return 24;
118 if (size <= 32 * 1024 * 1024) return 25;
119 return -1; 128 return -1;
120 129
121/* 130/*
@@ -140,19 +149,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
140 if (index == 0) 149 if (index == 0)
141 return NULL; 150 return NULL;
142 151
143 /*
144 * This function only gets expanded if __builtin_constant_p(size), so
145 * testing it here shouldn't be needed. But some versions of gcc need
146 * help.
147 */
148 if (__builtin_constant_p(size) && index < 0) {
149 /*
150 * Generate a link failure. Would be great if we could
151 * do something to stop the compile here.
152 */
153 extern void __kmalloc_size_too_large(void);
154 __kmalloc_size_too_large();
155 }
156 return &kmalloc_caches[index]; 152 return &kmalloc_caches[index];
157} 153}
158 154
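Since requests above PAGE_SIZE/2 now bypass the slab entirely (see the kmalloc() hunk below), kmalloc_index() no longer needs the multi-megabyte cases or the link-failure trick for oversized constants. For reference, how the compile-time folding works out, assuming 4 KiB pages:

	/*
	 * Worked examples (the compiler folds these to constants):
	 *
	 *	kmalloc_index(100)  == 7    -> 128-byte kmalloc cache
	 *	kmalloc_index(2048) == 11   -> largest slab-backed size here
	 *	kmalloc_index(3000) == 12   -> reachable only on >4 KiB pages
	 */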
@@ -168,15 +164,21 @@ void *__kmalloc(size_t size, gfp_t flags);
168 164
169static __always_inline void *kmalloc(size_t size, gfp_t flags) 165static __always_inline void *kmalloc(size_t size, gfp_t flags)
170{ 166{
171 if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { 167 if (__builtin_constant_p(size)) {
172 struct kmem_cache *s = kmalloc_slab(size); 168 if (size > PAGE_SIZE / 2)
169 return (void *)__get_free_pages(flags | __GFP_COMP,
170 get_order(size));
173 171
174 if (!s) 172 if (!(flags & SLUB_DMA)) {
175 return ZERO_SIZE_PTR; 173 struct kmem_cache *s = kmalloc_slab(size);
176 174
177 return kmem_cache_alloc(s, flags); 175 if (!s)
178 } else 176 return ZERO_SIZE_PTR;
179 return __kmalloc(size, flags); 177
178 return kmem_cache_alloc(s, flags);
179 }
180 }
181 return __kmalloc(size, flags);
180} 182}
181 183
182#ifdef CONFIG_NUMA 184#ifdef CONFIG_NUMA
@@ -185,15 +187,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
185 187
186static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 188static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
187{ 189{
188 if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { 190 if (__builtin_constant_p(size) &&
189 struct kmem_cache *s = kmalloc_slab(size); 191 size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
192 struct kmem_cache *s = kmalloc_slab(size);
190 193
191 if (!s) 194 if (!s)
192 return ZERO_SIZE_PTR; 195 return ZERO_SIZE_PTR;
193 196
194 return kmem_cache_alloc_node(s, flags, node); 197 return kmem_cache_alloc_node(s, flags, node);
195 } else 198 }
196 return __kmalloc_node(size, flags, node); 199 return __kmalloc_node(size, flags, node);
197} 200}
198#endif 201#endif
199 202
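The upshot: a constant-size kmalloc() above half a page compiles directly into a compound-page allocation, with no per-size cache behind it. A hedged illustration of how the two constant paths resolve, assuming 4 KiB pages (the function is illustrative):

	static void example_buffers(void)
	{
		/* <= PAGE_SIZE/2: folds to kmem_cache_alloc() on the
		 * 64-byte kmalloc cache */
		void *small = kmalloc(64, GFP_KERNEL);

		/* > PAGE_SIZE/2: folds to __get_free_pages(GFP_KERNEL |
		 * __GFP_COMP, get_order(8192)) */
		void *large = kmalloc(8192, GFP_KERNEL);

		kfree(large);	/* kfree() copes with both cases */
		kfree(small);
	}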
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index 014e73b31fc0..df7620dd8f31 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -15,6 +15,24 @@
15 15
16/* config 1 */ 16/* config 1 */
17#define SM501_SYSTEM_CONTROL (0x000000) 17#define SM501_SYSTEM_CONTROL (0x000000)
18
19#define SM501_SYSCTRL_PANEL_TRISTATE (1<<0)
20#define SM501_SYSCTRL_MEM_TRISTATE (1<<1)
21#define SM501_SYSCTRL_CRT_TRISTATE (1<<2)
22
23#define SM501_SYSCTRL_PCI_SLAVE_BURST_MASK (3<<4)
24#define SM501_SYSCTRL_PCI_SLAVE_BURST_1 (0<<4)
25#define SM501_SYSCTRL_PCI_SLAVE_BURST_2 (1<<4)
26#define SM501_SYSCTRL_PCI_SLAVE_BURST_4 (2<<4)
27#define SM501_SYSCTRL_PCI_SLAVE_BURST_8 (3<<4)
28
29#define SM501_SYSCTRL_PCI_CLOCK_RUN_EN (1<<6)
30#define SM501_SYSCTRL_PCI_RETRY_DISABLE (1<<7)
31#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
32#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
33
34/* miscellaneous control */
35
18#define SM501_MISC_CONTROL (0x000004) 36#define SM501_MISC_CONTROL (0x000004)
19 37
20#define SM501_MISC_BUS_SH (0x0) 38#define SM501_MISC_BUS_SH (0x0)
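A hedged sketch of programming the new burst-control bits; sm501_modify_reg() is the read-modify-write helper exported by the sm501 core, and its exact signature should be treated as an assumption here:

	static void example_sm501_pci_setup(struct device *dev)
	{
		/* select 8-word PCI slave bursts and enable burst reads */
		sm501_modify_reg(dev, SM501_SYSTEM_CONTROL,
				 SM501_SYSCTRL_PCI_SLAVE_BURST_8 |
				 SM501_SYSCTRL_PCI_BURST_READ_EN,
				 SM501_SYSCTRL_PCI_SLAVE_BURST_MASK);
	}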
diff --git a/include/linux/spi/at73c213.h b/include/linux/spi/at73c213.h
new file mode 100644
index 000000000000..0f20a70e5eb4
--- /dev/null
+++ b/include/linux/spi/at73c213.h
@@ -0,0 +1,25 @@
1/*
2 * Board-specific data used to set up AT73c213 audio DAC driver.
3 */
4
5#ifndef __LINUX_SPI_AT73C213_H
6#define __LINUX_SPI_AT73C213_H
7
8/**
9 * at73c213_board_info - how the external DAC is wired to the device.
10 *
11 * @ssc_id: SSC platform_driver id the DAC shall use to stream the audio.
12 * @dac_clk: the external clock used to provide master clock to the DAC.
 13 * @shortname: a short description for the DAC, seen by userspace tools.
14 *
15 * This struct contains the configuration of the hardware connection to the
16 * external DAC. The DAC needs a master clock and a I2S audio stream. It also
17 * provides a name which is used to identify it in userspace tools.
18 */
19struct at73c213_board_info {
20 int ssc_id;
21 struct clk *dac_clk;
22 char shortname[32];
23};
24
25#endif /* __LINUX_SPI_AT73C213_H */
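A board file would typically hand this struct to the driver through spi_board_info's platform_data. A hedged sketch (the board wiring, bus and chip-select numbers are made up; dac_clk is normally filled in with clk_get() during board init):

	static struct at73c213_board_info example_dac_info = {
		.ssc_id		= 0,
		.shortname	= "example board DAC",
		/* .dac_clk set at init time via clk_get() */
	};

	static struct spi_board_info example_spi_devices[] __initdata = {
		{
			.modalias	= "at73c213",
			.platform_data	= &example_dac_info,
			.bus_num	= 0,
			.chip_select	= 0,
			.max_speed_hz	= 2000000,
		},
	};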
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 002a3cddbdd5..387e428f1cdf 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -195,7 +195,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
195 195
196/** 196/**
197 * struct spi_master - interface to SPI master controller 197 * struct spi_master - interface to SPI master controller
198 * @cdev: class interface to this driver 198 * @dev: device interface to this driver
199 * @bus_num: board-specific (and often SOC-specific) identifier for a 199 * @bus_num: board-specific (and often SOC-specific) identifier for a
200 * given SPI controller. 200 * given SPI controller.
201 * @num_chipselect: chipselects are used to distinguish individual 201 * @num_chipselect: chipselects are used to distinguish individual
@@ -222,7 +222,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
222 * message's completion function when the transaction completes. 222 * message's completion function when the transaction completes.
223 */ 223 */
224struct spi_master { 224struct spi_master {
225 struct class_device cdev; 225 struct device dev;
226 226
227 /* other than negative (== assign one dynamically), bus_num is fully 227 /* other than negative (== assign one dynamically), bus_num is fully
228 * board-specific. usually that simplifies to being SOC-specific. 228 * board-specific. usually that simplifies to being SOC-specific.
@@ -268,17 +268,17 @@ struct spi_master {
268 268
269static inline void *spi_master_get_devdata(struct spi_master *master) 269static inline void *spi_master_get_devdata(struct spi_master *master)
270{ 270{
271 return class_get_devdata(&master->cdev); 271 return dev_get_drvdata(&master->dev);
272} 272}
273 273
274static inline void spi_master_set_devdata(struct spi_master *master, void *data) 274static inline void spi_master_set_devdata(struct spi_master *master, void *data)
275{ 275{
276 class_set_devdata(&master->cdev, data); 276 dev_set_drvdata(&master->dev, data);
277} 277}
278 278
279static inline struct spi_master *spi_master_get(struct spi_master *master) 279static inline struct spi_master *spi_master_get(struct spi_master *master)
280{ 280{
281 if (!master || !class_device_get(&master->cdev)) 281 if (!master || !get_device(&master->dev))
282 return NULL; 282 return NULL;
283 return master; 283 return master;
284} 284}
@@ -286,7 +286,7 @@ static inline struct spi_master *spi_master_get(struct spi_master *master)
286static inline void spi_master_put(struct spi_master *master) 286static inline void spi_master_put(struct spi_master *master)
287{ 287{
288 if (master) 288 if (master)
289 class_device_put(&master->cdev); 289 put_device(&master->dev);
290} 290}
291 291
292 292
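With a real struct device embedded, the generic driver-model helpers replace the class_device ones, and drvdata round-trips work as usual. A hedged controller-driver sketch (the names are illustrative):

	struct example_spi_ctlr {
		void __iomem *regs;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct spi_master *master;
		struct example_spi_ctlr *ctlr;

		master = spi_alloc_master(&pdev->dev, sizeof(*ctlr));
		if (!master)
			return -ENOMEM;

		/* now backed by dev_get_drvdata() on master->dev */
		ctlr = spi_master_get_devdata(master);
		/* ... map ctlr->regs, then spi_register_master(master) ... */
		return 0;
	}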
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 46705e91573d..c1527c2ef3cb 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -481,7 +481,7 @@ static inline void *get_gadget_data (struct usb_gadget *gadget)
481 481
482/** 482/**
483 * gadget_is_dualspeed - return true iff the hardware handles high speed 483 * gadget_is_dualspeed - return true iff the hardware handles high speed
484 * @gadget: controller that might support both high and full speeds 484 * @g: controller that might support both high and full speeds
485 */ 485 */
486static inline int gadget_is_dualspeed(struct usb_gadget *g) 486static inline int gadget_is_dualspeed(struct usb_gadget *g)
487{ 487{
@@ -497,7 +497,7 @@ static inline int gadget_is_dualspeed(struct usb_gadget *g)
497 497
498/** 498/**
499 * gadget_is_otg - return true iff the hardware is OTG-ready 499 * gadget_is_otg - return true iff the hardware is OTG-ready
500 * @gadget: controller that might have a Mini-AB connector 500 * @g: controller that might have a Mini-AB connector
501 * 501 *
502 * This is a runtime test, since kernels with a USB-OTG stack sometimes 502 * This is a runtime test, since kernels with a USB-OTG stack sometimes
503 * run on boards which only have a Mini-B (or Mini-A) connector. 503 * run on boards which only have a Mini-B (or Mini-A) connector.
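Both hunks are kernel-doc fixes: the parameter is named g, not gadget. For context, a hedged sketch of how a gadget driver might key descriptor selection off these predicates (the descriptor tables are hypothetical):

	static const struct usb_descriptor_header **
	example_choose_descriptors(struct usb_gadget *g)
	{
		if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
			return example_hs_descriptors;	/* hypothetical */
		return example_fs_descriptors;		/* hypothetical */
	}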