Diffstat (limited to 'include/linux')
35 files changed, 1191 insertions, 681 deletions
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2873b732bb1..94f77cce27fa 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -129,6 +129,7 @@ enum {
 	ATA_CMD_READ_EXT	= 0x25,
 	ATA_CMD_WRITE		= 0xCA,
 	ATA_CMD_WRITE_EXT	= 0x35,
+	ATA_CMD_WRITE_FUA_EXT	= 0x3D,
 	ATA_CMD_PIO_READ	= 0x20,
 	ATA_CMD_PIO_READ_EXT	= 0x24,
 	ATA_CMD_PIO_WRITE	= 0x30,
@@ -137,10 +138,13 @@ enum {
 	ATA_CMD_READ_MULTI_EXT	= 0x29,
 	ATA_CMD_WRITE_MULTI	= 0xC5,
 	ATA_CMD_WRITE_MULTI_EXT	= 0x39,
+	ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
 	ATA_CMD_SET_FEATURES	= 0xEF,
 	ATA_CMD_PACKET		= 0xA0,
 	ATA_CMD_VERIFY		= 0x40,
 	ATA_CMD_VERIFY_EXT	= 0x42,
+	ATA_CMD_STANDBYNOW1	= 0xE0,
+	ATA_CMD_IDLEIMMEDIATE	= 0xE1,
 	ATA_CMD_INIT_DEV_PARAMS	= 0x91,
 
 	/* SETFEATURES stuff */
@@ -192,6 +196,7 @@ enum {
 	ATA_TFLAG_DEVICE	= (1 << 2), /* enable r/w to device reg */
 	ATA_TFLAG_WRITE		= (1 << 3), /* data dir: host->dev==1 (write) */
 	ATA_TFLAG_LBA		= (1 << 4), /* enable LBA */
+	ATA_TFLAG_FUA		= (1 << 5), /* enable FUA */
 };
 
 enum ata_tf_protocols {
@@ -245,7 +250,8 @@ struct ata_taskfile {
 #define ata_id_is_sata(id)	((id)[93] == 0)
 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
+#define ata_id_has_fua(id)	((id)[84] & (1 << 6))
 #define ata_id_has_flush(id)	((id)[83] & (1 << 12))
 #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
 #define ata_id_has_lba48(id)	((id)[83] & (1 << 10))
 #define ata_id_has_wcache(id)	((id)[82] & (1 << 5))
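The ata.h hunks add the FUA (forced unit access) plumbing: an identify-data probe plus the 48-bit FUA write opcodes. A minimal, self-contained sketch of how the pieces fit together — `select_write_opcode()` is a hypothetical helper, not part of the patch, and the macros are copied from the hunk above (FUA writes exist only as 48-bit commands, hence the LBA48 check):

```c
#include <stdint.h>

typedef uint16_t u16;
typedef uint8_t u8;

/* Copied from the hunk above: IDENTIFY word 84 bit 6 advertises FUA,
 * word 83 bit 10 advertises LBA48. */
#define ata_id_has_fua(id)	((id)[84] & (1 << 6))
#define ata_id_has_lba48(id)	((id)[83] & (1 << 10))

#define ATA_CMD_WRITE_EXT	0x35
#define ATA_CMD_WRITE_FUA_EXT	0x3D

/* Hypothetical helper: upgrade an LBA48 DMA write to the FUA variant
 * when the caller asked for forced unit access and the device can do it. */
static u8 select_write_opcode(const u16 *id, int want_fua)
{
	if (want_fua && ata_id_has_fua(id) && ata_id_has_lba48(id))
		return ATA_CMD_WRITE_FUA_EXT;
	return ATA_CMD_WRITE_EXT;
}
```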
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a18500d196e1..fb0985377421 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
 	int count[2];
@@ -207,6 +207,7 @@ enum rq_flag_bits {
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
 	__REQ_CMD,		/* is a regular fs rw request */
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_STARTED,		/* drive already may have started this one */
@@ -230,9 +231,7 @@ enum rq_flag_bits {
 	__REQ_PM_SUSPEND,	/* suspend request */
 	__REQ_PM_RESUME,	/* resume request */
 	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_BAR_PREFLUSH,	/* barrier pre-flush done */
-	__REQ_BAR_POSTFLUSH,	/* barrier post-flush */
-	__REQ_BAR_FLUSH,	/* rq is the flush request */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -241,6 +240,7 @@ enum rq_flag_bits {
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
 #define REQ_CMD		(1 << __REQ_CMD)
 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
 #define REQ_STARTED	(1 << __REQ_STARTED)
@@ -260,9 +260,7 @@ enum rq_flag_bits {
 #define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
 #define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
 #define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH	(1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH	(1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH	(1 << __REQ_BAR_FLUSH)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
 
 /*
  * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@@ -292,8 +290,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -335,7 +332,6 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
-	end_flush_fn		*end_flush_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -420,14 +416,11 @@ struct request_queue
 	/*
 	 * reserved for flush operations
 	 */
-	struct request		*flush_rq;
-	unsigned char		ordered;
-};
-
-enum {
-	QUEUE_ORDERED_NONE,
-	QUEUE_ORDERED_TAG,
-	QUEUE_ORDERED_FLUSH,
+	unsigned int		ordered, next_ordered, ordseq;
+	int			orderr, ordcolor;
+	struct request		pre_flush_rq, bar_rq, post_flush_rq;
+	struct request		*orig_bar_rq;
+	unsigned int		bi_size;
 };
 
 #define RQ_INACTIVE		(-1)
@@ -445,12 +438,51 @@ enum {
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
+
+enum {
+	/*
+	 * Hardbarrier is supported with one of the following methods.
+	 *
+	 * NONE		: hardbarrier unsupported
+	 * DRAIN	: ordering by draining is enough
+	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
+	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
+	 * TAG		: ordering by tag is enough
+	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
+	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
+	 */
+	QUEUE_ORDERED_NONE	= 0x00,
+	QUEUE_ORDERED_DRAIN	= 0x01,
+	QUEUE_ORDERED_TAG	= 0x02,
+
+	QUEUE_ORDERED_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_POSTFLUSH	= 0x20,
+	QUEUE_ORDERED_FUA	= 0x40,
+
+	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+	/*
+	 * Ordered operation sequence
+	 */
+	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
+	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
+	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
+	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
+	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
+	QUEUE_ORDSEQ_DONE	= 0x20,
+};
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q)	test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
 #define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
@@ -466,8 +498,7 @@ enum {
 
 #define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -560,7 +591,7 @@ extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(request_queue_t *, struct request *);
-extern void blk_end_sync_rq(struct request *rq);
+extern void blk_end_sync_rq(struct request *rq, int error);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
@@ -582,8 +613,7 @@ extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_io
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
-				  struct request *, int,
-				  void (*done)(struct request *));
+				  struct request *, int, rq_end_io_fn *);
 
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
@@ -614,7 +644,7 @@ static inline void blk_run_address_space(struct address_space *mapping)
  */
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
 
 /*
@@ -665,11 +695,12 @@ extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
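The blkdev.h changes replace the old three-flag barrier scheme with a single `blk_queue_ordered()` registration plus the `QUEUE_ORDERED_*` / `QUEUE_ORDSEQ_*` state machine, and thread an error argument through `rq_end_io_fn` and `end_that_request_last()`. A hedged sketch of how a driver might adopt the new interface — the driver names are invented for illustration; only the declarations from these hunks are assumed:

```c
#include <linux/blkdev.h>

/* prepare_flush_fn now returns void: encode a driver-specific cache
 * flush into rq before the block layer issues it as a pre/post flush. */
static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
{
	/* ... fill in a FLUSH CACHE command for this device ... */
}

/* rq_end_io_fn gained an error argument (0 on success); callbacks
 * passed to blk_execute_rq_nowait() now use this signature. */
static void mydrv_end_io(struct request *rq, int error)
{
	/* ... complete driver-private bookkeeping ... */
}

static void mydrv_init_queue(request_queue_t *q, int has_fua)
{
	/* Drain-based ordering: pre-flush + FUA barrier write when the
	 * device supports FUA, otherwise pre- and post-flushes. */
	if (has_fua)
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FUA,
				  mydrv_prepare_flush);
	else
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  mydrv_prepare_flush);
}
```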
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 3b03b0b868dd..993da8cc9706 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -43,50 +43,38 @@ typedef struct bootmem_data {
 extern unsigned long __init bootmem_bootmap_pages (unsigned long);
 extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
 extern void __init free_bootmem (unsigned long addr, unsigned long size);
-extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
+extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
+extern void * __init __alloc_bootmem_low(unsigned long size,
+					 unsigned long align,
+					 unsigned long goal);
+extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
+					      unsigned long size,
+					      unsigned long align,
+					      unsigned long goal);
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
 #define alloc_bootmem(x) \
 	__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low(x) \
-	__alloc_bootmem((x), SMP_CACHE_BYTES, 0)
+	__alloc_bootmem_low((x), SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem((x), PAGE_SIZE, 0)
-
-#define alloc_bootmem_limit(x, limit) \
-	__alloc_bootmem_limit((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_limit(x, limit) \
-	__alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit))
-#define alloc_bootmem_pages_limit(x, limit) \
-	__alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_pages_limit(x, limit) \
-	__alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit))
-
+	__alloc_bootmem_low((x), PAGE_SIZE, 0)
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 extern unsigned long __init free_all_bootmem (void);
-
+extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
 extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
 extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
 extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
 extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
-extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages_node(pgdat, x) \
-	__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
-
-#define alloc_bootmem_node_limit(pgdat, x, limit) \
-	__alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_pages_node_limit(pgdat, x, limit) \
-	__alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit) \
-	__alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit))
-
+	__alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0)
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
@@ -123,15 +111,5 @@ extern void *__init alloc_large_system_hash(const char *tablename,
 #endif
 extern int __initdata hashdist;	/* Distribute hashes across NUMA nodes? */
 
-static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
-{
-	return __alloc_bootmem_limit(size, align, goal, 0);
-}
-
-static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align,
-					  unsigned long goal)
-{
-	return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0);
-}
 
 #endif /* _LINUX_BOOTMEM_H */
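With the `*_limit` variants gone, the low-memory allocators become distinct entry points rather than `__alloc_bootmem()` calls with a zero goal. A small sketch of what the public macros now expand to — the caller is hypothetical:

```c
#include <linux/bootmem.h>

void __init mydrv_boot_alloc(void)	/* hypothetical boot-time caller */
{
	/* cache-line aligned, goal above MAX_DMA_ADDRESS:
	 * __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) */
	void *table = alloc_bootmem(4096);

	/* page aligned and forced into low memory: after this patch it
	 * expands to __alloc_bootmem_low(x, PAGE_SIZE, 0) */
	void *dma_buf = alloc_bootmem_low_pages(8192);

	(void)table;
	(void)dma_buf;
}
```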
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
new file mode 100644
index 000000000000..acffb8c9073a
--- /dev/null
+++ b/include/linux/configfs.h
@@ -0,0 +1,205 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs.h - definitions for the device driver filesystem
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * 	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ *      Copyright (c) 2002-2003 Patrick Mochel
+ *      Copyright (c) 2002-2003 Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * Please read Documentation/filesystems/configfs.txt before using the
+ * configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_H_
+#define _CONFIGFS_H_
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+#define CONFIGFS_ITEM_NAME_LEN	20
+
+struct module;
+
+struct configfs_item_operations;
+struct configfs_group_operations;
+struct configfs_attribute;
+struct configfs_subsystem;
+
+struct config_item {
+	char			*ci_name;
+	char			ci_namebuf[CONFIGFS_ITEM_NAME_LEN];
+	struct kref		ci_kref;
+	struct list_head	ci_entry;
+	struct config_item	*ci_parent;
+	struct config_group	*ci_group;
+	struct config_item_type	*ci_type;
+	struct dentry		*ci_dentry;
+};
+
+extern int config_item_set_name(struct config_item *, const char *, ...);
+
+static inline char *config_item_name(struct config_item * item)
+{
+	return item->ci_name;
+}
+
+extern void config_item_init(struct config_item *);
+extern void config_item_init_type_name(struct config_item *item,
+				       const char *name,
+				       struct config_item_type *type);
+extern void config_item_cleanup(struct config_item *);
+
+extern struct config_item * config_item_get(struct config_item *);
+extern void config_item_put(struct config_item *);
+
+struct config_item_type {
+	struct module				*ct_owner;
+	struct configfs_item_operations		*ct_item_ops;
+	struct configfs_group_operations	*ct_group_ops;
+	struct configfs_attribute		**ct_attrs;
+};
+
+
+/**
+ *	group - a group of config_items of a specific type, belonging
+ *	to a specific subsystem.
+ */
+
+struct config_group {
+	struct config_item	cg_item;
+	struct list_head	cg_children;
+	struct configfs_subsystem	*cg_subsys;
+	struct config_group	**default_groups;
+};
+
+
+extern void config_group_init(struct config_group *group);
+extern void config_group_init_type_name(struct config_group *group,
+					const char *name,
+					struct config_item_type *type);
+
+
+static inline struct config_group *to_config_group(struct config_item *item)
+{
+	return item ? container_of(item,struct config_group,cg_item) : NULL;
+}
+
+static inline struct config_group *config_group_get(struct config_group *group)
+{
+	return group ? to_config_group(config_item_get(&group->cg_item)) : NULL;
+}
+
+static inline void config_group_put(struct config_group *group)
+{
+	config_item_put(&group->cg_item);
+}
+
+extern struct config_item *config_group_find_obj(struct config_group *, const char *);
+
+
+struct configfs_attribute {
+	char			*ca_name;
+	struct module		*ca_owner;
+	mode_t			ca_mode;
+};
+
+
+/*
+ * If allow_link() exists, the item can symlink(2) out to other
+ * items.  If the item is a group, it may support mkdir(2).
+ * Groups supply one of make_group() and make_item().  If the
+ * group supports make_group(), one can create group children.  If it
+ * supports make_item(), one can create config_item children.  If it has
+ * default_groups on group->default_groups, it has automatically created
+ * group children.  default_groups may coexist alongside make_group() or
+ * make_item(), but if the group wishes to have only default_groups
+ * children (disallowing mkdir(2)), it need not provide either function.
+ * If the group has commit(), it supports pending and committed (active)
+ * items.
+ */
+struct configfs_item_operations {
+	void (*release)(struct config_item *);
+	ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *);
+	ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t);
+	int (*allow_link)(struct config_item *src, struct config_item *target);
+	int (*drop_link)(struct config_item *src, struct config_item *target);
+};
+
+struct configfs_group_operations {
+	struct config_item *(*make_item)(struct config_group *group, const char *name);
+	struct config_group *(*make_group)(struct config_group *group, const char *name);
+	int (*commit_item)(struct config_item *item);
+	void (*drop_item)(struct config_group *group, struct config_item *item);
+};
+
+
+
+/**
+ * Use these macros to make defining attributes easier. See include/linux/device.h
+ * for examples.
+ */
+
+#if 0
+#define __ATTR(_name,_mode,_show,_store) { \
+	.attr = {.ca_name = __stringify(_name), .ca_mode = _mode, .ca_owner = THIS_MODULE },	\
+	.show	= _show,	\
+	.store	= _store,	\
+}
+
+#define __ATTR_RO(_name) { \
+	.attr	= { .ca_name = __stringify(_name), .ca_mode = 0444, .ca_owner = THIS_MODULE },	\
+	.show	= _name##_show,	\
+}
+
+#define __ATTR_NULL { .attr = { .name = NULL } }
+
+#define attr_name(_attr) (_attr).attr.name
+#endif
+
+
+struct configfs_subsystem {
+	struct config_group	su_group;
+	struct semaphore	su_sem;
+};
+
+static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group)
+{
+	return group ?
+		container_of(group, struct configfs_subsystem, su_group) :
+		NULL;
+}
+
+int configfs_register_subsystem(struct configfs_subsystem *subsys);
+void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+
+#endif /* __KERNEL__ */
+
+#endif /* _CONFIGFS_H_ */
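As the header's comments suggest, a client embeds a `config_group` in a `configfs_subsystem`, gives it a `config_item_type`, and registers it. A hedged sketch of the minimal registration sequence — names are illustrative; see Documentation/filesystems/configfs.txt for the full contract:

```c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/configfs.h>

static struct configfs_item_operations example_item_ops = {
	.release = NULL,	/* a real client frees its item here */
};

static struct config_item_type example_type = {
	.ct_owner	= THIS_MODULE,
	.ct_item_ops	= &example_item_ops,
};

static struct configfs_subsystem example_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "example",	/* directory name */
			.ci_type	= &example_type,
		},
	},
};

static int __init example_init(void)
{
	config_group_init(&example_subsys.su_group);
	init_MUTEX(&example_subsys.su_sem);
	return configfs_register_subsystem(&example_subsys);
}

static void __exit example_exit(void)
{
	configfs_unregister_subsystem(&example_subsys);
}

module_init(example_init);
module_exit(example_exit);
```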
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index f5eb6b6cd109..fa75ba0d635e 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ typedef char ioctl_struct[308];
 #define DM_TARGET_MSG	 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	4
+#define DM_VERSION_MINOR	5
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2005-01-12)"
+#define DM_VERSION_EXTRA	"-ioctl (2005-10-04)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
@@ -301,8 +301,13 @@ typedef char ioctl_struct[308];
 #define DM_BUFFER_FULL_FLAG	(1 << 8) /* Out */
 
 /*
- * Set this to improve performance when you aren't going to use open_count
+ * Set this to improve performance when you aren't going to use open_count.
  */
 #define DM_SKIP_BDGET_FLAG	(1 << 9) /* In */
 
+/*
+ * Set this to avoid attempting to freeze any filesystem when suspending.
+ */
+#define DM_SKIP_LOCKFS_FLAG	(1 << 10) /* In */
+
 #endif /* _LINUX_DM_IOCTL_H */
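The new flag travels in the `flags` word of `struct dm_ioctl` (defined earlier in this header) alongside the existing bits. A hedged userspace sketch of a suspend that asks device-mapper not to freeze the filesystem first — illustrative only:

```c
#include <string.h>
#include <linux/dm-ioctl.h>

/* Illustrative: prepare the ioctl header for DM_DEV_SUSPEND while
 * skipping the filesystem freeze (lockfs) step. */
static void fill_suspend_hdr(struct dm_ioctl *io, const char *name)
{
	memset(io, 0, sizeof(*io));
	io->version[0] = DM_VERSION_MAJOR;
	io->version[1] = DM_VERSION_MINOR;
	io->version[2] = DM_VERSION_PATCHLEVEL;
	io->data_size = sizeof(*io);
	strncpy(io->name, name, sizeof(io->name) - 1);
	io->flags = DM_SUSPEND_FLAG | DM_SKIP_LOCKFS_FLAG;
}
```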
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a74c27e460ba..fb80fa44c4dd 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -130,6 +130,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
 #define ELEVATOR_INSERT_FRONT	1
 #define ELEVATOR_INSERT_BACK	2
 #define ELEVATOR_INSERT_SORT	3
+#define ELEVATOR_INSERT_REQUEUE	4
 
 /*
  * return values from elevator_may_queue_fn
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc35b6ac778d..115e72be25d0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -302,6 +302,37 @@ struct iattr {
  */
 #include <linux/quota.h>
 
+/**
+ * enum positive_aop_returns - aop return codes with specific semantics
+ *
+ * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
+ * 			    completed, that the page is still locked, and
+ * 			    should be considered active.  The VM uses this hint
+ * 			    to return the page to the active list -- it won't
+ * 			    be a candidate for writeback again in the near
+ * 			    future.  Other callers must be careful to unlock
+ * 			    the page if they get this return.  Returned by
+ * 			    writepage().
+ *
+ * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
+ * 			unlocked it and the page might have been truncated.
+ * 			The caller should back up to acquiring a new page and
+ * 			trying again.  The aop will be taking reasonable
+ * 			precautions not to livelock.  If the caller held a page
+ * 			reference, it should drop it before retrying.  Returned
+ * 			by readpage(), prepare_write(), and commit_write().
+ *
+ * address_space_operation functions return these large constants to indicate
+ * special semantics to the caller.  These are much larger than the bytes in a
+ * page to allow for functions that return the number of bytes operated on in a
+ * given page.
+ */
+
+enum positive_aop_returns {
+	AOP_WRITEPAGE_ACTIVATE	= 0x80000,
+	AOP_TRUNCATED_PAGE	= 0x80001,
+};
+
 /*
  * oh the beauties of C type declarations.
  */
@@ -1019,6 +1050,7 @@ struct inode_operations {
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
+	void (*truncate_range)(struct inode *, loff_t, loff_t);
 };
 
 struct seq_file;
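The `@AOP_TRUNCATED_PAGE` documentation implies a specific retry loop in every caller. A sketch of that pattern — the helper is hypothetical and uses only long-standing pagecache calls:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Sketch of the retry contract: on AOP_TRUNCATED_PAGE the aop has
 * already unlocked the page, so drop our reference and start over
 * with a freshly grabbed page. */
static int read_one_page(struct file *file, struct address_space *mapping,
			 pgoff_t index)
{
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);	/* locked, referenced */
	if (!page)
		return -ENOMEM;

	err = mapping->a_ops->readpage(file, page);
	if (err == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto repeat;
	}
	/* real code would wait for the read and release the page */
	return err;
}
```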
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index b76b558b03d4..528959c52f1b 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -14,7 +14,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 3
+#define FUSE_KERNEL_MINOR_VERSION 5
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -53,6 +53,9 @@ struct fuse_kstatfs {
 	__u64	ffree;
 	__u32	bsize;
 	__u32	namelen;
+	__u32	frsize;
+	__u32	padding;
+	__u32	spare[6];
 };
 
 #define FATTR_MODE	(1 << 0)
@@ -105,12 +108,8 @@ enum fuse_opcode {
 	FUSE_CREATE        = 35
 };
 
-/* Conservative buffer size for the client */
-#define FUSE_MAX_IN 8192
-
-#define FUSE_NAME_MAX 1024
-#define FUSE_SYMLINK_MAX 4096
-#define FUSE_XATTR_SIZE_MAX 4096
+/* The read buffer is required to be at least 8k, but may be much larger */
+#define FUSE_MIN_READ_BUFFER 8192
 
 struct fuse_entry_out {
 	__u64	nodeid;		/* Inode ID */
@@ -213,6 +212,8 @@ struct fuse_write_out {
 	__u32	padding;
 };
 
+#define FUSE_COMPAT_STATFS_SIZE 48
+
 struct fuse_statfs_out {
 	struct fuse_kstatfs st;
 };
@@ -243,9 +244,16 @@ struct fuse_access_in {
 	__u32	padding;
 };
 
-struct fuse_init_in_out {
+struct fuse_init_in {
+	__u32	major;
+	__u32	minor;
+};
+
+struct fuse_init_out {
 	__u32	major;
 	__u32	minor;
+	__u32	unused[3];
+	__u32	max_write;
 };
 
 struct fuse_in_header {
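Splitting `fuse_init_in_out` into separate request and reply structures lets the INIT reply grow independently of the request: the daemon can now advertise `max_write`, while `FUSE_COMPAT_STATFS_SIZE` plays the matching compatibility role for the enlarged statfs reply. A hedged sketch of the daemon side of the handshake — illustrative, not libfuse code:

```c
#include <stddef.h>
#include <string.h>
#include <linux/fuse.h>

/* Illustrative: answer the kernel's INIT request. The request carries
 * fuse_init_in; the reply is the (now larger) fuse_init_out. */
static void handle_init(const struct fuse_init_in *in,
			struct fuse_init_out *out, size_t read_buffer_size)
{
	memset(out, 0, sizeof(*out));
	out->major = FUSE_KERNEL_VERSION;
	out->minor = FUSE_KERNEL_MINOR_VERSION;

	/* the daemon's read buffer must be at least FUSE_MIN_READ_BUFFER;
	 * advertise a max_write that leaves headroom for request headers
	 * (4096 is an illustrative margin) */
	out->max_write = read_buffer_size - 4096;

	(void)in;	/* a real daemon range-checks in->major / in->minor */
}
```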
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1056717ee501..68d82ad6b17c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,7 @@ int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);
 int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(void);
+struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
 void free_huge_page(struct page *);
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
@@ -97,7 +97,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
 						do { } while (0)
-#define alloc_huge_page()			({ NULL; })
+#define alloc_huge_page(vma, addr)		({ NULL; })
 #define free_huge_page(p)			({ (void)(p); BUG(); })
 #define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
 
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index d79c8a4bc4f8..9ba806796667 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -30,6 +30,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>	/* work_struct */
+#include <linux/mempool.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>	/* Needed for MUTEX init macros */
@@ -38,6 +39,355 @@
 #define I2O_QUEUE_EMPTY		0xffffffff
 
 /*
+ *	Cache strategies
+ */
+
+/*	The NULL strategy leaves everything up to the controller. This tends to be a
+ *	pessimal but functional choice.
+ */
+#define CACHE_NULL		0
+/*	Prefetch data when reading. We continually attempt to load the next 32 sectors
+ *	into the controller cache.
+ */
+#define CACHE_PREFETCH		1
+/*	Prefetch data when reading. We sometimes attempt to load the next 32 sectors
+ *	into the controller cache. When an I/O is <= 8K we assume it's probably
+ *	not sequential and don't prefetch (default).
+ */
+#define CACHE_SMARTFETCH	2
+/*	Data is written to the cache and then out on to the disk. The I/O must be
+ *	physically on the medium before the write is acknowledged (default without
+ *	NVRAM)
+ */
+#define CACHE_WRITETHROUGH	17
+/*	Data is written to the cache and then out on to the disk. The controller
+ *	is permitted to write back the cache any way it wants. (default if battery
+ *	backed NVRAM is present). It can be useful to set this for swap regardless of
+ *	battery state.
+ */
+#define CACHE_WRITEBACK		18
+/*	Optimise for under powered controllers, especially on RAID1 and RAID0. We
+ *	write large I/O's directly to disk bypassing the cache to avoid the extra
+ *	memory copy hits. Small writes are writeback cached
+ */
+#define CACHE_SMARTBACK		19
+/*	Optimise for under powered controllers, especially on RAID1 and RAID0. We
+ *	write large I/O's directly to disk bypassing the cache to avoid the extra
+ *	memory copy hits. Small writes are writethrough cached. Suitable for devices
+ *	lacking battery backup
+ */
+#define CACHE_SMARTTHROUGH	20
+
+/*
+ *	Ioctl structures
+ */
+
+#define BLKI2OGRSTRAT	_IOR('2', 1, int)
+#define BLKI2OGWSTRAT	_IOR('2', 2, int)
+#define BLKI2OSRSTRAT	_IOW('2', 3, int)
+#define BLKI2OSWSTRAT	_IOW('2', 4, int)
+
+/*
+ *	I2O Function codes
+ */
+
+/*
+ *	Executive Class
+ */
+#define I2O_CMD_ADAPTER_ASSIGN		0xB3
+#define I2O_CMD_ADAPTER_READ		0xB2
+#define I2O_CMD_ADAPTER_RELEASE		0xB5
+#define I2O_CMD_BIOS_INFO_SET		0xA5
+#define I2O_CMD_BOOT_DEVICE_SET		0xA7
+#define I2O_CMD_CONFIG_VALIDATE		0xBB
+#define I2O_CMD_CONN_SETUP		0xCA
+#define I2O_CMD_DDM_DESTROY		0xB1
+#define I2O_CMD_DDM_ENABLE		0xD5
+#define I2O_CMD_DDM_QUIESCE		0xC7
+#define I2O_CMD_DDM_RESET		0xD9
+#define I2O_CMD_DDM_SUSPEND		0xAF
+#define I2O_CMD_DEVICE_ASSIGN		0xB7
+#define I2O_CMD_DEVICE_RELEASE		0xB9
+#define I2O_CMD_HRT_GET			0xA8
+#define I2O_CMD_ADAPTER_CLEAR		0xBE
+#define I2O_CMD_ADAPTER_CONNECT		0xC9
+#define I2O_CMD_ADAPTER_RESET		0xBD
+#define I2O_CMD_LCT_NOTIFY		0xA2
+#define I2O_CMD_OUTBOUND_INIT		0xA1
+#define I2O_CMD_PATH_ENABLE		0xD3
+#define I2O_CMD_PATH_QUIESCE		0xC5
+#define I2O_CMD_PATH_RESET		0xD7
+#define I2O_CMD_STATIC_MF_CREATE	0xDD
+#define I2O_CMD_STATIC_MF_RELEASE	0xDF
+#define I2O_CMD_STATUS_GET		0xA0
+#define I2O_CMD_SW_DOWNLOAD		0xA9
+#define I2O_CMD_SW_UPLOAD		0xAB
+#define I2O_CMD_SW_REMOVE		0xAD
+#define I2O_CMD_SYS_ENABLE		0xD1
+#define I2O_CMD_SYS_MODIFY		0xC1
+#define I2O_CMD_SYS_QUIESCE		0xC3
+#define I2O_CMD_SYS_TAB_SET		0xA3
+
+/*
+ *	Utility Class
+ */
+#define I2O_CMD_UTIL_NOP		0x00
+#define I2O_CMD_UTIL_ABORT		0x01
+#define I2O_CMD_UTIL_CLAIM		0x09
+#define I2O_CMD_UTIL_RELEASE		0x0B
+#define I2O_CMD_UTIL_PARAMS_GET		0x06
+#define I2O_CMD_UTIL_PARAMS_SET		0x05
+#define I2O_CMD_UTIL_EVT_REGISTER	0x13
+#define I2O_CMD_UTIL_EVT_ACK		0x14
+#define I2O_CMD_UTIL_CONFIG_DIALOG	0x10
+#define I2O_CMD_UTIL_DEVICE_RESERVE	0x0D
+#define I2O_CMD_UTIL_DEVICE_RELEASE	0x0F
+#define I2O_CMD_UTIL_LOCK		0x17
+#define I2O_CMD_UTIL_LOCK_RELEASE	0x19
+#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY	0x15
+
+/*
+ *	SCSI Host Bus Adapter Class
+ */
+#define I2O_CMD_SCSI_EXEC		0x81
+#define I2O_CMD_SCSI_ABORT		0x83
+#define I2O_CMD_SCSI_BUSRESET		0x27
+
+/*
+ *	Bus Adapter Class
+ */
+#define I2O_CMD_BUS_ADAPTER_RESET	0x85
+#define I2O_CMD_BUS_RESET		0x87
+#define I2O_CMD_BUS_SCAN		0x89
+#define I2O_CMD_BUS_QUIESCE		0x8b
+
+/*
+ *	Random Block Storage Class
+ */
+#define I2O_CMD_BLOCK_READ		0x30
+#define I2O_CMD_BLOCK_WRITE		0x31
+#define I2O_CMD_BLOCK_CFLUSH		0x37
+#define I2O_CMD_BLOCK_MLOCK		0x49
+#define I2O_CMD_BLOCK_MUNLOCK		0x4B
+#define I2O_CMD_BLOCK_MMOUNT		0x41
+#define I2O_CMD_BLOCK_MEJECT		0x43
+#define I2O_CMD_BLOCK_POWER		0x70
+
+#define I2O_CMD_PRIVATE			0xFF
+
+/* Command status values */
+
+#define I2O_CMD_IN_PROGRESS	0x01
+#define I2O_CMD_REJECTED	0x02
+#define I2O_CMD_FAILED		0x03
+#define I2O_CMD_COMPLETED	0x04
+
+/* I2O API function return values */
+
+#define I2O_RTN_NO_ERROR		0
+#define I2O_RTN_NOT_INIT		1
+#define I2O_RTN_FREE_Q_EMPTY		2
+#define I2O_RTN_TCB_ERROR		3
+#define I2O_RTN_TRANSACTION_ERROR	4
+#define I2O_RTN_ADAPTER_ALREADY_INIT	5
+#define I2O_RTN_MALLOC_ERROR		6
+#define I2O_RTN_ADPTR_NOT_REGISTERED	7
+#define I2O_RTN_MSG_REPLY_TIMEOUT	8
+#define I2O_RTN_NO_STATUS		9
+#define I2O_RTN_NO_FIRM_VER		10
+#define I2O_RTN_NO_LINK_SPEED		11
+
+/* Reply message status defines for all messages */
+
+#define I2O_REPLY_STATUS_SUCCESS			0x00
+#define I2O_REPLY_STATUS_ABORT_DIRTY			0x01
+#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER		0x02
+#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER		0x03
+#define I2O_REPLY_STATUS_ERROR_DIRTY			0x04
+#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER		0x05
+#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER		0x06
+#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY		0x08
+#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER	0x09
+#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER	0x0A
+#define I2O_REPLY_STATUS_TRANSACTION_ERROR		0x0B
+#define I2O_REPLY_STATUS_PROGRESS_REPORT		0x80
+
+/* Status codes and Error Information for Parameter functions */
+
+#define I2O_PARAMS_STATUS_SUCCESS		0x00
+#define I2O_PARAMS_STATUS_BAD_KEY_ABORT		0x01
+#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE	0x02
+#define I2O_PARAMS_STATUS_BUFFER_FULL		0x03
+#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL	0x04
+#define I2O_PARAMS_STATUS_FIELD_UNREADABLE	0x05
+#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE	0x06
+#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS	0x07
+#define I2O_PARAMS_STATUS_INVALID_GROUP_ID	0x08
+#define I2O_PARAMS_STATUS_INVALID_OPERATION	0x09
+#define I2O_PARAMS_STATUS_NO_KEY_FIELD		0x0A
+#define I2O_PARAMS_STATUS_NO_SUCH_FIELD		0x0B
+#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP	0x0C
+#define I2O_PARAMS_STATUS_OPERATION_ERROR	0x0D
+#define I2O_PARAMS_STATUS_SCALAR_ERROR		0x0E
+#define I2O_PARAMS_STATUS_TABLE_ERROR		0x0F
+#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE	0x10
+
+/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
+ * messages: Table 3-2 Detailed Status Codes.*/
+
+#define I2O_DSC_SUCCESS				0x0000
+#define I2O_DSC_BAD_KEY				0x0002
+#define I2O_DSC_TCL_ERROR			0x0003
+#define I2O_DSC_REPLY_BUFFER_FULL		0x0004
+#define I2O_DSC_NO_SUCH_PAGE			0x0005
+#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT	0x0006
+#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD	0x0007
+#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE		0x0009
+#define I2O_DSC_UNSUPPORTED_FUNCTION		0x000A
+#define I2O_DSC_DEVICE_LOCKED			0x000B
+#define I2O_DSC_DEVICE_RESET			0x000C
+#define I2O_DSC_INAPPROPRIATE_FUNCTION		0x000D
+#define I2O_DSC_INVALID_INITIATOR_ADDRESS	0x000E
+#define I2O_DSC_INVALID_MESSAGE_FLAGS		0x000F
+#define I2O_DSC_INVALID_OFFSET			0x0010
+#define I2O_DSC_INVALID_PARAMETER		0x0011
+#define I2O_DSC_INVALID_REQUEST			0x0012
+#define I2O_DSC_INVALID_TARGET_ADDRESS		0x0013
+#define I2O_DSC_MESSAGE_TOO_LARGE		0x0014
+#define I2O_DSC_MESSAGE_TOO_SMALL		0x0015
+#define I2O_DSC_MISSING_PARAMETER		0x0016
+#define I2O_DSC_TIMEOUT				0x0017
+#define I2O_DSC_UNKNOWN_ERROR			0x0018
+#define I2O_DSC_UNKNOWN_FUNCTION		0x0019
+#define I2O_DSC_UNSUPPORTED_VERSION		0x001A
+#define I2O_DSC_DEVICE_BUSY			0x001B
+#define I2O_DSC_DEVICE_NOT_AVAILABLE		0x001C
+
+/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
+   Status Codes.*/
+
+#define I2O_BSA_DSC_SUCCESS		0x0000
+#define I2O_BSA_DSC_MEDIA_ERROR		0x0001
+#define I2O_BSA_DSC_ACCESS_ERROR	0x0002
+#define I2O_BSA_DSC_DEVICE_FAILURE	0x0003
+#define I2O_BSA_DSC_DEVICE_NOT_READY	0x0004
+#define I2O_BSA_DSC_MEDIA_NOT_PRESENT	0x0005
+#define I2O_BSA_DSC_MEDIA_LOCKED	0x0006
+#define I2O_BSA_DSC_MEDIA_FAILURE	0x0007
+#define I2O_BSA_DSC_PROTOCOL_FAILURE	0x0008
+#define I2O_BSA_DSC_BUS_FAILURE		0x0009
+#define I2O_BSA_DSC_ACCESS_VIOLATION	0x000A
+#define I2O_BSA_DSC_WRITE_PROTECTED	0x000B
+#define I2O_BSA_DSC_DEVICE_RESET	0x000C
+#define I2O_BSA_DSC_VOLUME_CHANGED	0x000D
+#define I2O_BSA_DSC_TIMEOUT		0x000E
+
+/* FailureStatusCodes, Table 3-3 Message Failure Codes */
+
+#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED		0x81
+#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED		0x82
+#define I2O_FSC_TRANSPORT_CONGESTION			0x83
+#define I2O_FSC_TRANSPORT_FAILURE			0x84
+#define I2O_FSC_TRANSPORT_STATE_ERROR			0x85
+#define I2O_FSC_TRANSPORT_TIME_OUT			0x86
+#define I2O_FSC_TRANSPORT_ROUTING_FAILURE		0x87
+#define I2O_FSC_TRANSPORT_INVALID_VERSION		0x88
+#define I2O_FSC_TRANSPORT_INVALID_OFFSET		0x89
+#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS		0x8A
+#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL		0x8B
+#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE		0x8C
+#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID		0x8D
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID		0x8E
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT	0x8F
+#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE		0xFF
+
+/* Device Claim Types */
+#define I2O_CLAIM_PRIMARY	0x01000000
+#define I2O_CLAIM_MANAGEMENT	0x02000000
+#define I2O_CLAIM_AUTHORIZED	0x03000000
+#define I2O_CLAIM_SECONDARY	0x04000000
+
+/* Message header defines for VersionOffset */
+#define I2OVER15	0x0001
+#define I2OVER20	0x0002
+
+/* Default is 1.5 */
+#define I2OVERSION	I2OVER15
+
+#define SGL_OFFSET_0	I2OVERSION
+#define SGL_OFFSET_4	(0x0040 | I2OVERSION)
+#define SGL_OFFSET_5	(0x0050 | I2OVERSION)
+#define SGL_OFFSET_6	(0x0060 | I2OVERSION)
+#define SGL_OFFSET_7	(0x0070 | I2OVERSION)
+#define SGL_OFFSET_8	(0x0080 | I2OVERSION)
+#define SGL_OFFSET_9	(0x0090 | I2OVERSION)
+#define SGL_OFFSET_10	(0x00A0 | I2OVERSION)
+#define SGL_OFFSET_11	(0x00B0 | I2OVERSION)
+#define SGL_OFFSET_12	(0x00C0 | I2OVERSION)
+#define SGL_OFFSET(x)	(((x)<<4) | I2OVERSION)
+
+/* Transaction Reply Lists (TRL) Control Word structure */
+#define TRL_SINGLE_FIXED_LENGTH		0x00
+#define TRL_SINGLE_VARIABLE_LENGTH	0x40
+#define TRL_MULTIPLE_FIXED_LENGTH	0x80
+
+/* msg header defines for MsgFlags */
+#define MSG_STATIC	0x0100
+#define MSG_64BIT_CNTXT	0x0200
+#define MSG_MULTI_TRANS	0x1000
+#define MSG_FAIL	0x2000
+#define MSG_FINAL	0x4000
+#define MSG_REPLY	0x8000
+
+/* minimum size msg */
+#define THREE_WORD_MSG_SIZE	0x00030000
+#define FOUR_WORD_MSG_SIZE	0x00040000
+#define FIVE_WORD_MSG_SIZE	0x00050000
+#define SIX_WORD_MSG_SIZE	0x00060000
+#define SEVEN_WORD_MSG_SIZE	0x00070000
+#define EIGHT_WORD_MSG_SIZE	0x00080000
+#define NINE_WORD_MSG_SIZE	0x00090000
+#define TEN_WORD_MSG_SIZE	0x000A0000
+#define ELEVEN_WORD_MSG_SIZE	0x000B0000
+#define I2O_MESSAGE_SIZE(x)	((x)<<16)
+
+/* special TID assignments */
+#define ADAPTER_TID	0
+#define HOST_TID	1
+
+/* outbound queue defines */
+#define I2O_MAX_OUTBOUND_MSG_FRAMES	128
+#define I2O_OUTBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */
+
+/* inbound queue definitions */
+#define I2O_MSG_INPOOL_MIN		32
+#define I2O_INBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */
+
+#define I2O_POST_WAIT_OK	0
+#define I2O_POST_WAIT_TIMEOUT	-ETIMEDOUT
+
+#define I2O_CONTEXT_LIST_MIN_LENGTH	15
+#define I2O_CONTEXT_LIST_USED		0x01
+#define I2O_CONTEXT_LIST_DELETED	0x02
+
+/* timeouts */
+#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE	15
+#define I2O_TIMEOUT_MESSAGE_GET		5
+#define I2O_TIMEOUT_RESET		30
+#define I2O_TIMEOUT_STATUS_GET		5
+#define I2O_TIMEOUT_LCT_GET		360
+#define I2O_TIMEOUT_SCSI_SCB_ABORT	240
+
+/* retries */
+#define I2O_HRT_GET_TRIES	3
+#define I2O_LCT_GET_TRIES	3
+
+/* defines for max_sectors and max_phys_segments */
+#define I2O_MAX_SECTORS		1024
+#define I2O_MAX_SECTORS_LIMITED	128
+#define I2O_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
+
+/*
  *	Message structures
  */
 struct i2o_message {
@@ -58,6 +408,12 @@ struct i2o_message { | |||
58 | u32 body[0]; | 408 | u32 body[0]; |
59 | }; | 409 | }; |
60 | 410 | ||
411 | /* MFA and I2O message used by mempool */ | ||
412 | struct i2o_msg_mfa { | ||
413 | u32 mfa; /* MFA returned by the controller */ | ||
414 | struct i2o_message msg; /* I2O message */ | ||
415 | }; | ||
416 | |||
61 | /* | 417 | /* |
62 | * Each I2O device entity has one of these. There is one per device. | 418 | * Each I2O device entity has one of these. There is one per device. |
63 | */ | 419 | */ |
@@ -130,6 +486,15 @@ struct i2o_dma { | |||
130 | }; | 486 | }; |
131 | 487 | ||
132 | /* | 488 | /* |
489 | * Contains slab cache and mempool information | ||
490 | */ | ||
491 | struct i2o_pool { | ||
492 | char *name; | ||
493 | kmem_cache_t *slab; | ||
494 | mempool_t *mempool; | ||
495 | }; | ||
496 | |||
497 | /* | ||
133 | * Contains IO mapped address information | 498 | * Contains IO mapped address information |
134 | */ | 499 | */ |
135 | struct i2o_io { | 500 | struct i2o_io { |
@@ -174,8 +539,6 @@ struct i2o_controller { | |||
174 | void __iomem *irq_status; /* Interrupt status register address */ | 539 | void __iomem *irq_status; /* Interrupt status register address */ |
175 | void __iomem *irq_mask; /* Interrupt mask register address */ | 540 | void __iomem *irq_mask; /* Interrupt mask register address */ |
176 | 541 | ||
177 | /* Dynamic LCT related data */ | ||
178 | |||
179 | struct i2o_dma status; /* IOP status block */ | 542 | struct i2o_dma status; /* IOP status block */ |
180 | 543 | ||
181 | struct i2o_dma hrt; /* HW Resource Table */ | 544 | struct i2o_dma hrt; /* HW Resource Table */ |
@@ -188,6 +551,8 @@ struct i2o_controller { | |||
188 | struct i2o_io in_queue; /* inbound message queue Host->IOP */ | 551 | struct i2o_io in_queue; /* inbound message queue Host->IOP */ |
189 | struct i2o_dma out_queue; /* outbound message queue IOP->Host */ | 552 | struct i2o_dma out_queue; /* outbound message queue IOP->Host */ |
190 | 553 | ||
554 | struct i2o_pool in_msg; /* mempool for inbound messages */ | ||
555 | |||
191 | unsigned int battery:1; /* Has a battery backup */ | 556 | unsigned int battery:1; /* Has a battery backup */ |
192 | unsigned int io_alloc:1; /* An I/O resource was allocated */ | 557 | unsigned int io_alloc:1; /* An I/O resource was allocated */ |
193 | unsigned int mem_alloc:1; /* A memory resource was allocated */ | 558 | unsigned int mem_alloc:1; /* A memory resource was allocated */ |
@@ -196,7 +561,6 @@ struct i2o_controller { | |||
196 | struct resource mem_resource; /* Mem resource allocated to the IOP */ | 561 | struct resource mem_resource; /* Mem resource allocated to the IOP */ |
197 | 562 | ||
198 | struct device device; | 563 | struct device device; |
199 | struct class_device *classdev; /* I2O controller class device */ | ||
200 | struct i2o_device *exec; /* Executive */ | 564 | struct i2o_device *exec; /* Executive */ |
201 | #if BITS_PER_LONG == 64 | 565 | #if BITS_PER_LONG == 64 |
202 | spinlock_t context_list_lock; /* lock for context_list */ | 566 | spinlock_t context_list_lock; /* lock for context_list */ |
@@ -247,16 +611,13 @@ struct i2o_sys_tbl { | |||
247 | extern struct list_head i2o_controllers; | 611 | extern struct list_head i2o_controllers; |
248 | 612 | ||
249 | /* Message functions */ | 613 | /* Message functions */ |
250 | static inline u32 i2o_msg_get(struct i2o_controller *, | 614 | static inline struct i2o_message *i2o_msg_get(struct i2o_controller *); |
251 | struct i2o_message __iomem **); | 615 | extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int); |
252 | extern u32 i2o_msg_get_wait(struct i2o_controller *, | 616 | static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *); |
253 | struct i2o_message __iomem **, int); | 617 | static inline int i2o_msg_post_wait(struct i2o_controller *, |
254 | static inline void i2o_msg_post(struct i2o_controller *, u32); | 618 | struct i2o_message *, unsigned long); |
255 | static inline int i2o_msg_post_wait(struct i2o_controller *, u32, | 619 | extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *, |
256 | unsigned long); | 620 | unsigned long, struct i2o_dma *); |
257 | extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long, | ||
258 | struct i2o_dma *); | ||
259 | extern void i2o_msg_nop(struct i2o_controller *, u32); | ||
260 | static inline void i2o_flush_reply(struct i2o_controller *, u32); | 621 | static inline void i2o_flush_reply(struct i2o_controller *, u32); |
261 | 622 | ||
262 | /* IOP functions */ | 623 | /* IOP functions */ |
@@ -384,10 +745,10 @@ static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) | |||
384 | static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | 745 | static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, |
385 | size_t size, | 746 | size_t size, |
386 | enum dma_data_direction direction, | 747 | enum dma_data_direction direction, |
387 | u32 __iomem ** sg_ptr) | 748 | u32 ** sg_ptr) |
388 | { | 749 | { |
389 | u32 sg_flags; | 750 | u32 sg_flags; |
390 | u32 __iomem *mptr = *sg_ptr; | 751 | u32 *mptr = *sg_ptr; |
391 | dma_addr_t dma_addr; | 752 | dma_addr_t dma_addr; |
392 | 753 | ||
393 | switch (direction) { | 754 | switch (direction) { |
@@ -405,16 +766,16 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | |||
405 | if (!dma_mapping_error(dma_addr)) { | 766 | if (!dma_mapping_error(dma_addr)) { |
406 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 767 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 |
407 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | 768 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { |
408 | writel(0x7C020002, mptr++); | 769 | *mptr++ = cpu_to_le32(0x7C020002); |
409 | writel(PAGE_SIZE, mptr++); | 770 | *mptr++ = cpu_to_le32(PAGE_SIZE); |
410 | } | 771 | } |
411 | #endif | 772 | #endif |
412 | 773 | ||
413 | writel(sg_flags | size, mptr++); | 774 | *mptr++ = cpu_to_le32(sg_flags | size); |
414 | writel(i2o_dma_low(dma_addr), mptr++); | 775 | *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); |
415 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 776 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 |
416 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) | 777 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) |
417 | writel(i2o_dma_high(dma_addr), mptr++); | 778 | *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); |
418 | #endif | 779 | #endif |
419 | *sg_ptr = mptr; | 780 | *sg_ptr = mptr; |
420 | } | 781 | } |
@@ -439,10 +800,10 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | |||
439 | static inline int i2o_dma_map_sg(struct i2o_controller *c, | 800 | static inline int i2o_dma_map_sg(struct i2o_controller *c, |
440 | struct scatterlist *sg, int sg_count, | 801 | struct scatterlist *sg, int sg_count, |
441 | enum dma_data_direction direction, | 802 | enum dma_data_direction direction, |
442 | u32 __iomem ** sg_ptr) | 803 | u32 ** sg_ptr) |
443 | { | 804 | { |
444 | u32 sg_flags; | 805 | u32 sg_flags; |
445 | u32 __iomem *mptr = *sg_ptr; | 806 | u32 *mptr = *sg_ptr; |
446 | 807 | ||
447 | switch (direction) { | 808 | switch (direction) { |
448 | case DMA_TO_DEVICE: | 809 | case DMA_TO_DEVICE: |
@@ -461,19 +822,19 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c, | |||
461 | 822 | ||
462 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 823 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 |
463 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | 824 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { |
464 | writel(0x7C020002, mptr++); | 825 | *mptr++ = cpu_to_le32(0x7C020002); |
465 | writel(PAGE_SIZE, mptr++); | 826 | *mptr++ = cpu_to_le32(PAGE_SIZE); |
466 | } | 827 | } |
467 | #endif | 828 | #endif |
468 | 829 | ||
469 | while (sg_count-- > 0) { | 830 | while (sg_count-- > 0) { |
470 | if (!sg_count) | 831 | if (!sg_count) |
471 | sg_flags |= 0xC0000000; | 832 | sg_flags |= 0xC0000000; |
472 | writel(sg_flags | sg_dma_len(sg), mptr++); | 833 | *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); |
473 | writel(i2o_dma_low(sg_dma_address(sg)), mptr++); | 834 | *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); |
474 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | 835 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 |
475 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) | 836 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) |
476 | writel(i2o_dma_high(sg_dma_address(sg)), mptr++); | 837 | *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); |
477 | #endif | 838 | #endif |
478 | sg++; | 839 | sg++; |
479 | } | 840 | } |
@@ -563,6 +924,64 @@ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, | |||
563 | return 0; | 924 | return 0; |
564 | }; | 925 | }; |
565 | 926 | ||
927 | /* | ||
928 | * i2o_pool_alloc - Allocate an slab cache and mempool | ||
929 | * @mempool: pointer to struct i2o_pool to write data into. | ||
930 | * @name: name which is used to identify cache | ||
931 | * @size: size of each object | ||
932 | * @min_nr: minimum number of objects | ||
933 | * | ||
934 | * First allocates a slab cache with the given name and size, then | ||
935 | * allocates a mempool which uses that slab cache for allocation and freeing. | ||
936 | * | ||
937 | * Returns 0 on success or negative error code on failure. | ||
938 | */ | ||
939 | static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, | ||
940 | size_t size, int min_nr) | ||
941 | { | ||
942 | pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); | ||
943 | if (!pool->name) | ||
944 | goto exit; | ||
945 | strcpy(pool->name, name); | ||
946 | |||
947 | pool->slab = | ||
948 | kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL, | ||
949 | NULL); | ||
950 | if (!pool->slab) | ||
951 | goto free_name; | ||
952 | |||
953 | pool->mempool = | ||
954 | mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, | ||
955 | pool->slab); | ||
956 | if (!pool->mempool) | ||
957 | goto free_slab; | ||
958 | |||
959 | return 0; | ||
960 | |||
961 | free_slab: | ||
962 | kmem_cache_destroy(pool->slab); | ||
963 | |||
964 | free_name: | ||
965 | kfree(pool->name); | ||
966 | |||
967 | exit: | ||
968 | return -ENOMEM; | ||
969 | }; | ||
970 | |||
971 | /* | ||
972 | * i2o_pool_free - Free the slab cache and mempool | ||
973 | * @pool: pointer to the struct i2o_pool which should be freed | ||
974 | * | ||
975 | * Note that all objects must be returned to the mempool before | ||
976 | * i2o_pool_free() is called. | ||
977 | */ | ||
978 | static inline void i2o_pool_free(struct i2o_pool *pool) | ||
979 | { | ||
980 | mempool_destroy(pool->mempool); | ||
981 | kmem_cache_destroy(pool->slab); | ||
982 | kfree(pool->name); | ||
983 | }; | ||
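As a usage sketch of the two helpers above (the pool name is illustrative; the size uses the inbound-queue defines from the top of this file, multiplied by four because they count 32-bit words):

	struct i2o_pool pool;
	struct i2o_message *msg;

	if (i2o_pool_alloc(&pool, "i2o_example", I2O_INBOUND_MSG_FRAME_SIZE * 4,
			   I2O_MSG_INPOOL_MIN))
		return -ENOMEM;

	msg = mempool_alloc(pool.mempool, GFP_KERNEL);
	/* ... use the frame ... */
	mempool_free(msg, pool.mempool);

	i2o_pool_free(&pool);	/* only after every object is returned */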
984 | |||
566 | /* I2O driver (OSM) functions */ | 985 | /* I2O driver (OSM) functions */ |
567 | extern int i2o_driver_register(struct i2o_driver *); | 986 | extern int i2o_driver_register(struct i2o_driver *); |
568 | extern void i2o_driver_unregister(struct i2o_driver *); | 987 | extern void i2o_driver_unregister(struct i2o_driver *); |
@@ -638,39 +1057,89 @@ extern int i2o_exec_lct_get(struct i2o_controller *); | |||
638 | #define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) | 1057 | #define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) |
639 | 1058 | ||
640 | /** | 1059 | /** |
1060 | * i2o_msg_out_to_virt - Turn an I2O message into a virtual address | ||
1061 | * @c: controller | ||
1062 | * @m: message engine value | ||
1063 | * | ||
1064 | * Turn a receive message from an I2O controller bus address into | ||
1065 | * a Linux virtual address. The shared page frame is a linear block | ||
1066 | * so we simply have to shift the offset. This function does not | ||
1067 | * work for sender side messages as they are ioremap objects | ||
1068 | * provided by the I2O controller. | ||
1069 | */ | ||
1070 | static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, | ||
1071 | u32 m) | ||
1072 | { | ||
1073 | BUG_ON(m < c->out_queue.phys | ||
1074 | || m >= c->out_queue.phys + c->out_queue.len); | ||
1075 | |||
1076 | return c->out_queue.virt + (m - c->out_queue.phys); | ||
1077 | }; | ||
1078 | |||
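A hedged sketch of the receive path this helper serves, modelled on the i2o_flush_reply() usage later in this hunk; reading the MFA from c->out_port in the interrupt path is an assumption here:

	u32 m = readl(c->out_port);

	if (m != I2O_QUEUE_EMPTY) {
		struct i2o_message *msg = i2o_msg_out_to_virt(c, m);

		/* ... dispatch the reply to the interested OSM ... */

		i2o_flush_reply(c, m);	/* return the frame to the IOP */
	}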
1079 | /** | ||
1080 | * i2o_msg_in_to_virt - Turn an I2O message into a virtual address | ||
1081 | * @c: controller | ||
1082 | * @m: message engine value | ||
1083 | * | ||
1084 | * Turn a send message from an I2O controller bus address into | ||
1085 | * a Linux virtual address. The shared page frame is a linear block | ||
1086 | * so we simply have to shift the offset. This function does not | ||
1087 | * work for receive side messages as they are kmalloc objects | ||
1088 | * in a different pool. | ||
1089 | */ | ||
1090 | static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct | ||
1091 | i2o_controller *c, | ||
1092 | u32 m) | ||
1093 | { | ||
1094 | return c->in_queue.virt + m; | ||
1095 | }; | ||
1096 | |||
1097 | /** | ||
641 | * i2o_msg_get - obtain an I2O message from the IOP | 1098 | * i2o_msg_get - obtain an I2O message from the IOP |
642 | * @c: I2O controller | 1099 | * @c: I2O controller |
643 | * @msg: pointer to a I2O message pointer | ||
644 | * | 1100 | * |
645 | * This function tries to get a message slot. If no message slot is | 1101 | * This function tries to get a message frame. If no message frame is |
646 | * available, it does not wait until one is available (see also i2o_msg_get_wait). | 1102 | * available, it does not wait until one is available (see also i2o_msg_get_wait). |
1103 | * The returned pointer to the message frame is not in I/O memory; it is | ||
1104 | * allocated from a mempool. But because an MFA is also allocated from | ||
1105 | * the controller, it is guaranteed that i2o_msg_post() will never fail. | ||
647 | * | 1106 | * |
648 | * On success the message is returned and the pointer to the message is | 1107 | * On success a pointer to the message frame is returned. If the message |
649 | * set in msg. The returned message is the physical page frame offset | 1108 | * queue is empty -EBUSY is returned, and if no memory is available |
650 | * address from the read port (see the i2o spec). If no message is | 1109 | * -ENOMEM is returned. |
651 | * available, I2O_QUEUE_EMPTY is returned and msg is left untouched. | ||
652 | */ | 1110 | */ |
653 | static inline u32 i2o_msg_get(struct i2o_controller *c, | 1111 | static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c) |
654 | struct i2o_message __iomem ** msg) | ||
655 | { | 1112 | { |
656 | u32 m = readl(c->in_port); | 1113 | struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC); |
657 | 1114 | if (!mmsg) | |
658 | if (m != I2O_QUEUE_EMPTY) | 1115 | return ERR_PTR(-ENOMEM); |
659 | *msg = c->in_queue.virt + m; | 1116 | |
1117 | mmsg->mfa = readl(c->in_port); | ||
1118 | if (mmsg->mfa == I2O_QUEUE_EMPTY) { | ||
1119 | mempool_free(mmsg, c->in_msg.mempool); | ||
1120 | return ERR_PTR(-EBUSY); | ||
1121 | } | ||
660 | 1122 | ||
661 | return m; | 1123 | return &mmsg->msg; |
662 | }; | 1124 | }; |
663 | 1125 | ||
664 | /** | 1126 | /** |
665 | * i2o_msg_post - Post I2O message to I2O controller | 1127 | * i2o_msg_post - Post I2O message to I2O controller |
666 | * @c: I2O controller to which the message should be sent | 1128 | * @c: I2O controller to which the message should be sent |
667 | * @m: the message identifier | 1129 | * @msg: message returned by i2o_msg_get() |
668 | * | 1130 | * |
669 | * Post the message to the I2O controller. | 1131 | * Post the message to the I2O controller and return immediately. |
670 | */ | 1132 | */ |
671 | static inline void i2o_msg_post(struct i2o_controller *c, u32 m) | 1133 | static inline void i2o_msg_post(struct i2o_controller *c, |
1134 | struct i2o_message *msg) | ||
672 | { | 1135 | { |
673 | writel(m, c->in_port); | 1136 | struct i2o_msg_mfa *mmsg; |
1137 | |||
1138 | mmsg = container_of(msg, struct i2o_msg_mfa, msg); | ||
1139 | memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg, | ||
1140 | (le32_to_cpu(msg->u.head[0]) >> 16) << 2); | ||
1141 | writel(mmsg->mfa, c->in_port); | ||
1142 | mempool_free(mmsg, c->in_msg.mempool); | ||
674 | }; | 1143 | }; |
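Putting the two functions together, a send path under the new mempool scheme might look like the following sketch; the STATUS_GET function code is only an example, and setup_failed stands in for whatever per-command preparation can go wrong:

	struct i2o_message *msg = i2o_msg_get(c);

	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* -EBUSY or -ENOMEM */

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
				     ADAPTER_TID);
	/* ... fill in the remaining header and body words ... */

	if (setup_failed) {		/* hypothetical error condition */
		i2o_msg_nop(c, msg);	/* defined further down in this hunk */
		return -EINVAL;
	}

	i2o_msg_post(c, msg);	/* copies the frame to the MFA and posts it */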
675 | 1144 | ||
676 | /** | 1145 | /** |
@@ -685,62 +1154,66 @@ static inline void i2o_msg_post(struct i2o_controller *c, u32 m) | |||
685 | * | 1154 | * |
686 | * Returns 0 on success or negative error code on failure. | 1155 | * Returns 0 on success or negative error code on failure. |
687 | */ | 1156 | */ |
688 | static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m, | 1157 | static inline int i2o_msg_post_wait(struct i2o_controller *c, |
1158 | struct i2o_message *msg, | ||
689 | unsigned long timeout) | 1159 | unsigned long timeout) |
690 | { | 1160 | { |
691 | return i2o_msg_post_wait_mem(c, m, timeout, NULL); | 1161 | return i2o_msg_post_wait_mem(c, msg, timeout, NULL); |
692 | }; | 1162 | }; |
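For a synchronous call, the same message from the previous sketch would instead be handed to i2o_msg_post_wait() with one of the timeout defines from the top of the file (which appear to be in seconds):

	int rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_STATUS_GET);

	if (rc)
		return rc;	/* e.g. I2O_POST_WAIT_TIMEOUT */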
693 | 1163 | ||
694 | /** | 1164 | /** |
695 | * i2o_flush_reply - Flush reply from I2O controller | 1165 | * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller |
696 | * @c: I2O controller | 1166 | * @c: I2O controller from which the MFA was fetched |
697 | * @m: the message identifier | 1167 | * @mfa: MFA which should be returned |
698 | * | 1168 | * |
699 | * The I2O controller must be informed that the reply message is not needed | 1169 | * This function must be used for preserved messages, because i2o_msg_nop() |
700 | * anymore. If you forget to flush the reply, the message frame can't be | 1170 | * also returns the allocated memory back to the msg_pool mempool. |
701 | * used by the controller anymore and is therefore lost. | ||
702 | */ | 1171 | */ |
703 | static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) | 1172 | static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa) |
704 | { | 1173 | { |
705 | writel(m, c->out_port); | 1174 | struct i2o_message __iomem *msg; |
1175 | u32 nop[3] = { | ||
1176 | THREE_WORD_MSG_SIZE | SGL_OFFSET_0, | ||
1177 | I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
1178 | 0x00000000 | ||
1179 | }; | ||
1180 | |||
1181 | msg = i2o_msg_in_to_virt(c, mfa); | ||
1182 | memcpy_toio(msg, nop, sizeof(nop)); | ||
1183 | writel(mfa, c->in_port); | ||
706 | }; | 1184 | }; |
707 | 1185 | ||
708 | /** | 1186 | /** |
709 | * i2o_out_to_virt - Turn an I2O message to a virtual address | 1187 | * i2o_msg_nop - Returns a message which is not used |
710 | * @c: controller | 1188 | * @c: I2O controller from which the message was created |
711 | * @m: message engine value | 1189 | * @msg: message which should be returned |
712 | * | 1190 | * |
713 | * Turn a receive message from an I2O controller bus address into | 1191 | * If you fetch a message via i2o_msg_get() and can't use it, you must |
714 | * a Linux virtual address. The shared page frame is a linear block | 1192 | * return the message with this function. Otherwise the MFA is lost, as |
715 | * so we simply have to shift the offset. This function does not | 1193 | * is the memory allocated from the mempool. |
716 | * work for sender side messages as they are ioremap objects | ||
717 | * provided by the I2O controller. | ||
718 | */ | 1194 | */ |
719 | static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, | 1195 | static inline void i2o_msg_nop(struct i2o_controller *c, |
720 | u32 m) | 1196 | struct i2o_message *msg) |
721 | { | 1197 | { |
722 | BUG_ON(m < c->out_queue.phys | 1198 | struct i2o_msg_mfa *mmsg; |
723 | || m >= c->out_queue.phys + c->out_queue.len); | 1199 | mmsg = container_of(msg, struct i2o_msg_mfa, msg); |
724 | 1200 | ||
725 | return c->out_queue.virt + (m - c->out_queue.phys); | 1201 | i2o_msg_nop_mfa(c, mmsg->mfa); |
1202 | mempool_free(mmsg, c->in_msg.mempool); | ||
726 | }; | 1203 | }; |
727 | 1204 | ||
728 | /** | 1205 | /** |
729 | * i2o_msg_in_to_virt - Turn an I2O message to a virtual address | 1206 | * i2o_flush_reply - Flush reply from I2O controller |
730 | * @c: controller | 1207 | * @c: I2O controller |
731 | * @m: message engine value | 1208 | * @m: the message identifier |
732 | * | 1209 | * |
733 | * Turn a send message from an I2O controller bus address into | 1210 | * The I2O controller must be informed that the reply message is not needed |
734 | * a Linux virtual address. The shared page frame is a linear block | 1211 | * anymore. If you forget to flush the reply, the message frame can't be |
735 | * so we simply have to shift the offset. This function does not | 1212 | * used by the controller anymore and is therefore lost. |
736 | * work for receive side messages as they are kmalloc objects | ||
737 | * in a different pool. | ||
738 | */ | 1213 | */ |
739 | static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct | 1214 | static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) |
740 | i2o_controller *c, | ||
741 | u32 m) | ||
742 | { | 1215 | { |
743 | return c->in_queue.virt + m; | 1216 | writel(m, c->out_port); |
744 | }; | 1217 | }; |
745 | 1218 | ||
746 | /* | 1219 | /* |
@@ -779,350 +1252,5 @@ extern void i2o_dump_message(struct i2o_message *); | |||
779 | extern void i2o_dump_hrt(struct i2o_controller *c); | 1252 | extern void i2o_dump_hrt(struct i2o_controller *c); |
780 | extern void i2o_debug_state(struct i2o_controller *c); | 1253 | extern void i2o_debug_state(struct i2o_controller *c); |
781 | 1254 | ||
782 | /* | ||
783 | * Cache strategies | ||
784 | */ | ||
785 | |||
786 | /* The NULL strategy leaves everything up to the controller. This tends to be a | ||
787 | * pessimal but functional choice. | ||
788 | */ | ||
789 | #define CACHE_NULL 0 | ||
790 | /* Prefetch data when reading. We continually attempt to load the next 32 sectors | ||
791 | * into the controller cache. | ||
792 | */ | ||
793 | #define CACHE_PREFETCH 1 | ||
794 | /* Prefetch data when reading. We sometimes attempt to load the next 32 sectors | ||
795 | * into the controller cache. When an I/O is <= 8K we assume it's probably | ||
796 | * not sequential and don't prefetch (default) | ||
797 | */ | ||
798 | #define CACHE_SMARTFETCH 2 | ||
799 | /* Data is written to the cache and then out on to the disk. The I/O must be | ||
800 | * physically on the medium before the write is acknowledged (default without | ||
801 | * NVRAM) | ||
802 | */ | ||
803 | #define CACHE_WRITETHROUGH 17 | ||
804 | /* Data is written to the cache and then out on to the disk. The controller | ||
805 | * is permitted to write back the cache any way it wants (default if battery- | ||
806 | * backed NVRAM is present). It can be useful to set this for swap regardless of | ||
807 | * battery state. | ||
808 | */ | ||
809 | #define CACHE_WRITEBACK 18 | ||
810 | /* Optimise for underpowered controllers, especially on RAID1 and RAID0. We | ||
811 | * write large I/Os directly to disk, bypassing the cache, to avoid the extra | ||
812 | * memory copy hits. Small writes are writeback cached. | ||
813 | */ | ||
814 | #define CACHE_SMARTBACK 19 | ||
815 | /* Optimise for underpowered controllers, especially on RAID1 and RAID0. We | ||
816 | * write large I/Os directly to disk, bypassing the cache, to avoid the extra | ||
817 | * memory copy hits. Small writes are writethrough cached. Suitable for devices | ||
818 | * lacking battery backup. | ||
819 | */ | ||
820 | #define CACHE_SMARTTHROUGH 20 | ||
821 | |||
822 | /* | ||
823 | * Ioctl structures | ||
824 | */ | ||
825 | |||
826 | #define BLKI2OGRSTRAT _IOR('2', 1, int) | ||
827 | #define BLKI2OGWSTRAT _IOR('2', 2, int) | ||
828 | #define BLKI2OSRSTRAT _IOW('2', 3, int) | ||
829 | #define BLKI2OSWSTRAT _IOW('2', 4, int) | ||
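These four ioctls exposed the strategies above to userspace; a sketch of how the interface being removed here would have been driven from a tool (assuming <sys/ioctl.h> and <stdio.h>, with fd an open I2O block device):

	int strategy = CACHE_WRITEBACK;

	if (ioctl(fd, BLKI2OSWSTRAT, &strategy) < 0)
		perror("BLKI2OSWSTRAT");

	if (ioctl(fd, BLKI2OGWSTRAT, &strategy) == 0)
		printf("write strategy: %d\n", strategy);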
830 | |||
831 | /* | ||
832 | * I2O Function codes | ||
833 | */ | ||
834 | |||
835 | /* | ||
836 | * Executive Class | ||
837 | */ | ||
838 | #define I2O_CMD_ADAPTER_ASSIGN 0xB3 | ||
839 | #define I2O_CMD_ADAPTER_READ 0xB2 | ||
840 | #define I2O_CMD_ADAPTER_RELEASE 0xB5 | ||
841 | #define I2O_CMD_BIOS_INFO_SET 0xA5 | ||
842 | #define I2O_CMD_BOOT_DEVICE_SET 0xA7 | ||
843 | #define I2O_CMD_CONFIG_VALIDATE 0xBB | ||
844 | #define I2O_CMD_CONN_SETUP 0xCA | ||
845 | #define I2O_CMD_DDM_DESTROY 0xB1 | ||
846 | #define I2O_CMD_DDM_ENABLE 0xD5 | ||
847 | #define I2O_CMD_DDM_QUIESCE 0xC7 | ||
848 | #define I2O_CMD_DDM_RESET 0xD9 | ||
849 | #define I2O_CMD_DDM_SUSPEND 0xAF | ||
850 | #define I2O_CMD_DEVICE_ASSIGN 0xB7 | ||
851 | #define I2O_CMD_DEVICE_RELEASE 0xB9 | ||
852 | #define I2O_CMD_HRT_GET 0xA8 | ||
853 | #define I2O_CMD_ADAPTER_CLEAR 0xBE | ||
854 | #define I2O_CMD_ADAPTER_CONNECT 0xC9 | ||
855 | #define I2O_CMD_ADAPTER_RESET 0xBD | ||
856 | #define I2O_CMD_LCT_NOTIFY 0xA2 | ||
857 | #define I2O_CMD_OUTBOUND_INIT 0xA1 | ||
858 | #define I2O_CMD_PATH_ENABLE 0xD3 | ||
859 | #define I2O_CMD_PATH_QUIESCE 0xC5 | ||
860 | #define I2O_CMD_PATH_RESET 0xD7 | ||
861 | #define I2O_CMD_STATIC_MF_CREATE 0xDD | ||
862 | #define I2O_CMD_STATIC_MF_RELEASE 0xDF | ||
863 | #define I2O_CMD_STATUS_GET 0xA0 | ||
864 | #define I2O_CMD_SW_DOWNLOAD 0xA9 | ||
865 | #define I2O_CMD_SW_UPLOAD 0xAB | ||
866 | #define I2O_CMD_SW_REMOVE 0xAD | ||
867 | #define I2O_CMD_SYS_ENABLE 0xD1 | ||
868 | #define I2O_CMD_SYS_MODIFY 0xC1 | ||
869 | #define I2O_CMD_SYS_QUIESCE 0xC3 | ||
870 | #define I2O_CMD_SYS_TAB_SET 0xA3 | ||
871 | |||
872 | /* | ||
873 | * Utility Class | ||
874 | */ | ||
875 | #define I2O_CMD_UTIL_NOP 0x00 | ||
876 | #define I2O_CMD_UTIL_ABORT 0x01 | ||
877 | #define I2O_CMD_UTIL_CLAIM 0x09 | ||
878 | #define I2O_CMD_UTIL_RELEASE 0x0B | ||
879 | #define I2O_CMD_UTIL_PARAMS_GET 0x06 | ||
880 | #define I2O_CMD_UTIL_PARAMS_SET 0x05 | ||
881 | #define I2O_CMD_UTIL_EVT_REGISTER 0x13 | ||
882 | #define I2O_CMD_UTIL_EVT_ACK 0x14 | ||
883 | #define I2O_CMD_UTIL_CONFIG_DIALOG 0x10 | ||
884 | #define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D | ||
885 | #define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F | ||
886 | #define I2O_CMD_UTIL_LOCK 0x17 | ||
887 | #define I2O_CMD_UTIL_LOCK_RELEASE 0x19 | ||
888 | #define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15 | ||
889 | |||
890 | /* | ||
891 | * SCSI Host Bus Adapter Class | ||
892 | */ | ||
893 | #define I2O_CMD_SCSI_EXEC 0x81 | ||
894 | #define I2O_CMD_SCSI_ABORT 0x83 | ||
895 | #define I2O_CMD_SCSI_BUSRESET 0x27 | ||
896 | |||
897 | /* | ||
898 | * Bus Adapter Class | ||
899 | */ | ||
900 | #define I2O_CMD_BUS_ADAPTER_RESET 0x85 | ||
901 | #define I2O_CMD_BUS_RESET 0x87 | ||
902 | #define I2O_CMD_BUS_SCAN 0x89 | ||
903 | #define I2O_CMD_BUS_QUIESCE 0x8b | ||
904 | |||
905 | /* | ||
906 | * Random Block Storage Class | ||
907 | */ | ||
908 | #define I2O_CMD_BLOCK_READ 0x30 | ||
909 | #define I2O_CMD_BLOCK_WRITE 0x31 | ||
910 | #define I2O_CMD_BLOCK_CFLUSH 0x37 | ||
911 | #define I2O_CMD_BLOCK_MLOCK 0x49 | ||
912 | #define I2O_CMD_BLOCK_MUNLOCK 0x4B | ||
913 | #define I2O_CMD_BLOCK_MMOUNT 0x41 | ||
914 | #define I2O_CMD_BLOCK_MEJECT 0x43 | ||
915 | #define I2O_CMD_BLOCK_POWER 0x70 | ||
916 | |||
917 | #define I2O_CMD_PRIVATE 0xFF | ||
918 | |||
919 | /* Command status values */ | ||
920 | |||
921 | #define I2O_CMD_IN_PROGRESS 0x01 | ||
922 | #define I2O_CMD_REJECTED 0x02 | ||
923 | #define I2O_CMD_FAILED 0x03 | ||
924 | #define I2O_CMD_COMPLETED 0x04 | ||
925 | |||
926 | /* I2O API function return values */ | ||
927 | |||
928 | #define I2O_RTN_NO_ERROR 0 | ||
929 | #define I2O_RTN_NOT_INIT 1 | ||
930 | #define I2O_RTN_FREE_Q_EMPTY 2 | ||
931 | #define I2O_RTN_TCB_ERROR 3 | ||
932 | #define I2O_RTN_TRANSACTION_ERROR 4 | ||
933 | #define I2O_RTN_ADAPTER_ALREADY_INIT 5 | ||
934 | #define I2O_RTN_MALLOC_ERROR 6 | ||
935 | #define I2O_RTN_ADPTR_NOT_REGISTERED 7 | ||
936 | #define I2O_RTN_MSG_REPLY_TIMEOUT 8 | ||
937 | #define I2O_RTN_NO_STATUS 9 | ||
938 | #define I2O_RTN_NO_FIRM_VER 10 | ||
939 | #define I2O_RTN_NO_LINK_SPEED 11 | ||
940 | |||
941 | /* Reply message status defines for all messages */ | ||
942 | |||
943 | #define I2O_REPLY_STATUS_SUCCESS 0x00 | ||
944 | #define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 | ||
945 | #define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 | ||
946 | #define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 | ||
947 | #define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 | ||
948 | #define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 | ||
949 | #define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06 | ||
950 | #define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08 | ||
951 | #define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09 | ||
952 | #define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A | ||
953 | #define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B | ||
954 | #define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80 | ||
955 | |||
956 | /* Status codes and Error Information for Parameter functions */ | ||
957 | |||
958 | #define I2O_PARAMS_STATUS_SUCCESS 0x00 | ||
959 | #define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 | ||
960 | #define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 | ||
961 | #define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 | ||
962 | #define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 | ||
963 | #define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 | ||
964 | #define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06 | ||
965 | #define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07 | ||
966 | #define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08 | ||
967 | #define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09 | ||
968 | #define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A | ||
969 | #define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B | ||
970 | #define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C | ||
971 | #define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D | ||
972 | #define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E | ||
973 | #define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F | ||
974 | #define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10 | ||
975 | |||
976 | /* DetailedStatusCode defines for Executive, DDM, Util and Transaction error | ||
977 | * messages: Table 3-2 Detailed Status Codes.*/ | ||
978 | |||
979 | #define I2O_DSC_SUCCESS 0x0000 | ||
980 | #define I2O_DSC_BAD_KEY 0x0002 | ||
981 | #define I2O_DSC_TCL_ERROR 0x0003 | ||
982 | #define I2O_DSC_REPLY_BUFFER_FULL 0x0004 | ||
983 | #define I2O_DSC_NO_SUCH_PAGE 0x0005 | ||
984 | #define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006 | ||
985 | #define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007 | ||
986 | #define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009 | ||
987 | #define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A | ||
988 | #define I2O_DSC_DEVICE_LOCKED 0x000B | ||
989 | #define I2O_DSC_DEVICE_RESET 0x000C | ||
990 | #define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D | ||
991 | #define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E | ||
992 | #define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F | ||
993 | #define I2O_DSC_INVALID_OFFSET 0x0010 | ||
994 | #define I2O_DSC_INVALID_PARAMETER 0x0011 | ||
995 | #define I2O_DSC_INVALID_REQUEST 0x0012 | ||
996 | #define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013 | ||
997 | #define I2O_DSC_MESSAGE_TOO_LARGE 0x0014 | ||
998 | #define I2O_DSC_MESSAGE_TOO_SMALL 0x0015 | ||
999 | #define I2O_DSC_MISSING_PARAMETER 0x0016 | ||
1000 | #define I2O_DSC_TIMEOUT 0x0017 | ||
1001 | #define I2O_DSC_UNKNOWN_ERROR 0x0018 | ||
1002 | #define I2O_DSC_UNKNOWN_FUNCTION 0x0019 | ||
1003 | #define I2O_DSC_UNSUPPORTED_VERSION 0x001A | ||
1004 | #define I2O_DSC_DEVICE_BUSY 0x001B | ||
1005 | #define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C | ||
1006 | |||
1007 | /* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed | ||
1008 | Status Codes.*/ | ||
1009 | |||
1010 | #define I2O_BSA_DSC_SUCCESS 0x0000 | ||
1011 | #define I2O_BSA_DSC_MEDIA_ERROR 0x0001 | ||
1012 | #define I2O_BSA_DSC_ACCESS_ERROR 0x0002 | ||
1013 | #define I2O_BSA_DSC_DEVICE_FAILURE 0x0003 | ||
1014 | #define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004 | ||
1015 | #define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005 | ||
1016 | #define I2O_BSA_DSC_MEDIA_LOCKED 0x0006 | ||
1017 | #define I2O_BSA_DSC_MEDIA_FAILURE 0x0007 | ||
1018 | #define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008 | ||
1019 | #define I2O_BSA_DSC_BUS_FAILURE 0x0009 | ||
1020 | #define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A | ||
1021 | #define I2O_BSA_DSC_WRITE_PROTECTED 0x000B | ||
1022 | #define I2O_BSA_DSC_DEVICE_RESET 0x000C | ||
1023 | #define I2O_BSA_DSC_VOLUME_CHANGED 0x000D | ||
1024 | #define I2O_BSA_DSC_TIMEOUT 0x000E | ||
1025 | |||
1026 | /* FailureStatusCodes, Table 3-3 Message Failure Codes */ | ||
1027 | |||
1028 | #define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81 | ||
1029 | #define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82 | ||
1030 | #define I2O_FSC_TRANSPORT_CONGESTION 0x83 | ||
1031 | #define I2O_FSC_TRANSPORT_FAILURE 0x84 | ||
1032 | #define I2O_FSC_TRANSPORT_STATE_ERROR 0x85 | ||
1033 | #define I2O_FSC_TRANSPORT_TIME_OUT 0x86 | ||
1034 | #define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87 | ||
1035 | #define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88 | ||
1036 | #define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89 | ||
1037 | #define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A | ||
1038 | #define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B | ||
1039 | #define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C | ||
1040 | #define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D | ||
1041 | #define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E | ||
1042 | #define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F | ||
1043 | #define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF | ||
1044 | |||
1045 | /* Device Claim Types */ | ||
1046 | #define I2O_CLAIM_PRIMARY 0x01000000 | ||
1047 | #define I2O_CLAIM_MANAGEMENT 0x02000000 | ||
1048 | #define I2O_CLAIM_AUTHORIZED 0x03000000 | ||
1049 | #define I2O_CLAIM_SECONDARY 0x04000000 | ||
1050 | |||
1051 | /* Message header defines for VersionOffset */ | ||
1052 | #define I2OVER15 0x0001 | ||
1053 | #define I2OVER20 0x0002 | ||
1054 | |||
1055 | /* Default is 1.5 */ | ||
1056 | #define I2OVERSION I2OVER15 | ||
1057 | |||
1058 | #define SGL_OFFSET_0 I2OVERSION | ||
1059 | #define SGL_OFFSET_4 (0x0040 | I2OVERSION) | ||
1060 | #define SGL_OFFSET_5 (0x0050 | I2OVERSION) | ||
1061 | #define SGL_OFFSET_6 (0x0060 | I2OVERSION) | ||
1062 | #define SGL_OFFSET_7 (0x0070 | I2OVERSION) | ||
1063 | #define SGL_OFFSET_8 (0x0080 | I2OVERSION) | ||
1064 | #define SGL_OFFSET_9 (0x0090 | I2OVERSION) | ||
1065 | #define SGL_OFFSET_10 (0x00A0 | I2OVERSION) | ||
1066 | #define SGL_OFFSET_11 (0x00B0 | I2OVERSION) | ||
1067 | #define SGL_OFFSET_12 (0x00C0 | I2OVERSION) | ||
1068 | #define SGL_OFFSET(x) (((x)<<4) | I2OVERSION) | ||
1069 | |||
1070 | /* Transaction Reply Lists (TRL) Control Word structure */ | ||
1071 | #define TRL_SINGLE_FIXED_LENGTH 0x00 | ||
1072 | #define TRL_SINGLE_VARIABLE_LENGTH 0x40 | ||
1073 | #define TRL_MULTIPLE_FIXED_LENGTH 0x80 | ||
1074 | |||
1075 | /* msg header defines for MsgFlags */ | ||
1076 | #define MSG_STATIC 0x0100 | ||
1077 | #define MSG_64BIT_CNTXT 0x0200 | ||
1078 | #define MSG_MULTI_TRANS 0x1000 | ||
1079 | #define MSG_FAIL 0x2000 | ||
1080 | #define MSG_FINAL 0x4000 | ||
1081 | #define MSG_REPLY 0x8000 | ||
1082 | |||
1083 | /* minimum size msg */ | ||
1084 | #define THREE_WORD_MSG_SIZE 0x00030000 | ||
1085 | #define FOUR_WORD_MSG_SIZE 0x00040000 | ||
1086 | #define FIVE_WORD_MSG_SIZE 0x00050000 | ||
1087 | #define SIX_WORD_MSG_SIZE 0x00060000 | ||
1088 | #define SEVEN_WORD_MSG_SIZE 0x00070000 | ||
1089 | #define EIGHT_WORD_MSG_SIZE 0x00080000 | ||
1090 | #define NINE_WORD_MSG_SIZE 0x00090000 | ||
1091 | #define TEN_WORD_MSG_SIZE 0x000A0000 | ||
1092 | #define ELEVEN_WORD_MSG_SIZE 0x000B0000 | ||
1093 | #define I2O_MESSAGE_SIZE(x) ((x)<<16) | ||
1094 | |||
1095 | /* special TID assignments */ | ||
1096 | #define ADAPTER_TID 0 | ||
1097 | #define HOST_TID 1 | ||
1098 | |||
1099 | /* outbound queue defines */ | ||
1100 | #define I2O_MAX_OUTBOUND_MSG_FRAMES 128 | ||
1101 | #define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */ | ||
1102 | |||
1103 | #define I2O_POST_WAIT_OK 0 | ||
1104 | #define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT | ||
1105 | |||
1106 | #define I2O_CONTEXT_LIST_MIN_LENGTH 15 | ||
1107 | #define I2O_CONTEXT_LIST_USED 0x01 | ||
1108 | #define I2O_CONTEXT_LIST_DELETED 0x02 | ||
1109 | |||
1110 | /* timeouts */ | ||
1111 | #define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15 | ||
1112 | #define I2O_TIMEOUT_MESSAGE_GET 5 | ||
1113 | #define I2O_TIMEOUT_RESET 30 | ||
1114 | #define I2O_TIMEOUT_STATUS_GET 5 | ||
1115 | #define I2O_TIMEOUT_LCT_GET 360 | ||
1116 | #define I2O_TIMEOUT_SCSI_SCB_ABORT 240 | ||
1117 | |||
1118 | /* retries */ | ||
1119 | #define I2O_HRT_GET_TRIES 3 | ||
1120 | #define I2O_LCT_GET_TRIES 3 | ||
1121 | |||
1122 | /* defines for max_sectors and max_phys_segments */ | ||
1123 | #define I2O_MAX_SECTORS 1024 | ||
1124 | #define I2O_MAX_SECTORS_LIMITED 256 | ||
1125 | #define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS | ||
1126 | |||
1127 | #endif /* __KERNEL__ */ | 1255 | #endif /* __KERNEL__ */ |
1128 | #endif /* _I2O_H */ | 1256 | #endif /* _I2O_H */ |
diff --git a/include/linux/irq.h b/include/linux/irq.h index f04ba20712a2..6c5d4c898ccb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #if !defined(CONFIG_ARCH_S390) | 15 | #if !defined(CONFIG_S390) |
16 | 16 | ||
17 | #include <linux/linkage.h> | 17 | #include <linux/linkage.h> |
18 | #include <linux/cache.h> | 18 | #include <linux/cache.h> |
@@ -221,6 +221,17 @@ extern void note_interrupt(unsigned int irq, irq_desc_t *desc, | |||
221 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); | 221 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
222 | 222 | ||
223 | extern void init_irq_proc(void); | 223 | extern void init_irq_proc(void); |
224 | |||
225 | #ifdef CONFIG_AUTO_IRQ_AFFINITY | ||
226 | extern int select_smp_affinity(unsigned int irq); | ||
227 | #else | ||
228 | static inline int | ||
229 | select_smp_affinity(unsigned int irq) | ||
230 | { | ||
231 | return 1; | ||
232 | } | ||
233 | #endif | ||
234 | |||
224 | #endif | 235 | #endif |
225 | 236 | ||
226 | extern hw_irq_controller no_irq_type; /* needed in every arch ? */ | 237 | extern hw_irq_controller no_irq_type; /* needed in every arch ? */ |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index dcde7adfdce5..558cb4c26ec9 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -498,6 +498,12 @@ struct transaction_s | |||
498 | struct journal_head *t_checkpoint_list; | 498 | struct journal_head *t_checkpoint_list; |
499 | 499 | ||
500 | /* | 500 | /* |
501 | * Doubly-linked circular list of all buffers submitted for IO while | ||
502 | * checkpointing. [j_list_lock] | ||
503 | */ | ||
504 | struct journal_head *t_checkpoint_io_list; | ||
505 | |||
506 | /* | ||
501 | * Doubly-linked circular list of temporary buffers currently undergoing | 507 | * Doubly-linked circular list of temporary buffers currently undergoing |
502 | * IO in the log [j_list_lock] | 508 | * IO in the log [j_list_lock] |
503 | */ | 509 | */ |
@@ -843,7 +849,7 @@ extern void journal_commit_transaction(journal_t *); | |||
843 | 849 | ||
844 | /* Checkpoint list management */ | 850 | /* Checkpoint list management */ |
845 | int __journal_clean_checkpoint_list(journal_t *journal); | 851 | int __journal_clean_checkpoint_list(journal_t *journal); |
846 | void __journal_remove_checkpoint(struct journal_head *); | 852 | int __journal_remove_checkpoint(struct journal_head *); |
847 | void __journal_insert_checkpoint(struct journal_head *, transaction_t *); | 853 | void __journal_insert_checkpoint(struct journal_head *, transaction_t *); |
848 | 854 | ||
849 | /* Buffer IO */ | 855 | /* Buffer IO */ |
diff --git a/include/linux/key.h b/include/linux/key.h index 53513a3be53b..4d189e51bc6c 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -193,14 +193,6 @@ struct key_type { | |||
193 | */ | 193 | */ |
194 | int (*instantiate)(struct key *key, const void *data, size_t datalen); | 194 | int (*instantiate)(struct key *key, const void *data, size_t datalen); |
195 | 195 | ||
196 | /* duplicate a key of this type (optional) | ||
197 | * - the source key will be locked against change | ||
198 | * - the new description will be attached | ||
199 | * - the quota will have been adjusted automatically from | ||
200 | * source->quotalen | ||
201 | */ | ||
202 | int (*duplicate)(struct key *key, const struct key *source); | ||
203 | |||
204 | /* update a key of this type (optional) | 196 | /* update a key of this type (optional) |
205 | * - this method should call key_payload_reserve() to recalculate the | 197 | * - this method should call key_payload_reserve() to recalculate the |
206 | * quota consumption | 198 | * quota consumption |
diff --git a/include/linux/libata.h b/include/linux/libata.h index e828e172ccbf..a43c95f8f968 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -124,6 +124,8 @@ enum { | |||
124 | ATA_FLAG_DEBUGMSG = (1 << 10), | 124 | ATA_FLAG_DEBUGMSG = (1 << 10), |
125 | ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */ | 125 | ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */ |
126 | 126 | ||
127 | ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ | ||
128 | |||
127 | ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ | 129 | ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ |
128 | ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ | 130 | ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ |
129 | ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ | 131 | ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ |
@@ -436,6 +438,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr); | |||
436 | extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | 438 | extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, |
437 | unsigned int n_ports); | 439 | unsigned int n_ports); |
438 | extern void ata_pci_remove_one (struct pci_dev *pdev); | 440 | extern void ata_pci_remove_one (struct pci_dev *pdev); |
441 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state); | ||
442 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
439 | #endif /* CONFIG_PCI */ | 443 | #endif /* CONFIG_PCI */ |
440 | extern int ata_device_add(const struct ata_probe_ent *ent); | 444 | extern int ata_device_add(const struct ata_probe_ent *ent); |
441 | extern void ata_host_set_remove(struct ata_host_set *host_set); | 445 | extern void ata_host_set_remove(struct ata_host_set *host_set); |
@@ -445,6 +449,10 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn | |||
445 | extern int ata_scsi_error(struct Scsi_Host *host); | 449 | extern int ata_scsi_error(struct Scsi_Host *host); |
446 | extern int ata_scsi_release(struct Scsi_Host *host); | 450 | extern int ata_scsi_release(struct Scsi_Host *host); |
447 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); | 451 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); |
452 | extern int ata_scsi_device_resume(struct scsi_device *); | ||
453 | extern int ata_scsi_device_suspend(struct scsi_device *); | ||
454 | extern int ata_device_resume(struct ata_port *, struct ata_device *); | ||
455 | extern int ata_device_suspend(struct ata_port *, struct ata_device *); | ||
448 | extern int ata_ratelimit(void); | 456 | extern int ata_ratelimit(void); |
449 | 457 | ||
450 | /* | 458 | /* |
@@ -480,7 +488,8 @@ extern u8 ata_bmdma_status(struct ata_port *ap); | |||
480 | extern void ata_bmdma_irq_clear(struct ata_port *ap); | 488 | extern void ata_bmdma_irq_clear(struct ata_port *ap); |
481 | extern void ata_qc_complete(struct ata_queued_cmd *qc); | 489 | extern void ata_qc_complete(struct ata_queued_cmd *qc); |
482 | extern void ata_eng_timeout(struct ata_port *ap); | 490 | extern void ata_eng_timeout(struct ata_port *ap); |
483 | extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd, | 491 | extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, |
492 | struct scsi_cmnd *cmd, | ||
484 | void (*done)(struct scsi_cmnd *)); | 493 | void (*done)(struct scsi_cmnd *)); |
485 | extern int ata_std_bios_param(struct scsi_device *sdev, | 494 | extern int ata_std_bios_param(struct scsi_device *sdev, |
486 | struct block_device *bdev, | 495 | struct block_device *bdev, |
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 8b67cf837ca9..ed00b278cb93 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
@@ -110,14 +110,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b) | |||
110 | #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL) | 110 | #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL) |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * Hugetlb policy. i386 hugetlb so far works with node numbers | ||
114 | * instead of zone lists, so give it special interfaces for now. | ||
115 | */ | ||
116 | extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr); | ||
117 | extern int mpol_node_valid(int nid, struct vm_area_struct *vma, | ||
118 | unsigned long addr); | ||
119 | |||
120 | /* | ||
121 | * Tree of shared policies for a shared memory region. | 113 | * Tree of shared policies for a shared memory region. |
122 | * Maintain the policies in a pseudo mm that contains vmas. The vmas | 114 | * Maintain the policies in a pseudo mm that contains vmas. The vmas |
123 | * carry the policy. As a special twist the pseudo mm is indexed in pages, not | 115 | * carry the policy. As a special twist the pseudo mm is indexed in pages, not |
@@ -156,6 +148,16 @@ extern void numa_default_policy(void); | |||
156 | extern void numa_policy_init(void); | 148 | extern void numa_policy_init(void); |
157 | extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new); | 149 | extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new); |
158 | extern struct mempolicy default_policy; | 150 | extern struct mempolicy default_policy; |
151 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, | ||
152 | unsigned long addr); | ||
153 | |||
154 | extern int policy_zone; | ||
155 | |||
156 | static inline void check_highest_zone(int k) | ||
157 | { | ||
158 | if (k > policy_zone) | ||
159 | policy_zone = k; | ||
160 | } | ||
159 | 161 | ||
160 | #else | 162 | #else |
161 | 163 | ||
@@ -182,17 +184,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old) | |||
182 | return NULL; | 184 | return NULL; |
183 | } | 185 | } |
184 | 186 | ||
185 | static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a) | ||
186 | { | ||
187 | return numa_node_id(); | ||
188 | } | ||
189 | |||
190 | static inline int | ||
191 | mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a) | ||
192 | { | ||
193 | return 1; | ||
194 | } | ||
195 | |||
196 | struct shared_policy {}; | 187 | struct shared_policy {}; |
197 | 188 | ||
198 | static inline int mpol_set_shared_policy(struct shared_policy *info, | 189 | static inline int mpol_set_shared_policy(struct shared_policy *info, |
@@ -232,6 +223,15 @@ static inline void numa_policy_rebind(const nodemask_t *old, | |||
232 | { | 223 | { |
233 | } | 224 | } |
234 | 225 | ||
226 | static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, | ||
227 | unsigned long addr) | ||
228 | { | ||
229 | return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER); | ||
230 | } | ||
231 | |||
232 | static inline void check_highest_zone(int k) | ||
233 | { | ||
234 | } | ||
235 | #endif /* CONFIG_NUMA */ | 235 | #endif /* CONFIG_NUMA */ |
236 | #endif /* __KERNEL__ */ | 236 | #endif /* __KERNEL__ */ |
237 | 237 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index a06a84d347fb..bc01fff3aa01 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -634,14 +634,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, | |||
634 | int shmem_lock(struct file *file, int lock, struct user_struct *user); | 634 | int shmem_lock(struct file *file, int lock, struct user_struct *user); |
635 | #else | 635 | #else |
636 | #define shmem_nopage filemap_nopage | 636 | #define shmem_nopage filemap_nopage |
637 | #define shmem_lock(a, b, c) ({0;}) /* always in memory, no need to lock */ | 637 | |
638 | #define shmem_set_policy(a, b) (0) | 638 | static inline int shmem_lock(struct file *file, int lock, |
639 | #define shmem_get_policy(a, b) (NULL) | 639 | struct user_struct *user) |
640 | { | ||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | static inline int shmem_set_policy(struct vm_area_struct *vma, | ||
645 | struct mempolicy *new) | ||
646 | { | ||
647 | return 0; | ||
648 | } | ||
649 | |||
650 | static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, | ||
651 | unsigned long addr) | ||
652 | { | ||
653 | return NULL; | ||
654 | } | ||
640 | #endif | 655 | #endif |
641 | struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); | 656 | struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); |
657 | extern int shmem_mmap(struct file *file, struct vm_area_struct *vma); | ||
642 | 658 | ||
643 | int shmem_zero_setup(struct vm_area_struct *); | 659 | int shmem_zero_setup(struct vm_area_struct *); |
644 | 660 | ||
661 | #ifndef CONFIG_MMU | ||
662 | extern unsigned long shmem_get_unmapped_area(struct file *file, | ||
663 | unsigned long addr, | ||
664 | unsigned long len, | ||
665 | unsigned long pgoff, | ||
666 | unsigned long flags); | ||
667 | #endif | ||
668 | |||
645 | static inline int can_do_mlock(void) | 669 | static inline int can_do_mlock(void) |
646 | { | 670 | { |
647 | if (capable(CAP_IPC_LOCK)) | 671 | if (capable(CAP_IPC_LOCK)) |
@@ -690,14 +714,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, | |||
690 | } | 714 | } |
691 | 715 | ||
692 | extern int vmtruncate(struct inode * inode, loff_t offset); | 716 | extern int vmtruncate(struct inode * inode, loff_t offset); |
717 | extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); | ||
693 | extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot); | 718 | extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot); |
694 | extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot); | 719 | extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot); |
695 | extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access); | ||
696 | 720 | ||
697 | static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access) | 721 | #ifdef CONFIG_MMU |
722 | extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, | ||
723 | unsigned long address, int write_access); | ||
724 | |||
725 | static inline int handle_mm_fault(struct mm_struct *mm, | ||
726 | struct vm_area_struct *vma, unsigned long address, | ||
727 | int write_access) | ||
698 | { | 728 | { |
699 | return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE); | 729 | return __handle_mm_fault(mm, vma, address, write_access) & |
730 | (~VM_FAULT_WRITE); | ||
700 | } | 731 | } |
732 | #else | ||
733 | static inline int handle_mm_fault(struct mm_struct *mm, | ||
734 | struct vm_area_struct *vma, unsigned long address, | ||
735 | int write_access) | ||
736 | { | ||
737 | /* should never happen if there's no MMU */ | ||
738 | BUG(); | ||
739 | return VM_FAULT_SIGBUS; | ||
740 | } | ||
741 | #endif | ||
701 | 742 | ||
702 | extern int make_pages_present(unsigned long addr, unsigned long end); | 743 | extern int make_pages_present(unsigned long addr, unsigned long end); |
703 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); | 744 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); |
@@ -896,6 +937,8 @@ extern unsigned long do_brk(unsigned long, unsigned long); | |||
896 | /* filemap.c */ | 937 | /* filemap.c */ |
897 | extern unsigned long page_unuse(struct page *); | 938 | extern unsigned long page_unuse(struct page *); |
898 | extern void truncate_inode_pages(struct address_space *, loff_t); | 939 | extern void truncate_inode_pages(struct address_space *, loff_t); |
940 | extern void truncate_inode_pages_range(struct address_space *, | ||
941 | loff_t lstart, loff_t lend); | ||
899 | 942 | ||
900 | /* generic vm_area_ops exported for stackable file systems */ | 943 | /* generic vm_area_ops exported for stackable file systems */ |
901 | extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *); | 944 | extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *); |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9f22090df7dd..c34f4a2c62f8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -46,7 +46,6 @@ struct zone_padding { | |||
46 | 46 | ||
47 | struct per_cpu_pages { | 47 | struct per_cpu_pages { |
48 | int count; /* number of pages in the list */ | 48 | int count; /* number of pages in the list */ |
49 | int low; /* low watermark, refill needed */ | ||
50 | int high; /* high watermark, emptying needed */ | 49 | int high; /* high watermark, emptying needed */ |
51 | int batch; /* chunk size for buddy add/remove */ | 50 | int batch; /* chunk size for buddy add/remove */ |
52 | struct list_head list; /* the list of pages */ | 51 | struct list_head list; /* the list of pages */ |
@@ -389,6 +388,11 @@ static inline struct zone *next_zone(struct zone *zone) | |||
389 | #define for_each_zone(zone) \ | 388 | #define for_each_zone(zone) \ |
390 | for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone)) | 389 | for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone)) |
391 | 390 | ||
391 | static inline int populated_zone(struct zone *zone) | ||
392 | { | ||
393 | return (!!zone->present_pages); | ||
394 | } | ||
395 | |||
392 | static inline int is_highmem_idx(int idx) | 396 | static inline int is_highmem_idx(int idx) |
393 | { | 397 | { |
394 | return (idx == ZONE_HIGHMEM); | 398 | return (idx == ZONE_HIGHMEM); |
@@ -398,6 +402,7 @@ static inline int is_normal_idx(int idx) | |||
398 | { | 402 | { |
399 | return (idx == ZONE_NORMAL); | 403 | return (idx == ZONE_NORMAL); |
400 | } | 404 | } |
405 | |||
401 | /** | 406 | /** |
402 | * is_highmem - helper function to quickly check if a struct zone is a | 407 | * is_highmem - helper function to quickly check if a struct zone is a |
403 | * highmem zone or not. This is an attempt to keep references | 408 | * highmem zone or not. This is an attempt to keep references |
@@ -414,6 +419,16 @@ static inline int is_normal(struct zone *zone) | |||
414 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | 419 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; |
415 | } | 420 | } |
416 | 421 | ||
422 | static inline int is_dma32(struct zone *zone) | ||
423 | { | ||
424 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; | ||
425 | } | ||
426 | |||
427 | static inline int is_dma(struct zone *zone) | ||
428 | { | ||
429 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | ||
430 | } | ||
431 | |||
417 | /* These two functions are used to setup the per zone pages min values */ | 432 | /* These two functions are used to setup the per zone pages min values */ |
418 | struct ctl_table; | 433 | struct ctl_table; |
419 | struct file; | 434 | struct file; |
@@ -435,7 +450,6 @@ extern struct pglist_data contig_page_data; | |||
435 | #define NODE_DATA(nid) (&contig_page_data) | 450 | #define NODE_DATA(nid) (&contig_page_data) |
436 | #define NODE_MEM_MAP(nid) mem_map | 451 | #define NODE_MEM_MAP(nid) mem_map |
437 | #define MAX_NODES_SHIFT 1 | 452 | #define MAX_NODES_SHIFT 1 |
438 | #define pfn_to_nid(pfn) (0) | ||
439 | 453 | ||
440 | #else /* CONFIG_NEED_MULTIPLE_NODES */ | 454 | #else /* CONFIG_NEED_MULTIPLE_NODES */ |
441 | 455 | ||
@@ -470,6 +484,10 @@ extern struct pglist_data contig_page_data; | |||
470 | #define early_pfn_to_nid(nid) (0UL) | 484 | #define early_pfn_to_nid(nid) (0UL) |
471 | #endif | 485 | #endif |
472 | 486 | ||
487 | #ifdef CONFIG_FLATMEM | ||
488 | #define pfn_to_nid(pfn) (0) | ||
489 | #endif | ||
490 | |||
473 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | 491 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) |
474 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | 492 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) |
475 | 493 | ||
@@ -564,11 +582,6 @@ static inline int valid_section_nr(unsigned long nr) | |||
564 | return valid_section(__nr_to_section(nr)); | 582 | return valid_section(__nr_to_section(nr)); |
565 | } | 583 | } |
566 | 584 | ||
567 | /* | ||
568 | * Given a kernel address, find the home node of the underlying memory. | ||
569 | */ | ||
570 | #define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) | ||
571 | |||
572 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) | 585 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) |
573 | { | 586 | { |
574 | return __nr_to_section(pfn_to_section_nr(pfn)); | 587 | return __nr_to_section(pfn_to_section_nr(pfn)); |
@@ -598,13 +611,14 @@ static inline int pfn_valid(unsigned long pfn) | |||
598 | * this restriction. | 611 | * this restriction. |
599 | */ | 612 | */ |
600 | #ifdef CONFIG_NUMA | 613 | #ifdef CONFIG_NUMA |
601 | #define pfn_to_nid early_pfn_to_nid | 614 | #define pfn_to_nid(pfn) \ |
602 | #endif | ||
603 | |||
604 | #define pfn_to_pgdat(pfn) \ | ||
605 | ({ \ | 615 | ({ \ |
606 | NODE_DATA(pfn_to_nid(pfn)); \ | 616 | unsigned long __pfn_to_nid_pfn = (pfn); \ |
617 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ | ||
607 | }) | 618 | }) |
619 | #else | ||
620 | #define pfn_to_nid(pfn) (0) | ||
621 | #endif | ||
608 | 622 | ||
609 | #define early_pfn_valid(pfn) pfn_valid(pfn) | 623 | #define early_pfn_valid(pfn) pfn_valid(pfn) |
610 | void sparse_init(void); | 624 | void sparse_init(void); |
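Under CONFIG_SPARSEMEM the NUMA flavour of pfn_to_nid() now resolves through the struct page (page_to_nid(pfn_to_page(pfn))) instead of early_pfn_to_nid(), so it is only meaningful once the mem_map is set up; note the macro also evaluates its argument exactly once via the local __pfn_to_nid_pfn. A small sketch, with the pfn_valid() guard as an assumption about how callers should use it:

    #include <linux/mm.h>
    #include <linux/mmzone.h>

    /* Resolve the home node of a pfn; invalid pfns have no backing
     * mem_map entry, so bail out before touching the page. */
    static int node_of_pfn(unsigned long pfn)
    {
            if (!pfn_valid(pfn))
                    return -1;
            return pfn_to_nid(pfn); /* page_to_nid(pfn_to_page(pfn)) */
    }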
@@ -613,12 +627,6 @@ void sparse_init(void); | |||
613 | #define sparse_index_init(_sec, _nid) do {} while (0) | 627 | #define sparse_index_init(_sec, _nid) do {} while (0) |
614 | #endif /* CONFIG_SPARSEMEM */ | 628 | #endif /* CONFIG_SPARSEMEM */ |
615 | 629 | ||
616 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | ||
617 | #define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid)) | ||
618 | #else | ||
619 | #define early_pfn_in_nid(pfn, nid) (1) | ||
620 | #endif | ||
621 | |||
622 | #ifndef early_pfn_valid | 630 | #ifndef early_pfn_valid |
623 | #define early_pfn_valid(pfn) (1) | 631 | #define early_pfn_valid(pfn) (1) |
624 | #endif | 632 | #endif |
diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 090e210e98f0..f95d51fae733 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h | |||
@@ -37,18 +37,26 @@ enum { | |||
37 | /* userspace doesn't need the nbd_device structure */ | 37 | /* userspace doesn't need the nbd_device structure */ |
38 | #ifdef __KERNEL__ | 38 | #ifdef __KERNEL__ |
39 | 39 | ||
40 | #include <linux/wait.h> | ||
41 | |||
40 | /* values for flags field */ | 42 | /* values for flags field */ |
41 | #define NBD_READ_ONLY 0x0001 | 43 | #define NBD_READ_ONLY 0x0001 |
42 | #define NBD_WRITE_NOCHK 0x0002 | 44 | #define NBD_WRITE_NOCHK 0x0002 |
43 | 45 | ||
46 | struct request; | ||
47 | |||
44 | struct nbd_device { | 48 | struct nbd_device { |
45 | int flags; | 49 | int flags; |
46 | int harderror; /* Code of hard error */ | 50 | int harderror; /* Code of hard error */ |
47 | struct socket * sock; | 51 | struct socket * sock; |
48 | struct file * file; /* If == NULL, device is not ready, yet */ | 52 | struct file * file; /* If == NULL, device is not ready, yet */ |
49 | int magic; | 53 | int magic; |
54 | |||
50 | spinlock_t queue_lock; | 55 | spinlock_t queue_lock; |
51 | struct list_head queue_head;/* Requests are added here... */ | 56 | struct list_head queue_head;/* Requests are added here... */ |
57 | struct request *active_req; | ||
58 | wait_queue_head_t active_wq; | ||
59 | |||
52 | struct semaphore tx_lock; | 60 | struct semaphore tx_lock; |
53 | struct gendisk *disk; | 61 | struct gendisk *disk; |
54 | int blksize; | 62 | int blksize; |
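The new active_req/active_wq pair lets the nbd driver serialise transmission: a sender waits on active_wq until the in-flight request is cleared, then claims the slot. A minimal sketch of that wait-and-claim idiom, assuming queue_lock protects active_req (the helper name is hypothetical):

    #include <linux/nbd.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    /* Classic recheck-under-lock pattern: whoever finishes a
     * transmission sets active_req = NULL and wakes active_wq. */
    static void nbd_claim_tx(struct nbd_device *lo, struct request *req)
    {
            spin_lock(&lo->queue_lock);
            while (lo->active_req != NULL) {
                    spin_unlock(&lo->queue_lock);
                    wait_event(lo->active_wq, lo->active_req == NULL);
                    spin_lock(&lo->queue_lock);
            }
            lo->active_req = req;   /* we now own the transmit path */
            spin_unlock(&lo->queue_lock);
    }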
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h index 130d4f588a37..3f4f7142bbe3 100644 --- a/include/linux/nfsd/xdr.h +++ b/include/linux/nfsd/xdr.h | |||
@@ -88,10 +88,12 @@ struct nfsd_readdirargs { | |||
88 | 88 | ||
89 | struct nfsd_attrstat { | 89 | struct nfsd_attrstat { |
90 | struct svc_fh fh; | 90 | struct svc_fh fh; |
91 | struct kstat stat; | ||
91 | }; | 92 | }; |
92 | 93 | ||
93 | struct nfsd_diropres { | 94 | struct nfsd_diropres { |
94 | struct svc_fh fh; | 95 | struct svc_fh fh; |
96 | struct kstat stat; | ||
95 | }; | 97 | }; |
96 | 98 | ||
97 | struct nfsd_readlinkres { | 99 | struct nfsd_readlinkres { |
@@ -101,6 +103,7 @@ struct nfsd_readlinkres { | |||
101 | struct nfsd_readres { | 103 | struct nfsd_readres { |
102 | struct svc_fh fh; | 104 | struct svc_fh fh; |
103 | unsigned long count; | 105 | unsigned long count; |
106 | struct kstat stat; | ||
104 | }; | 107 | }; |
105 | 108 | ||
106 | struct nfsd_readdirres { | 109 | struct nfsd_readdirres { |
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h index 3c2a71b43bac..a4322741f8b9 100644 --- a/include/linux/nfsd/xdr3.h +++ b/include/linux/nfsd/xdr3.h | |||
@@ -126,6 +126,7 @@ struct nfsd3_setaclargs { | |||
126 | struct nfsd3_attrstat { | 126 | struct nfsd3_attrstat { |
127 | __u32 status; | 127 | __u32 status; |
128 | struct svc_fh fh; | 128 | struct svc_fh fh; |
129 | struct kstat stat; | ||
129 | }; | 130 | }; |
130 | 131 | ||
131 | /* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */ | 132 | /* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */ |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 343083fec258..d52999c43336 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -79,13 +79,23 @@ | |||
79 | /* | 79 | /* |
80 | * Global page accounting. One instance per CPU. Only unsigned longs are | 80 | * Global page accounting. One instance per CPU. Only unsigned longs are |
81 | * allowed. | 81 | * allowed. |
82 | * | ||
83 | * - Fields can be modified with xxx_page_state and xxx_page_state_zone at | ||
84 | * any time safely (which protects the instance from modification by | ||
85 | * interrupts). | ||
86 | * - The __xxx_page_state variants can be used safely when interrupts are | ||
87 | * disabled. | ||
88 | * - The __xxx_page_state variants can be used if the field is only | ||
89 | * modified from process context, or only modified from interrupt context. | ||
90 | * In this case, the field should be commented here. | ||
82 | */ | 91 | */ |
83 | struct page_state { | 92 | struct page_state { |
84 | unsigned long nr_dirty; /* Dirty writeable pages */ | 93 | unsigned long nr_dirty; /* Dirty writeable pages */ |
85 | unsigned long nr_writeback; /* Pages under writeback */ | 94 | unsigned long nr_writeback; /* Pages under writeback */ |
86 | unsigned long nr_unstable; /* NFS unstable pages */ | 95 | unsigned long nr_unstable; /* NFS unstable pages */ |
87 | unsigned long nr_page_table_pages;/* Pages used for pagetables */ | 96 | unsigned long nr_page_table_pages;/* Pages used for pagetables */ |
88 | unsigned long nr_mapped; /* mapped into pagetables */ | 97 | unsigned long nr_mapped; /* mapped into pagetables. |
98 | * only modified from process context */ | ||
89 | unsigned long nr_slab; /* In slab */ | 99 | unsigned long nr_slab; /* In slab */ |
90 | #define GET_PAGE_STATE_LAST nr_slab | 100 | #define GET_PAGE_STATE_LAST nr_slab |
91 | 101 | ||
@@ -97,32 +107,40 @@ struct page_state { | |||
97 | unsigned long pgpgout; /* Disk writes */ | 107 | unsigned long pgpgout; /* Disk writes */ |
98 | unsigned long pswpin; /* swap reads */ | 108 | unsigned long pswpin; /* swap reads */ |
99 | unsigned long pswpout; /* swap writes */ | 109 | unsigned long pswpout; /* swap writes */ |
100 | unsigned long pgalloc_high; /* page allocations */ | ||
101 | 110 | ||
111 | unsigned long pgalloc_high; /* page allocations */ | ||
102 | unsigned long pgalloc_normal; | 112 | unsigned long pgalloc_normal; |
113 | unsigned long pgalloc_dma32; | ||
103 | unsigned long pgalloc_dma; | 114 | unsigned long pgalloc_dma; |
115 | |||
104 | unsigned long pgfree; /* page freeings */ | 116 | unsigned long pgfree; /* page freeings */ |
105 | unsigned long pgactivate; /* pages moved inactive->active */ | 117 | unsigned long pgactivate; /* pages moved inactive->active */ |
106 | unsigned long pgdeactivate; /* pages moved active->inactive */ | 118 | unsigned long pgdeactivate; /* pages moved active->inactive */ |
107 | 119 | ||
108 | unsigned long pgfault; /* faults (major+minor) */ | 120 | unsigned long pgfault; /* faults (major+minor) */ |
109 | unsigned long pgmajfault; /* faults (major only) */ | 121 | unsigned long pgmajfault; /* faults (major only) */ |
122 | |||
110 | unsigned long pgrefill_high; /* inspected in refill_inactive_zone */ | 123 | unsigned long pgrefill_high; /* inspected in refill_inactive_zone */ |
111 | unsigned long pgrefill_normal; | 124 | unsigned long pgrefill_normal; |
125 | unsigned long pgrefill_dma32; | ||
112 | unsigned long pgrefill_dma; | 126 | unsigned long pgrefill_dma; |
113 | 127 | ||
114 | unsigned long pgsteal_high; /* total highmem pages reclaimed */ | 128 | unsigned long pgsteal_high; /* total highmem pages reclaimed */ |
115 | unsigned long pgsteal_normal; | 129 | unsigned long pgsteal_normal; |
130 | unsigned long pgsteal_dma32; | ||
116 | unsigned long pgsteal_dma; | 131 | unsigned long pgsteal_dma; |
132 | |||
117 | unsigned long pgscan_kswapd_high;/* total highmem pages scanned */ | 133 | unsigned long pgscan_kswapd_high;/* total highmem pages scanned */ |
118 | unsigned long pgscan_kswapd_normal; | 134 | unsigned long pgscan_kswapd_normal; |
119 | 135 | unsigned long pgscan_kswapd_dma32; | |
120 | unsigned long pgscan_kswapd_dma; | 136 | unsigned long pgscan_kswapd_dma; |
137 | |||
121 | unsigned long pgscan_direct_high;/* total highmem pages scanned */ | 138 | unsigned long pgscan_direct_high;/* total highmem pages scanned */ |
122 | unsigned long pgscan_direct_normal; | 139 | unsigned long pgscan_direct_normal; |
140 | unsigned long pgscan_direct_dma32; | ||
123 | unsigned long pgscan_direct_dma; | 141 | unsigned long pgscan_direct_dma; |
124 | unsigned long pginodesteal; /* pages reclaimed via inode freeing */ | ||
125 | 142 | ||
143 | unsigned long pginodesteal; /* pages reclaimed via inode freeing */ | ||
126 | unsigned long slabs_scanned; /* slab objects scanned */ | 144 | unsigned long slabs_scanned; /* slab objects scanned */ |
127 | unsigned long kswapd_steal; /* pages reclaimed by kswapd */ | 145 | unsigned long kswapd_steal; /* pages reclaimed by kswapd */ |
128 | unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */ | 146 | unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */ |
@@ -136,31 +154,54 @@ struct page_state { | |||
136 | extern void get_page_state(struct page_state *ret); | 154 | extern void get_page_state(struct page_state *ret); |
137 | extern void get_page_state_node(struct page_state *ret, int node); | 155 | extern void get_page_state_node(struct page_state *ret, int node); |
138 | extern void get_full_page_state(struct page_state *ret); | 156 | extern void get_full_page_state(struct page_state *ret); |
139 | extern unsigned long __read_page_state(unsigned long offset); | 157 | extern unsigned long read_page_state_offset(unsigned long offset); |
140 | extern void __mod_page_state(unsigned long offset, unsigned long delta); | 158 | extern void mod_page_state_offset(unsigned long offset, unsigned long delta); |
159 | extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); | ||
141 | 160 | ||
142 | #define read_page_state(member) \ | 161 | #define read_page_state(member) \ |
143 | __read_page_state(offsetof(struct page_state, member)) | 162 | read_page_state_offset(offsetof(struct page_state, member)) |
144 | 163 | ||
145 | #define mod_page_state(member, delta) \ | 164 | #define mod_page_state(member, delta) \ |
146 | __mod_page_state(offsetof(struct page_state, member), (delta)) | 165 | mod_page_state_offset(offsetof(struct page_state, member), (delta)) |
147 | 166 | ||
148 | #define inc_page_state(member) mod_page_state(member, 1UL) | 167 | #define __mod_page_state(member, delta) \ |
149 | #define dec_page_state(member) mod_page_state(member, 0UL - 1) | 168 | __mod_page_state_offset(offsetof(struct page_state, member), (delta)) |
150 | #define add_page_state(member,delta) mod_page_state(member, (delta)) | 169 | |
151 | #define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta)) | 170 | #define inc_page_state(member) mod_page_state(member, 1UL) |
152 | 171 | #define dec_page_state(member) mod_page_state(member, 0UL - 1) | |
153 | #define mod_page_state_zone(zone, member, delta) \ | 172 | #define add_page_state(member,delta) mod_page_state(member, (delta)) |
154 | do { \ | 173 | #define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta)) |
155 | unsigned offset; \ | 174 | |
156 | if (is_highmem(zone)) \ | 175 | #define __inc_page_state(member) __mod_page_state(member, 1UL) |
157 | offset = offsetof(struct page_state, member##_high); \ | 176 | #define __dec_page_state(member) __mod_page_state(member, 0UL - 1) |
158 | else if (is_normal(zone)) \ | 177 | #define __add_page_state(member,delta) __mod_page_state(member, (delta)) |
159 | offset = offsetof(struct page_state, member##_normal); \ | 178 | #define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta)) |
160 | else \ | 179 | |
161 | offset = offsetof(struct page_state, member##_dma); \ | 180 | #define page_state(member) (*__page_state(offsetof(struct page_state, member))) |
162 | __mod_page_state(offset, (delta)); \ | 181 | |
163 | } while (0) | 182 | #define state_zone_offset(zone, member) \ |
183 | ({ \ | ||
184 | unsigned offset; \ | ||
185 | if (is_highmem(zone)) \ | ||
186 | offset = offsetof(struct page_state, member##_high); \ | ||
187 | else if (is_normal(zone)) \ | ||
188 | offset = offsetof(struct page_state, member##_normal); \ | ||
189 | else if (is_dma32(zone)) \ | ||
190 | offset = offsetof(struct page_state, member##_dma32); \ | ||
191 | else \ | ||
192 | offset = offsetof(struct page_state, member##_dma); \ | ||
193 | offset; \ | ||
194 | }) | ||
195 | |||
196 | #define __mod_page_state_zone(zone, member, delta) \ | ||
197 | do { \ | ||
198 | __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \ | ||
199 | } while (0) | ||
200 | |||
201 | #define mod_page_state_zone(zone, member, delta) \ | ||
202 | do { \ | ||
203 | mod_page_state_offset(state_zone_offset(zone, member), (delta)); \ | ||
204 | } while (0) | ||
164 | 205 | ||
165 | /* | 206 | /* |
166 | * Manipulation of page state flags | 207 | * Manipulation of page state flags |
diff --git a/include/linux/parport.h b/include/linux/parport.h index d2a4d9e1e6d1..f7ff0b0c4031 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h | |||
@@ -242,7 +242,6 @@ enum ieee1284_phase { | |||
242 | IEEE1284_PH_FWD_IDLE, | 242 | IEEE1284_PH_FWD_IDLE, |
243 | IEEE1284_PH_TERMINATE, | 243 | IEEE1284_PH_TERMINATE, |
244 | IEEE1284_PH_NEGOTIATION, | 244 | IEEE1284_PH_NEGOTIATION, |
245 | IEEE1284_PH_HBUSY_DNA, | ||
246 | IEEE1284_PH_REV_IDLE, | 245 | IEEE1284_PH_REV_IDLE, |
247 | IEEE1284_PH_HBUSY_DAVAIL, | 246 | IEEE1284_PH_HBUSY_DAVAIL, |
248 | IEEE1284_PH_REV_DATA, | 247 | IEEE1284_PH_REV_DATA, |
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index c6f762470879..1cc0f6b1a49a 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h | |||
@@ -79,13 +79,13 @@ static __inline__ unsigned char parport_pc_read_data(struct parport *p) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | #ifdef DEBUG_PARPORT | 81 | #ifdef DEBUG_PARPORT |
82 | extern __inline__ void dump_parport_state (char *str, struct parport *p) | 82 | static inline void dump_parport_state (char *str, struct parport *p) |
83 | { | 83 | { |
84 | /* here's hoping that reading these ports won't side-effect anything underneath */ | 84 | /* here's hoping that reading these ports won't side-effect anything underneath */ |
85 | unsigned char ecr = inb (ECONTROL (p)); | 85 | unsigned char ecr = inb (ECONTROL (p)); |
86 | unsigned char dcr = inb (CONTROL (p)); | 86 | unsigned char dcr = inb (CONTROL (p)); |
87 | unsigned char dsr = inb (STATUS (p)); | 87 | unsigned char dsr = inb (STATUS (p)); |
88 | static char *ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"}; | 88 | static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"}; |
89 | const struct parport_pc_private *priv = p->physport->private_data; | 89 | const struct parport_pc_private *priv = p->physport->private_data; |
90 | int i; | 90 | int i; |
91 | 91 | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 96a0403f61f6..a213e999de31 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -394,6 +394,13 @@ | |||
394 | #define PCI_DEVICE_ID_NS_87410 0xd001 | 394 | #define PCI_DEVICE_ID_NS_87410 0xd001 |
395 | #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d | 395 | #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d |
396 | 396 | ||
397 | #define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028 | ||
398 | #define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b | ||
399 | #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d | ||
400 | #define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e | ||
401 | #define PCI_DEVICE_ID_NS_CS5535_USB 0x002f | ||
402 | #define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030 | ||
403 | |||
397 | #define PCI_VENDOR_ID_TSENG 0x100c | 404 | #define PCI_VENDOR_ID_TSENG 0x100c |
398 | #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 | 405 | #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 |
399 | #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 | 406 | #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 |
@@ -496,6 +503,9 @@ | |||
496 | 503 | ||
497 | #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A | 504 | #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A |
498 | 505 | ||
506 | #define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 | ||
507 | #define PCI_DEVICE_ID_AMD_LX_AES 0x2082 | ||
508 | |||
499 | #define PCI_VENDOR_ID_TRIDENT 0x1023 | 509 | #define PCI_VENDOR_ID_TRIDENT 0x1023 |
500 | #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 | 510 | #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 |
501 | #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 | 511 | #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 |
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index 13e7c4b62367..b6e0bcad84e1 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h | |||
@@ -71,8 +71,8 @@ | |||
71 | */ | 71 | */ |
72 | #define MD_PATCHLEVEL_VERSION 3 | 72 | #define MD_PATCHLEVEL_VERSION 3 |
73 | 73 | ||
74 | extern int register_md_personality (int p_num, mdk_personality_t *p); | 74 | extern int register_md_personality (struct mdk_personality *p); |
75 | extern int unregister_md_personality (int p_num); | 75 | extern int unregister_md_personality (struct mdk_personality *p); |
76 | extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), | 76 | extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), |
77 | mddev_t *mddev, const char *name); | 77 | mddev_t *mddev, const char *name); |
78 | extern void md_unregister_thread (mdk_thread_t *thread); | 78 | extern void md_unregister_thread (mdk_thread_t *thread); |
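Personalities are now registered by structure pointer rather than by a fixed p_num slot; the level lives in struct mdk_personality itself (see the md_k.h hunk further down). A hedged sketch of module init/exit under the new signatures, with the personality name, level number, and omitted methods all illustrative:

    #include <linux/module.h>
    #include <linux/raid/md.h>

    static struct mdk_personality demo_personality = {
            .name   = "demo",
            .level  = 11,           /* illustrative level number */
            .owner  = THIS_MODULE,
            /* .make_request, .run, .stop, ... elided in this sketch */
    };

    static int __init demo_init(void)
    {
            return register_md_personality(&demo_personality);
    }

    static void __exit demo_exit(void)
    {
            unregister_md_personality(&demo_personality);
    }

    module_init(demo_init);
    module_exit(demo_exit);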
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 46629a275ba9..617b9506c760 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -18,62 +18,19 @@ | |||
18 | /* and dm-bio-list.h is not under include/linux because.... ??? */ | 18 | /* and dm-bio-list.h is not under include/linux because.... ??? */ |
19 | #include "../../../drivers/md/dm-bio-list.h" | 19 | #include "../../../drivers/md/dm-bio-list.h" |
20 | 20 | ||
21 | #define MD_RESERVED 0UL | ||
22 | #define LINEAR 1UL | ||
23 | #define RAID0 2UL | ||
24 | #define RAID1 3UL | ||
25 | #define RAID5 4UL | ||
26 | #define TRANSLUCENT 5UL | ||
27 | #define HSM 6UL | ||
28 | #define MULTIPATH 7UL | ||
29 | #define RAID6 8UL | ||
30 | #define RAID10 9UL | ||
31 | #define FAULTY 10UL | ||
32 | #define MAX_PERSONALITY 11UL | ||
33 | |||
34 | #define LEVEL_MULTIPATH (-4) | 21 | #define LEVEL_MULTIPATH (-4) |
35 | #define LEVEL_LINEAR (-1) | 22 | #define LEVEL_LINEAR (-1) |
36 | #define LEVEL_FAULTY (-5) | 23 | #define LEVEL_FAULTY (-5) |
37 | 24 | ||
25 | /* we need a value for 'no level specified' and 0 | ||
26 | * means 'raid0', so we need something else. This is | ||
27 | * for internal use only | ||
28 | */ | ||
29 | #define LEVEL_NONE (-1000000) | ||
30 | |||
38 | #define MaxSector (~(sector_t)0) | 31 | #define MaxSector (~(sector_t)0) |
39 | #define MD_THREAD_NAME_MAX 14 | 32 | #define MD_THREAD_NAME_MAX 14 |
40 | 33 | ||
41 | static inline int pers_to_level (int pers) | ||
42 | { | ||
43 | switch (pers) { | ||
44 | case FAULTY: return LEVEL_FAULTY; | ||
45 | case MULTIPATH: return LEVEL_MULTIPATH; | ||
46 | case HSM: return -3; | ||
47 | case TRANSLUCENT: return -2; | ||
48 | case LINEAR: return LEVEL_LINEAR; | ||
49 | case RAID0: return 0; | ||
50 | case RAID1: return 1; | ||
51 | case RAID5: return 5; | ||
52 | case RAID6: return 6; | ||
53 | case RAID10: return 10; | ||
54 | } | ||
55 | BUG(); | ||
56 | return MD_RESERVED; | ||
57 | } | ||
58 | |||
59 | static inline int level_to_pers (int level) | ||
60 | { | ||
61 | switch (level) { | ||
62 | case LEVEL_FAULTY: return FAULTY; | ||
63 | case LEVEL_MULTIPATH: return MULTIPATH; | ||
64 | case -3: return HSM; | ||
65 | case -2: return TRANSLUCENT; | ||
66 | case LEVEL_LINEAR: return LINEAR; | ||
67 | case 0: return RAID0; | ||
68 | case 1: return RAID1; | ||
69 | case 4: | ||
70 | case 5: return RAID5; | ||
71 | case 6: return RAID6; | ||
72 | case 10: return RAID10; | ||
73 | } | ||
74 | return MD_RESERVED; | ||
75 | } | ||
76 | |||
77 | typedef struct mddev_s mddev_t; | 34 | typedef struct mddev_s mddev_t; |
78 | typedef struct mdk_rdev_s mdk_rdev_t; | 35 | typedef struct mdk_rdev_s mdk_rdev_t; |
79 | 36 | ||
@@ -138,14 +95,16 @@ struct mdk_rdev_s | |||
138 | atomic_t read_errors; /* number of consecutive read errors that | 95 | atomic_t read_errors; /* number of consecutive read errors that |
139 | * we have tried to ignore. | 96 | * we have tried to ignore. |
140 | */ | 97 | */ |
98 | atomic_t corrected_errors; /* number of corrected read errors, | ||
99 | * for reporting to userspace and storing | ||
100 | * in superblock. | ||
101 | */ | ||
141 | }; | 102 | }; |
142 | 103 | ||
143 | typedef struct mdk_personality_s mdk_personality_t; | ||
144 | |||
145 | struct mddev_s | 104 | struct mddev_s |
146 | { | 105 | { |
147 | void *private; | 106 | void *private; |
148 | mdk_personality_t *pers; | 107 | struct mdk_personality *pers; |
149 | dev_t unit; | 108 | dev_t unit; |
150 | int md_minor; | 109 | int md_minor; |
151 | struct list_head disks; | 110 | struct list_head disks; |
@@ -164,6 +123,7 @@ struct mddev_s | |||
164 | int chunk_size; | 123 | int chunk_size; |
165 | time_t ctime, utime; | 124 | time_t ctime, utime; |
166 | int level, layout; | 125 | int level, layout; |
126 | char clevel[16]; | ||
167 | int raid_disks; | 127 | int raid_disks; |
168 | int max_disks; | 128 | int max_disks; |
169 | sector_t size; /* used size of component devices */ | 129 | sector_t size; /* used size of component devices */ |
@@ -183,6 +143,11 @@ struct mddev_s | |||
183 | sector_t resync_mismatches; /* count of sectors where | 143 | sector_t resync_mismatches; /* count of sectors where |
184 | * parity/replica mismatch found | 144 | * parity/replica mismatch found |
185 | */ | 145 | */ |
146 | /* if zero, use the system-wide default */ | ||
147 | int sync_speed_min; | ||
148 | int sync_speed_max; | ||
149 | |||
150 | int ok_start_degraded; | ||
186 | /* recovery/resync flags | 151 | /* recovery/resync flags |
187 | * NEEDED: we might need to start a resync/recover | 152 | * NEEDED: we might need to start a resync/recover |
188 | * RUNNING: a thread is running, or about to be started | 153 | * RUNNING: a thread is running, or about to be started |
@@ -265,9 +230,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect | |||
265 | atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); | 230 | atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); |
266 | } | 231 | } |
267 | 232 | ||
268 | struct mdk_personality_s | 233 | struct mdk_personality |
269 | { | 234 | { |
270 | char *name; | 235 | char *name; |
236 | int level; | ||
237 | struct list_head list; | ||
271 | struct module *owner; | 238 | struct module *owner; |
272 | int (*make_request)(request_queue_t *q, struct bio *bio); | 239 | int (*make_request)(request_queue_t *q, struct bio *bio); |
273 | int (*run)(mddev_t *mddev); | 240 | int (*run)(mddev_t *mddev); |
@@ -305,8 +272,6 @@ static inline char * mdname (mddev_t * mddev) | |||
305 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; | 272 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; |
306 | } | 273 | } |
307 | 274 | ||
308 | extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr); | ||
309 | |||
310 | /* | 275 | /* |
311 | * iterates through some rdev ringlist. It's safe to remove the | 276 | * iterates through some rdev ringlist. It's safe to remove the |
312 | * current 'rdev'. Don't touch 'tmp' though. | 277 | * current 'rdev'. Don't touch 'tmp' though. |
@@ -366,5 +331,10 @@ do { \ | |||
366 | __wait_event_lock_irq(wq, condition, lock, cmd); \ | 331 | __wait_event_lock_irq(wq, condition, lock, cmd); \ |
367 | } while (0) | 332 | } while (0) |
368 | 333 | ||
334 | static inline void safe_put_page(struct page *p) | ||
335 | { | ||
336 | if (p) put_page(p); | ||
337 | } | ||
338 | |||
369 | #endif | 339 | #endif |
370 | 340 | ||
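safe_put_page() is simply a NULL-tolerant put_page(), which tidies error paths where only some pages were ever allocated. A short sketch (the array-freeing helper is hypothetical):

    #include <linux/mm.h>

    /* Entries may be NULL if allocation failed part-way through;
     * safe_put_page() lets the cleanup loop stay unconditional. */
    static void free_page_array(struct page **pages, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    safe_put_page(pages[i]);        /* no-op on NULL */
    }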
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h index 292b98f2b408..9d5494aaac0f 100644 --- a/include/linux/raid/raid1.h +++ b/include/linux/raid/raid1.h | |||
@@ -45,6 +45,8 @@ struct r1_private_data_s { | |||
45 | 45 | ||
46 | spinlock_t resync_lock; | 46 | spinlock_t resync_lock; |
47 | int nr_pending; | 47 | int nr_pending; |
48 | int nr_waiting; | ||
49 | int nr_queued; | ||
48 | int barrier; | 50 | int barrier; |
49 | sector_t next_resync; | 51 | sector_t next_resync; |
50 | int fullsync; /* set to 1 if a full sync is needed, | 52 | int fullsync; /* set to 1 if a full sync is needed, |
@@ -52,11 +54,12 @@ struct r1_private_data_s { | |||
52 | * Cleared when a sync completes. | 54 | * Cleared when a sync completes. |
53 | */ | 55 | */ |
54 | 56 | ||
55 | wait_queue_head_t wait_idle; | 57 | wait_queue_head_t wait_barrier; |
56 | wait_queue_head_t wait_resume; | ||
57 | 58 | ||
58 | struct pool_info *poolinfo; | 59 | struct pool_info *poolinfo; |
59 | 60 | ||
61 | struct page *tmppage; | ||
62 | |||
60 | mempool_t *r1bio_pool; | 63 | mempool_t *r1bio_pool; |
61 | mempool_t *r1buf_pool; | 64 | mempool_t *r1buf_pool; |
62 | }; | 65 | }; |
@@ -106,6 +109,13 @@ struct r1bio_s { | |||
106 | /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ | 109 | /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ |
107 | }; | 110 | }; |
108 | 111 | ||
112 | /* when we get a read error on a read-only array, we redirect to another | ||
113 | * device without failing the first device, or trying to over-write to | ||
114 | * correct the read error. To keep track of bad blocks on a per-bio | ||
115 | * level, we store IO_BLOCKED in the appropriate 'bios' pointer | ||
116 | */ | ||
117 | #define IO_BLOCKED ((struct bio*)1) | ||
118 | |||
109 | /* bits for r1bio.state */ | 119 | /* bits for r1bio.state */ |
110 | #define R1BIO_Uptodate 0 | 120 | #define R1BIO_Uptodate 0 |
111 | #define R1BIO_IsSync 1 | 121 | #define R1BIO_IsSync 1 |
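IO_BLOCKED is a sentinel pointer value, not a dereferenceable bio: stored in a bios[] slot it means "this device is known bad for this range", which is distinct from NULL ("no bio issued"). A sketch of how retry code might test a slot, assuming the bios array of r1bio_s (the helper is hypothetical):

    #include <linux/raid/raid1.h>

    /* Never dereference bios[disk] before ruling out both the NULL
     * and the IO_BLOCKED sentinel. */
    static int r1_disk_usable(struct r1bio_s *r1_bio, int disk)
    {
            struct bio *bio = r1_bio->bios[disk];

            return bio != NULL && bio != IO_BLOCKED;
    }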
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h index 60708789c8f9..b1103298a8c2 100644 --- a/include/linux/raid/raid10.h +++ b/include/linux/raid/raid10.h | |||
@@ -35,18 +35,26 @@ struct r10_private_data_s { | |||
35 | sector_t chunk_mask; | 35 | sector_t chunk_mask; |
36 | 36 | ||
37 | struct list_head retry_list; | 37 | struct list_head retry_list; |
38 | /* for use when syncing mirrors: */ | 38 | /* queue pending writes and submit them on unplug */ |
39 | struct bio_list pending_bio_list; | ||
40 | |||
39 | 41 | ||
40 | spinlock_t resync_lock; | 42 | spinlock_t resync_lock; |
41 | int nr_pending; | 43 | int nr_pending; |
44 | int nr_waiting; | ||
45 | int nr_queued; | ||
42 | int barrier; | 46 | int barrier; |
43 | sector_t next_resync; | 47 | sector_t next_resync; |
48 | int fullsync; /* set to 1 if a full sync is needed, | ||
49 | * (fresh device added). | ||
50 | * Cleared when a sync completes. | ||
51 | */ | ||
44 | 52 | ||
45 | wait_queue_head_t wait_idle; | 53 | wait_queue_head_t wait_barrier; |
46 | wait_queue_head_t wait_resume; | ||
47 | 54 | ||
48 | mempool_t *r10bio_pool; | 55 | mempool_t *r10bio_pool; |
49 | mempool_t *r10buf_pool; | 56 | mempool_t *r10buf_pool; |
57 | struct page *tmppage; | ||
50 | }; | 58 | }; |
51 | 59 | ||
52 | typedef struct r10_private_data_s conf_t; | 60 | typedef struct r10_private_data_s conf_t; |
@@ -96,8 +104,16 @@ struct r10bio_s { | |||
96 | } devs[0]; | 104 | } devs[0]; |
97 | }; | 105 | }; |
98 | 106 | ||
107 | /* when we get a read error on a read-only array, we redirect to another | ||
108 | * device without failing the first device, or trying to over-write to | ||
109 | * correct the read error. To keep track of bad blocks on a per-bio | ||
110 | * level, we store IO_BLOCKED in the appropriate 'bios' pointer | ||
111 | */ | ||
112 | #define IO_BLOCKED ((struct bio*)1) | ||
113 | |||
99 | /* bits for r10bio.state */ | 114 | /* bits for r10bio.state */ |
100 | #define R10BIO_Uptodate 0 | 115 | #define R10BIO_Uptodate 0 |
101 | #define R10BIO_IsSync 1 | 116 | #define R10BIO_IsSync 1 |
102 | #define R10BIO_IsRecover 2 | 117 | #define R10BIO_IsRecover 2 |
118 | #define R10BIO_Degraded 3 | ||
103 | #endif | 119 | #endif |
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index f025ba6fb14c..394da8207b34 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h | |||
@@ -126,7 +126,7 @@ | |||
126 | */ | 126 | */ |
127 | 127 | ||
128 | struct stripe_head { | 128 | struct stripe_head { |
129 | struct stripe_head *hash_next, **hash_pprev; /* hash pointers */ | 129 | struct hlist_node hash; |
130 | struct list_head lru; /* inactive_list or handle_list */ | 130 | struct list_head lru; /* inactive_list or handle_list */ |
131 | struct raid5_private_data *raid_conf; | 131 | struct raid5_private_data *raid_conf; |
132 | sector_t sector; /* sector of this row */ | 132 | sector_t sector; /* sector of this row */ |
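Switching the stripe hash from hand-rolled hash_next/hash_pprev pointers to hlist_node/hlist_head (see also the raid5_private_data hunk below) lets raid5 reuse the generic hlist helpers. A hedged sketch of a bucket lookup with the four-argument hlist_for_each_entry() of this era; bucket selection is assumed done by the caller:

    #include <linux/list.h>
    #include <linux/raid/raid5.h>

    /* Walk one hash bucket; this replaces following the old manual
     * hash_next chain by hand. */
    static struct stripe_head *find_stripe(struct hlist_head *bucket,
                                           sector_t sector)
    {
            struct stripe_head *sh;
            struct hlist_node *n;

            hlist_for_each_entry(sh, n, bucket, hash)
                    if (sh->sector == sector)
                            return sh;
            return NULL;
    }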
@@ -152,7 +152,6 @@ struct stripe_head { | |||
152 | #define R5_Insync 3 /* rdev && rdev->in_sync at start */ | 152 | #define R5_Insync 3 /* rdev && rdev->in_sync at start */ |
153 | #define R5_Wantread 4 /* want to schedule a read */ | 153 | #define R5_Wantread 4 /* want to schedule a read */ |
154 | #define R5_Wantwrite 5 | 154 | #define R5_Wantwrite 5 |
155 | #define R5_Syncio 6 /* this io need to be accounted as resync io */ | ||
156 | #define R5_Overlap 7 /* There is a pending overlapping request on this block */ | 155 | #define R5_Overlap 7 /* There is a pending overlapping request on this block */ |
157 | #define R5_ReadError 8 /* seen a read error here recently */ | 156 | #define R5_ReadError 8 /* seen a read error here recently */ |
158 | #define R5_ReWrite 9 /* have tried to over-write the readerror */ | 157 | #define R5_ReWrite 9 /* have tried to over-write the readerror */ |
@@ -205,7 +204,7 @@ struct disk_info { | |||
205 | }; | 204 | }; |
206 | 205 | ||
207 | struct raid5_private_data { | 206 | struct raid5_private_data { |
208 | struct stripe_head **stripe_hashtbl; | 207 | struct hlist_head *stripe_hashtbl; |
209 | mddev_t *mddev; | 208 | mddev_t *mddev; |
210 | struct disk_info *spare; | 209 | struct disk_info *spare; |
211 | int chunk_size, level, algorithm; | 210 | int chunk_size, level, algorithm; |
@@ -228,6 +227,8 @@ struct raid5_private_data { | |||
228 | * Cleared when a sync completes. | 227 | * Cleared when a sync completes. |
229 | */ | 228 | */ |
230 | 229 | ||
230 | struct page *spare_page; /* Used when checking P/Q in raid6 */ | ||
231 | |||
231 | /* | 232 | /* |
232 | * Free stripes pool | 233 | * Free stripes pool |
233 | */ | 234 | */ |
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index e0a4faa9610c..953b6df5d037 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h | |||
@@ -5,6 +5,16 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev); | |||
5 | struct super_block *ramfs_get_sb(struct file_system_type *fs_type, | 5 | struct super_block *ramfs_get_sb(struct file_system_type *fs_type, |
6 | int flags, const char *dev_name, void *data); | 6 | int flags, const char *dev_name, void *data); |
7 | 7 | ||
8 | #ifndef CONFIG_MMU | ||
9 | extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | ||
10 | unsigned long addr, | ||
11 | unsigned long len, | ||
12 | unsigned long pgoff, | ||
13 | unsigned long flags); | ||
14 | |||
15 | extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); | ||
16 | #endif | ||
17 | |||
8 | extern struct file_operations ramfs_file_operations; | 18 | extern struct file_operations ramfs_file_operations; |
9 | extern struct vm_operations_struct generic_file_vm_ops; | 19 | extern struct vm_operations_struct generic_file_vm_ops; |
10 | 20 | ||
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 33261f1d2239..9d6fbeef2104 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *); | |||
71 | * rmap interfaces called when adding or removing pte of page | 71 | * rmap interfaces called when adding or removing pte of page |
72 | */ | 72 | */ |
73 | void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); | 73 | void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); |
74 | void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); | ||
74 | void page_add_file_rmap(struct page *); | 75 | void page_add_file_rmap(struct page *); |
75 | void page_remove_rmap(struct page *); | 76 | void page_remove_rmap(struct page *); |
76 | 77 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index b0ad6f30679e..7da33619d5d0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); | |||
254 | * The mm counters are not protected by its page_table_lock, | 254 | * The mm counters are not protected by its page_table_lock, |
255 | * so must be incremented atomically. | 255 | * so must be incremented atomically. |
256 | */ | 256 | */ |
257 | #ifdef ATOMIC64_INIT | 257 | #define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value) |
258 | #define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value) | 258 | #define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member)) |
259 | #define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member)) | 259 | #define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member) |
260 | #define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member) | 260 | #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) |
261 | #define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member) | 261 | #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) |
262 | #define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member) | 262 | typedef atomic_long_t mm_counter_t; |
263 | typedef atomic64_t mm_counter_t; | ||
264 | #else /* !ATOMIC64_INIT */ | ||
265 | /* | ||
266 | * The counters wrap back to 0 at 2^32 * PAGE_SIZE, | ||
267 | * that is, at 16TB if using 4kB page size. | ||
268 | */ | ||
269 | #define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value) | ||
270 | #define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member)) | ||
271 | #define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member) | ||
272 | #define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member) | ||
273 | #define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member) | ||
274 | typedef atomic_t mm_counter_t; | ||
275 | #endif /* !ATOMIC64_INIT */ | ||
276 | 263 | ||
277 | #else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ | 264 | #else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ |
278 | /* | 265 | /* |
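With atomic_long_t the ATOMIC64_INIT special-casing disappears: the counter is machine-word sized on every architecture, so the old 16TB wrap caveat no longer applies. A sketch of the accessors, which paste an underscore onto the member name; the counter name file_rss is an assumption about this tree's mm_struct:

    #include <linux/sched.h>

    /* inc_mm_counter(mm, file_rss) updates mm->_file_rss, which is
     * an atomic_long_t on every architecture after this change. */
    static void mm_counter_demo(struct mm_struct *mm)
    {
            inc_mm_counter(mm, file_rss);
            add_mm_counter(mm, file_rss, 2);
            dec_mm_counter(mm, file_rss);
            /* reads back as an unsigned long */
            (void)get_mm_counter(mm, file_rss);
    }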
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index a61c04f804b2..5dc94e777fab 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -14,11 +14,7 @@ | |||
14 | typedef struct pbe { | 14 | typedef struct pbe { |
15 | unsigned long address; /* address of the copy */ | 15 | unsigned long address; /* address of the copy */ |
16 | unsigned long orig_address; /* original address of page */ | 16 | unsigned long orig_address; /* original address of page */ |
17 | swp_entry_t swap_address; | 17 | struct pbe *next; |
18 | |||
19 | struct pbe *next; /* also used as scratch space at | ||
20 | * end of page (see link, diskpage) | ||
21 | */ | ||
22 | } suspend_pagedir_t; | 18 | } suspend_pagedir_t; |
23 | 19 | ||
24 | #define for_each_pbe(pbe, pblist) \ | 20 | #define for_each_pbe(pbe, pblist) \ |
@@ -77,6 +73,6 @@ unsigned long get_safe_page(gfp_t gfp_mask); | |||
77 | * XXX: We try to keep some more pages free so that I/O operations succeed | 73 | * XXX: We try to keep some more pages free so that I/O operations succeed |
78 | * without paging. Might this be more? | 74 | * without paging. Might this be more? |
79 | */ | 75 | */ |
80 | #define PAGES_FOR_IO 512 | 76 | #define PAGES_FOR_IO 1024 |
81 | 77 | ||
82 | #endif /* _LINUX_SWSUSP_H */ | 78 | #endif /* _LINUX_SWSUSP_H */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 508668f840b6..556617bcf7ac 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -172,7 +172,6 @@ extern void swap_setup(void); | |||
172 | 172 | ||
173 | /* linux/mm/vmscan.c */ | 173 | /* linux/mm/vmscan.c */ |
174 | extern int try_to_free_pages(struct zone **, gfp_t); | 174 | extern int try_to_free_pages(struct zone **, gfp_t); |
175 | extern int zone_reclaim(struct zone *, gfp_t, unsigned int); | ||
176 | extern int shrink_all_memory(int); | 175 | extern int shrink_all_memory(int); |
177 | extern int vm_swappiness; | 176 | extern int vm_swappiness; |
178 | 177 | ||
@@ -210,6 +209,7 @@ extern unsigned int nr_swapfiles; | |||
210 | extern struct swap_info_struct swap_info[]; | 209 | extern struct swap_info_struct swap_info[]; |
211 | extern void si_swapinfo(struct sysinfo *); | 210 | extern void si_swapinfo(struct sysinfo *); |
212 | extern swp_entry_t get_swap_page(void); | 211 | extern swp_entry_t get_swap_page(void); |
212 | extern swp_entry_t get_swap_page_of_type(int type); | ||
213 | extern int swap_duplicate(swp_entry_t); | 213 | extern int swap_duplicate(swp_entry_t); |
214 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 214 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
215 | extern void swap_free(swp_entry_t); | 215 | extern void swap_free(swp_entry_t); |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 343d883d69c5..64a36ba43b2f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -60,12 +60,6 @@ struct writeback_control { | |||
60 | }; | 60 | }; |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * ->writepage() return values (make these much larger than a pagesize, in | ||
64 | * case some fs is returning number-of-bytes-written from writepage) | ||
65 | */ | ||
66 | #define WRITEPAGE_ACTIVATE 0x80000 /* IO was not started: activate page */ | ||
67 | |||
68 | /* | ||
69 | * fs/fs-writeback.c | 63 | * fs/fs-writeback.c |
70 | */ | 64 | */ |
71 | void writeback_inodes(struct writeback_control *wbc); | 65 | void writeback_inodes(struct writeback_control *wbc); |