aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/ata.h15
-rw-r--r--include/linux/bio.h108
-rw-r--r--include/linux/blkdev.h151
-rw-r--r--include/linux/blktrace_api.h62
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/completion.h41
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/cpufreq.h7
-rw-r--r--include/linux/crypto.h35
-rw-r--r--include/linux/device-mapper.h18
-rw-r--r--include/linux/device.h14
-rw-r--r--include/linux/dlm.h5
-rw-r--r--include/linux/dlm_device.h2
-rw-r--r--include/linux/elevator.h9
-rw-r--r--include/linux/fd.h8
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/genhd.h363
-rw-r--r--include/linux/gfs2_ondisk.h6
-rw-r--r--include/linux/klist.h3
-rw-r--r--include/linux/libata.h66
-rw-r--r--include/linux/major.h2
-rw-r--r--include/linux/mtd/blktrans.h2
-rw-r--r--include/linux/notifier.h10
-rw-r--r--include/linux/proportions.h2
-rw-r--r--include/linux/rcuclassic.h37
-rw-r--r--include/linux/rculist.h14
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcupreempt.h11
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/security.h54
-rw-r--r--include/linux/string_helpers.h16
-rw-r--r--include/linux/tick.h2
33 files changed, 682 insertions, 425 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f431e40725d..282a504bd1d 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -181,6 +181,7 @@ unifdef-y += audit.h
181unifdef-y += auto_fs.h 181unifdef-y += auto_fs.h
182unifdef-y += auxvec.h 182unifdef-y += auxvec.h
183unifdef-y += binfmts.h 183unifdef-y += binfmts.h
184unifdef-y += blktrace_api.h
184unifdef-y += capability.h 185unifdef-y += capability.h
185unifdef-y += capi.h 186unifdef-y += capi.h
186unifdef-y += cciss_ioctl.h 187unifdef-y += cciss_ioctl.h
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8a12d718c16..be00973d1a8 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -88,6 +88,7 @@ enum {
88 ATA_ID_DLF = 128, 88 ATA_ID_DLF = 128,
89 ATA_ID_CSFO = 129, 89 ATA_ID_CSFO = 129,
90 ATA_ID_CFA_POWER = 160, 90 ATA_ID_CFA_POWER = 160,
91 ATA_ID_ROT_SPEED = 217,
91 ATA_ID_PIO4 = (1 << 1), 92 ATA_ID_PIO4 = (1 << 1),
92 93
93 ATA_ID_SERNO_LEN = 20, 94 ATA_ID_SERNO_LEN = 20,
@@ -667,6 +668,15 @@ static inline int ata_id_has_dword_io(const u16 *id)
667 return 0; 668 return 0;
668} 669}
669 670
671static inline int ata_id_has_unload(const u16 *id)
672{
673 if (ata_id_major_version(id) >= 7 &&
674 (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
675 id[ATA_ID_CFSSE] & (1 << 13))
676 return 1;
677 return 0;
678}
679
670static inline int ata_id_current_chs_valid(const u16 *id) 680static inline int ata_id_current_chs_valid(const u16 *id)
671{ 681{
672 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 682 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -691,6 +701,11 @@ static inline int ata_id_is_cfa(const u16 *id)
691 return 0; 701 return 0;
692} 702}
693 703
704static inline int ata_id_is_ssd(const u16 *id)
705{
706 return id[ATA_ID_ROT_SPEED] == 0x01;
707}
708
694static inline int ata_drive_40wire(const u16 *dev_id) 709static inline int ata_drive_40wire(const u16 *dev_id)
695{ 710{
696 if (ata_id_is_sata(dev_id)) 711 if (ata_id_is_sata(dev_id))
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0933a14e641..ff5b4cf9e2d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -26,21 +26,8 @@
26 26
27#ifdef CONFIG_BLOCK 27#ifdef CONFIG_BLOCK
28 28
29/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
30#include <asm/io.h> 29#include <asm/io.h>
31 30
32#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
33#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
34#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE)
35#else
36#define BIOVEC_VIRT_START_SIZE(x) 0
37#define BIOVEC_VIRT_OVERSIZE(x) 0
38#endif
39
40#ifndef BIO_VMERGE_BOUNDARY
41#define BIO_VMERGE_BOUNDARY 0
42#endif
43
44#define BIO_DEBUG 31#define BIO_DEBUG
45 32
46#ifdef BIO_DEBUG 33#ifdef BIO_DEBUG
@@ -88,25 +75,14 @@ struct bio {
88 /* Number of segments in this BIO after 75 /* Number of segments in this BIO after
89 * physical address coalescing is performed. 76 * physical address coalescing is performed.
90 */ 77 */
91 unsigned short bi_phys_segments; 78 unsigned int bi_phys_segments;
92
93 /* Number of segments after physical and DMA remapping
94 * hardware coalescing is performed.
95 */
96 unsigned short bi_hw_segments;
97 79
98 unsigned int bi_size; /* residual I/O count */ 80 unsigned int bi_size; /* residual I/O count */
99 81
100 /*
101 * To keep track of the max hw size, we account for the
102 * sizes of the first and last virtually mergeable segments
103 * in this bio
104 */
105 unsigned int bi_hw_front_size;
106 unsigned int bi_hw_back_size;
107
108 unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ 82 unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
109 83
84 unsigned int bi_comp_cpu; /* completion CPU */
85
110 struct bio_vec *bi_io_vec; /* the actual vec list */ 86 struct bio_vec *bi_io_vec; /* the actual vec list */
111 87
112 bio_end_io_t *bi_end_io; 88 bio_end_io_t *bi_end_io;
@@ -126,11 +102,14 @@ struct bio {
126#define BIO_UPTODATE 0 /* ok after I/O completion */ 102#define BIO_UPTODATE 0 /* ok after I/O completion */
127#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ 103#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
128#define BIO_EOF 2 /* out-of-bounds error */ 104#define BIO_EOF 2 /* out-of-bounds error */
129#define BIO_SEG_VALID 3 /* nr_hw_seg valid */ 105#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
130#define BIO_CLONED 4 /* doesn't own data */ 106#define BIO_CLONED 4 /* doesn't own data */
131#define BIO_BOUNCED 5 /* bio is a bounce bio */ 107#define BIO_BOUNCED 5 /* bio is a bounce bio */
132#define BIO_USER_MAPPED 6 /* contains user pages */ 108#define BIO_USER_MAPPED 6 /* contains user pages */
133#define BIO_EOPNOTSUPP 7 /* not supported */ 109#define BIO_EOPNOTSUPP 7 /* not supported */
110#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
111#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
112#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
134#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) 113#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
135 114
136/* 115/*
@@ -144,18 +123,31 @@ struct bio {
144/* 123/*
145 * bio bi_rw flags 124 * bio bi_rw flags
146 * 125 *
147 * bit 0 -- read (not set) or write (set) 126 * bit 0 -- data direction
127 * If not set, bio is a read from device. If set, it's a write to device.
148 * bit 1 -- rw-ahead when set 128 * bit 1 -- rw-ahead when set
149 * bit 2 -- barrier 129 * bit 2 -- barrier
130 * Insert a serialization point in the IO queue, forcing previously
131 * submitted IO to be completed before this one is issued.
150 * bit 3 -- fail fast, don't want low level driver retries 132 * bit 3 -- fail fast, don't want low level driver retries
151 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately 133 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
134 * Note that this does NOT indicate that the IO itself is sync, just
135 * that the block layer will not postpone issue of this IO by plugging.
136 * bit 5 -- metadata request
137 * Used for tracing to differentiate metadata and data IO. May also
138 * get some preferential treatment in the IO scheduler
139 * bit 6 -- discard sectors
140 * Informs the lower level device that this range of sectors is no longer
141 * used by the file system and may thus be freed by the device. Used
142 * for flash based storage.
152 */ 143 */
153#define BIO_RW 0 144#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
154#define BIO_RW_AHEAD 1 145#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
155#define BIO_RW_BARRIER 2 146#define BIO_RW_BARRIER 2
156#define BIO_RW_FAILFAST 3 147#define BIO_RW_FAILFAST 3
157#define BIO_RW_SYNC 4 148#define BIO_RW_SYNC 4
158#define BIO_RW_META 5 149#define BIO_RW_META 5
150#define BIO_RW_DISCARD 6
159 151
160/* 152/*
161 * upper 16 bits of bi_rw define the io priority of this bio 153 * upper 16 bits of bi_rw define the io priority of this bio
@@ -185,14 +177,15 @@ struct bio {
185#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST)) 177#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
186#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) 178#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
187#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META)) 179#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
188#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size) 180#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
181#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
189 182
190static inline unsigned int bio_cur_sectors(struct bio *bio) 183static inline unsigned int bio_cur_sectors(struct bio *bio)
191{ 184{
192 if (bio->bi_vcnt) 185 if (bio->bi_vcnt)
193 return bio_iovec(bio)->bv_len >> 9; 186 return bio_iovec(bio)->bv_len >> 9;
194 187 else /* dataless requests such as discard */
195 return 0; 188 return bio->bi_size >> 9;
196} 189}
197 190
198static inline void *bio_data(struct bio *bio) 191static inline void *bio_data(struct bio *bio)
@@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio)
236 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) 229 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
237#endif 230#endif
238 231
239#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
240 ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
241#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 232#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
242 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 233 (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
243#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 234#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -319,15 +310,14 @@ struct bio_pair {
319 atomic_t cnt; 310 atomic_t cnt;
320 int error; 311 int error;
321}; 312};
322extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, 313extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
323 int first_sectors);
324extern mempool_t *bio_split_pool;
325extern void bio_pair_release(struct bio_pair *dbio); 314extern void bio_pair_release(struct bio_pair *dbio);
326 315
327extern struct bio_set *bioset_create(int, int); 316extern struct bio_set *bioset_create(int, int);
328extern void bioset_free(struct bio_set *); 317extern void bioset_free(struct bio_set *);
329 318
330extern struct bio *bio_alloc(gfp_t, int); 319extern struct bio *bio_alloc(gfp_t, int);
320extern struct bio *bio_kmalloc(gfp_t, int);
331extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 321extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
332extern void bio_put(struct bio *); 322extern void bio_put(struct bio *);
333extern void bio_free(struct bio *, struct bio_set *); 323extern void bio_free(struct bio *, struct bio_set *);
@@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *);
335extern void bio_endio(struct bio *, int); 325extern void bio_endio(struct bio *, int);
336struct request_queue; 326struct request_queue;
337extern int bio_phys_segments(struct request_queue *, struct bio *); 327extern int bio_phys_segments(struct request_queue *, struct bio *);
338extern int bio_hw_segments(struct request_queue *, struct bio *);
339 328
340extern void __bio_clone(struct bio *, struct bio *); 329extern void __bio_clone(struct bio *, struct bio *);
341extern struct bio *bio_clone(struct bio *, gfp_t); 330extern struct bio *bio_clone(struct bio *, gfp_t);
@@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
346extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 335extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
347 unsigned int, unsigned int); 336 unsigned int, unsigned int);
348extern int bio_get_nr_vecs(struct block_device *); 337extern int bio_get_nr_vecs(struct block_device *);
338extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
349extern struct bio *bio_map_user(struct request_queue *, struct block_device *, 339extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
350 unsigned long, unsigned int, int); 340 unsigned long, unsigned int, int, gfp_t);
351struct sg_iovec; 341struct sg_iovec;
342struct rq_map_data;
352extern struct bio *bio_map_user_iov(struct request_queue *, 343extern struct bio *bio_map_user_iov(struct request_queue *,
353 struct block_device *, 344 struct block_device *,
354 struct sg_iovec *, int, int); 345 struct sg_iovec *, int, int, gfp_t);
355extern void bio_unmap_user(struct bio *); 346extern void bio_unmap_user(struct bio *);
356extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, 347extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
357 gfp_t); 348 gfp_t);
@@ -359,15 +350,25 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
359 gfp_t, int); 350 gfp_t, int);
360extern void bio_set_pages_dirty(struct bio *bio); 351extern void bio_set_pages_dirty(struct bio *bio);
361extern void bio_check_pages_dirty(struct bio *bio); 352extern void bio_check_pages_dirty(struct bio *bio);
362extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); 353extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
363extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *, 354 unsigned long, unsigned int, int, gfp_t);
364 int, int); 355extern struct bio *bio_copy_user_iov(struct request_queue *,
356 struct rq_map_data *, struct sg_iovec *,
357 int, int, gfp_t);
365extern int bio_uncopy_user(struct bio *); 358extern int bio_uncopy_user(struct bio *);
366void zero_fill_bio(struct bio *bio); 359void zero_fill_bio(struct bio *bio);
367extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); 360extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
368extern unsigned int bvec_nr_vecs(unsigned short idx); 361extern unsigned int bvec_nr_vecs(unsigned short idx);
369 362
370/* 363/*
364 * Allow queuer to specify a completion CPU for this bio
365 */
366static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
367{
368 bio->bi_comp_cpu = cpu;
369}
370
371/*
371 * bio_set is used to allow other portions of the IO system to 372 * bio_set is used to allow other portions of the IO system to
372 * allocate their own private memory pools for bio and iovec structures. 373 * allocate their own private memory pools for bio and iovec structures.
373 * These memory pools in turn all allocate from the bio_slab 374 * These memory pools in turn all allocate from the bio_slab
@@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
445 __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) 446 __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
446#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) 447#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
447 448
449/*
450 * Check whether this bio carries any data or not. A NULL bio is allowed.
451 */
452static inline int bio_has_data(struct bio *bio)
453{
454 return bio && bio->bi_io_vec != NULL;
455}
456
448#if defined(CONFIG_BLK_DEV_INTEGRITY) 457#if defined(CONFIG_BLK_DEV_INTEGRITY)
449 458
450#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) 459#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
@@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
458#define bip_for_each_vec(bvl, bip, i) \ 467#define bip_for_each_vec(bvl, bip, i) \
459 __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) 468 __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
460 469
461static inline int bio_integrity(struct bio *bio) 470#define bio_integrity(bio) (bio->bi_integrity != NULL)
462{
463#if defined(CONFIG_BLK_DEV_INTEGRITY)
464 return bio->bi_integrity != NULL;
465#else
466 return 0;
467#endif
468}
469 471
470extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); 472extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
471extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); 473extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 53ea933cf60..a92d9e4ea96 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,7 +16,9 @@
16#include <linux/bio.h> 16#include <linux/bio.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/stringify.h> 18#include <linux/stringify.h>
19#include <linux/gfp.h>
19#include <linux/bsg.h> 20#include <linux/bsg.h>
21#include <linux/smp.h>
20 22
21#include <asm/scatterlist.h> 23#include <asm/scatterlist.h>
22 24
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
54 REQ_TYPE_PM_SUSPEND, /* suspend request */ 56 REQ_TYPE_PM_SUSPEND, /* suspend request */
55 REQ_TYPE_PM_RESUME, /* resume request */ 57 REQ_TYPE_PM_RESUME, /* resume request */
56 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 58 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
57 REQ_TYPE_FLUSH, /* flush request */
58 REQ_TYPE_SPECIAL, /* driver defined type */ 59 REQ_TYPE_SPECIAL, /* driver defined type */
59 REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ 60 REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
60 /* 61 /*
@@ -76,19 +77,18 @@ enum rq_cmd_type_bits {
76 * 77 *
77 */ 78 */
78enum { 79enum {
79 /*
80 * just examples for now
81 */
82 REQ_LB_OP_EJECT = 0x40, /* eject request */ 80 REQ_LB_OP_EJECT = 0x40, /* eject request */
83 REQ_LB_OP_FLUSH = 0x41, /* flush device */ 81 REQ_LB_OP_FLUSH = 0x41, /* flush request */
82 REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
84}; 83};
85 84
86/* 85/*
87 * request type modified bits. first three bits match BIO_RW* bits, important 86 * request type modified bits. first two bits match BIO_RW* bits, important
88 */ 87 */
89enum rq_flag_bits { 88enum rq_flag_bits {
90 __REQ_RW, /* not set, read. set, write */ 89 __REQ_RW, /* not set, read. set, write */
91 __REQ_FAILFAST, /* no low level driver retries */ 90 __REQ_FAILFAST, /* no low level driver retries */
91 __REQ_DISCARD, /* request to discard sectors */
92 __REQ_SORTED, /* elevator knows about this request */ 92 __REQ_SORTED, /* elevator knows about this request */
93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
94 __REQ_HARDBARRIER, /* may not be passed by drive either */ 94 __REQ_HARDBARRIER, /* may not be passed by drive either */
@@ -111,6 +111,7 @@ enum rq_flag_bits {
111}; 111};
112 112
113#define REQ_RW (1 << __REQ_RW) 113#define REQ_RW (1 << __REQ_RW)
114#define REQ_DISCARD (1 << __REQ_DISCARD)
114#define REQ_FAILFAST (1 << __REQ_FAILFAST) 115#define REQ_FAILFAST (1 << __REQ_FAILFAST)
115#define REQ_SORTED (1 << __REQ_SORTED) 116#define REQ_SORTED (1 << __REQ_SORTED)
116#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 117#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
@@ -140,12 +141,14 @@ enum rq_flag_bits {
140 */ 141 */
141struct request { 142struct request {
142 struct list_head queuelist; 143 struct list_head queuelist;
143 struct list_head donelist; 144 struct call_single_data csd;
145 int cpu;
144 146
145 struct request_queue *q; 147 struct request_queue *q;
146 148
147 unsigned int cmd_flags; 149 unsigned int cmd_flags;
148 enum rq_cmd_type_bits cmd_type; 150 enum rq_cmd_type_bits cmd_type;
151 unsigned long atomic_flags;
149 152
150 /* Maintain bio traversal state for part by part I/O submission. 153 /* Maintain bio traversal state for part by part I/O submission.
151 * hard_* are block layer internals, no driver should touch them! 154 * hard_* are block layer internals, no driver should touch them!
@@ -190,13 +193,6 @@ struct request {
190 */ 193 */
191 unsigned short nr_phys_segments; 194 unsigned short nr_phys_segments;
192 195
193 /* Number of scatter-gather addr+len pairs after
194 * physical and DMA remapping hardware coalescing is performed.
195 * This is the number of scatter-gather entries the driver
196 * will actually have to deal with after DMA mapping is done.
197 */
198 unsigned short nr_hw_segments;
199
200 unsigned short ioprio; 196 unsigned short ioprio;
201 197
202 void *special; 198 void *special;
@@ -220,6 +216,8 @@ struct request {
220 void *data; 216 void *data;
221 void *sense; 217 void *sense;
222 218
219 unsigned long deadline;
220 struct list_head timeout_list;
223 unsigned int timeout; 221 unsigned int timeout;
224 int retries; 222 int retries;
225 223
@@ -233,6 +231,11 @@ struct request {
233 struct request *next_rq; 231 struct request *next_rq;
234}; 232};
235 233
234static inline unsigned short req_get_ioprio(struct request *req)
235{
236 return req->ioprio;
237}
238
236/* 239/*
237 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME 240 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
238 * requests. Some step values could eventually be made generic. 241 * requests. Some step values could eventually be made generic.
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
252typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 255typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
253typedef int (prep_rq_fn) (struct request_queue *, struct request *); 256typedef int (prep_rq_fn) (struct request_queue *, struct request *);
254typedef void (unplug_fn) (struct request_queue *); 257typedef void (unplug_fn) (struct request_queue *);
258typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
255 259
256struct bio_vec; 260struct bio_vec;
257struct bvec_merge_data { 261struct bvec_merge_data {
@@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
265typedef void (prepare_flush_fn) (struct request_queue *, struct request *); 269typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
266typedef void (softirq_done_fn)(struct request *); 270typedef void (softirq_done_fn)(struct request *);
267typedef int (dma_drain_needed_fn)(struct request *); 271typedef int (dma_drain_needed_fn)(struct request *);
272typedef int (lld_busy_fn) (struct request_queue *q);
273
274enum blk_eh_timer_return {
275 BLK_EH_NOT_HANDLED,
276 BLK_EH_HANDLED,
277 BLK_EH_RESET_TIMER,
278};
279
280typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
268 281
269enum blk_queue_state { 282enum blk_queue_state {
270 Queue_down, 283 Queue_down,
@@ -307,10 +320,13 @@ struct request_queue
307 make_request_fn *make_request_fn; 320 make_request_fn *make_request_fn;
308 prep_rq_fn *prep_rq_fn; 321 prep_rq_fn *prep_rq_fn;
309 unplug_fn *unplug_fn; 322 unplug_fn *unplug_fn;
323 prepare_discard_fn *prepare_discard_fn;
310 merge_bvec_fn *merge_bvec_fn; 324 merge_bvec_fn *merge_bvec_fn;
311 prepare_flush_fn *prepare_flush_fn; 325 prepare_flush_fn *prepare_flush_fn;
312 softirq_done_fn *softirq_done_fn; 326 softirq_done_fn *softirq_done_fn;
327 rq_timed_out_fn *rq_timed_out_fn;
313 dma_drain_needed_fn *dma_drain_needed; 328 dma_drain_needed_fn *dma_drain_needed;
329 lld_busy_fn *lld_busy_fn;
314 330
315 /* 331 /*
316 * Dispatch queue sorting 332 * Dispatch queue sorting
@@ -385,6 +401,10 @@ struct request_queue
385 unsigned int nr_sorted; 401 unsigned int nr_sorted;
386 unsigned int in_flight; 402 unsigned int in_flight;
387 403
404 unsigned int rq_timeout;
405 struct timer_list timeout;
406 struct list_head timeout_list;
407
388 /* 408 /*
389 * sg stuff 409 * sg stuff
390 */ 410 */
@@ -421,6 +441,10 @@ struct request_queue
421#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 441#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
422#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ 442#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
423#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ 443#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
444#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
445#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
446#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
447#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
424 448
425static inline int queue_is_locked(struct request_queue *q) 449static inline int queue_is_locked(struct request_queue *q)
426{ 450{
@@ -526,7 +550,10 @@ enum {
526#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 550#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
527#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 551#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
528#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 552#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
553#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
529#define blk_queue_flushing(q) ((q)->ordseq) 554#define blk_queue_flushing(q) ((q)->ordseq)
555#define blk_queue_stackable(q) \
556 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
530 557
531#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 558#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
532#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 559#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -536,16 +563,18 @@ enum {
536#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) 563#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
537#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 564#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
538 565
539#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) 566#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
540 567
541#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) 568#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
542#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) 569#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
543#define blk_pm_request(rq) \ 570#define blk_pm_request(rq) \
544 (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) 571 (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
545 572
573#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
546#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) 574#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
547#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) 575#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
548#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 576#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
577#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
549#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 578#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
550#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) 579#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
551/* rq->queuelist of dequeued request must be list_empty() */ 580/* rq->queuelist of dequeued request must be list_empty() */
@@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
592#define RQ_NOMERGE_FLAGS \ 621#define RQ_NOMERGE_FLAGS \
593 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 622 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
594#define rq_mergeable(rq) \ 623#define rq_mergeable(rq) \
595 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) 624 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
625 (blk_discard_rq(rq) || blk_fs_request((rq))))
596 626
597/* 627/*
598 * q->prep_rq_fn return values 628 * q->prep_rq_fn return values
@@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
637} 667}
638#endif /* CONFIG_MMU */ 668#endif /* CONFIG_MMU */
639 669
670struct rq_map_data {
671 struct page **pages;
672 int page_order;
673 int nr_entries;
674};
675
640struct req_iterator { 676struct req_iterator {
641 int i; 677 int i;
642 struct bio *bio; 678 struct bio *bio;
@@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *);
664extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 700extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
665extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 701extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
666extern void blk_requeue_request(struct request_queue *, struct request *); 702extern void blk_requeue_request(struct request_queue *, struct request *);
703extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
704extern int blk_lld_busy(struct request_queue *q);
705extern int blk_insert_cloned_request(struct request_queue *q,
706 struct request *rq);
667extern void blk_plug_device(struct request_queue *); 707extern void blk_plug_device(struct request_queue *);
668extern void blk_plug_device_unlocked(struct request_queue *); 708extern void blk_plug_device_unlocked(struct request_queue *);
669extern int blk_remove_plug(struct request_queue *); 709extern int blk_remove_plug(struct request_queue *);
@@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q);
705extern void __blk_run_queue(struct request_queue *); 745extern void __blk_run_queue(struct request_queue *);
706extern void blk_run_queue(struct request_queue *); 746extern void blk_run_queue(struct request_queue *);
707extern void blk_start_queueing(struct request_queue *); 747extern void blk_start_queueing(struct request_queue *);
708extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); 748extern int blk_rq_map_user(struct request_queue *, struct request *,
749 struct rq_map_data *, void __user *, unsigned long,
750 gfp_t);
709extern int blk_rq_unmap_user(struct bio *); 751extern int blk_rq_unmap_user(struct bio *);
710extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 752extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
711extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 753extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
712 struct sg_iovec *, int, unsigned int); 754 struct rq_map_data *, struct sg_iovec *, int,
755 unsigned int, gfp_t);
713extern int blk_execute_rq(struct request_queue *, struct gendisk *, 756extern int blk_execute_rq(struct request_queue *, struct gendisk *,
714 struct request *, int); 757 struct request *, int);
715extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 758extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error,
750extern int blk_end_bidi_request(struct request *rq, int error, 793extern int blk_end_bidi_request(struct request *rq, int error,
751 unsigned int nr_bytes, unsigned int bidi_bytes); 794 unsigned int nr_bytes, unsigned int bidi_bytes);
752extern void end_request(struct request *, int); 795extern void end_request(struct request *, int);
753extern void end_queued_request(struct request *, int);
754extern void end_dequeued_request(struct request *, int);
755extern int blk_end_request_callback(struct request *rq, int error, 796extern int blk_end_request_callback(struct request *rq, int error,
756 unsigned int nr_bytes, 797 unsigned int nr_bytes,
757 int (drv_callback)(struct request *)); 798 int (drv_callback)(struct request *));
758extern void blk_complete_request(struct request *); 799extern void blk_complete_request(struct request *);
800extern void __blk_complete_request(struct request *);
801extern void blk_abort_request(struct request *);
802extern void blk_abort_queue(struct request_queue *);
803extern void blk_update_request(struct request *rq, int error,
804 unsigned int nr_bytes);
759 805
760/* 806/*
761 * blk_end_request() takes bytes instead of sectors as a complete size. 807 * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
790extern int blk_queue_dma_drain(struct request_queue *q, 836extern int blk_queue_dma_drain(struct request_queue *q,
791 dma_drain_needed_fn *dma_drain_needed, 837 dma_drain_needed_fn *dma_drain_needed,
792 void *buf, unsigned int size); 838 void *buf, unsigned int size);
839extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
793extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 840extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
794extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 841extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
795extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 842extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
796extern void blk_queue_dma_alignment(struct request_queue *, int); 843extern void blk_queue_dma_alignment(struct request_queue *, int);
797extern void blk_queue_update_dma_alignment(struct request_queue *, int); 844extern void blk_queue_update_dma_alignment(struct request_queue *, int);
798extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 845extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
846extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
847extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
848extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
799extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 849extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
800extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 850extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
801extern int blk_do_ordered(struct request_queue *, struct request **); 851extern int blk_do_ordered(struct request_queue *, struct request **);
@@ -837,6 +887,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
837} 887}
838 888
839extern int blkdev_issue_flush(struct block_device *, sector_t *); 889extern int blkdev_issue_flush(struct block_device *, sector_t *);
890extern int blkdev_issue_discard(struct block_device *,
891 sector_t sector, sector_t nr_sects, gfp_t);
892
893static inline int sb_issue_discard(struct super_block *sb,
894 sector_t block, sector_t nr_blocks)
895{
896 block <<= (sb->s_blocksize_bits - 9);
897 nr_blocks <<= (sb->s_blocksize_bits - 9);
898 return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
899}
840 900
841/* 901/*
842* command filter functions 902* command filter functions
@@ -874,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
874 return q ? q->dma_alignment : 511; 934 return q ? q->dma_alignment : 511;
875} 935}
876 936
937static inline int blk_rq_aligned(struct request_queue *q, void *addr,
938 unsigned int len)
939{
940 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
941 return !((unsigned long)addr & alignment) && !(len & alignment);
942}
943
877/* assumes size > 256 */ 944/* assumes size > 256 */
878static inline unsigned int blksize_bits(unsigned int size) 945static inline unsigned int blksize_bits(unsigned int size)
879{ 946{
@@ -900,7 +967,7 @@ static inline void put_dev_sector(Sector p)
900} 967}
901 968
902struct work_struct; 969struct work_struct;
903int kblockd_schedule_work(struct work_struct *work); 970int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
904void kblockd_flush_work(struct work_struct *work); 971void kblockd_flush_work(struct work_struct *work);
905 972
906#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 973#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -945,49 +1012,19 @@ struct blk_integrity {
945 1012
946extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 1013extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
947extern void blk_integrity_unregister(struct gendisk *); 1014extern void blk_integrity_unregister(struct gendisk *);
948extern int blk_integrity_compare(struct block_device *, struct block_device *); 1015extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
949extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); 1016extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
950extern int blk_rq_count_integrity_sg(struct request *); 1017extern int blk_rq_count_integrity_sg(struct request *);
951 1018
952static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) 1019static inline
953{ 1020struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
954 if (bi)
955 return bi->tuple_size;
956
957 return 0;
958}
959
960static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
961{ 1021{
962 return bdev->bd_disk->integrity; 1022 return bdev->bd_disk->integrity;
963} 1023}
964 1024
965static inline unsigned int bdev_get_tag_size(struct block_device *bdev) 1025static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
966{ 1026{
967 struct blk_integrity *bi = bdev_get_integrity(bdev); 1027 return disk->integrity;
968
969 if (bi)
970 return bi->tag_size;
971
972 return 0;
973}
974
975static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
976{
977 struct blk_integrity *bi = bdev_get_integrity(bdev);
978
979 if (bi == NULL)
980 return 0;
981
982 if (rw == READ && bi->verify_fn != NULL &&
983 (bi->flags & INTEGRITY_FLAG_READ))
984 return 1;
985
986 if (rw == WRITE && bi->generate_fn != NULL &&
987 (bi->flags & INTEGRITY_FLAG_WRITE))
988 return 1;
989
990 return 0;
991} 1028}
992 1029
993static inline int blk_integrity_rq(struct request *rq) 1030static inline int blk_integrity_rq(struct request *rq)
@@ -1004,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq)
1004#define blk_rq_count_integrity_sg(a) (0) 1041#define blk_rq_count_integrity_sg(a) (0)
1005#define blk_rq_map_integrity_sg(a, b) (0) 1042#define blk_rq_map_integrity_sg(a, b) (0)
1006#define bdev_get_integrity(a) (0) 1043#define bdev_get_integrity(a) (0)
1007#define bdev_get_tag_size(a) (0) 1044#define blk_get_integrity(a) (0)
1008#define blk_integrity_compare(a, b) (0) 1045#define blk_integrity_compare(a, b) (0)
1009#define blk_integrity_register(a, b) (0) 1046#define blk_integrity_register(a, b) (0)
1010#define blk_integrity_unregister(a) do { } while (0); 1047#define blk_integrity_unregister(a) do { } while (0);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d084b8d227a..3a31eb50616 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -1,8 +1,10 @@
1#ifndef BLKTRACE_H 1#ifndef BLKTRACE_H
2#define BLKTRACE_H 2#define BLKTRACE_H
3 3
4#ifdef __KERNEL__
4#include <linux/blkdev.h> 5#include <linux/blkdev.h>
5#include <linux/relay.h> 6#include <linux/relay.h>
7#endif
6 8
7/* 9/*
8 * Trace categories 10 * Trace categories
@@ -21,6 +23,7 @@ enum blktrace_cat {
21 BLK_TC_NOTIFY = 1 << 10, /* special message */ 23 BLK_TC_NOTIFY = 1 << 10, /* special message */
22 BLK_TC_AHEAD = 1 << 11, /* readahead */ 24 BLK_TC_AHEAD = 1 << 11, /* readahead */
23 BLK_TC_META = 1 << 12, /* metadata */ 25 BLK_TC_META = 1 << 12, /* metadata */
26 BLK_TC_DISCARD = 1 << 13, /* discard requests */
24 27
25 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ 28 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
26}; 29};
@@ -47,6 +50,7 @@ enum blktrace_act {
47 __BLK_TA_SPLIT, /* bio was split */ 50 __BLK_TA_SPLIT, /* bio was split */
48 __BLK_TA_BOUNCE, /* bio was bounced */ 51 __BLK_TA_BOUNCE, /* bio was bounced */
49 __BLK_TA_REMAP, /* bio was remapped */ 52 __BLK_TA_REMAP, /* bio was remapped */
53 __BLK_TA_ABORT, /* request aborted */
50}; 54};
51 55
52/* 56/*
@@ -77,6 +81,7 @@ enum blktrace_notify {
77#define BLK_TA_SPLIT (__BLK_TA_SPLIT) 81#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
78#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) 82#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
79#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE)) 83#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
84#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
80 85
81#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY)) 86#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
82#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY)) 87#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -89,17 +94,17 @@ enum blktrace_notify {
89 * The trace itself 94 * The trace itself
90 */ 95 */
91struct blk_io_trace { 96struct blk_io_trace {
92 u32 magic; /* MAGIC << 8 | version */ 97 __u32 magic; /* MAGIC << 8 | version */
93 u32 sequence; /* event number */ 98 __u32 sequence; /* event number */
94 u64 time; /* in microseconds */ 99 __u64 time; /* in microseconds */
95 u64 sector; /* disk offset */ 100 __u64 sector; /* disk offset */
96 u32 bytes; /* transfer length */ 101 __u32 bytes; /* transfer length */
97 u32 action; /* what happened */ 102 __u32 action; /* what happened */
98 u32 pid; /* who did it */ 103 __u32 pid; /* who did it */
99 u32 device; /* device number */ 104 __u32 device; /* device number */
100 u32 cpu; /* on what cpu did it happen */ 105 __u32 cpu; /* on what cpu did it happen */
101 u16 error; /* completion error */ 106 __u16 error; /* completion error */
102 u16 pdu_len; /* length of data after this trace */ 107 __u16 pdu_len; /* length of data after this trace */
103}; 108};
104 109
105/* 110/*
@@ -117,6 +122,23 @@ enum {
117 Blktrace_stopped, 122 Blktrace_stopped,
118}; 123};
119 124
125#define BLKTRACE_BDEV_SIZE 32
126
127/*
128 * User setup structure passed with BLKTRACESTART
129 */
130struct blk_user_trace_setup {
131 char name[BLKTRACE_BDEV_SIZE]; /* output */
132 __u16 act_mask; /* input */
133 __u32 buf_size; /* input */
134 __u32 buf_nr; /* input */
135 __u64 start_lba;
136 __u64 end_lba;
137 __u32 pid;
138};
139
140#ifdef __KERNEL__
141#if defined(CONFIG_BLK_DEV_IO_TRACE)
120struct blk_trace { 142struct blk_trace {
121 int trace_state; 143 int trace_state;
122 struct rchan *rchan; 144 struct rchan *rchan;
@@ -133,21 +155,6 @@ struct blk_trace {
133 atomic_t dropped; 155 atomic_t dropped;
134}; 156};
135 157
136/*
137 * User setup structure passed with BLKTRACESTART
138 */
139struct blk_user_trace_setup {
140 char name[BDEVNAME_SIZE]; /* output */
141 u16 act_mask; /* input */
142 u32 buf_size; /* input */
143 u32 buf_nr; /* input */
144 u64 start_lba;
145 u64 end_lba;
146 u32 pid;
147};
148
149#ifdef __KERNEL__
150#if defined(CONFIG_BLK_DEV_IO_TRACE)
151extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 158extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
152extern void blk_trace_shutdown(struct request_queue *); 159extern void blk_trace_shutdown(struct request_queue *);
153extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); 160extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
@@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
195 if (likely(!bt)) 202 if (likely(!bt))
196 return; 203 return;
197 204
205 if (blk_discard_rq(rq))
206 rw |= (1 << BIO_RW_DISCARD);
207
198 if (blk_pc_request(rq)) { 208 if (blk_pc_request(rq)) {
199 what |= BLK_TC_ACT(BLK_TC_PC); 209 what |= BLK_TC_ACT(BLK_TC_PC);
200 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); 210 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c8bd2daf95e..8322141ee48 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
190 * ACCESS_ONCE() in different C statements. 190 * ACCESS_ONCE() in different C statements.
191 * 191 *
192 * This macro does absolutely -nothing- to prevent the CPU from reordering, 192 * This macro does absolutely -nothing- to prevent the CPU from reordering,
193 * merging, or refetching absolutely anything at any time. 193 * merging, or refetching absolutely anything at any time. Its main intended
194 * use is to mediate communication between process-level code and irq/NMI
195 * handlers, all running on the same CPU.
194 */ 196 */
195#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 197#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
196 198
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 02ef8835999..4a6b604ef7e 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,6 +10,18 @@
10 10
11#include <linux/wait.h> 11#include <linux/wait.h>
12 12
13/**
14 * struct completion - structure used to maintain state for a "completion"
15 *
16 * This is the opaque structure used to maintain the state for a "completion".
17 * Completions currently use a FIFO to queue threads that have to wait for
18 * the "completion" event.
19 *
20 * See also: complete(), wait_for_completion() (and friends _timeout,
21 * _interruptible, _interruptible_timeout, and _killable), init_completion(),
22 * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
23 * INIT_COMPLETION().
24 */
13struct completion { 25struct completion {
14 unsigned int done; 26 unsigned int done;
15 wait_queue_head_t wait; 27 wait_queue_head_t wait;
@@ -21,6 +33,14 @@ struct completion {
21#define COMPLETION_INITIALIZER_ONSTACK(work) \ 33#define COMPLETION_INITIALIZER_ONSTACK(work) \
22 ({ init_completion(&work); work; }) 34 ({ init_completion(&work); work; })
23 35
36/**
37 * DECLARE_COMPLETION: - declare and initialize a completion structure
38 * @work: identifier for the completion structure
39 *
40 * This macro declares and initializes a completion structure. Generally used
41 * for static declarations. You should use the _ONSTACK variant for automatic
42 * variables.
43 */
24#define DECLARE_COMPLETION(work) \ 44#define DECLARE_COMPLETION(work) \
25 struct completion work = COMPLETION_INITIALIZER(work) 45 struct completion work = COMPLETION_INITIALIZER(work)
26 46
@@ -29,6 +49,13 @@ struct completion {
29 * completions - so we use the _ONSTACK() variant for those that 49 * completions - so we use the _ONSTACK() variant for those that
30 * are on the kernel stack: 50 * are on the kernel stack:
31 */ 51 */
52/**
53 * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
54 * @work: identifier for the completion structure
55 *
56 * This macro declares and initializes a completion structure on the kernel
57 * stack.
58 */
32#ifdef CONFIG_LOCKDEP 59#ifdef CONFIG_LOCKDEP
33# define DECLARE_COMPLETION_ONSTACK(work) \ 60# define DECLARE_COMPLETION_ONSTACK(work) \
34 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) 61 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@@ -36,6 +63,13 @@ struct completion {
36# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) 63# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
37#endif 64#endif
38 65
66/**
67 * init_completion: - Initialize a dynamically allocated completion
68 * @x: completion structure that is to be initialized
69 *
70 * This inline function will initialize a dynamically created completion
71 * structure.
72 */
39static inline void init_completion(struct completion *x) 73static inline void init_completion(struct completion *x)
40{ 74{
41 x->done = 0; 75 x->done = 0;
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
55extern void complete(struct completion *); 89extern void complete(struct completion *);
56extern void complete_all(struct completion *); 90extern void complete_all(struct completion *);
57 91
92/**
93 * INIT_COMPLETION: - reinitialize a completion structure
94 * @x: completion structure to be reinitialized
95 *
96 * This macro should be used to reinitialize a completion structure so it can
97 * be reused. This is especially important after complete_all() is used.
98 */
58#define INIT_COMPLETION(x) ((x).done = 0) 99#define INIT_COMPLETION(x) ((x).done = 0)
59 100
60 101
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d7faf880849..c2747ac2ae4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
69#endif 69#endif
70 70
71int cpu_up(unsigned int cpu); 71int cpu_up(unsigned int cpu);
72void notify_cpu_starting(unsigned int cpu);
72extern void cpu_hotplug_init(void); 73extern void cpu_hotplug_init(void);
73extern void cpu_maps_update_begin(void); 74extern void cpu_maps_update_begin(void);
74extern void cpu_maps_update_done(void); 75extern void cpu_maps_update_done(void);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6fd5668aa57..1ee608fd7b7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -187,7 +187,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
187 unsigned int relation); 187 unsigned int relation);
188 188
189 189
190extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy); 190extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
191 unsigned int cpu);
191 192
192int cpufreq_register_governor(struct cpufreq_governor *governor); 193int cpufreq_register_governor(struct cpufreq_governor *governor);
193void cpufreq_unregister_governor(struct cpufreq_governor *governor); 194void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -226,7 +227,9 @@ struct cpufreq_driver {
226 unsigned int (*get) (unsigned int cpu); 227 unsigned int (*get) (unsigned int cpu);
227 228
228 /* optional */ 229 /* optional */
229 unsigned int (*getavg) (unsigned int cpu); 230 unsigned int (*getavg) (struct cpufreq_policy *policy,
231 unsigned int cpu);
232
230 int (*exit) (struct cpufreq_policy *policy); 233 int (*exit) (struct cpufreq_policy *policy);
231 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); 234 int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
232 int (*resume) (struct cpufreq_policy *policy); 235 int (*resume) (struct cpufreq_policy *policy);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c43dc47fdf7..3d2317e4af2 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -38,6 +38,7 @@
38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
39#define CRYPTO_ALG_TYPE_HASH 0x00000009 39#define CRYPTO_ALG_TYPE_HASH 0x00000009
40#define CRYPTO_ALG_TYPE_AHASH 0x0000000a 40#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
41#define CRYPTO_ALG_TYPE_RNG 0x0000000c
41 42
42#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 43#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
43#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c 44#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -61,6 +62,14 @@
61#define CRYPTO_ALG_GENIV 0x00000200 62#define CRYPTO_ALG_GENIV 0x00000200
62 63
63/* 64/*
65 * Set if the algorithm has passed automated run-time testing. Note that
66 * if there is no run-time testing for a given algorithm it is considered
67 * to have passed.
68 */
69
70#define CRYPTO_ALG_TESTED 0x00000400
71
72/*
64 * Transform masks and values (for crt_flags). 73 * Transform masks and values (for crt_flags).
65 */ 74 */
66#define CRYPTO_TFM_REQ_MASK 0x000fff00 75#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -105,6 +114,7 @@ struct crypto_aead;
105struct crypto_blkcipher; 114struct crypto_blkcipher;
106struct crypto_hash; 115struct crypto_hash;
107struct crypto_ahash; 116struct crypto_ahash;
117struct crypto_rng;
108struct crypto_tfm; 118struct crypto_tfm;
109struct crypto_type; 119struct crypto_type;
110struct aead_givcrypt_request; 120struct aead_givcrypt_request;
@@ -290,6 +300,15 @@ struct compress_alg {
290 unsigned int slen, u8 *dst, unsigned int *dlen); 300 unsigned int slen, u8 *dst, unsigned int *dlen);
291}; 301};
292 302
303struct rng_alg {
304 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
305 unsigned int dlen);
306 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
307
308 unsigned int seedsize;
309};
310
311
293#define cra_ablkcipher cra_u.ablkcipher 312#define cra_ablkcipher cra_u.ablkcipher
294#define cra_aead cra_u.aead 313#define cra_aead cra_u.aead
295#define cra_blkcipher cra_u.blkcipher 314#define cra_blkcipher cra_u.blkcipher
@@ -298,6 +317,7 @@ struct compress_alg {
298#define cra_hash cra_u.hash 317#define cra_hash cra_u.hash
299#define cra_ahash cra_u.ahash 318#define cra_ahash cra_u.ahash
300#define cra_compress cra_u.compress 319#define cra_compress cra_u.compress
320#define cra_rng cra_u.rng
301 321
302struct crypto_alg { 322struct crypto_alg {
303 struct list_head cra_list; 323 struct list_head cra_list;
@@ -325,6 +345,7 @@ struct crypto_alg {
325 struct hash_alg hash; 345 struct hash_alg hash;
326 struct ahash_alg ahash; 346 struct ahash_alg ahash;
327 struct compress_alg compress; 347 struct compress_alg compress;
348 struct rng_alg rng;
328 } cra_u; 349 } cra_u;
329 350
330 int (*cra_init)(struct crypto_tfm *tfm); 351 int (*cra_init)(struct crypto_tfm *tfm);
@@ -430,6 +451,12 @@ struct compress_tfm {
430 u8 *dst, unsigned int *dlen); 451 u8 *dst, unsigned int *dlen);
431}; 452};
432 453
454struct rng_tfm {
455 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
456 unsigned int dlen);
457 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
458};
459
433#define crt_ablkcipher crt_u.ablkcipher 460#define crt_ablkcipher crt_u.ablkcipher
434#define crt_aead crt_u.aead 461#define crt_aead crt_u.aead
435#define crt_blkcipher crt_u.blkcipher 462#define crt_blkcipher crt_u.blkcipher
@@ -437,6 +464,7 @@ struct compress_tfm {
437#define crt_hash crt_u.hash 464#define crt_hash crt_u.hash
438#define crt_ahash crt_u.ahash 465#define crt_ahash crt_u.ahash
439#define crt_compress crt_u.compress 466#define crt_compress crt_u.compress
467#define crt_rng crt_u.rng
440 468
441struct crypto_tfm { 469struct crypto_tfm {
442 470
@@ -450,6 +478,7 @@ struct crypto_tfm {
450 struct hash_tfm hash; 478 struct hash_tfm hash;
451 struct ahash_tfm ahash; 479 struct ahash_tfm ahash;
452 struct compress_tfm compress; 480 struct compress_tfm compress;
481 struct rng_tfm rng;
453 } crt_u; 482 } crt_u;
454 483
455 struct crypto_alg *__crt_alg; 484 struct crypto_alg *__crt_alg;
@@ -481,6 +510,10 @@ struct crypto_hash {
481 struct crypto_tfm base; 510 struct crypto_tfm base;
482}; 511};
483 512
513struct crypto_rng {
514 struct crypto_tfm base;
515};
516
484enum { 517enum {
485 CRYPTOA_UNSPEC, 518 CRYPTOA_UNSPEC,
486 CRYPTOA_ALG, 519 CRYPTOA_ALG,
@@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
515struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 548struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
516void crypto_free_tfm(struct crypto_tfm *tfm); 549void crypto_free_tfm(struct crypto_tfm *tfm);
517 550
551int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
552
518/* 553/*
519 * Transform helpers which query the underlying algorithm. 554 * Transform helpers which query the underlying algorithm.
520 */ 555 */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a90222e3297..08d783592b7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -13,7 +13,6 @@
13 13
14struct dm_target; 14struct dm_target;
15struct dm_table; 15struct dm_table;
16struct dm_dev;
17struct mapped_device; 16struct mapped_device;
18struct bio_vec; 17struct bio_vec;
19 18
@@ -84,6 +83,12 @@ void dm_error(const char *message);
84 */ 83 */
85void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev); 84void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
86 85
86struct dm_dev {
87 struct block_device *bdev;
88 int mode;
89 char name[16];
90};
91
87/* 92/*
88 * Constructors should call these functions to ensure destination devices 93 * Constructors should call these functions to ensure destination devices
89 * are opened/closed correctly. 94 * are opened/closed correctly.
@@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
202struct gendisk *dm_disk(struct mapped_device *md); 207struct gendisk *dm_disk(struct mapped_device *md);
203int dm_suspended(struct mapped_device *md); 208int dm_suspended(struct mapped_device *md);
204int dm_noflush_suspending(struct dm_target *ti); 209int dm_noflush_suspending(struct dm_target *ti);
210union map_info *dm_get_mapinfo(struct bio *bio);
205 211
206/* 212/*
207 * Geometry functions. 213 * Geometry functions.
@@ -232,6 +238,11 @@ int dm_table_add_target(struct dm_table *t, const char *type,
232int dm_table_complete(struct dm_table *t); 238int dm_table_complete(struct dm_table *t);
233 239
234/* 240/*
241 * Unplug all devices in a table.
242 */
243void dm_table_unplug_all(struct dm_table *t);
244
245/*
235 * Table reference counting. 246 * Table reference counting.
236 */ 247 */
237struct dm_table *dm_get_table(struct mapped_device *md); 248struct dm_table *dm_get_table(struct mapped_device *md);
@@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t);
256 */ 267 */
257int dm_swap_table(struct mapped_device *md, struct dm_table *t); 268int dm_swap_table(struct mapped_device *md, struct dm_table *t);
258 269
270/*
271 * A wrapper around vmalloc.
272 */
273void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
274
259/*----------------------------------------------------------------- 275/*-----------------------------------------------------------------
260 * Macros. 276 * Macros.
261 *---------------------------------------------------------------*/ 277 *---------------------------------------------------------------*/
diff --git a/include/linux/device.h b/include/linux/device.h
index 4d8372d135d..246937c9cbc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -199,6 +199,11 @@ struct class {
199 struct class_private *p; 199 struct class_private *p;
200}; 200};
201 201
202struct class_dev_iter {
203 struct klist_iter ki;
204 const struct device_type *type;
205};
206
202extern struct kobject *sysfs_dev_block_kobj; 207extern struct kobject *sysfs_dev_block_kobj;
203extern struct kobject *sysfs_dev_char_kobj; 208extern struct kobject *sysfs_dev_char_kobj;
204extern int __must_check __class_register(struct class *class, 209extern int __must_check __class_register(struct class *class,
@@ -213,6 +218,13 @@ extern void class_unregister(struct class *class);
213 __class_register(class, &__key); \ 218 __class_register(class, &__key); \
214}) 219})
215 220
221extern void class_dev_iter_init(struct class_dev_iter *iter,
222 struct class *class,
223 struct device *start,
224 const struct device_type *type);
225extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
226extern void class_dev_iter_exit(struct class_dev_iter *iter);
227
216extern int class_for_each_device(struct class *class, struct device *start, 228extern int class_for_each_device(struct class *class, struct device *start,
217 void *data, 229 void *data,
218 int (*fn)(struct device *dev, void *data)); 230 int (*fn)(struct device *dev, void *data));
@@ -396,7 +408,7 @@ struct device {
396 spinlock_t devres_lock; 408 spinlock_t devres_lock;
397 struct list_head devres_head; 409 struct list_head devres_head;
398 410
399 struct list_head node; 411 struct klist_node knode_class;
400 struct class *class; 412 struct class *class;
401 dev_t devt; /* dev_t, creates the sysfs "dev" */ 413 dev_t devt; /* dev_t, creates the sysfs "dev" */
402 struct attribute_group **groups; /* optional groups */ 414 struct attribute_group **groups; /* optional groups */
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index 203a025e30e..b9cd38603fd 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -65,9 +65,12 @@ struct dlm_lksb {
65 char * sb_lvbptr; 65 char * sb_lvbptr;
66}; 66};
67 67
68/* dlm_new_lockspace() flags */
69
68#define DLM_LSFL_NODIR 0x00000001 70#define DLM_LSFL_NODIR 0x00000001
69#define DLM_LSFL_TIMEWARN 0x00000002 71#define DLM_LSFL_TIMEWARN 0x00000002
70#define DLM_LSFL_FS 0x00000004 72#define DLM_LSFL_FS 0x00000004
73#define DLM_LSFL_NEWEXCL 0x00000008
71 74
72#ifdef __KERNEL__ 75#ifdef __KERNEL__
73 76
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
index c6034508fed..3060783c419 100644
--- a/include/linux/dlm_device.h
+++ b/include/linux/dlm_device.h
@@ -26,7 +26,7 @@
26/* Version of the device interface */ 26/* Version of the device interface */
27#define DLM_DEVICE_VERSION_MAJOR 6 27#define DLM_DEVICE_VERSION_MAJOR 6
28#define DLM_DEVICE_VERSION_MINOR 0 28#define DLM_DEVICE_VERSION_MINOR 0
29#define DLM_DEVICE_VERSION_PATCH 0 29#define DLM_DEVICE_VERSION_PATCH 1
30 30
31/* struct passed to the lock write */ 31/* struct passed to the lock write */
32struct dlm_lock_params { 32struct dlm_lock_params {
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 639624b55fb..92f6f634e3e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
112extern int elv_register_queue(struct request_queue *q); 112extern int elv_register_queue(struct request_queue *q);
113extern void elv_unregister_queue(struct request_queue *q); 113extern void elv_unregister_queue(struct request_queue *q);
114extern int elv_may_queue(struct request_queue *, int); 114extern int elv_may_queue(struct request_queue *, int);
115extern void elv_abort_queue(struct request_queue *);
115extern void elv_completed_request(struct request_queue *, struct request *); 116extern void elv_completed_request(struct request_queue *, struct request *);
116extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 117extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
117extern void elv_put_request(struct request_queue *, struct request *); 118extern void elv_put_request(struct request_queue *, struct request *);
@@ -173,15 +174,15 @@ enum {
173#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) 174#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
174 175
175/* 176/*
176 * Hack to reuse the donelist list_head as the fifo time holder while 177 * Hack to reuse the csd.list list_head as the fifo time holder while
177 * the request is in the io scheduler. Saves an unsigned long in rq. 178 * the request is in the io scheduler. Saves an unsigned long in rq.
178 */ 179 */
179#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next) 180#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
180#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp)) 181#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
181#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) 182#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
182#define rq_fifo_clear(rq) do { \ 183#define rq_fifo_clear(rq) do { \
183 list_del_init(&(rq)->queuelist); \ 184 list_del_init(&(rq)->queuelist); \
184 INIT_LIST_HEAD(&(rq)->donelist); \ 185 INIT_LIST_HEAD(&(rq)->csd.list); \
185 } while (0) 186 } while (0)
186 187
187/* 188/*
diff --git a/include/linux/fd.h b/include/linux/fd.h
index b6bd41d2b46..f5d194af07a 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -15,10 +15,16 @@ struct floppy_struct {
15 sect, /* sectors per track */ 15 sect, /* sectors per track */
16 head, /* nr of heads */ 16 head, /* nr of heads */
17 track, /* nr of tracks */ 17 track, /* nr of tracks */
18 stretch; /* !=0 means double track steps */ 18 stretch; /* bit 0 !=0 means double track steps */
19 /* bit 1 != 0 means swap sides */
20 /* bits 2..9 give the first sector */
21 /* number (the LSB is flipped) */
19#define FD_STRETCH 1 22#define FD_STRETCH 1
20#define FD_SWAPSIDES 2 23#define FD_SWAPSIDES 2
21#define FD_ZEROBASED 4 24#define FD_ZEROBASED 4
25#define FD_SECTBASEMASK 0x3FC
26#define FD_MKSECTBASE(s) (((s) ^ 1) << 2)
27#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1)
22 28
23 unsigned char gap, /* gap1 size */ 29 unsigned char gap, /* gap1 size */
24 30
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 580b513668f..32477e8872d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -86,7 +86,9 @@ extern int dir_notify_enable;
86#define READ_META (READ | (1 << BIO_RW_META)) 86#define READ_META (READ | (1 << BIO_RW_META))
87#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) 87#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
88#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) 88#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC))
89#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) 89#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
90#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
91#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
90 92
91#define SEL_IN 1 93#define SEL_IN 1
92#define SEL_OUT 2 94#define SEL_OUT 2
@@ -222,6 +224,7 @@ extern int dir_notify_enable;
222#define BLKTRACESTART _IO(0x12,116) 224#define BLKTRACESTART _IO(0x12,116)
223#define BLKTRACESTOP _IO(0x12,117) 225#define BLKTRACESTOP _IO(0x12,117)
224#define BLKTRACETEARDOWN _IO(0x12,118) 226#define BLKTRACETEARDOWN _IO(0x12,118)
227#define BLKDISCARD _IO(0x12,119)
225 228
226#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ 229#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
227#define FIBMAP _IO(0x00,1) /* bmap access */ 230#define FIBMAP _IO(0x00,1) /* bmap access */
@@ -1682,6 +1685,7 @@ extern void chrdev_show(struct seq_file *,off_t);
1682 1685
1683/* fs/block_dev.c */ 1686/* fs/block_dev.c */
1684#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ 1687#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
1688#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
1685 1689
1686#ifdef CONFIG_BLOCK 1690#ifdef CONFIG_BLOCK
1687#define BLKDEV_MAJOR_HASH_SIZE 255 1691#define BLKDEV_MAJOR_HASH_SIZE 255
@@ -1718,6 +1722,9 @@ extern int fs_may_remount_ro(struct super_block *);
1718 */ 1722 */
1719#define bio_data_dir(bio) ((bio)->bi_rw & 1) 1723#define bio_data_dir(bio) ((bio)->bi_rw & 1)
1720 1724
1725extern void check_disk_size_change(struct gendisk *disk,
1726 struct block_device *bdev);
1727extern int revalidate_disk(struct gendisk *);
1721extern int check_disk_change(struct block_device *); 1728extern int check_disk_change(struct block_device *);
1722extern int __invalidate_device(struct block_device *); 1729extern int __invalidate_device(struct block_device *);
1723extern int invalidate_partition(struct gendisk *, int); 1730extern int invalidate_partition(struct gendisk *, int);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index be4f5e5bfe0..206cdf96c3a 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -11,12 +11,15 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/kdev_t.h> 13#include <linux/kdev_t.h>
14#include <linux/rcupdate.h>
14 15
15#ifdef CONFIG_BLOCK 16#ifdef CONFIG_BLOCK
16 17
17#define kobj_to_dev(k) container_of(k, struct device, kobj) 18#define kobj_to_dev(k) container_of((k), struct device, kobj)
18#define dev_to_disk(device) container_of(device, struct gendisk, dev) 19#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
19#define dev_to_part(device) container_of(device, struct hd_struct, dev) 20#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
21#define disk_to_dev(disk) (&(disk)->part0.__dev)
22#define part_to_dev(part) (&((part)->__dev))
20 23
21extern struct device_type part_type; 24extern struct device_type part_type;
22extern struct kobject *block_depr; 25extern struct kobject *block_depr;
@@ -55,6 +58,9 @@ enum {
55 UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ 58 UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
56}; 59};
57 60
61#define DISK_MAX_PARTS 256
62#define DISK_NAME_LEN 32
63
58#include <linux/major.h> 64#include <linux/major.h>
59#include <linux/device.h> 65#include <linux/device.h>
60#include <linux/smp.h> 66#include <linux/smp.h>
@@ -87,7 +93,7 @@ struct disk_stats {
87struct hd_struct { 93struct hd_struct {
88 sector_t start_sect; 94 sector_t start_sect;
89 sector_t nr_sects; 95 sector_t nr_sects;
90 struct device dev; 96 struct device __dev;
91 struct kobject *holder_dir; 97 struct kobject *holder_dir;
92 int policy, partno; 98 int policy, partno;
93#ifdef CONFIG_FAIL_MAKE_REQUEST 99#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -100,6 +106,7 @@ struct hd_struct {
100#else 106#else
101 struct disk_stats dkstats; 107 struct disk_stats dkstats;
102#endif 108#endif
109 struct rcu_head rcu_head;
103}; 110};
104 111
105#define GENHD_FL_REMOVABLE 1 112#define GENHD_FL_REMOVABLE 1
@@ -108,100 +115,148 @@ struct hd_struct {
108#define GENHD_FL_CD 8 115#define GENHD_FL_CD 8
109#define GENHD_FL_UP 16 116#define GENHD_FL_UP 16
110#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 117#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
111#define GENHD_FL_FAIL 64 118#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
119
120#define BLK_SCSI_MAX_CMDS (256)
121#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
122
123struct blk_scsi_cmd_filter {
124 unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
125 unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
126 struct kobject kobj;
127};
128
129struct disk_part_tbl {
130 struct rcu_head rcu_head;
131 int len;
132 struct hd_struct *part[];
133};
112 134
113struct gendisk { 135struct gendisk {
136 /* major, first_minor and minors are input parameters only,
137 * don't use directly. Use disk_devt() and disk_max_parts().
138 */
114 int major; /* major number of driver */ 139 int major; /* major number of driver */
115 int first_minor; 140 int first_minor;
116 int minors; /* maximum number of minors, =1 for 141 int minors; /* maximum number of minors, =1 for
117 * disks that can't be partitioned. */ 142 * disks that can't be partitioned. */
118 char disk_name[32]; /* name of major driver */ 143
119 struct hd_struct **part; /* [indexed by minor] */ 144 char disk_name[DISK_NAME_LEN]; /* name of major driver */
145
146 /* Array of pointers to partitions indexed by partno.
147 * Protected with matching bdev lock but stat and other
148 * non-critical accesses use RCU. Always access through
149 * helpers.
150 */
151 struct disk_part_tbl *part_tbl;
152 struct hd_struct part0;
153
120 struct block_device_operations *fops; 154 struct block_device_operations *fops;
121 struct request_queue *queue; 155 struct request_queue *queue;
122 void *private_data; 156 void *private_data;
123 sector_t capacity;
124 157
125 int flags; 158 int flags;
126 struct device *driverfs_dev; // FIXME: remove 159 struct device *driverfs_dev; // FIXME: remove
127 struct device dev;
128 struct kobject *holder_dir;
129 struct kobject *slave_dir; 160 struct kobject *slave_dir;
130 161
131 struct timer_rand_state *random; 162 struct timer_rand_state *random;
132 int policy;
133 163
134 atomic_t sync_io; /* RAID */ 164 atomic_t sync_io; /* RAID */
135 unsigned long stamp;
136 int in_flight;
137#ifdef CONFIG_SMP
138 struct disk_stats *dkstats;
139#else
140 struct disk_stats dkstats;
141#endif
142 struct work_struct async_notify; 165 struct work_struct async_notify;
143#ifdef CONFIG_BLK_DEV_INTEGRITY 166#ifdef CONFIG_BLK_DEV_INTEGRITY
144 struct blk_integrity *integrity; 167 struct blk_integrity *integrity;
145#endif 168#endif
169 int node_id;
146}; 170};
147 171
148/* 172static inline struct gendisk *part_to_disk(struct hd_struct *part)
149 * Macros to operate on percpu disk statistics:
150 *
151 * The __ variants should only be called in critical sections. The full
152 * variants disable/enable preemption.
153 */
154static inline struct hd_struct *get_part(struct gendisk *gendiskp,
155 sector_t sector)
156{ 173{
157 struct hd_struct *part; 174 if (likely(part)) {
158 int i; 175 if (part->partno)
159 for (i = 0; i < gendiskp->minors - 1; i++) { 176 return dev_to_disk(part_to_dev(part)->parent);
160 part = gendiskp->part[i]; 177 else
161 if (part && part->start_sect <= sector 178 return dev_to_disk(part_to_dev(part));
162 && sector < part->start_sect + part->nr_sects)
163 return part;
164 } 179 }
165 return NULL; 180 return NULL;
166} 181}
167 182
168#ifdef CONFIG_SMP 183static inline int disk_max_parts(struct gendisk *disk)
169#define __disk_stat_add(gendiskp, field, addnd) \ 184{
170 (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd) 185 if (disk->flags & GENHD_FL_EXT_DEVT)
186 return DISK_MAX_PARTS;
187 return disk->minors;
188}
171 189
172#define disk_stat_read(gendiskp, field) \ 190static inline bool disk_partitionable(struct gendisk *disk)
173({ \ 191{
174 typeof(gendiskp->dkstats->field) res = 0; \ 192 return disk_max_parts(disk) > 1;
175 int i; \ 193}
176 for_each_possible_cpu(i) \
177 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
178 res; \
179})
180 194
181static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { 195static inline dev_t disk_devt(struct gendisk *disk)
182 int i; 196{
197 return disk_to_dev(disk)->devt;
198}
183 199
184 for_each_possible_cpu(i) 200static inline dev_t part_devt(struct hd_struct *part)
185 memset(per_cpu_ptr(gendiskp->dkstats, i), value, 201{
186 sizeof(struct disk_stats)); 202 return part_to_dev(part)->devt;
187} 203}
188 204
189#define __part_stat_add(part, field, addnd) \ 205extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
190 (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd)
191 206
192#define __all_stat_add(gendiskp, part, field, addnd, sector) \ 207static inline void disk_put_part(struct hd_struct *part)
193({ \ 208{
194 if (part) \ 209 if (likely(part))
195 __part_stat_add(part, field, addnd); \ 210 put_device(part_to_dev(part));
196 __disk_stat_add(gendiskp, field, addnd); \ 211}
197}) 212
213/*
214 * Smarter partition iterator without context limits.
215 */
216#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
217#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
218#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
219
220struct disk_part_iter {
221 struct gendisk *disk;
222 struct hd_struct *part;
223 int idx;
224 unsigned int flags;
225};
226
227extern void disk_part_iter_init(struct disk_part_iter *piter,
228 struct gendisk *disk, unsigned int flags);
229extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
230extern void disk_part_iter_exit(struct disk_part_iter *piter);
231
232extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
233 sector_t sector);
234
235/*
236 * Macros to operate on percpu disk statistics:
237 *
238 * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
239 * and should be called between disk_stat_lock() and
240 * disk_stat_unlock().
241 *
242 * part_stat_read() can be called at any time.
243 *
244 * part_stat_{add|set_all}() and {init|free}_part_stats are for
245 * internal use only.
246 */
247#ifdef CONFIG_SMP
248#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
249#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
250
251#define __part_stat_add(cpu, part, field, addnd) \
252 (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
198 253
199#define part_stat_read(part, field) \ 254#define part_stat_read(part, field) \
200({ \ 255({ \
201 typeof(part->dkstats->field) res = 0; \ 256 typeof((part)->dkstats->field) res = 0; \
202 int i; \ 257 int i; \
203 for_each_possible_cpu(i) \ 258 for_each_possible_cpu(i) \
204 res += per_cpu_ptr(part->dkstats, i)->field; \ 259 res += per_cpu_ptr((part)->dkstats, i)->field; \
205 res; \ 260 res; \
206}) 261})
207 262
@@ -213,171 +268,107 @@ static inline void part_stat_set_all(struct hd_struct *part, int value)
213 memset(per_cpu_ptr(part->dkstats, i), value, 268 memset(per_cpu_ptr(part->dkstats, i), value,
214 sizeof(struct disk_stats)); 269 sizeof(struct disk_stats));
215} 270}
216
217#else /* !CONFIG_SMP */
218#define __disk_stat_add(gendiskp, field, addnd) \
219 (gendiskp->dkstats.field += addnd)
220#define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field)
221 271
222static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) 272static inline int init_part_stats(struct hd_struct *part)
223{ 273{
224 memset(&gendiskp->dkstats, value, sizeof (struct disk_stats)); 274 part->dkstats = alloc_percpu(struct disk_stats);
275 if (!part->dkstats)
276 return 0;
277 return 1;
225} 278}
226 279
227#define __part_stat_add(part, field, addnd) \ 280static inline void free_part_stats(struct hd_struct *part)
228 (part->dkstats.field += addnd)
229
230#define __all_stat_add(gendiskp, part, field, addnd, sector) \
231({ \
232 if (part) \
233 part->dkstats.field += addnd; \
234 __disk_stat_add(gendiskp, field, addnd); \
235})
236
237#define part_stat_read(part, field) (part->dkstats.field)
238
239static inline void part_stat_set_all(struct hd_struct *part, int value)
240{ 281{
241 memset(&part->dkstats, value, sizeof(struct disk_stats)); 282 free_percpu(part->dkstats);
242} 283}
243 284
244#endif /* CONFIG_SMP */ 285#else /* !CONFIG_SMP */
286#define part_stat_lock() ({ rcu_read_lock(); 0; })
287#define part_stat_unlock() rcu_read_unlock()
245 288
246#define disk_stat_add(gendiskp, field, addnd) \ 289#define __part_stat_add(cpu, part, field, addnd) \
247 do { \ 290 ((part)->dkstats.field += addnd)
248 preempt_disable(); \ 291
249 __disk_stat_add(gendiskp, field, addnd); \ 292#define part_stat_read(part, field) ((part)->dkstats.field)
250 preempt_enable(); \
251 } while (0)
252
253#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1)
254#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1)
255
256#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1)
257#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1)
258
259#define __disk_stat_sub(gendiskp, field, subnd) \
260 __disk_stat_add(gendiskp, field, -subnd)
261#define disk_stat_sub(gendiskp, field, subnd) \
262 disk_stat_add(gendiskp, field, -subnd)
263
264#define part_stat_add(gendiskp, field, addnd) \
265 do { \
266 preempt_disable(); \
267 __part_stat_add(gendiskp, field, addnd);\
268 preempt_enable(); \
269 } while (0)
270
271#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1)
272#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1)
273
274#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1)
275#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1)
276
277#define __part_stat_sub(gendiskp, field, subnd) \
278 __part_stat_add(gendiskp, field, -subnd)
279#define part_stat_sub(gendiskp, field, subnd) \
280 part_stat_add(gendiskp, field, -subnd)
281
282#define all_stat_add(gendiskp, part, field, addnd, sector) \
283 do { \
284 preempt_disable(); \
285 __all_stat_add(gendiskp, part, field, addnd, sector); \
286 preempt_enable(); \
287 } while (0)
288
289#define __all_stat_dec(gendiskp, field, sector) \
290 __all_stat_add(gendiskp, field, -1, sector)
291#define all_stat_dec(gendiskp, field, sector) \
292 all_stat_add(gendiskp, field, -1, sector)
293
294#define __all_stat_inc(gendiskp, part, field, sector) \
295 __all_stat_add(gendiskp, part, field, 1, sector)
296#define all_stat_inc(gendiskp, part, field, sector) \
297 all_stat_add(gendiskp, part, field, 1, sector)
298
299#define __all_stat_sub(gendiskp, part, field, subnd, sector) \
300 __all_stat_add(gendiskp, part, field, -subnd, sector)
301#define all_stat_sub(gendiskp, part, field, subnd, sector) \
302 all_stat_add(gendiskp, part, field, -subnd, sector)
303
304/* Inlines to alloc and free disk stats in struct gendisk */
305#ifdef CONFIG_SMP
306static inline int init_disk_stats(struct gendisk *disk)
307{
308 disk->dkstats = alloc_percpu(struct disk_stats);
309 if (!disk->dkstats)
310 return 0;
311 return 1;
312}
313 293
314static inline void free_disk_stats(struct gendisk *disk) 294static inline void part_stat_set_all(struct hd_struct *part, int value)
315{ 295{
316 free_percpu(disk->dkstats); 296 memset(&part->dkstats, value, sizeof(struct disk_stats));
317} 297}
318 298
319static inline int init_part_stats(struct hd_struct *part) 299static inline int init_part_stats(struct hd_struct *part)
320{ 300{
321 part->dkstats = alloc_percpu(struct disk_stats);
322 if (!part->dkstats)
323 return 0;
324 return 1; 301 return 1;
325} 302}
326 303
327static inline void free_part_stats(struct hd_struct *part) 304static inline void free_part_stats(struct hd_struct *part)
328{ 305{
329 free_percpu(part->dkstats);
330}
331
332#else /* CONFIG_SMP */
333static inline int init_disk_stats(struct gendisk *disk)
334{
335 return 1;
336} 306}
337 307
338static inline void free_disk_stats(struct gendisk *disk) 308#endif /* CONFIG_SMP */
339{
340}
341 309
342static inline int init_part_stats(struct hd_struct *part) 310#define part_stat_add(cpu, part, field, addnd) do { \
311 __part_stat_add((cpu), (part), field, addnd); \
312 if ((part)->partno) \
313 __part_stat_add((cpu), &part_to_disk((part))->part0, \
314 field, addnd); \
315} while (0)
316
317#define part_stat_dec(cpu, gendiskp, field) \
318 part_stat_add(cpu, gendiskp, field, -1)
319#define part_stat_inc(cpu, gendiskp, field) \
320 part_stat_add(cpu, gendiskp, field, 1)
321#define part_stat_sub(cpu, gendiskp, field, subnd) \
322 part_stat_add(cpu, gendiskp, field, -subnd)
323
324static inline void part_inc_in_flight(struct hd_struct *part)
343{ 325{
344 return 1; 326 part->in_flight++;
327 if (part->partno)
328 part_to_disk(part)->part0.in_flight++;
345} 329}
346 330
347static inline void free_part_stats(struct hd_struct *part) 331static inline void part_dec_in_flight(struct hd_struct *part)
348{ 332{
333 part->in_flight--;
334 if (part->partno)
335 part_to_disk(part)->part0.in_flight--;
349} 336}
350#endif /* CONFIG_SMP */
351 337
352/* drivers/block/ll_rw_blk.c */ 338/* drivers/block/ll_rw_blk.c */
353extern void disk_round_stats(struct gendisk *disk); 339extern void part_round_stats(int cpu, struct hd_struct *part);
354extern void part_round_stats(struct hd_struct *part);
355 340
356/* drivers/block/genhd.c */ 341/* drivers/block/genhd.c */
357extern int get_blkdev_list(char *, int); 342extern int get_blkdev_list(char *, int);
358extern void add_disk(struct gendisk *disk); 343extern void add_disk(struct gendisk *disk);
359extern void del_gendisk(struct gendisk *gp); 344extern void del_gendisk(struct gendisk *gp);
360extern void unlink_gendisk(struct gendisk *gp); 345extern void unlink_gendisk(struct gendisk *gp);
361extern struct gendisk *get_gendisk(dev_t dev, int *part); 346extern struct gendisk *get_gendisk(dev_t dev, int *partno);
347extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
362 348
363extern void set_device_ro(struct block_device *bdev, int flag); 349extern void set_device_ro(struct block_device *bdev, int flag);
364extern void set_disk_ro(struct gendisk *disk, int flag); 350extern void set_disk_ro(struct gendisk *disk, int flag);
365 351
352static inline int get_disk_ro(struct gendisk *disk)
353{
354 return disk->part0.policy;
355}
356
366/* drivers/char/random.c */ 357/* drivers/char/random.c */
367extern void add_disk_randomness(struct gendisk *disk); 358extern void add_disk_randomness(struct gendisk *disk);
368extern void rand_initialize_disk(struct gendisk *disk); 359extern void rand_initialize_disk(struct gendisk *disk);
369 360
370static inline sector_t get_start_sect(struct block_device *bdev) 361static inline sector_t get_start_sect(struct block_device *bdev)
371{ 362{
372 return bdev->bd_contains == bdev ? 0 : bdev->bd_part->start_sect; 363 return bdev->bd_part->start_sect;
373} 364}
374static inline sector_t get_capacity(struct gendisk *disk) 365static inline sector_t get_capacity(struct gendisk *disk)
375{ 366{
376 return disk->capacity; 367 return disk->part0.nr_sects;
377} 368}
378static inline void set_capacity(struct gendisk *disk, sector_t size) 369static inline void set_capacity(struct gendisk *disk, sector_t size)
379{ 370{
380 disk->capacity = size; 371 disk->part0.nr_sects = size;
381} 372}
382 373
383#ifdef CONFIG_SOLARIS_X86_PARTITION 374#ifdef CONFIG_SOLARIS_X86_PARTITION
@@ -527,9 +518,12 @@ struct unixware_disklabel {
527#define ADDPART_FLAG_RAID 1 518#define ADDPART_FLAG_RAID 1
528#define ADDPART_FLAG_WHOLEDISK 2 519#define ADDPART_FLAG_WHOLEDISK 2
529 520
530extern dev_t blk_lookup_devt(const char *name, int part); 521extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
531extern char *disk_name (struct gendisk *hd, int part, char *buf); 522extern void blk_free_devt(dev_t devt);
523extern dev_t blk_lookup_devt(const char *name, int partno);
524extern char *disk_name (struct gendisk *hd, int partno, char *buf);
532 525
526extern int disk_expand_part_tbl(struct gendisk *disk, int target);
533extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 527extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
534extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); 528extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int);
535extern void delete_partition(struct gendisk *, int); 529extern void delete_partition(struct gendisk *, int);
@@ -546,16 +540,23 @@ extern void blk_register_region(dev_t devt, unsigned long range,
546 void *data); 540 void *data);
547extern void blk_unregister_region(dev_t devt, unsigned long range); 541extern void blk_unregister_region(dev_t devt, unsigned long range);
548 542
549static inline struct block_device *bdget_disk(struct gendisk *disk, int index) 543extern ssize_t part_size_show(struct device *dev,
550{ 544 struct device_attribute *attr, char *buf);
551 return bdget(MKDEV(disk->major, disk->first_minor) + index); 545extern ssize_t part_stat_show(struct device *dev,
552} 546 struct device_attribute *attr, char *buf);
547#ifdef CONFIG_FAIL_MAKE_REQUEST
548extern ssize_t part_fail_show(struct device *dev,
549 struct device_attribute *attr, char *buf);
550extern ssize_t part_fail_store(struct device *dev,
551 struct device_attribute *attr,
552 const char *buf, size_t count);
553#endif /* CONFIG_FAIL_MAKE_REQUEST */
553 554
554#else /* CONFIG_BLOCK */ 555#else /* CONFIG_BLOCK */
555 556
556static inline void printk_all_partitions(void) { } 557static inline void printk_all_partitions(void) { }
557 558
558static inline dev_t blk_lookup_devt(const char *name, int part) 559static inline dev_t blk_lookup_devt(const char *name, int partno)
559{ 560{
560 dev_t devt = MKDEV(0, 0); 561 dev_t devt = MKDEV(0, 0);
561 return devt; 562 return devt;
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index c3c19f926e6..14d0df0b574 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -118,7 +118,11 @@ struct gfs2_sb {
118 118
119 char sb_lockproto[GFS2_LOCKNAME_LEN]; 119 char sb_lockproto[GFS2_LOCKNAME_LEN];
120 char sb_locktable[GFS2_LOCKNAME_LEN]; 120 char sb_locktable[GFS2_LOCKNAME_LEN];
121 /* In gfs1, quota and license dinodes followed */ 121
122 struct gfs2_inum __pad3; /* Was quota inode in gfs1 */
123 struct gfs2_inum __pad4; /* Was licence inode in gfs1 */
124#define GFS2_HAS_UUID 1
125 __u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */
122}; 126};
123 127
124/* 128/*
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 06c338ef7f1..8ea98db223e 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -38,7 +38,7 @@ extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
38 void (*put)(struct klist_node *)); 38 void (*put)(struct klist_node *));
39 39
40struct klist_node { 40struct klist_node {
41 struct klist *n_klist; 41 void *n_klist; /* never access directly */
42 struct list_head n_node; 42 struct list_head n_node;
43 struct kref n_ref; 43 struct kref n_ref;
44 struct completion n_removed; 44 struct completion n_removed;
@@ -57,7 +57,6 @@ extern int klist_node_attached(struct klist_node *n);
57 57
58struct klist_iter { 58struct klist_iter {
59 struct klist *i_klist; 59 struct klist *i_klist;
60 struct list_head *i_head;
61 struct klist_node *i_cur; 60 struct klist_node *i_cur;
62}; 61};
63 62
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 225bfc5bd9e..947cf84e555 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -146,6 +146,7 @@ enum {
146 ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ 146 ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
147 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ 147 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
148 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ 148 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
149 ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
149 ATA_DFLAG_INIT_MASK = (1 << 24) - 1, 150 ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
150 151
151 ATA_DFLAG_DETACH = (1 << 24), 152 ATA_DFLAG_DETACH = (1 << 24),
@@ -244,6 +245,7 @@ enum {
244 ATA_TMOUT_BOOT = 30000, /* heuristic */ 245 ATA_TMOUT_BOOT = 30000, /* heuristic */
245 ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ 246 ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
246 ATA_TMOUT_INTERNAL_QUICK = 5000, 247 ATA_TMOUT_INTERNAL_QUICK = 5000,
248 ATA_TMOUT_MAX_PARK = 30000,
247 249
248 /* FIXME: GoVault needs 2s but we can't afford that without 250 /* FIXME: GoVault needs 2s but we can't afford that without
249 * parallel probing. 800ms is enough for iVDR disk 251 * parallel probing. 800ms is enough for iVDR disk
@@ -319,8 +321,11 @@ enum {
319 ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 321 ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
320 ATA_EH_ENABLE_LINK = (1 << 3), 322 ATA_EH_ENABLE_LINK = (1 << 3),
321 ATA_EH_LPM = (1 << 4), /* link power management action */ 323 ATA_EH_LPM = (1 << 4), /* link power management action */
324 ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
322 325
323 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, 326 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
327 ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
328 ATA_EH_ENABLE_LINK | ATA_EH_LPM,
324 329
325 /* ata_eh_info->flags */ 330 /* ata_eh_info->flags */
326 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 331 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
@@ -452,6 +457,7 @@ enum link_pm {
452 MEDIUM_POWER, 457 MEDIUM_POWER,
453}; 458};
454extern struct device_attribute dev_attr_link_power_management_policy; 459extern struct device_attribute dev_attr_link_power_management_policy;
460extern struct device_attribute dev_attr_unload_heads;
455extern struct device_attribute dev_attr_em_message_type; 461extern struct device_attribute dev_attr_em_message_type;
456extern struct device_attribute dev_attr_em_message; 462extern struct device_attribute dev_attr_em_message;
457extern struct device_attribute dev_attr_sw_activity; 463extern struct device_attribute dev_attr_sw_activity;
@@ -554,8 +560,8 @@ struct ata_ering {
554struct ata_device { 560struct ata_device {
555 struct ata_link *link; 561 struct ata_link *link;
556 unsigned int devno; /* 0 or 1 */ 562 unsigned int devno; /* 0 or 1 */
557 unsigned long flags; /* ATA_DFLAG_xxx */
558 unsigned int horkage; /* List of broken features */ 563 unsigned int horkage; /* List of broken features */
564 unsigned long flags; /* ATA_DFLAG_xxx */
559 struct scsi_device *sdev; /* attached SCSI device */ 565 struct scsi_device *sdev; /* attached SCSI device */
560#ifdef CONFIG_ATA_ACPI 566#ifdef CONFIG_ATA_ACPI
561 acpi_handle acpi_handle; 567 acpi_handle acpi_handle;
@@ -564,6 +570,7 @@ struct ata_device {
564 /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ 570 /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
565 u64 n_sectors; /* size of device, if ATA */ 571 u64 n_sectors; /* size of device, if ATA */
566 unsigned int class; /* ATA_DEV_xxx */ 572 unsigned int class; /* ATA_DEV_xxx */
573 unsigned long unpark_deadline;
567 574
568 u8 pio_mode; 575 u8 pio_mode;
569 u8 dma_mode; 576 u8 dma_mode;
@@ -621,6 +628,7 @@ struct ata_eh_context {
621 [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; 628 [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
622 unsigned int classes[ATA_MAX_DEVICES]; 629 unsigned int classes[ATA_MAX_DEVICES];
623 unsigned int did_probe_mask; 630 unsigned int did_probe_mask;
631 unsigned int unloaded_mask;
624 unsigned int saved_ncq_enabled; 632 unsigned int saved_ncq_enabled;
625 u8 saved_xfer_mode[ATA_MAX_DEVICES]; 633 u8 saved_xfer_mode[ATA_MAX_DEVICES];
626 /* timestamp for the last reset attempt or success */ 634 /* timestamp for the last reset attempt or success */
@@ -688,7 +696,8 @@ struct ata_port {
688 unsigned int qc_active; 696 unsigned int qc_active;
689 int nr_active_links; /* #links with active qcs */ 697 int nr_active_links; /* #links with active qcs */
690 698
691 struct ata_link link; /* host default link */ 699 struct ata_link link; /* host default link */
700 struct ata_link *slave_link; /* see ata_slave_link_init() */
692 701
693 int nr_pmp_links; /* nr of available PMP links */ 702 int nr_pmp_links; /* nr of available PMP links */
694 struct ata_link *pmp_link; /* array of PMP links */ 703 struct ata_link *pmp_link; /* array of PMP links */
@@ -709,6 +718,7 @@ struct ata_port {
709 struct list_head eh_done_q; 718 struct list_head eh_done_q;
710 wait_queue_head_t eh_wait_q; 719 wait_queue_head_t eh_wait_q;
711 int eh_tries; 720 int eh_tries;
721 struct completion park_req_pending;
712 722
713 pm_message_t pm_mesg; 723 pm_message_t pm_mesg;
714 int *pm_result; 724 int *pm_result;
@@ -772,8 +782,8 @@ struct ata_port_operations {
772 /* 782 /*
773 * Optional features 783 * Optional features
774 */ 784 */
775 int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); 785 int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
776 int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); 786 int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
777 void (*pmp_attach)(struct ata_port *ap); 787 void (*pmp_attach)(struct ata_port *ap);
778 void (*pmp_detach)(struct ata_port *ap); 788 void (*pmp_detach)(struct ata_port *ap);
779 int (*enable_pm)(struct ata_port *ap, enum link_pm policy); 789 int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
@@ -895,6 +905,7 @@ extern void ata_port_disable(struct ata_port *);
895extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); 905extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
896extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, 906extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
897 const struct ata_port_info * const * ppi, int n_ports); 907 const struct ata_port_info * const * ppi, int n_ports);
908extern int ata_slave_link_init(struct ata_port *ap);
898extern int ata_host_start(struct ata_host *host); 909extern int ata_host_start(struct ata_host *host);
899extern int ata_host_register(struct ata_host *host, 910extern int ata_host_register(struct ata_host *host,
900 struct scsi_host_template *sht); 911 struct scsi_host_template *sht);
@@ -920,8 +931,8 @@ extern int sata_scr_valid(struct ata_link *link);
920extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); 931extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
921extern int sata_scr_write(struct ata_link *link, int reg, u32 val); 932extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
922extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); 933extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
923extern int ata_link_online(struct ata_link *link); 934extern bool ata_link_online(struct ata_link *link);
924extern int ata_link_offline(struct ata_link *link); 935extern bool ata_link_offline(struct ata_link *link);
925#ifdef CONFIG_PM 936#ifdef CONFIG_PM
926extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); 937extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
927extern void ata_host_resume(struct ata_host *host); 938extern void ata_host_resume(struct ata_host *host);
@@ -1098,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap);
1098 */ 1109 */
1099extern const struct ata_port_operations ata_base_port_ops; 1110extern const struct ata_port_operations ata_base_port_ops;
1100extern const struct ata_port_operations sata_port_ops; 1111extern const struct ata_port_operations sata_port_ops;
1112extern struct device_attribute *ata_common_sdev_attrs[];
1101 1113
1102#define ATA_BASE_SHT(drv_name) \ 1114#define ATA_BASE_SHT(drv_name) \
1103 .module = THIS_MODULE, \ 1115 .module = THIS_MODULE, \
@@ -1112,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops;
1112 .proc_name = drv_name, \ 1124 .proc_name = drv_name, \
1113 .slave_configure = ata_scsi_slave_config, \ 1125 .slave_configure = ata_scsi_slave_config, \
1114 .slave_destroy = ata_scsi_slave_destroy, \ 1126 .slave_destroy = ata_scsi_slave_destroy, \
1115 .bios_param = ata_std_bios_param 1127 .bios_param = ata_std_bios_param, \
1128 .sdev_attrs = ata_common_sdev_attrs
1116 1129
1117#define ATA_NCQ_SHT(drv_name) \ 1130#define ATA_NCQ_SHT(drv_name) \
1118 ATA_BASE_SHT(drv_name), \ 1131 ATA_BASE_SHT(drv_name), \
@@ -1134,7 +1147,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
1134 1147
1135static inline int ata_is_host_link(const struct ata_link *link) 1148static inline int ata_is_host_link(const struct ata_link *link)
1136{ 1149{
1137 return link == &link->ap->link; 1150 return link == &link->ap->link || link == link->ap->slave_link;
1138} 1151}
1139#else /* CONFIG_SATA_PMP */ 1152#else /* CONFIG_SATA_PMP */
1140static inline bool sata_pmp_supported(struct ata_port *ap) 1153static inline bool sata_pmp_supported(struct ata_port *ap)
@@ -1167,7 +1180,7 @@ static inline int sata_srst_pmp(struct ata_link *link)
1167 printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) 1180 printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
1168 1181
1169#define ata_link_printk(link, lv, fmt, args...) do { \ 1182#define ata_link_printk(link, lv, fmt, args...) do { \
1170 if (sata_pmp_attached((link)->ap)) \ 1183 if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \
1171 printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ 1184 printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \
1172 (link)->pmp , ##args); \ 1185 (link)->pmp , ##args); \
1173 else \ 1186 else \
@@ -1265,34 +1278,17 @@ static inline int ata_link_active(struct ata_link *link)
1265 return ata_tag_valid(link->active_tag) || link->sactive; 1278 return ata_tag_valid(link->active_tag) || link->sactive;
1266} 1279}
1267 1280
1268static inline struct ata_link *ata_port_first_link(struct ata_port *ap) 1281extern struct ata_link *__ata_port_next_link(struct ata_port *ap,
1269{ 1282 struct ata_link *link,
1270 if (sata_pmp_attached(ap)) 1283 bool dev_only);
1271 return ap->pmp_link;
1272 return &ap->link;
1273}
1274
1275static inline struct ata_link *ata_port_next_link(struct ata_link *link)
1276{
1277 struct ata_port *ap = link->ap;
1278
1279 if (ata_is_host_link(link)) {
1280 if (!sata_pmp_attached(ap))
1281 return NULL;
1282 return ap->pmp_link;
1283 }
1284
1285 if (++link < ap->nr_pmp_links + ap->pmp_link)
1286 return link;
1287 return NULL;
1288}
1289 1284
1290#define __ata_port_for_each_link(lk, ap) \ 1285#define __ata_port_for_each_link(link, ap) \
1291 for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk)) 1286 for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
1287 (link) = __ata_port_next_link((ap), (link), false))
1292 1288
1293#define ata_port_for_each_link(link, ap) \ 1289#define ata_port_for_each_link(link, ap) \
1294 for ((link) = ata_port_first_link(ap); (link); \ 1290 for ((link) = __ata_port_next_link((ap), NULL, true); (link); \
1295 (link) = ata_port_next_link(link)) 1291 (link) = __ata_port_next_link((ap), (link), true))
1296 1292
1297#define ata_link_for_each_dev(dev, link) \ 1293#define ata_link_for_each_dev(dev, link) \
1298 for ((dev) = (link)->device; \ 1294 for ((dev) = (link)->device; \
diff --git a/include/linux/major.h b/include/linux/major.h
index 53d5fafd85c..88249452b93 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -170,4 +170,6 @@
170 170
171#define VIOTAPE_MAJOR 230 171#define VIOTAPE_MAJOR 230
172 172
173#define BLOCK_EXT_MAJOR 259
174
173#endif 175#endif
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 310e6160641..8b4aa0523db 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -41,6 +41,8 @@ struct mtd_blktrans_ops {
41 unsigned long block, char *buffer); 41 unsigned long block, char *buffer);
42 int (*writesect)(struct mtd_blktrans_dev *dev, 42 int (*writesect)(struct mtd_blktrans_dev *dev,
43 unsigned long block, char *buffer); 43 unsigned long block, char *buffer);
44 int (*discard)(struct mtd_blktrans_dev *dev,
45 unsigned long block, unsigned nr_blocks);
44 46
45 /* Block layer ioctls */ 47 /* Block layer ioctls */
46 int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); 48 int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index da2698b0fdd..b86fa2ffca0 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret)
213#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 213#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
214#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 214#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
215#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, 215#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
216 * not handling interrupts, soon dead */ 216 * not handling interrupts, soon dead.
217 * Called on the dying cpu, interrupts
218 * are already disabled. Must not
219 * sleep, must not fail */
217#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug 220#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
218 * lock is dropped */ 221 * lock is dropped */
222#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
223 * Called on the new cpu, just before
224 * enabling interrupts. Must not sleep,
225 * must not fail */
219 226
220/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend 227/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
221 * operation in progress 228 * operation in progress
@@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret)
229#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) 236#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
230#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) 237#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
231#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) 238#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
239#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
232 240
233/* Hibernation and suspend events */ 241/* Hibernation and suspend events */
234#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ 242#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5afc1b23346..cf793bbbd05 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -104,8 +104,8 @@ struct prop_local_single {
104 * snapshot of the last seen global state 104 * snapshot of the last seen global state
105 * and a lock protecting this state 105 * and a lock protecting this state
106 */ 106 */
107 int shift;
108 unsigned long period; 107 unsigned long period;
108 int shift;
109 spinlock_t lock; /* protect the snapshot state */ 109 spinlock_t lock; /* protect the snapshot state */
110}; 110};
111 111
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4ab84362272..5f89b62e698 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -40,12 +40,21 @@
40#include <linux/cpumask.h> 40#include <linux/cpumask.h>
41#include <linux/seqlock.h> 41#include <linux/seqlock.h>
42 42
43#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
44#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
45#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
46#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
43 47
44/* Global control variables for rcupdate callback mechanism. */ 48/* Global control variables for rcupdate callback mechanism. */
45struct rcu_ctrlblk { 49struct rcu_ctrlblk {
46 long cur; /* Current batch number. */ 50 long cur; /* Current batch number. */
47 long completed; /* Number of the last completed batch */ 51 long completed; /* Number of the last completed batch */
48 int next_pending; /* Is the next batch already waiting? */ 52 long pending; /* Number of the last pending batch */
53#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
54 unsigned long gp_start; /* Time at which GP started in jiffies. */
55 unsigned long jiffies_stall;
56 /* Time at which to check for CPU stalls. */
57#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
49 58
50 int signaled; 59 int signaled;
51 60
@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
66 return (a - b) > 0; 75 return (a - b) > 0;
67} 76}
68 77
69/* 78/* Per-CPU data for Read-Copy UPdate. */
70 * Per-CPU data for Read-Copy UPdate.
71 * nxtlist - new callbacks are added here
72 * curlist - current batch for which quiescent cycle started if any
73 */
74struct rcu_data { 79struct rcu_data {
75 /* 1) quiescent state handling : */ 80 /* 1) quiescent state handling : */
76 long quiescbatch; /* Batch # for grace period */ 81 long quiescbatch; /* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
78 int qs_pending; /* core waits for quiesc state */ 83 int qs_pending; /* core waits for quiesc state */
79 84
80 /* 2) batch handling */ 85 /* 2) batch handling */
81 long batch; /* Batch # for current RCU batch */ 86 /*
87 * if nxtlist is not NULL, then:
88 * batch:
89 * The batch # for the last entry of nxtlist
90 * [*nxttail[1], NULL = *nxttail[2]):
91 * Entries that batch # <= batch
92 * [*nxttail[0], *nxttail[1]):
93 * Entries that batch # <= batch - 1
94 * [nxtlist, *nxttail[0]):
95 * Entries that batch # <= batch - 2
96 * The grace period for these entries has completed, and
97 * the other grace-period-completed entries may be moved
98 * here temporarily in rcu_process_callbacks().
99 */
100 long batch;
82 struct rcu_head *nxtlist; 101 struct rcu_head *nxtlist;
83 struct rcu_head **nxttail; 102 struct rcu_head **nxttail[3];
84 long qlen; /* # of queued callbacks */ 103 long qlen; /* # of queued callbacks */
85 struct rcu_head *curlist;
86 struct rcu_head **curtail;
87 struct rcu_head *donelist; 104 struct rcu_head *donelist;
88 struct rcu_head **donetail; 105 struct rcu_head **donetail;
89 long blimit; /* Upper limit on a processed batch */ 106 long blimit; /* Upper limit on a processed batch */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index eb4443c7e05..e649bd3f2c9 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
198 at->prev = last; 198 at->prev = last;
199} 199}
200 200
201/**
202 * list_for_each_rcu - iterate over an rcu-protected list
203 * @pos: the &struct list_head to use as a loop cursor.
204 * @head: the head for your list.
205 *
206 * This list-traversal primitive may safely run concurrently with
207 * the _rcu list-mutation primitives such as list_add_rcu()
208 * as long as the traversal is guarded by rcu_read_lock().
209 */
210#define list_for_each_rcu(pos, head) \
211 for (pos = rcu_dereference((head)->next); \
212 prefetch(pos->next), pos != (head); \
213 pos = rcu_dereference(pos->next))
214
215#define __list_for_each_rcu(pos, head) \ 201#define __list_for_each_rcu(pos, head) \
216 for (pos = rcu_dereference((head)->next); \ 202 for (pos = rcu_dereference((head)->next); \
217 pos != (head); \ 203 pos != (head); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e8b4039cfb2..86f1f5e43e3 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -133,6 +133,26 @@ struct rcu_head {
133#define rcu_read_unlock_bh() __rcu_read_unlock_bh() 133#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
134 134
135/** 135/**
136 * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
137 *
138 * Should be used with either
139 * - synchronize_sched()
140 * or
141 * - call_rcu_sched() and rcu_barrier_sched()
142 * on the write-side to insure proper synchronization.
143 */
144#define rcu_read_lock_sched() preempt_disable()
145
146/*
147 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
148 *
149 * See rcu_read_lock_sched for more information.
150 */
151#define rcu_read_unlock_sched() preempt_enable()
152
153
154
155/**
136 * rcu_dereference - fetch an RCU-protected pointer in an 156 * rcu_dereference - fetch an RCU-protected pointer in an
137 * RCU read-side critical section. This pointer may later 157 * RCU read-side critical section. This pointer may later
138 * be safely dereferenced. 158 * be safely dereferenced.
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 0967f03b070..3e05c09b54a 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
57 rdssp->sched_qs++; 57 rdssp->sched_qs++;
58} 58}
59#define rcu_bh_qsctr_inc(cpu) 59#define rcu_bh_qsctr_inc(cpu)
60#define call_rcu_bh(head, rcu) call_rcu(head, rcu) 60
61/*
62 * Someone might want to pass call_rcu_bh as a function pointer.
63 * So this needs to just be a rename and not a macro function.
64 * (no parentheses)
65 */
66#define call_rcu_bh call_rcu
61 67
62/** 68/**
63 * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 69 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
111struct softirq_action; 117struct softirq_action;
112 118
113#ifdef CONFIG_NO_HZ 119#ifdef CONFIG_NO_HZ
114DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
115 120
116static inline void rcu_enter_nohz(void) 121static inline void rcu_enter_nohz(void)
117{ 122{
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
126{ 131{
127 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); 132 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
128 133
129 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
130 __get_cpu_var(rcu_dyntick_sched).dynticks++; 134 __get_cpu_var(rcu_dyntick_sched).dynticks++;
135 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
131 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), 136 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
132 &rs); 137 &rs);
133} 138}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad1..5d0819ee442 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -451,8 +451,8 @@ struct signal_struct {
451 * - everyone except group_exit_task is stopped during signal delivery 451 * - everyone except group_exit_task is stopped during signal delivery
452 * of fatal signals, group_exit_task processes the signal. 452 * of fatal signals, group_exit_task processes the signal.
453 */ 453 */
454 struct task_struct *group_exit_task;
455 int notify_count; 454 int notify_count;
455 struct task_struct *group_exit_task;
456 456
457 /* thread group stop support, overloads group_exit_code too */ 457 /* thread group stop support, overloads group_exit_code too */
458 int group_stop_count; 458 int group_stop_count;
@@ -824,6 +824,9 @@ struct sched_domain {
824 unsigned int ttwu_move_affine; 824 unsigned int ttwu_move_affine;
825 unsigned int ttwu_move_balance; 825 unsigned int ttwu_move_balance;
826#endif 826#endif
827#ifdef CONFIG_SCHED_DEBUG
828 char *name;
829#endif
827}; 830};
828 831
829extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 832extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
@@ -897,7 +900,7 @@ struct sched_class {
897 void (*yield_task) (struct rq *rq); 900 void (*yield_task) (struct rq *rq);
898 int (*select_task_rq)(struct task_struct *p, int sync); 901 int (*select_task_rq)(struct task_struct *p, int sync);
899 902
900 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); 903 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
901 904
902 struct task_struct * (*pick_next_task) (struct rq *rq); 905 struct task_struct * (*pick_next_task) (struct rq *rq);
903 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 906 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -1010,8 +1013,8 @@ struct sched_entity {
1010 1013
1011struct sched_rt_entity { 1014struct sched_rt_entity {
1012 struct list_head run_list; 1015 struct list_head run_list;
1013 unsigned int time_slice;
1014 unsigned long timeout; 1016 unsigned long timeout;
1017 unsigned int time_slice;
1015 int nr_cpus_allowed; 1018 int nr_cpus_allowed;
1016 1019
1017 struct sched_rt_entity *back; 1020 struct sched_rt_entity *back;
diff --git a/include/linux/security.h b/include/linux/security.h
index 80c4d002864..f5c4a51eb42 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1560,11 +1560,6 @@ struct security_operations {
1560extern int security_init(void); 1560extern int security_init(void);
1561extern int security_module_enable(struct security_operations *ops); 1561extern int security_module_enable(struct security_operations *ops);
1562extern int register_security(struct security_operations *ops); 1562extern int register_security(struct security_operations *ops);
1563extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
1564 struct dentry *parent, void *data,
1565 const struct file_operations *fops);
1566extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
1567extern void securityfs_remove(struct dentry *dentry);
1568 1563
1569/* Security operations */ 1564/* Security operations */
1570int security_ptrace_may_access(struct task_struct *child, unsigned int mode); 1565int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
@@ -2424,25 +2419,6 @@ static inline int security_netlink_recv(struct sk_buff *skb, int cap)
2424 return cap_netlink_recv(skb, cap); 2419 return cap_netlink_recv(skb, cap);
2425} 2420}
2426 2421
2427static inline struct dentry *securityfs_create_dir(const char *name,
2428 struct dentry *parent)
2429{
2430 return ERR_PTR(-ENODEV);
2431}
2432
2433static inline struct dentry *securityfs_create_file(const char *name,
2434 mode_t mode,
2435 struct dentry *parent,
2436 void *data,
2437 const struct file_operations *fops)
2438{
2439 return ERR_PTR(-ENODEV);
2440}
2441
2442static inline void securityfs_remove(struct dentry *dentry)
2443{
2444}
2445
2446static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 2422static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
2447{ 2423{
2448 return -EOPNOTSUPP; 2424 return -EOPNOTSUPP;
@@ -2806,5 +2782,35 @@ static inline void security_audit_rule_free(void *lsmrule)
2806#endif /* CONFIG_SECURITY */ 2782#endif /* CONFIG_SECURITY */
2807#endif /* CONFIG_AUDIT */ 2783#endif /* CONFIG_AUDIT */
2808 2784
2785#ifdef CONFIG_SECURITYFS
2786
2787extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
2788 struct dentry *parent, void *data,
2789 const struct file_operations *fops);
2790extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
2791extern void securityfs_remove(struct dentry *dentry);
2792
2793#else /* CONFIG_SECURITYFS */
2794
2795static inline struct dentry *securityfs_create_dir(const char *name,
2796 struct dentry *parent)
2797{
2798 return ERR_PTR(-ENODEV);
2799}
2800
2801static inline struct dentry *securityfs_create_file(const char *name,
2802 mode_t mode,
2803 struct dentry *parent,
2804 void *data,
2805 const struct file_operations *fops)
2806{
2807 return ERR_PTR(-ENODEV);
2808}
2809
2810static inline void securityfs_remove(struct dentry *dentry)
2811{}
2812
2813#endif
2814
2809#endif /* ! __LINUX_SECURITY_H */ 2815#endif /* ! __LINUX_SECURITY_H */
2810 2816
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
new file mode 100644
index 00000000000..a3eb2f65b65
--- /dev/null
+++ b/include/linux/string_helpers.h
@@ -0,0 +1,16 @@
1#ifndef _LINUX_STRING_HELPERS_H_
2#define _LINUX_STRING_HELPERS_H_
3
4#include <linux/types.h>
5
6/* Descriptions of the types of units to
7 * print in */
8enum string_size_units {
9 STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
10 STRING_UNITS_2, /* use binary powers of 2^10 */
11};
12
13int string_get_size(u64 size, enum string_size_units units,
14 char *buf, int len);
15
16#endif
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 8cf8cfe2cc9..98921a3e1aa 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -126,7 +126,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
126 return len; 126 return len;
127} 127}
128static inline void tick_nohz_stop_idle(int cpu) { } 128static inline void tick_nohz_stop_idle(int cpu) { }
129static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; } 129static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
130# endif /* !NO_HZ */ 130# endif /* !NO_HZ */
131 131
132#endif 132#endif