author     Linus Torvalds <torvalds@linux-foundation.org>   2010-10-22 20:00:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-10-22 20:00:32 -0400
commit     e9dd2b6837e26fe202708cce5ea4bb4ee3e3482e
tree       f42fd892495bfc4cbb740d06b016d267c9c42d00 /include/linux
parent     4f3a29dadaf999a273f1e7fe2476595d0283eef3
parent     b4627321e18582dcbdeb45d77df29d3177107c65
Merge branch 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block: (39 commits)
  cfq-iosched: Fix a gcc 4.5 warning and put some comments
  block: Turn bvec_k{un,}map_irq() into static inline functions
  block: fix accounting bug on cross partition merges
  block: Make the integrity mapped property a bio flag
  block: Fix double free in blk_integrity_unregister
  block: Ensure physical block size is unsigned int
  blkio-throttle: Fix possible multiplication overflow in iops calculations
  blkio-throttle: limit max iops value to UINT_MAX
  blkio-throttle: There is no need to convert jiffies to milli seconds
  blkio-throttle: Fix link failure failure on i386
  blkio: Recalculate the throttled bio dispatch time upon throttle limit change
  blkio: Add root group to td->tg_list
  blkio: deletion of a cgroup was causes oops
  blkio: Do not export throttle files if CONFIG_BLK_DEV_THROTTLING=n
  block: set the bounce_pfn to the actual DMA limit rather than to max memory
  block: revert bad fix for memory hotplug causing bounces
  Fix compile error in blk-exec.c for !CONFIG_DETECT_HUNG_TASK
  block: set the bounce_pfn to the actual DMA limit rather than to max memory
  block: Prevent hang_check firing during long I/O
  cfq: improve fsync performance for small files
  ...

Fix up trivial conflicts due to __rcu sparse annotation in include/linux/genhd.h
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/bio.h         15
-rw-r--r--   include/linux/blk_types.h    6
-rw-r--r--   include/linux/blkdev.h      66
-rw-r--r--   include/linux/elevator.h     2
-rw-r--r--   include/linux/genhd.h       54
-rw-r--r--   include/linux/kernel.h      10
-rw-r--r--   include/linux/sched.h        3
7 files changed, 142 insertions, 14 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5274103434ad..ba679992d39b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -346,8 +346,15 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 }
 
 #else
-#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
-#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+{
+	return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+{
+	*flags = 0;
+}
 #endif
 
 static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
@@ -496,6 +503,10 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 #define bip_for_each_vec(bvl, bip, i)				\
 	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
 
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter)		\
+	for_each_bio(_bio)					\
+		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
 #define bio_integrity(bio) (bio->bi_integrity != NULL)
 
 extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
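
For reference, the macro-to-inline conversion above gives bvec_kmap_irq()/bvec_kunmap_irq() real prototypes and type checking on !CONFIG_HIGHMEM builds. A minimal sketch of the usual calling pattern from a driver (the helper name my_copy_bio_to_buf is hypothetical, not part of this patch):

/* Minimal sketch only; my_copy_bio_to_buf() is a hypothetical helper. */
static void my_copy_bio_to_buf(struct bio *bio, char *dst)
{
	struct bio_vec *bvec;
	unsigned long flags;
	int i;

	bio_for_each_segment(bvec, bio, i) {
		char *src = bvec_kmap_irq(bvec, &flags);  /* map segment, irqs off if HIGHMEM */

		memcpy(dst, src, bvec->bv_len);
		dst += bvec->bv_len;
		bvec_kunmap_irq(src, &flags);             /* unmap and restore irqs */
	}
}
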
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ca83a97c9715..d36629620a4f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -97,6 +97,7 @@ struct bio {
 #define BIO_NULL_MAPPED 9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
 #define BIO_QUIET	11	/* Make BIO Quiet */
+#define BIO_MAPPED_INTEGRITY 12	/* integrity metadata has been remapped */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -130,6 +131,8 @@ enum rq_flag_bits {
 	/* bio only flags */
 	__REQ_UNPLUG,		/* unplug the immediately after submission */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
+	__REQ_THROTTLED,	/* This bio has already been subjected to
+				 * throttling rules. Don't do it again. */
 
 	/* request only flags */
 	__REQ_SORTED,		/* elevator knows about this request */
@@ -146,7 +149,6 @@ enum rq_flag_bits {
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_FLUSH,		/* request for cache flush */
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
@@ -172,6 +174,7 @@ enum rq_flag_bits {
 
 #define REQ_UNPLUG		(1 << __REQ_UNPLUG)
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
+#define REQ_THROTTLED		(1 << __REQ_THROTTLED)
 
 #define REQ_SORTED		(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
@@ -187,7 +190,6 @@ enum rq_flag_bits {
 #define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
 #define REQ_ALLOCED		(1 << __REQ_ALLOCED)
 #define REQ_COPY_USER		(1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
 #define REQ_FLUSH		(1 << __REQ_FLUSH)
 #define REQ_IO_STAT		(1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
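
With the integrity-remap state now carried on the bio instead of the request, it can be tested with the bio_flagged() helper defined in the same file. A hedged illustration of that pattern (remap_prot_info() is a hypothetical helper, not code from this patch):

/* Illustrative only; remap_prot_info() is a hypothetical helper. */
static void maybe_remap_integrity(struct bio *bio)
{
	if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
		return;				/* already remapped once */

	remap_prot_info(bio);			/* hypothetical remapping step */
	bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
}
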
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..16f7f1be1acf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@ struct request {
 	void *elevator_private3;
 
 	struct gendisk *rq_disk;
+	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
 	unsigned long long start_time_ns;
@@ -124,6 +125,9 @@ struct request {
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif
 
 	unsigned short ioprio;
 
@@ -243,6 +247,7 @@ struct queue_limits {
 
 	unsigned short		logical_block_size;
 	unsigned short		max_segments;
+	unsigned short		max_integrity_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
@@ -367,6 +372,11 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+	/* Throttle data */
+	struct throtl_data *td;
+#endif
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -851,7 +861,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -1004,7 +1014,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
@@ -1093,11 +1103,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	return !((unsigned long)addr & alignment) && !(len & alignment);
+	return !(addr & alignment) && !(len & alignment);
 }
 
 /* assumes size > 256 */
@@ -1127,6 +1137,7 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1170,6 +1181,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 }
 #endif
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
+extern void throtl_shutdown_timer_wq(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+{
+	return 0;
+}
+
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
+static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
+static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
@@ -1213,8 +1242,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+				   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+				  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1269,32 @@ static inline int blk_integrity_rq(struct request *rq)
 	return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+	q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+	return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a)		(0)
-#define blk_rq_map_integrity_sg(a, b)		(0)
+#define blk_rq_count_integrity_sg(a, b)		(0)
+#define blk_rq_map_integrity_sg(a, b, c)	(0)
 #define bdev_get_integrity(a)			(0)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
 #define blk_integrity_unregister(a)		do { } while (0);
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define queue_max_integrity_segments(a)		(0)
+#define blk_integrity_merge_rq(a, b, c)		(0)
+#define blk_integrity_merge_bio(a, b, c)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
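
A driver exposing DIF/DIX metadata would advertise its integrity scatter-gather capability through the new limit, much as it already sets max_segments; with CONFIG_BLK_DEV_INTEGRITY disabled the call compiles down to the do { } while (0) stub above. A rough sketch under that assumption (the driver and parameter names are made up):

/* Rough sketch only; my_hba_init_queue() and its parameters are made-up names. */
static void my_hba_init_queue(struct request_queue *q,
			      unsigned short hw_segs,
			      unsigned int hw_integrity_segs)
{
	blk_queue_max_segments(q, hw_segs);
	/* New in this merge: cap the integrity scatterlist length separately. */
	blk_queue_max_integrity_segments(q, hw_integrity_segs);
}
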
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e7eb83..80a0ece8f7e4 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -122,6 +122,8 @@ extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
 extern void elv_put_request(struct request_queue *, struct request *);
 extern void elv_drain_elevator(struct request_queue *);
+extern void elv_quiesce_start(struct request_queue *);
+extern void elv_quiesce_end(struct request_queue *);
 
 /*
  * io scheduler registration
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index af3f06b41dc1..557c3927e70f 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/kdev_t.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 
 #ifdef CONFIG_BLOCK
 
@@ -86,7 +87,15 @@ struct disk_stats {
 	unsigned long io_ticks;
 	unsigned long time_in_queue;
 };
 
+#define PARTITION_META_INFO_VOLNAMELTH	64
+#define PARTITION_META_INFO_UUIDLTH	16
+
+struct partition_meta_info {
+	u8 uuid[PARTITION_META_INFO_UUIDLTH];	/* always big endian */
+	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
+};
+
 struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
@@ -95,6 +104,7 @@ struct hd_struct {
 	struct device __dev;
 	struct kobject *holder_dir;
 	int policy, partno;
+	struct partition_meta_info *info;
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	int make_it_fail;
 #endif
@@ -130,6 +140,7 @@ struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
 	struct hd_struct __rcu *last_lookup;
+	struct gendisk *disk;
 	struct hd_struct __rcu *part[];
 };
 
@@ -181,6 +192,30 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
 	return NULL;
 }
 
+static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+	int i;
+	for (i = 0; i < 16; ++i) {
+		*to++ = (hex_to_bin(*uuid_str) << 4) |
+			(hex_to_bin(*(uuid_str + 1)));
+		uuid_str += 2;
+		switch (i) {
+		case 3:
+		case 5:
+		case 7:
+		case 9:
+			uuid_str++;
+			continue;
+		}
+	}
+}
+
+static inline char *part_unpack_uuid(const u8 *uuid, char *out)
+{
+	sprintf(out, "%pU", uuid);
+	return out;
+}
+
 static inline int disk_max_parts(struct gendisk *disk)
 {
 	if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -342,6 +377,19 @@ static inline int part_in_flight(struct hd_struct *part)
 	return part->in_flight[0] + part->in_flight[1];
 }
 
+static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
+{
+	if (disk)
+		return kzalloc_node(sizeof(struct partition_meta_info),
+				    GFP_KERNEL, disk->node_id);
+	return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
+}
+
+static inline void free_part_info(struct hd_struct *part)
+{
+	kfree(part->info);
+}
+
 /* block/blk-core.c */
 extern void part_round_stats(int cpu, struct hd_struct *part);
 
@@ -533,7 +581,9 @@ extern int disk_expand_part_tbl(struct gendisk *disk, int target);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
 extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
 						      int partno, sector_t start,
-						      sector_t len, int flags);
+						      sector_t len, int flags,
+						      struct partition_meta_info
+							*info);
 extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
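
part_pack_uuid() converts the canonical 36-character textual UUID into 16 raw bytes, stepping over the dash that follows byte indices 3, 5, 7 and 9 of the 8-4-4-4-12 layout, and part_unpack_uuid() prints the bytes back via the %pU printk extension. A hedged round-trip sketch (the example UUID, function and buffer names are illustrative only, not from this patch):

/* Illustrative round trip; names and the example UUID are not from the patch. */
static void uuid_round_trip_example(void)
{
	const char *example_uuid = "12345678-1234-5678-1234-567812345678";
	u8 packed[PARTITION_META_INFO_UUIDLTH];
	char text[37];					/* 36 chars plus NUL for %pU */

	part_pack_uuid((const u8 *)example_uuid, packed);	/* text -> 16 raw bytes */
	part_unpack_uuid(packed, text);				/* raw bytes -> canonical text */
}
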
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1759ba5adce8..edef168a0406 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -652,6 +652,16 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 	_max1 > _max2 ? _max1 : _max2; })
 
 /**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#define min_not_zero(x, y) ({			\
+	typeof(x) __x = (x);			\
+	typeof(y) __y = (y);			\
+	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
+/**
  * clamp - return a value clamped to a given range with strict typechecking
  * @val: current value
  * @min: minimum allowable value
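
min_not_zero() treats zero as "no limit set", the convention used when stacking queue limits: it returns the other operand when exactly one of the two is zero and only falls back to min() when both are non-zero. A small illustrative sketch (the function and variable names are made up):

/* Illustrative only: how the new helper resolves "0 means unlimited" limits. */
static void min_not_zero_examples(void)
{
	unsigned int a = min_not_zero(0U, 8U);	/* 8: first limit unset        */
	unsigned int b = min_not_zero(8U, 0U);	/* 8: second limit unset       */
	unsigned int c = min_not_zero(4U, 8U);	/* 4: both set, plain min()    */
	unsigned int d = min_not_zero(0U, 0U);	/* 0: neither limit set        */

	(void)a; (void)b; (void)c; (void)d;	/* silence unused-variable warnings */
}
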
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0383601a927c..56154bbb8da9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 					 void __user *buffer,
 					 size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
 #endif
 
 /* Attach to any functions which should be ignored in wchan output. */
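
The enum stub keeps sysctl_hung_task_timeout_secs usable (as the constant 0) when CONFIG_DETECT_HUNG_TASK is off, so block code can consult it without #ifdefs; the "Prevent hang_check firing during long I/O" change in this merge relies on exactly that. A simplified sketch of the caller pattern, not the literal blk-exec.c code (the wrapper name is made up and the completion would come from the real caller):

/* Simplified sketch of the caller pattern enabled by the stub above. */
static void wait_without_tripping_hung_task(struct completion *wait)
{
	unsigned long hang_check = sysctl_hung_task_timeout_secs;	/* 0 if disabled */

	if (hang_check)
		/* Wake periodically so the hung-task watchdog sees progress. */
		while (!wait_for_completion_timeout(wait, hang_check * (HZ / 2)))
			;
	else
		wait_for_completion(wait);
}
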