about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
Diffstat (limited to 'block')
-rw-r--r--block/blk-barrier.c3
-rw-r--r--block/blk-core.c5
-rw-r--r--block/blk-settings.c20
-rw-r--r--block/blk-tag.c8
-rw-r--r--block/bsg.c2
-rw-r--r--block/elevator.c5
6 files changed, 20 insertions, 23 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 66e55288178c..a09ead19f9c5 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n",
-				__FUNCTION__);
+		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 5d09f8c56024..b754a4a2f9bd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -136,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 	if (unlikely(nbytes > bio->bi_size)) {
 		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-		       __FUNCTION__, nbytes, bio->bi_size);
+		       __func__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
 
142 142
@@ -1566,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
 			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-			       __FUNCTION__, bio->bi_idx,
-			       bio->bi_vcnt);
+			       __func__, bio->bi_idx, bio->bi_vcnt);
 			break;
 		}
 
1573 1572
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6089384ab064..bb93d4c32775 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -168,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -196,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
@@ -220,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
@@ -241,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_size);
 	}
 
 	q->max_segment_size = max_size;
@@ -357,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-		       mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
 	}
 
 	q->seg_boundary_mask = mask;
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e176ddbe599e..de64e0429977 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
-		       __FUNCTION__, depth);
+		       __func__, depth);
 	}
 
 	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
 		printk(KERN_ERR "%s: tag %d is missing\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 
 	bqt->tag_index[tag] = NULL;
 
 	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 		return;
 	}
 	/*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
-		       __FUNCTION__, rq,
+		       __func__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
 		BUG();
 	}
diff --git a/block/bsg.c b/block/bsg.c
index 23ea4fd1a66d..fa796b605f55 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
diff --git a/block/elevator.c b/block/elevator.c
index ac5310ef8270..980f8ae147b4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -650,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
-		       __FUNCTION__, where);
+		       __func__, where);
 		BUG();
 	}
 
656 656
@@ -808,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
 			rq->cmd_flags |= REQ_QUIET;
 			end_queued_request(rq, 0);
 		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-					ret);
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
 		}
 	}