-rw-r--r--  block/blktrace.c                        |  2
-rw-r--r--  block/cfq-iosched.c                     |  2
-rw-r--r--  drivers/block/cciss.c                   | 86
-rw-r--r--  drivers/cpufreq/cpufreq.c               | 40
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |  4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c     |  3
-rw-r--r--  include/linux/cpufreq.h                 |  3

8 files changed, 70 insertions, 72 deletions
diff --git a/block/blktrace.c b/block/blktrace.c
index b8c0702777ff..265f7a830619 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
 #define trace_sync_bit(rw)	\
	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
 #define trace_ahead_bit(rw)	\
-	(((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0))
+	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
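
A note on this hunk: each trace_*_bit() macro extracts one rw flag and parks it at a fixed slot (bit 0, 1, or 2) of the bio_act[] index. The new form names the destination bit explicitly with "(2 - BIO_RW_AHEAD)" instead of deriving the shift from the flag's own position. A standalone sketch of the arithmetic — the value BIO_RW_AHEAD == 1 is assumed here from this era's <linux/bio.h>, not stated in the patch:

#include <stdio.h>

#define BIO_RW_AHEAD 1	/* assumed value from 2.6.x <linux/bio.h> */
#define trace_ahead_bit(rw) \
	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))

int main(void)
{
	unsigned int rw = 1 << BIO_RW_AHEAD;	/* request marked readahead */
	printf("%u\n", trace_ahead_bit(rw));	/* 4: the flag lands in bit 2 */
	return 0;
}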
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 102ebc2c5c34..aae3123bf3ee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
-		sl = 2;
+		sl = min(sl, msecs_to_jiffies(2));
 
	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	return 1;
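
Two things were wrong with the old "sl = 2": two jiffies is an HZ-dependent amount of wall-clock time (20 ms at HZ=100, 2 ms at HZ=1000), and an outright assignment could even lengthen a slice the earlier code had computed as shorter. min(sl, msecs_to_jiffies(2)) only ever shortens the idle window for seeky processes, to about 2 ms regardless of HZ. A userspace model of the clamp (HZ and the round-up are simplified stand-ins for the kernel's conversion):

#include <stdio.h>

#define HZ 100	/* stand-in; try 1000 to see the HZ-independence */

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;	/* simplified round-up */
}

int main(void)
{
	unsigned long sl = 8;	/* hypothetical computed slice, in jiffies */
	unsigned long cap = msecs_to_jiffies(2);

	printf("sl = %lu jiffies\n", sl < cap ? sl : cap);	/* 1 at HZ=100 */
	return 0;
}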
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
 	}
 }
 
+static void cciss_check_queues(ctlr_info_t *h)
+{
+	int start_queue = h->next_to_run;
+	int i;
+
+	/* check to see if we have maxed out the number of commands that can
+	 * be placed on the queue. If so then exit. We do this check here
+	 * in case the interrupt we serviced was from an ioctl and did not
+	 * free any new commands.
+	 */
+	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+		return;
+
+	/* We have room on the queue for more commands. Now we need to queue
+	 * them up. We will also keep track of the next queue to run so
+	 * that every queue gets a chance to be started first.
+	 */
+	for (i = 0; i < h->highest_lun + 1; i++) {
+		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
+		/* make sure the disk has been added and the drive is real
+		 * because this can be called from the middle of init_one.
+		 */
+		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
+			continue;
+		blk_start_queue(h->gendisk[curr_queue]->queue);
+
+		/* check to see if we have maxed out the number of commands
+		 * that can be placed on the queue.
+		 */
+		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+			if (curr_queue == start_queue) {
+				h->next_to_run =
+					(start_queue + 1) % (h->highest_lun + 1);
+				break;
+			} else {
+				h->next_to_run = curr_queue;
+				break;
+			}
+		} else {
+			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
+		}
+	}
+}
+
 static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
 	spin_lock_irqsave(&h->lock, flags);
 	end_that_request_last(rq, rq->errors);
 	cmd_free(h, cmd, 1);
+	cciss_check_queues(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	CommandList_struct *c;
 	unsigned long flags;
 	__u32 a, a1, a2;
-	int j;
-	int start_queue = h->next_to_run;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 		}
 	}
 
-	/* check to see if we have maxed out the number of commands that can
-	 * be placed on the queue. If so then exit. We do this check here
-	 * in case the interrupt we serviced was from an ioctl and did not
-	 * free any new commands.
-	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
-		goto cleanup;
-
-	/* We have room on the queue for more commands. Now we need to queue
-	 * them up. We will also keep track of the next queue to run so
-	 * that every queue gets a chance to be started first.
-	 */
-	for (j = 0; j < h->highest_lun + 1; j++) {
-		int curr_queue = (start_queue + j) % (h->highest_lun + 1);
-		/* make sure the disk has been added and the drive is real
-		 * because this can be called from the middle of init_one.
-		 */
-		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
-			continue;
-		blk_start_queue(h->gendisk[curr_queue]->queue);
-
-		/* check to see if we have maxed out the number of commands
-		 * that can be placed on the queue.
-		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
-			if (curr_queue == start_queue) {
-				h->next_to_run =
-					(start_queue + 1) % (h->highest_lun + 1);
-				goto cleanup;
-			} else {
-				h->next_to_run = curr_queue;
-				goto cleanup;
-			}
-		} else {
-			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
-		}
-	}
-
-cleanup:
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
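
The removed block is the same code added above as cciss_check_queues(): the pool-full check and the round-robin queue kick now run from the request-completion (softirq) path rather than inline in the interrupt handler. The fairness scheme — scan from h->next_to_run, wrap modulo the number of logical drives, and record where to resume once the command pool fills — can be modeled on its own; pool_has_room() below is a hypothetical stand-in for the find_first_zero_bit() test:

#include <stdio.h>
#include <stdbool.h>

static int next_to_run;	/* models h->next_to_run */

static bool pool_has_room(void)
{
	static int budget = 2;	/* pretend only two commands fit */
	return budget-- > 0;
}

static void check_queues(int nqueues)
{
	int start = next_to_run;
	int i;

	for (i = 0; i < nqueues; i++) {
		int q = (start + i) % nqueues;

		printf("kick queue %d\n", q);
		if (!pool_has_room()) {
			/* remember where to resume the scan next time */
			next_to_run = (q == start) ? (start + 1) % nqueues : q;
			break;
		}
	}
}

int main(void)
{
	check_queues(4);				/* kicks 0, 1, 2; pool fills */
	printf("next_to_run = %d\n", next_to_run);	/* 2 */
	return 0;
}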
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
 	if (ret != 1)						\
 		return -EINVAL;					\
 								\
+	lock_cpu_hotplug();					\
 	mutex_lock(&policy->lock);				\
 	ret = __cpufreq_set_policy(policy, &new_policy);	\
 	policy->user_policy.object = policy->object;		\
 	mutex_unlock(&policy->lock);				\
+	unlock_cpu_hotplug();					\
 								\
 	return ret ? ret : count;				\
 }
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *********************************************************************/
 
 
+/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
 
-	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
-	unlock_cpu_hotplug();
-
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
 	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	mutex_unlock(&policy->lock);
+	unlock_cpu_hotplug();
 
 	cpufreq_cpu_put(policy);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ * when "event" is CPUFREQ_GOV_LIMITS
+ */
 
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 }
 
 
-int cpufreq_governor(unsigned int cpu, unsigned int event)
-{
-	int ret = 0;
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy)
-		return -EINVAL;
-
-	mutex_lock(&policy->lock);
-	ret = __cpufreq_governor(policy, event);
-	mutex_unlock(&policy->lock);
-
-	cpufreq_cpu_put(policy);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cpufreq_governor);
-
-
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ */
 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
 {
 	int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
+
 	/* lock this CPU */
 	mutex_lock(&data->lock);
 
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.governor = data->governor;
 
 	mutex_unlock(&data->lock);
+
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
+	lock_cpu_hotplug();
 	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	ret = __cpufreq_set_policy(data, &policy);
 
 	mutex_unlock(&data->lock);
-
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 	return ret;
 }
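
Taken together, the cpufreq.c hunks establish a single locking convention: the CPU hotplug lock is taken by the outermost entry point, before the per-policy mutex, and the double-underscore helpers now document that they expect it held instead of taking it themselves. A compilable model of the ordering, with pthread mutexes standing in for the kernel primitives (the names here are illustrative, not kernel API):

#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t policy_lock  = PTHREAD_MUTEX_INITIALIZER;

static void set_policy_helper(void)
{
	/* models __cpufreq_set_policy(): runs with both locks held,
	 * takes neither */
}

void set_policy_entry(void)
{
	pthread_mutex_lock(&hotplug_lock);	/* outer: lock_cpu_hotplug() */
	pthread_mutex_lock(&policy_lock);	/* inner: policy->lock */
	set_policy_helper();
	pthread_mutex_unlock(&policy_lock);
	pthread_mutex_unlock(&hotplug_lock);	/* release in reverse order */
}

int main(void)
{
	set_policy_entry();
	return 0;
}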
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 178f0c547eb7..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -309,7 +309,9 @@ static void do_dbs_timer(void *data)
 	if (!dbs_info->enable)
 		return;
 
+	lock_cpu_hotplug();
 	dbs_check_cpu(dbs_info);
+	unlock_cpu_hotplug();
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
@@ -412,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -423,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 						policy->min,
 						CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
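
With the core entry points above now holding the hotplug lock across __cpufreq_set_policy() — and therefore across the governors' CPUFREQ_GOV_LIMITS handling, per the new comment on __cpufreq_governor() — a governor that also called lock_cpu_hotplug() would be re-acquiring a lock already held on the same call chain. Both conservative and ondemand drop that inner acquisition; do_dbs_timer() gains its own lock/unlock pair because the timer fires outside any caller that holds the lock. A small demo of why nesting a non-recursive lock is fatal, using an error-checking pthread mutex so the bug is reported rather than hanging:

#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);		/* outer caller takes the lock */
	if (pthread_mutex_lock(&m))	/* inner path tries to retake it */
		printf("relock refused: would deadlock a plain mutex\n");

	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return 0;
}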
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
+	lock_cpu_hotplug();
 	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[policy->cpu])
 		goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
  err:
 	mutex_unlock(&userspace_mutex);
+	unlock_cpu_hotplug();
 	return ret;
 }
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 35e137636b0b..4ea39fee99c7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
 				   unsigned int relation);
 
 
-/* pass an event to the cpufreq governor */
-int cpufreq_governor(unsigned int cpu, unsigned int event);
-
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 