 arch/alpha/kernel/smp.c   | 25
 block/elevator.c          | 70
 block/ll_rw_blk.c         |  4
 drivers/macintosh/Kconfig |  2
 include/linux/elevator.h  |  1
 mm/slob.c                 |  2
 6 files changed, 47 insertions(+), 57 deletions(-)
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 4b873527ce1c..02c2db08114a 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -73,9 +73,6 @@ cpumask_t cpu_online_map;
 
 EXPORT_SYMBOL(cpu_online_map);
 
-/* cpus reported in the hwrpb */
-static unsigned long hwrpb_cpu_present_mask __initdata = 0;
-
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
 
@@ -442,7 +439,7 @@ setup_smp(void)
 			if ((cpu->flags & 0x1cc) == 0x1cc) {
 				smp_num_probed++;
 				/* Assume here that "whami" == index */
-				hwrpb_cpu_present_mask |= (1UL << i);
+				cpu_set(i, cpu_possible_map);
 				cpu->pal_revision = boot_cpu_palrev;
 			}
 
@@ -453,12 +450,12 @@ setup_smp(void)
 		}
 	} else {
 		smp_num_probed = 1;
-		hwrpb_cpu_present_mask = (1UL << boot_cpuid);
+		cpu_set(boot_cpuid, cpu_possible_map);
 	}
 	cpu_present_mask = cpumask_of_cpu(boot_cpuid);
 
 	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
-	       smp_num_probed, hwrpb_cpu_present_mask);
+	       smp_num_probed, cpu_possible_map.bits[0]);
 }
 
 /*
@@ -467,8 +464,6 @@ setup_smp(void)
 void __init
 smp_prepare_cpus(unsigned int max_cpus)
 {
-	int cpu_count, i;
-
 	/* Take care of some initial bookkeeping.  */
 	memset(ipi_data, 0, sizeof(ipi_data));
 
@@ -486,19 +481,7 @@ smp_prepare_cpus(unsigned int max_cpus)
 
 	printk(KERN_INFO "SMP starting up secondaries.\n");
 
-	cpu_count = 1;
-	for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) {
-		if (i == boot_cpuid)
-			continue;
-
-		if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
-			continue;
-
-		cpu_set(i, cpu_possible_map);
-		cpu_count++;
-	}
-
-	smp_num_cpus = cpu_count;
+	smp_num_cpus = smp_num_probed;
 }
 
 void __devinit
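
Note on the alpha change above: the arch-private hwrpb_cpu_present_mask bitmask is dropped and probed CPUs are recorded directly in the generic cpu_possible_map. A minimal sketch of the pattern, assuming the standard <linux/cpumask.h> helpers of this kernel series (the example function names are illustrative, not part of the patch):

	#include <linux/cpumask.h>

	/* Illustrative only: record a probed CPU in the generic map. */
	static void example_mark_probed(int i)
	{
		cpu_set(i, cpu_possible_map);	/* replaces: mask |= (1UL << i) */
	}

	/* Illustrative only: count probed CPUs without open-coded shifting. */
	static int example_probed_count(void)
	{
		return cpus_weight(cpu_possible_map);
	}
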
diff --git a/block/elevator.c b/block/elevator.c
index 2fc269f69726..24b702d649a9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -293,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -310,41 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-		       int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
 
-	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
-
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		if (blk_barrier_rq(rq))
-			q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update end_sector
-		 */
-		if (blk_fs_request(rq)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
 	rq->q = q;
 
 	switch (where) {
@@ -425,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	}
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		if (blk_barrier_rq(rq))
+			q->ordcolor ^= 1;
+
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update
+		 * end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
 		     int plug)
 {
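
Note on the elevator.c change above: the old __elv_add_request() is split in two. The new elv_insert() performs only the list placement for a given insertion point, while __elv_add_request() keeps the ordered-color, barrier back-insertion and plugging policy and then delegates to elv_insert(). A rough sketch of how callers divide between the two entry points, mirroring the patch (the wrapper function names are made up for illustration):

	/* Normal submission: let the policy layer handle barriers and plugging. */
	static void example_submit(request_queue_t *q, struct request *rq)
	{
		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
	}

	/*
	 * Requeue and ordered-flush paths: the request flags are already set
	 * up, so it is placed directly without re-running the barrier logic.
	 */
	static void example_requeue(request_queue_t *q, struct request *rq)
	{
		elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
	}
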
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ee5ed98db4cd..03d9c82b0fe7 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -454,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -490,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
 	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index b11cd31d8d27..12ad462737ba 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -190,7 +190,7 @@ config WINDFARM_PM91
 config WINDFARM_PM112
 	tristate "Support for thermal management on PowerMac11,2"
 	depends on WINDFARM && I2C && PMAC_SMU
-	select I2C_PMAC_SMU
+	select I2C_POWERMAC
 	help
 	  This driver provides thermal control for the PowerMac11,2
 	  which are the recent dual and quad G5 machines using the
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 23fe746a1d51..18cf1f3e1184 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -82,6 +82,7 @@ struct elevator_queue
 extern void elv_dispatch_sort(request_queue_t *, struct request *);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
+extern void elv_insert(request_queue_t *, struct request *, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 				struct request *);
diff --git a/mm/slob.c b/mm/slob.c
index 1c240c4b71d9..a1f42bdc0245 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(slab_reclaim_pages);
 
 #ifdef CONFIG_SMP
 
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
 {
 	int i;
 	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
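
Note on the slob.c change above: SLOB's __alloc_percpu() drops its unused align argument so the prototype matches the one callers use elsewhere. A hedged usage sketch, assuming the usual alloc_percpu()/per_cpu_ptr() wrappers from <linux/percpu.h> (the example type and init function are hypothetical):

	#include <linux/percpu.h>
	#include <linux/errno.h>

	struct example_stat { long count; };	/* hypothetical per-CPU data */

	static struct example_stat *stats;

	static int example_init(void)
	{
		/* alloc_percpu() ends up in __alloc_percpu(size) */
		stats = alloc_percpu(struct example_stat);
		if (!stats)
			return -ENOMEM;
		per_cpu_ptr(stats, 0)->count = 0;	/* touch CPU 0's copy */
		return 0;
	}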