| author | Ingo Molnar <mingo@elte.hu> | 2008-09-14 11:23:29 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-14 11:23:29 -0400 |
| commit | 9dfed08eb4086584205717894177a9ee930c88c4 (patch) | |
| tree | 3455b242f0e85a86d836858a86bd6ffd21279e22 /arch/powerpc | |
| parent | 1cf44baad76b6f20f95ece397c6f643320aa44c9 (diff) | |
| parent | adee14b2e1557d0a8559f29681732d05a89dfc35 (diff) | |
Merge commit 'v2.6.27-rc6' into core/resources
Diffstat (limited to 'arch/powerpc')
| -rw-r--r-- | arch/powerpc/boot/Makefile | 2 |
| -rw-r--r-- | arch/powerpc/include/asm/sections.h | 3 |
| -rw-r--r-- | arch/powerpc/kernel/module_64.c | 13 |
| -rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 29 |

4 files changed, 39 insertions, 8 deletions
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 14174aa24074..717a3bc1352e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -49,7 +49,7 @@ zlib := inffast.c inflate.c inftrees.c
 zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
 zliblinuxheader := zlib.h zconf.h zutil.h
 
-$(addprefix $(obj)/,$(zlib) gunzip_util.o main.o): \
+$(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o prpmc2800.o): \
 	$(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
 
 src-libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 916018e425c4..7710e9e6660f 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -16,6 +16,9 @@ static inline int in_kernel_text(unsigned long addr)
 	return 0;
 }
 
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ee6a2982d567..ad79de272ff3 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -21,8 +21,9 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/bug.h>
+#include <linux/uaccess.h>
 #include <asm/module.h>
-#include <asm/uaccess.h>
+#include <asm/sections.h>
 #include <asm/firmware.h>
 #include <asm/code-patching.h>
 #include <linux/sort.h>
@@ -451,3 +452,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 
 	return 0;
 }
+
+void *dereference_function_descriptor(void *ptr)
+{
+	struct ppc64_opd_entry *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->funcaddr, p))
+		ptr = p;
+	return ptr;
+}
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1c1b627ee843..67595bc380dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -643,9 +643,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 		    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 		    (!victim || tmp->prio > victim->prio)) {
 			victim = spu->ctx;
-			get_spu_context(victim);
 		}
 	}
+	if (victim)
+		get_spu_context(victim);
 	mutex_unlock(&cbe_spu_info[node].list_mutex);
 
 	if (victim) {
@@ -727,17 +728,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 	/* not a candidate for interruptible because it's called either
 	   from the scheduler thread or from spu_deactivate */
 	mutex_lock(&ctx->state_mutex);
-	__spu_schedule(spu, ctx);
+	if (ctx->state == SPU_STATE_SAVED)
+		__spu_schedule(spu, ctx);
 	spu_release(ctx);
 }
 
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu: The SPU to unschedule from
+ * @ctx: The context currently scheduled on the SPU
+ * @free_spu Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+		int free_spu)
 {
 	int node = spu->node;
 
 	mutex_lock(&cbe_spu_info[node].list_mutex);
 	cbe_spu_info[node].nr_active--;
-	spu->alloc_state = SPU_FREE;
+	if (free_spu)
+		spu->alloc_state = SPU_FREE;
 	spu_unbind_context(spu, ctx);
 	ctx->stats.invol_ctx_switch++;
 	spu->stats.invol_ctx_switch++;
@@ -837,7 +854,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			spu_unschedule(spu, ctx);
+			spu_unschedule(spu, ctx, new == NULL);
 			if (new) {
 				if (new->flags & SPU_CREATE_NOSCHED)
 					wake_up(&new->stop_wq);
@@ -910,7 +927,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
-		spu_unschedule(spu, ctx);
+		spu_unschedule(spu, ctx, 0);
 		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {
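Taken together, the two call sites follow the convention the new kerneldoc describes: free_spu is non-zero only when no replacement context is in hand, so the SPU stays reserved for an immediate re-bind otherwise. The toy program below is a hedged, stand-alone illustration of that calling convention; the struct and function names are simplified stand-ins, not the real spufs types.

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins, not the spufs types. */
struct toy_ctx { const char *name; };
struct toy_spu { int is_free; struct toy_ctx *running; };

/* Mirrors the new calling convention of spu_unschedule(): the caller says
 * whether the SPU should return to the idle pool (free_spu != 0) or stay
 * reserved for a context the caller is about to bind. */
static void toy_unschedule(struct toy_spu *spu, struct toy_ctx *old, int free_spu)
{
	spu->running = NULL;
	if (free_spu)
		spu->is_free = 1;
	printf("unbound %s, SPU %s\n", old->name,
	       free_spu ? "freed" : "kept reserved");
}

int main(void)
{
	struct toy_ctx a = { "ctx-a" }, b = { "ctx-b" };
	struct toy_spu spu = { 0, &a };
	struct toy_ctx *new_ctx = &b;	/* pretend grab_runnable_context() succeeded */

	/* __spu_deactivate-style call: free the SPU only if nothing replaces ctx. */
	toy_unschedule(&spu, &a, new_ctx == NULL);

	/* spusched_tick-style call: keep the SPU, the replacement context takes it. */
	spu.running = &b;
	toy_unschedule(&spu, &b, 0);
	return 0;
}
```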