Diffstat:

 arch/powerpc/kernel/module_32.c | 77 +++++++++++++++++++++++++++++-------
 1 file changed, 63 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 07a89a398639..eab313858315 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/cache.h>
 #include <linux/bug.h>
+#include <linux/sort.h>
 
 #include "setup.h"
 
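The only change in this first hunk is pulling in the kernel's generic sort
routine, implemented in lib/sort.c. For reference, the declaration that
<linux/sort.h> provides looks roughly like the sketch below (parameter names
have varied across kernel versions, so treat them as approximate):

void sort(void *base, size_t num, size_t size,
	  int (*cmp)(const void *, const void *),
	  void (*swap)(void *, void *, int size));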
@@ -54,22 +55,60 @@ void module_free(struct module *mod, void *module_region)
    addend) */
 static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
 {
-	unsigned int i, j, ret = 0;
+	unsigned int i, r_info, r_addend, _count_relocs;
 
-	/* Sure, this is order(n^2), but it's usually short, and not
-	   time critical */
-	for (i = 0; i < num; i++) {
-		for (j = 0; j < i; j++) {
-			/* If this addend appeared before, it's
-			   already been counted */
-			if (ELF32_R_SYM(rela[i].r_info)
-			    == ELF32_R_SYM(rela[j].r_info)
-			    && rela[i].r_addend == rela[j].r_addend)
-				break;
+	_count_relocs = 0;
+	r_info = 0;
+	r_addend = 0;
+	for (i = 0; i < num; i++)
+		/* Only count 24-bit relocs, others don't need stubs */
+		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
+		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
+		     r_addend != rela[i].r_addend)) {
+			_count_relocs++;
+			r_info = ELF32_R_SYM(rela[i].r_info);
+			r_addend = rela[i].r_addend;
 		}
-		if (j == i) ret++;
+
+	return _count_relocs;
+}
+
+static int relacmp(const void *_x, const void *_y)
+{
+	const Elf32_Rela *x, *y;
+
+	y = (Elf32_Rela *)_x;
+	x = (Elf32_Rela *)_y;
+
+	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
+	 * make the comparison cheaper/faster. It won't affect the sorting or
+	 * the counting algorithms' performance
+	 */
+	if (x->r_info < y->r_info)
+		return -1;
+	else if (x->r_info > y->r_info)
+		return 1;
+	else if (x->r_addend < y->r_addend)
+		return -1;
+	else if (x->r_addend > y->r_addend)
+		return 1;
+	else
+		return 0;
+}
+
+static void relaswap(void *_x, void *_y, int size)
+{
+	uint32_t *x, *y, tmp;
+	int i;
+
+	y = (uint32_t *)_x;
+	x = (uint32_t *)_y;
+
+	for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
+		tmp = x[i];
+		x[i] = y[i];
+		y[i] = tmp;
 	}
-	return ret;
 }
 
 /* Get the potential trampolines size required of the init and
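Taken together, this hunk replaces the quadratic duplicate scan with a single
linear pass that counts an R_PPC_REL24 entry only when its (symbol, addend)
key differs from the last one seen, which is correct once the table has been
sorted so that equal keys are adjacent. A minimal userspace sketch of the
same sort-then-scan idea follows; the fake_rela struct, its sample values,
and the helper names are made up for illustration, with libc's qsort()
standing in for the kernel's sort():

/* Sketch of the sort-then-scan technique.  The kernel operates on
 * Elf32_Rela and additionally filters for R_PPC_REL24 entries.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct fake_rela {			/* stand-in for Elf32_Rela */
	uint32_t r_info;
	int32_t r_addend;
};

static int fake_relacmp(const void *_x, const void *_y)
{
	const struct fake_rela *x = _x, *y = _y;

	if (x->r_info != y->r_info)
		return x->r_info < y->r_info ? -1 : 1;
	if (x->r_addend != y->r_addend)
		return x->r_addend < y->r_addend ? -1 : 1;
	return 0;
}

/* After sorting, equal keys are adjacent, so one pass suffices: count
 * an entry only when it differs from its predecessor, just as the new
 * count_relocs() compares against the last-seen (r_info, r_addend).
 */
static unsigned int count_unique(const struct fake_rela *r, unsigned int num)
{
	unsigned int i, count = 0;

	for (i = 0; i < num; i++)
		if (i == 0 || fake_relacmp(&r[i - 1], &r[i]) != 0)
			count++;
	return count;
}

int main(void)
{
	struct fake_rela r[] = {
		{ 3, 0 }, { 1, 4 }, { 3, 0 }, { 1, 4 }, { 2, 0 },
	};
	unsigned int num = sizeof(r) / sizeof(r[0]);

	qsort(r, num, sizeof(r[0]), fake_relacmp);
	printf("unique relocation targets: %u\n", count_unique(r, num));
	return 0;			/* prints 3 */
}

The sort costs O(n log n), but it turns the duplicate check from O(n^2)
pairwise comparisons into a single pass, which is the point of the patch.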
@@ -100,6 +139,16 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
100 DEBUGP("Ptr: %p. Number: %u\n", 139 DEBUGP("Ptr: %p. Number: %u\n",
101 (void *)hdr + sechdrs[i].sh_offset, 140 (void *)hdr + sechdrs[i].sh_offset,
102 sechdrs[i].sh_size / sizeof(Elf32_Rela)); 141 sechdrs[i].sh_size / sizeof(Elf32_Rela));
142
143 /* Sort the relocation information based on a symbol and
144 * addend key. This is a stable O(n*log n) complexity
145 * alogrithm but it will reduce the complexity of
146 * count_relocs() to linear complexity O(n)
147 */
148 sort((void *)hdr + sechdrs[i].sh_offset,
149 sechdrs[i].sh_size / sizeof(Elf32_Rela),
150 sizeof(Elf32_Rela), relacmp, relaswap);
151
103 ret += count_relocs((void *)hdr 152 ret += count_relocs((void *)hdr
104 + sechdrs[i].sh_offset, 153 + sechdrs[i].sh_offset,
105 sechdrs[i].sh_size 154 sechdrs[i].sh_size
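A few notes on the generic sort() being used. lib/sort.c implements a
heapsort, so the "stable" claim in the comment above is not strictly
accurate; stability is also not needed, since count_relocs() only requires
that equal (symbol, addend) keys end up adjacent. Likewise, relacmp()
assigns _x to y and _y to x, so the table actually ends up sorted in
descending order, which is equally fine for that purpose. A custom
relaswap() is supplied presumably because the kernel's fallback swap of this
era copies byte by byte for any record size other than 4 bytes, whereas an
Elf32_Rela is three 32-bit words. The runnable userspace sketch below mimics
the sort(base, num, size, cmp, swap) calling convention; toy_sort, cmp_rela
and swap_rela are made-up names, and the insertion sort is merely a stand-in
for the kernel's heapsort:

#include <stdio.h>
#include <stdint.h>

struct fake_rela {			/* stand-in for Elf32_Rela */
	uint32_t r_info;
	int32_t r_addend;
};

/* Same shape as the kernel's sort(); only the callback plumbing is
 * the point here, not the sorting algorithm itself.
 */
static void toy_sort(void *base, size_t num, size_t size,
		     int (*cmp)(const void *, const void *),
		     void (*swp)(void *, void *, int))
{
	char *b = base;
	size_t i, j;

	for (i = 1; i < num; i++)
		for (j = i; j > 0 &&
		     cmp(b + (j - 1) * size, b + j * size) > 0; j--)
			swp(b + (j - 1) * size, b + j * size, (int)size);
}

static int cmp_rela(const void *_x, const void *_y)
{
	const struct fake_rela *x = _x, *y = _y;

	if (x->r_info != y->r_info)
		return x->r_info < y->r_info ? -1 : 1;
	if (x->r_addend != y->r_addend)
		return x->r_addend < y->r_addend ? -1 : 1;
	return 0;
}

/* Word-wise swap in the style of relaswap(); the record size is a
 * multiple of sizeof(uint32_t), so no bytes are left over. */
static void swap_rela(void *_x, void *_y, int size)
{
	uint32_t *x = _x, *y = _y, tmp;
	int i;

	for (i = 0; i < size / (int)sizeof(uint32_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}

int main(void)
{
	struct fake_rela r[] = { { 3, 0 }, { 1, 4 }, { 2, -1 }, { 1, 2 } };
	size_t i, num = sizeof(r) / sizeof(r[0]);

	toy_sort(r, num, sizeof(r[0]), cmp_rela, swap_rela);
	for (i = 0; i < num; i++)	/* (1,2) (1,4) (2,-1) (3,0) */
		printf("(%u, %d)\n", (unsigned)r[i].r_info,
		       (int)r[i].r_addend);
	return 0;
}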