diff options
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig | 6 | ||||
| -rw-r--r-- | lib/Kconfig.debug | 14 | ||||
| -rw-r--r-- | lib/Makefile | 2 | ||||
| -rw-r--r-- | lib/hexdump.c | 149 | ||||
| -rw-r--r-- | lib/idr.c | 332 | ||||
| -rw-r--r-- | lib/kobject.c | 29 | ||||
| -rw-r--r-- | lib/lzo/Makefile | 5 | ||||
| -rw-r--r-- | lib/lzo/lzo1x_compress.c | 226 | ||||
| -rw-r--r-- | lib/lzo/lzo1x_decompress.c | 254 | ||||
| -rw-r--r-- | lib/lzo/lzodefs.h | 43 | ||||
| -rw-r--r-- | lib/radix-tree.c | 1 |
11 files changed, 994 insertions, 67 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 2e7ae6b9215b..3eb29d5dc4f5 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
| @@ -64,6 +64,12 @@ config ZLIB_INFLATE | |||
| 64 | config ZLIB_DEFLATE | 64 | config ZLIB_DEFLATE |
| 65 | tristate | 65 | tristate |
| 66 | 66 | ||
| 67 | config LZO_COMPRESS | ||
| 68 | tristate | ||
| 69 | |||
| 70 | config LZO_DECOMPRESS | ||
| 71 | tristate | ||
| 72 | |||
| 67 | # | 73 | # |
| 68 | # Generic allocator support is selected if needed | 74 | # Generic allocator support is selected if needed |
| 69 | # | 75 | # |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1ba77ca7d165..fab32a286371 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -105,6 +105,15 @@ config DETECT_SOFTLOCKUP | |||
| 105 | can be detected via the NMI-watchdog, on platforms that | 105 | can be detected via the NMI-watchdog, on platforms that |
| 106 | support it.) | 106 | support it.) |
| 107 | 107 | ||
| 108 | config SCHED_DEBUG | ||
| 109 | bool "Collect scheduler debugging info" | ||
| 110 | depends on DEBUG_KERNEL && PROC_FS | ||
| 111 | default y | ||
| 112 | help | ||
| 113 | If you say Y here, the /proc/sched_debug file will be provided | ||
| 114 | that can help debug the scheduler. The runtime overhead of this | ||
| 115 | option is minimal. | ||
| 116 | |||
| 108 | config SCHEDSTATS | 117 | config SCHEDSTATS |
| 109 | bool "Collect scheduler statistics" | 118 | bool "Collect scheduler statistics" |
| 110 | depends on DEBUG_KERNEL && PROC_FS | 119 | depends on DEBUG_KERNEL && PROC_FS |
| @@ -126,7 +135,10 @@ config TIMER_STATS | |||
| 126 | reprogrammed. The statistics can be read from /proc/timer_stats. | 135 | reprogrammed. The statistics can be read from /proc/timer_stats. |
| 127 | The statistics collection is started by writing 1 to /proc/timer_stats, | 136 | The statistics collection is started by writing 1 to /proc/timer_stats, |
| 128 | writing 0 stops it. This feature is useful to collect information | 137 | writing 0 stops it. This feature is useful to collect information |
| 129 | about timer usage patterns in kernel and userspace. | 138 | about timer usage patterns in kernel and userspace. This feature |
| 139 | is lightweight if enabled in the kernel config but not activated | ||
| 140 | (it defaults to deactivated on bootup and will only be activated | ||
| 141 | if some application like powertop activates it explicitly). | ||
| 130 | 142 | ||
| 131 | config DEBUG_SLAB | 143 | config DEBUG_SLAB |
| 132 | bool "Debug slab memory allocations" | 144 | bool "Debug slab memory allocations" |
diff --git a/lib/Makefile b/lib/Makefile index c8c8e20784ce..d1b366bdf86e 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -49,6 +49,8 @@ obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o | |||
| 49 | obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ | 49 | obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ |
| 50 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ | 50 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ |
| 51 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ | 51 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ |
| 52 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ | ||
| 53 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ | ||
| 52 | 54 | ||
| 53 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o | 55 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o |
| 54 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o | 56 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o |
diff --git a/lib/hexdump.c b/lib/hexdump.c index e6da5b7fc29a..473f5aed6cae 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c | |||
| @@ -16,42 +16,98 @@ | |||
| 16 | * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory | 16 | * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory |
| 17 | * @buf: data blob to dump | 17 | * @buf: data blob to dump |
| 18 | * @len: number of bytes in the @buf | 18 | * @len: number of bytes in the @buf |
| 19 | * @rowsize: number of bytes to print per line; must be 16 or 32 | ||
| 20 | * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) | ||
| 19 | * @linebuf: where to put the converted data | 21 | * @linebuf: where to put the converted data |
| 20 | * @linebuflen: total size of @linebuf, including space for terminating NUL | 22 | * @linebuflen: total size of @linebuf, including space for terminating NUL |
| 23 | * @ascii: include ASCII after the hex output | ||
| 21 | * | 24 | * |
| 22 | * hex_dump_to_buffer() works on one "line" of output at a time, i.e., | 25 | * hex_dump_to_buffer() works on one "line" of output at a time, i.e., |
| 23 | * 16 bytes of input data converted to hex + ASCII output. | 26 | * 16 or 32 bytes of input data converted to hex + ASCII output. |
| 24 | * | 27 | * |
| 25 | * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data | 28 | * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data |
| 26 | * to a hex + ASCII dump at the supplied memory location. | 29 | * to a hex + ASCII dump at the supplied memory location. |
| 27 | * The converted output is always NUL-terminated. | 30 | * The converted output is always NUL-terminated. |
| 28 | * | 31 | * |
| 29 | * E.g.: | 32 | * E.g.: |
| 30 | * hex_dump_to_buffer(frame->data, frame->len, linebuf, sizeof(linebuf)); | 33 | * hex_dump_to_buffer(frame->data, frame->len, 16, 1, |
| 34 | * linebuf, sizeof(linebuf), 1); | ||
| 31 | * | 35 | * |
| 32 | * example output buffer: | 36 | * example output buffer: |
| 33 | * 40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO | 37 | * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO |
| 34 | */ | 38 | */ |
| 35 | void hex_dump_to_buffer(const void *buf, size_t len, char *linebuf, | 39 | void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, |
| 36 | size_t linebuflen) | 40 | int groupsize, char *linebuf, size_t linebuflen, |
| 41 | bool ascii) | ||
| 37 | { | 42 | { |
| 38 | const u8 *ptr = buf; | 43 | const u8 *ptr = buf; |
| 39 | u8 ch; | 44 | u8 ch; |
| 40 | int j, lx = 0; | 45 | int j, lx = 0; |
| 46 | int ascii_column; | ||
| 41 | 47 | ||
| 42 | for (j = 0; (j < 16) && (j < len) && (lx + 3) < linebuflen; j++) { | 48 | if (rowsize != 16 && rowsize != 32) |
| 43 | if (j && !(j % 4)) | 49 | rowsize = 16; |
| 50 | |||
| 51 | if (!len) | ||
| 52 | goto nil; | ||
| 53 | if (len > rowsize) /* limit to one line at a time */ | ||
| 54 | len = rowsize; | ||
| 55 | if ((len % groupsize) != 0) /* no mixed size output */ | ||
| 56 | groupsize = 1; | ||
| 57 | |||
| 58 | switch (groupsize) { | ||
| 59 | case 8: { | ||
| 60 | const u64 *ptr8 = buf; | ||
| 61 | int ngroups = len / groupsize; | ||
| 62 | |||
| 63 | for (j = 0; j < ngroups; j++) | ||
| 64 | lx += scnprintf(linebuf + lx, linebuflen - lx, | ||
| 65 | "%16.16llx ", (unsigned long long)*(ptr8 + j)); | ||
| 66 | ascii_column = 17 * ngroups + 2; | ||
| 67 | break; | ||
| 68 | } | ||
| 69 | |||
| 70 | case 4: { | ||
| 71 | const u32 *ptr4 = buf; | ||
| 72 | int ngroups = len / groupsize; | ||
| 73 | |||
| 74 | for (j = 0; j < ngroups; j++) | ||
| 75 | lx += scnprintf(linebuf + lx, linebuflen - lx, | ||
| 76 | "%8.8x ", *(ptr4 + j)); | ||
| 77 | ascii_column = 9 * ngroups + 2; | ||
| 78 | break; | ||
| 79 | } | ||
| 80 | |||
| 81 | case 2: { | ||
| 82 | const u16 *ptr2 = buf; | ||
| 83 | int ngroups = len / groupsize; | ||
| 84 | |||
| 85 | for (j = 0; j < ngroups; j++) | ||
| 86 | lx += scnprintf(linebuf + lx, linebuflen - lx, | ||
| 87 | "%4.4x ", *(ptr2 + j)); | ||
| 88 | ascii_column = 5 * ngroups + 2; | ||
| 89 | break; | ||
| 90 | } | ||
| 91 | |||
| 92 | default: | ||
| 93 | for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; | ||
| 94 | j++) { | ||
| 95 | ch = ptr[j]; | ||
| 96 | linebuf[lx++] = hex_asc(ch >> 4); | ||
| 97 | linebuf[lx++] = hex_asc(ch & 0x0f); | ||
| 44 | linebuf[lx++] = ' '; | 98 | linebuf[lx++] = ' '; |
| 45 | ch = ptr[j]; | 99 | } |
| 46 | linebuf[lx++] = hex_asc(ch >> 4); | 100 | ascii_column = 3 * rowsize + 2; |
| 47 | linebuf[lx++] = hex_asc(ch & 0x0f); | 101 | break; |
| 48 | } | 102 | } |
| 49 | if ((lx + 2) < linebuflen) { | 103 | if (!ascii) |
| 50 | linebuf[lx++] = ' '; | 104 | goto nil; |
| 105 | |||
| 106 | while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) | ||
| 51 | linebuf[lx++] = ' '; | 107 | linebuf[lx++] = ' '; |
| 52 | } | 108 | for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) |
| 53 | for (j = 0; (j < 16) && (j < len) && (lx + 2) < linebuflen; j++) | ||
| 54 | linebuf[lx++] = isprint(ptr[j]) ? ptr[j] : '.'; | 109 | linebuf[lx++] = isprint(ptr[j]) ? ptr[j] : '.'; |
| 110 | nil: | ||
| 55 | linebuf[lx++] = '\0'; | 111 | linebuf[lx++] = '\0'; |
| 56 | } | 112 | } |
| 57 | EXPORT_SYMBOL(hex_dump_to_buffer); | 113 | EXPORT_SYMBOL(hex_dump_to_buffer); |
| @@ -59,46 +115,83 @@ EXPORT_SYMBOL(hex_dump_to_buffer); | |||
| 59 | /** | 115 | /** |
| 60 | * print_hex_dump - print a text hex dump to syslog for a binary blob of data | 116 | * print_hex_dump - print a text hex dump to syslog for a binary blob of data |
| 61 | * @level: kernel log level (e.g. KERN_DEBUG) | 117 | * @level: kernel log level (e.g. KERN_DEBUG) |
| 118 | * @prefix_str: string to prefix each line with; | ||
| 119 | * caller supplies trailing spaces for alignment if desired | ||
| 62 | * @prefix_type: controls whether prefix of an offset, address, or none | 120 | * @prefix_type: controls whether prefix of an offset, address, or none |
| 63 | * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) | 121 | * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) |
| 122 | * @rowsize: number of bytes to print per line; must be 16 or 32 | ||
| 123 | * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) | ||
| 64 | * @buf: data blob to dump | 124 | * @buf: data blob to dump |
| 65 | * @len: number of bytes in the @buf | 125 | * @len: number of bytes in the @buf |
| 126 | * @ascii: include ASCII after the hex output | ||
| 66 | * | 127 | * |
| 67 | * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump | 128 | * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump |
| 68 | * to the kernel log at the specified kernel log level, with an optional | 129 | * to the kernel log at the specified kernel log level, with an optional |
| 69 | * leading prefix. | 130 | * leading prefix. |
| 70 | * | 131 | * |
| 132 | * print_hex_dump() works on one "line" of output at a time, i.e., | ||
| 133 | * 16 or 32 bytes of input data converted to hex + ASCII output. | ||
| 134 | * print_hex_dump() iterates over the entire input @buf, breaking it into | ||
| 135 | * "line size" chunks to format and print. | ||
| 136 | * | ||
| 71 | * E.g.: | 137 | * E.g.: |
| 72 | * print_hex_dump(KERN_DEBUG, DUMP_PREFIX_ADDRESS, frame->data, frame->len); | 138 | * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, |
| 139 | * 16, 1, frame->data, frame->len, 1); | ||
| 73 | * | 140 | * |
| 74 | * Example output using %DUMP_PREFIX_OFFSET: | 141 | * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: |
| 75 | * 0009ab42: 40414243 44454647 48494a4b 4c4d4e4f @ABCDEFGHIJKLMNO | 142 | * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO |
| 76 | * Example output using %DUMP_PREFIX_ADDRESS: | 143 | * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode: |
| 77 | * ffffffff88089af0: 70717273 74757677 78797a7b 7c7d7e7f pqrstuvwxyz{|}~. | 144 | * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. |
| 78 | */ | 145 | */ |
| 79 | void print_hex_dump(const char *level, int prefix_type, void *buf, size_t len) | 146 | void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, |
| 147 | int rowsize, int groupsize, | ||
| 148 | void *buf, size_t len, bool ascii) | ||
| 80 | { | 149 | { |
| 81 | u8 *ptr = buf; | 150 | u8 *ptr = buf; |
| 82 | int i, linelen, remaining = len; | 151 | int i, linelen, remaining = len; |
| 83 | unsigned char linebuf[100]; | 152 | unsigned char linebuf[200]; |
| 84 | 153 | ||
| 85 | for (i = 0; i < len; i += 16) { | 154 | if (rowsize != 16 && rowsize != 32) |
| 86 | linelen = min(remaining, 16); | 155 | rowsize = 16; |
| 87 | remaining -= 16; | 156 | |
| 88 | hex_dump_to_buffer(ptr + i, linelen, linebuf, sizeof(linebuf)); | 157 | for (i = 0; i < len; i += rowsize) { |
| 158 | linelen = min(remaining, rowsize); | ||
| 159 | remaining -= rowsize; | ||
| 160 | hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, | ||
| 161 | linebuf, sizeof(linebuf), ascii); | ||
| 89 | 162 | ||
| 90 | switch (prefix_type) { | 163 | switch (prefix_type) { |
| 91 | case DUMP_PREFIX_ADDRESS: | 164 | case DUMP_PREFIX_ADDRESS: |
| 92 | printk("%s%*p: %s\n", level, | 165 | printk("%s%s%*p: %s\n", level, prefix_str, |
| 93 | (int)(2 * sizeof(void *)), ptr + i, linebuf); | 166 | (int)(2 * sizeof(void *)), ptr + i, linebuf); |
| 94 | break; | 167 | break; |
| 95 | case DUMP_PREFIX_OFFSET: | 168 | case DUMP_PREFIX_OFFSET: |
| 96 | printk("%s%.8x: %s\n", level, i, linebuf); | 169 | printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); |
| 97 | break; | 170 | break; |
| 98 | default: | 171 | default: |
| 99 | printk("%s%s\n", level, linebuf); | 172 | printk("%s%s%s\n", level, prefix_str, linebuf); |
| 100 | break; | 173 | break; |
| 101 | } | 174 | } |
| 102 | } | 175 | } |
| 103 | } | 176 | } |
| 104 | EXPORT_SYMBOL(print_hex_dump); | 177 | EXPORT_SYMBOL(print_hex_dump); |
| 178 | |||
| 179 | /** | ||
| 180 | * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params | ||
| 181 | * @prefix_str: string to prefix each line with; | ||
| 182 | * caller supplies trailing spaces for alignment if desired | ||
| 183 | * @prefix_type: controls whether prefix of an offset, address, or none | ||
| 184 | * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) | ||
| 185 | * @buf: data blob to dump | ||
| 186 | * @len: number of bytes in the @buf | ||
| 187 | * | ||
| 188 | * Calls print_hex_dump(), with log level of KERN_DEBUG, | ||
| 189 | * rowsize of 16, groupsize of 1, and ASCII output included. | ||
| 190 | */ | ||
| 191 | void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | ||
| 192 | void *buf, size_t len) | ||
| 193 | { | ||
| 194 | print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, | ||
| 195 | buf, len, 1); | ||
| 196 | } | ||
| 197 | EXPORT_SYMBOL(print_hex_dump_bytes); | ||
| @@ -70,6 +70,26 @@ static void free_layer(struct idr *idp, struct idr_layer *p) | |||
| 70 | spin_unlock_irqrestore(&idp->lock, flags); | 70 | spin_unlock_irqrestore(&idp->lock, flags); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static void idr_mark_full(struct idr_layer **pa, int id) | ||
| 74 | { | ||
| 75 | struct idr_layer *p = pa[0]; | ||
| 76 | int l = 0; | ||
| 77 | |||
| 78 | __set_bit(id & IDR_MASK, &p->bitmap); | ||
| 79 | /* | ||
| 80 | * If this layer is full mark the bit in the layer above to | ||
| 81 | * show that this part of the radix tree is full. This may | ||
| 82 | * complete the layer above and require walking up the radix | ||
| 83 | * tree. | ||
| 84 | */ | ||
| 85 | while (p->bitmap == IDR_FULL) { | ||
| 86 | if (!(p = pa[++l])) | ||
| 87 | break; | ||
| 88 | id = id >> IDR_BITS; | ||
| 89 | __set_bit((id & IDR_MASK), &p->bitmap); | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 73 | /** | 93 | /** |
| 74 | * idr_pre_get - reserver resources for idr allocation | 94 | * idr_pre_get - reserver resources for idr allocation |
| 75 | * @idp: idr handle | 95 | * @idp: idr handle |
| @@ -95,15 +115,15 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | |||
| 95 | } | 115 | } |
| 96 | EXPORT_SYMBOL(idr_pre_get); | 116 | EXPORT_SYMBOL(idr_pre_get); |
| 97 | 117 | ||
| 98 | static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) | 118 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) |
| 99 | { | 119 | { |
| 100 | int n, m, sh; | 120 | int n, m, sh; |
| 101 | struct idr_layer *p, *new; | 121 | struct idr_layer *p, *new; |
| 102 | struct idr_layer *pa[MAX_LEVEL]; | 122 | int l, id, oid; |
| 103 | int l, id; | ||
| 104 | long bm; | 123 | long bm; |
| 105 | 124 | ||
| 106 | id = *starting_id; | 125 | id = *starting_id; |
| 126 | restart: | ||
| 107 | p = idp->top; | 127 | p = idp->top; |
| 108 | l = idp->layers; | 128 | l = idp->layers; |
| 109 | pa[l--] = NULL; | 129 | pa[l--] = NULL; |
| @@ -117,12 +137,23 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) | |||
| 117 | if (m == IDR_SIZE) { | 137 | if (m == IDR_SIZE) { |
| 118 | /* no space available go back to previous layer. */ | 138 | /* no space available go back to previous layer. */ |
| 119 | l++; | 139 | l++; |
| 140 | oid = id; | ||
| 120 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; | 141 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
| 142 | |||
| 143 | /* if already at the top layer, we need to grow */ | ||
| 121 | if (!(p = pa[l])) { | 144 | if (!(p = pa[l])) { |
| 122 | *starting_id = id; | 145 | *starting_id = id; |
| 123 | return -2; | 146 | return -2; |
| 124 | } | 147 | } |
| 125 | continue; | 148 | |
| 149 | /* If we need to go up one layer, continue the | ||
| 150 | * loop; otherwise, restart from the top. | ||
| 151 | */ | ||
| 152 | sh = IDR_BITS * (l + 1); | ||
| 153 | if (oid >> sh == id >> sh) | ||
| 154 | continue; | ||
| 155 | else | ||
| 156 | goto restart; | ||
| 126 | } | 157 | } |
| 127 | if (m != n) { | 158 | if (m != n) { |
| 128 | sh = IDR_BITS*l; | 159 | sh = IDR_BITS*l; |
| @@ -144,30 +175,13 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) | |||
| 144 | pa[l--] = p; | 175 | pa[l--] = p; |
| 145 | p = p->ary[m]; | 176 | p = p->ary[m]; |
| 146 | } | 177 | } |
| 147 | /* | 178 | |
| 148 | * We have reached the leaf node, plant the | 179 | pa[l] = p; |
| 149 | * users pointer and return the raw id. | 180 | return id; |
| 150 | */ | ||
| 151 | p->ary[m] = (struct idr_layer *)ptr; | ||
| 152 | __set_bit(m, &p->bitmap); | ||
| 153 | p->count++; | ||
| 154 | /* | ||
| 155 | * If this layer is full mark the bit in the layer above | ||
| 156 | * to show that this part of the radix tree is full. | ||
| 157 | * This may complete the layer above and require walking | ||
| 158 | * up the radix tree. | ||
| 159 | */ | ||
| 160 | n = id; | ||
| 161 | while (p->bitmap == IDR_FULL) { | ||
| 162 | if (!(p = pa[++l])) | ||
| 163 | break; | ||
| 164 | n = n >> IDR_BITS; | ||
| 165 | __set_bit((n & IDR_MASK), &p->bitmap); | ||
| 166 | } | ||
| 167 | return(id); | ||
| 168 | } | 181 | } |
| 169 | 182 | ||
| 170 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | 183 | static int idr_get_empty_slot(struct idr *idp, int starting_id, |
| 184 | struct idr_layer **pa) | ||
| 171 | { | 185 | { |
| 172 | struct idr_layer *p, *new; | 186 | struct idr_layer *p, *new; |
| 173 | int layers, v, id; | 187 | int layers, v, id; |
| @@ -213,12 +227,31 @@ build_up: | |||
| 213 | } | 227 | } |
| 214 | idp->top = p; | 228 | idp->top = p; |
| 215 | idp->layers = layers; | 229 | idp->layers = layers; |
| 216 | v = sub_alloc(idp, ptr, &id); | 230 | v = sub_alloc(idp, &id, pa); |
| 217 | if (v == -2) | 231 | if (v == -2) |
| 218 | goto build_up; | 232 | goto build_up; |
| 219 | return(v); | 233 | return(v); |
| 220 | } | 234 | } |
| 221 | 235 | ||
| 236 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | ||
| 237 | { | ||
| 238 | struct idr_layer *pa[MAX_LEVEL]; | ||
| 239 | int id; | ||
| 240 | |||
| 241 | id = idr_get_empty_slot(idp, starting_id, pa); | ||
| 242 | if (id >= 0) { | ||
| 243 | /* | ||
| 244 | * Successfully found an empty slot. Install the user | ||
| 245 | * pointer and mark the slot full. | ||
| 246 | */ | ||
| 247 | pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; | ||
| 248 | pa[0]->count++; | ||
| 249 | idr_mark_full(pa, id); | ||
| 250 | } | ||
| 251 | |||
| 252 | return id; | ||
| 253 | } | ||
| 254 | |||
| 222 | /** | 255 | /** |
| 223 | * idr_get_new_above - allocate new idr entry above or equal to a start id | 256 | * idr_get_new_above - allocate new idr entry above or equal to a start id |
| 224 | * @idp: idr handle | 257 | * @idp: idr handle |
| @@ -473,3 +506,248 @@ void idr_init(struct idr *idp) | |||
| 473 | spin_lock_init(&idp->lock); | 506 | spin_lock_init(&idp->lock); |
| 474 | } | 507 | } |
| 475 | EXPORT_SYMBOL(idr_init); | 508 | EXPORT_SYMBOL(idr_init); |
| 509 | |||
| 510 | |||
| 511 | /* | ||
| 512 | * IDA - IDR based ID allocator | ||
| 513 | * | ||
| 514 | * this is id allocator without id -> pointer translation. Memory | ||
| 515 | * usage is much lower than full blown idr because each id only | ||
| 516 | * occupies a bit. ida uses a custom leaf node which contains | ||
| 517 | * IDA_BITMAP_BITS slots. | ||
| 518 | * | ||
| 519 | * 2007-04-25 written by Tejun Heo <htejun@gmail.com> | ||
| 520 | */ | ||
| 521 | |||
| 522 | static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) | ||
| 523 | { | ||
| 524 | unsigned long flags; | ||
| 525 | |||
| 526 | if (!ida->free_bitmap) { | ||
| 527 | spin_lock_irqsave(&ida->idr.lock, flags); | ||
| 528 | if (!ida->free_bitmap) { | ||
| 529 | ida->free_bitmap = bitmap; | ||
| 530 | bitmap = NULL; | ||
| 531 | } | ||
| 532 | spin_unlock_irqrestore(&ida->idr.lock, flags); | ||
| 533 | } | ||
| 534 | |||
| 535 | kfree(bitmap); | ||
| 536 | } | ||
| 537 | |||
| 538 | /** | ||
| 539 | * ida_pre_get - reserve resources for ida allocation | ||
| 540 | * @ida: ida handle | ||
| 541 | * @gfp_mask: memory allocation flag | ||
| 542 | * | ||
| 543 | * This function should be called prior to locking and calling the | ||
| 544 | * following function. It preallocates enough memory to satisfy the | ||
| 545 | * worst possible allocation. | ||
| 546 | * | ||
| 547 | * If the system is REALLY out of memory this function returns 0, | ||
| 548 | * otherwise 1. | ||
| 549 | */ | ||
| 550 | int ida_pre_get(struct ida *ida, gfp_t gfp_mask) | ||
| 551 | { | ||
| 552 | /* allocate idr_layers */ | ||
| 553 | if (!idr_pre_get(&ida->idr, gfp_mask)) | ||
| 554 | return 0; | ||
| 555 | |||
| 556 | /* allocate free_bitmap */ | ||
| 557 | if (!ida->free_bitmap) { | ||
| 558 | struct ida_bitmap *bitmap; | ||
| 559 | |||
| 560 | bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); | ||
| 561 | if (!bitmap) | ||
| 562 | return 0; | ||
| 563 | |||
| 564 | free_bitmap(ida, bitmap); | ||
| 565 | } | ||
| 566 | |||
| 567 | return 1; | ||
| 568 | } | ||
| 569 | EXPORT_SYMBOL(ida_pre_get); | ||
| 570 | |||
| 571 | /** | ||
| 572 | * ida_get_new_above - allocate new ID above or equal to a start id | ||
| 573 | * @ida: ida handle | ||
| 574 | * @starting_id: id to start search at | ||
| 575 | * @p_id: pointer to the allocated handle | ||
| 576 | * | ||
| 577 | * Allocate new ID above or equal to @starting_id. It should be called with | ||
| 578 | * any required locks. | ||
| 579 | * | ||
| 580 | * If memory is required, it will return -EAGAIN, you should unlock | ||
| 581 | * and go back to the ida_pre_get() call. If the ida is full, it will | ||
| 582 | * return -ENOSPC. | ||
| 583 | * | ||
| 584 | * @p_id returns a value in the range 0 ... 0x7fffffff. | ||
| 585 | */ | ||
| 586 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | ||
| 587 | { | ||
| 588 | struct idr_layer *pa[MAX_LEVEL]; | ||
| 589 | struct ida_bitmap *bitmap; | ||
| 590 | unsigned long flags; | ||
| 591 | int idr_id = starting_id / IDA_BITMAP_BITS; | ||
| 592 | int offset = starting_id % IDA_BITMAP_BITS; | ||
| 593 | int t, id; | ||
| 594 | |||
| 595 | restart: | ||
| 596 | /* get vacant slot */ | ||
| 597 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); | ||
| 598 | if (t < 0) { | ||
| 599 | if (t == -1) | ||
| 600 | return -EAGAIN; | ||
| 601 | else /* will be -3 */ | ||
| 602 | return -ENOSPC; | ||
| 603 | } | ||
| 604 | |||
| 605 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) | ||
| 606 | return -ENOSPC; | ||
| 607 | |||
| 608 | if (t != idr_id) | ||
| 609 | offset = 0; | ||
| 610 | idr_id = t; | ||
| 611 | |||
| 612 | /* if bitmap isn't there, create a new one */ | ||
| 613 | bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; | ||
| 614 | if (!bitmap) { | ||
| 615 | spin_lock_irqsave(&ida->idr.lock, flags); | ||
| 616 | bitmap = ida->free_bitmap; | ||
| 617 | ida->free_bitmap = NULL; | ||
| 618 | spin_unlock_irqrestore(&ida->idr.lock, flags); | ||
| 619 | |||
| 620 | if (!bitmap) | ||
| 621 | return -EAGAIN; | ||
| 622 | |||
| 623 | memset(bitmap, 0, sizeof(struct ida_bitmap)); | ||
| 624 | pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; | ||
| 625 | pa[0]->count++; | ||
| 626 | } | ||
| 627 | |||
| 628 | /* lookup for empty slot */ | ||
| 629 | t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); | ||
| 630 | if (t == IDA_BITMAP_BITS) { | ||
| 631 | /* no empty slot after offset, continue to the next chunk */ | ||
| 632 | idr_id++; | ||
| 633 | offset = 0; | ||
| 634 | goto restart; | ||
| 635 | } | ||
| 636 | |||
| 637 | id = idr_id * IDA_BITMAP_BITS + t; | ||
| 638 | if (id >= MAX_ID_BIT) | ||
| 639 | return -ENOSPC; | ||
| 640 | |||
| 641 | __set_bit(t, bitmap->bitmap); | ||
| 642 | if (++bitmap->nr_busy == IDA_BITMAP_BITS) | ||
| 643 | idr_mark_full(pa, idr_id); | ||
| 644 | |||
| 645 | *p_id = id; | ||
| 646 | |||
| 647 | /* Each leaf node can handle nearly a thousand slots and the | ||
| 648 | * whole idea of ida is to have a small memory footprint. | ||
| 649 | * Throw away extra resources one by one after each successful | ||
| 650 | * allocation. | ||
| 651 | */ | ||
| 652 | if (ida->idr.id_free_cnt || ida->free_bitmap) { | ||
| 653 | struct idr_layer *p = alloc_layer(&ida->idr); | ||
| 654 | if (p) | ||
| 655 | kmem_cache_free(idr_layer_cache, p); | ||
| 656 | } | ||
| 657 | |||
| 658 | return 0; | ||
| 659 | } | ||
| 660 | EXPORT_SYMBOL(ida_get_new_above); | ||
| 661 | |||
| 662 | /** | ||
| 663 | * ida_get_new - allocate new ID | ||
| 664 | * @ida: ida handle | ||
| 665 | * @p_id: pointer to the allocated handle | ||
| 666 | * | ||
| 667 | * Allocate new ID. It should be called with any required locks. | ||
| 668 | * | ||
| 669 | * If memory is required, it will return -EAGAIN, you should unlock | ||
| 670 | * and go back to the ida_pre_get() call. If the ida is full, it will | ||
| 671 | * return -ENOSPC. | ||
| 672 | * | ||
| 673 | * @p_id returns a value in the range 0 ... 0x7fffffff. | ||
| 674 | */ | ||
| 675 | int ida_get_new(struct ida *ida, int *p_id) | ||
| 676 | { | ||
| 677 | return ida_get_new_above(ida, 0, p_id); | ||
| 678 | } | ||
| 679 | EXPORT_SYMBOL(ida_get_new); | ||
| 680 | |||
| 681 | /** | ||
| 682 | * ida_remove - remove the given ID | ||
| 683 | * @ida: ida handle | ||
| 684 | * @id: ID to free | ||
| 685 | */ | ||
| 686 | void ida_remove(struct ida *ida, int id) | ||
| 687 | { | ||
| 688 | struct idr_layer *p = ida->idr.top; | ||
| 689 | int shift = (ida->idr.layers - 1) * IDR_BITS; | ||
| 690 | int idr_id = id / IDA_BITMAP_BITS; | ||
| 691 | int offset = id % IDA_BITMAP_BITS; | ||
| 692 | int n; | ||
| 693 | struct ida_bitmap *bitmap; | ||
| 694 | |||
| 695 | /* clear full bits while looking up the leaf idr_layer */ | ||
| 696 | while ((shift > 0) && p) { | ||
| 697 | n = (idr_id >> shift) & IDR_MASK; | ||
| 698 | __clear_bit(n, &p->bitmap); | ||
| 699 | p = p->ary[n]; | ||
| 700 | shift -= IDR_BITS; | ||
| 701 | } | ||
| 702 | |||
| 703 | if (p == NULL) | ||
| 704 | goto err; | ||
| 705 | |||
| 706 | n = idr_id & IDR_MASK; | ||
| 707 | __clear_bit(n, &p->bitmap); | ||
| 708 | |||
| 709 | bitmap = (void *)p->ary[n]; | ||
| 710 | if (!test_bit(offset, bitmap->bitmap)) | ||
| 711 | goto err; | ||
| 712 | |||
| 713 | /* update bitmap and remove it if empty */ | ||
| 714 | __clear_bit(offset, bitmap->bitmap); | ||
| 715 | if (--bitmap->nr_busy == 0) { | ||
| 716 | __set_bit(n, &p->bitmap); /* to please idr_remove() */ | ||
| 717 | idr_remove(&ida->idr, idr_id); | ||
| 718 | free_bitmap(ida, bitmap); | ||
| 719 | } | ||
| 720 | |||
| 721 | return; | ||
| 722 | |||
| 723 | err: | ||
| 724 | printk(KERN_WARNING | ||
| 725 | "ida_remove called for id=%d which is not allocated.\n", id); | ||
| 726 | } | ||
| 727 | EXPORT_SYMBOL(ida_remove); | ||
| 728 | |||
| 729 | /** | ||
| 730 | * ida_destroy - release all cached layers within an ida tree | ||
| 731 | * @ida: ida handle | ||
| 732 | */ | ||
| 733 | void ida_destroy(struct ida *ida) | ||
| 734 | { | ||
| 735 | idr_destroy(&ida->idr); | ||
| 736 | kfree(ida->free_bitmap); | ||
| 737 | } | ||
| 738 | EXPORT_SYMBOL(ida_destroy); | ||
| 739 | |||
| 740 | /** | ||
| 741 | * ida_init - initialize ida handle | ||
| 742 | * @ida: ida handle | ||
| 743 | * | ||
| 744 | * This function is used to set up the handle (@ida) that you will pass | ||
| 745 | * to the rest of the functions. | ||
| 746 | */ | ||
| 747 | void ida_init(struct ida *ida) | ||
| 748 | { | ||
| 749 | memset(ida, 0, sizeof(struct ida)); | ||
| 750 | idr_init(&ida->idr); | ||
| 751 | |||
| 752 | } | ||
| 753 | EXPORT_SYMBOL(ida_init); | ||
diff --git a/lib/kobject.c b/lib/kobject.c index fc5f3f6e7329..4b08e0ff95c8 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
| @@ -44,7 +44,7 @@ static int populate_dir(struct kobject * kobj) | |||
| 44 | return error; | 44 | return error; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static int create_dir(struct kobject * kobj, struct dentry *shadow_parent) | 47 | static int create_dir(struct kobject *kobj, struct sysfs_dirent *shadow_parent) |
| 48 | { | 48 | { |
| 49 | int error = 0; | 49 | int error = 0; |
| 50 | if (kobject_name(kobj)) { | 50 | if (kobject_name(kobj)) { |
| @@ -162,7 +162,7 @@ static void unlink(struct kobject * kobj) | |||
| 162 | * @shadow_parent: sysfs directory to add to. | 162 | * @shadow_parent: sysfs directory to add to. |
| 163 | */ | 163 | */ |
| 164 | 164 | ||
| 165 | int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent) | 165 | int kobject_shadow_add(struct kobject *kobj, struct sysfs_dirent *shadow_parent) |
| 166 | { | 166 | { |
| 167 | int error = 0; | 167 | int error = 0; |
| 168 | struct kobject * parent; | 168 | struct kobject * parent; |
| @@ -202,14 +202,14 @@ int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent) | |||
| 202 | 202 | ||
| 203 | /* be noisy on error issues */ | 203 | /* be noisy on error issues */ |
| 204 | if (error == -EEXIST) | 204 | if (error == -EEXIST) |
| 205 | printk("kobject_add failed for %s with -EEXIST, " | 205 | printk(KERN_ERR "kobject_add failed for %s with " |
| 206 | "don't try to register things with the " | 206 | "-EEXIST, don't try to register things with " |
| 207 | "same name in the same directory.\n", | 207 | "the same name in the same directory.\n", |
| 208 | kobject_name(kobj)); | 208 | kobject_name(kobj)); |
| 209 | else | 209 | else |
| 210 | printk("kobject_add failed for %s (%d)\n", | 210 | printk(KERN_ERR "kobject_add failed for %s (%d)\n", |
| 211 | kobject_name(kobj), error); | 211 | kobject_name(kobj), error); |
| 212 | dump_stack(); | 212 | dump_stack(); |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | return error; | 215 | return error; |
| @@ -338,7 +338,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name) | |||
| 338 | /* Note : if we want to send the new name alone, not the full path, | 338 | /* Note : if we want to send the new name alone, not the full path, |
| 339 | * we could probably use kobject_name(kobj); */ | 339 | * we could probably use kobject_name(kobj); */ |
| 340 | 340 | ||
| 341 | error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name); | 341 | error = sysfs_rename_dir(kobj, kobj->parent->sd, new_name); |
| 342 | 342 | ||
| 343 | /* This function is mostly/only used for network interface. | 343 | /* This function is mostly/only used for network interface. |
| 344 | * Some hotplug package track interfaces by their name and | 344 | * Some hotplug package track interfaces by their name and |
| @@ -361,8 +361,8 @@ out: | |||
| 361 | * @new_name: object's new name | 361 | * @new_name: object's new name |
| 362 | */ | 362 | */ |
| 363 | 363 | ||
| 364 | int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent, | 364 | int kobject_shadow_rename(struct kobject *kobj, |
| 365 | const char *new_name) | 365 | struct sysfs_dirent *new_parent, const char *new_name) |
| 366 | { | 366 | { |
| 367 | int error = 0; | 367 | int error = 0; |
| 368 | 368 | ||
| @@ -597,10 +597,17 @@ int kset_add(struct kset * k) | |||
| 597 | 597 | ||
| 598 | int kset_register(struct kset * k) | 598 | int kset_register(struct kset * k) |
| 599 | { | 599 | { |
| 600 | int err; | ||
| 601 | |||
| 600 | if (!k) | 602 | if (!k) |
| 601 | return -EINVAL; | 603 | return -EINVAL; |
| 604 | |||
| 602 | kset_init(k); | 605 | kset_init(k); |
| 603 | return kset_add(k); | 606 | err = kset_add(k); |
| 607 | if (err) | ||
| 608 | return err; | ||
| 609 | kobject_uevent(&k->kobj, KOBJ_ADD); | ||
| 610 | return 0; | ||
| 604 | } | 611 | } |
| 605 | 612 | ||
| 606 | 613 | ||
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile new file mode 100644 index 000000000000..e764116ea12d --- /dev/null +++ b/lib/lzo/Makefile | |||
| @@ -0,0 +1,5 @@ | |||
| 1 | lzo_compress-objs := lzo1x_compress.o | ||
| 2 | lzo_decompress-objs := lzo1x_decompress.o | ||
| 3 | |||
| 4 | obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o | ||
| 5 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o | ||
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c new file mode 100644 index 000000000000..c935f00073e9 --- /dev/null +++ b/lib/lzo/lzo1x_compress.c | |||
| @@ -0,0 +1,226 @@ | |||
| 1 | /* | ||
| 2 | * LZO1X Compressor from MiniLZO | ||
| 3 | * | ||
| 4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
| 5 | * | ||
| 6 | * The full LZO package can be found at: | ||
| 7 | * http://www.oberhumer.com/opensource/lzo/ | ||
| 8 | * | ||
| 9 | * Changed for kernel use by: | ||
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/lzo.h> | ||
| 17 | #include <asm/unaligned.h> | ||
| 18 | #include "lzodefs.h" | ||
| 19 | |||
| 20 | static noinline size_t | ||
| 21 | _lzo1x_1_do_compress(const unsigned char *in, size_t in_len, | ||
| 22 | unsigned char *out, size_t *out_len, void *wrkmem) | ||
| 23 | { | ||
| 24 | const unsigned char * const in_end = in + in_len; | ||
| 25 | const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5; | ||
| 26 | const unsigned char ** const dict = wrkmem; | ||
| 27 | const unsigned char *ip = in, *ii = ip; | ||
| 28 | const unsigned char *end, *m, *m_pos; | ||
| 29 | size_t m_off, m_len, dindex; | ||
| 30 | unsigned char *op = out; | ||
| 31 | |||
| 32 | ip += 4; | ||
| 33 | |||
| 34 | for (;;) { | ||
| 35 | dindex = ((0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK; | ||
| 36 | m_pos = dict[dindex]; | ||
| 37 | |||
| 38 | if (m_pos < in) | ||
| 39 | goto literal; | ||
| 40 | |||
| 41 | if (ip == m_pos || (ip - m_pos) > M4_MAX_OFFSET) | ||
| 42 | goto literal; | ||
| 43 | |||
| 44 | m_off = ip - m_pos; | ||
| 45 | if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) | ||
| 46 | goto try_match; | ||
| 47 | |||
| 48 | dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f); | ||
| 49 | m_pos = dict[dindex]; | ||
| 50 | |||
| 51 | if (m_pos < in) | ||
| 52 | goto literal; | ||
| 53 | |||
| 54 | if (ip == m_pos || (ip - m_pos) > M4_MAX_OFFSET) | ||
| 55 | goto literal; | ||
| 56 | |||
| 57 | m_off = ip - m_pos; | ||
| 58 | if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) | ||
| 59 | goto try_match; | ||
| 60 | |||
| 61 | goto literal; | ||
| 62 | |||
| 63 | try_match: | ||
| 64 | if (get_unaligned((const unsigned short *)m_pos) | ||
| 65 | == get_unaligned((const unsigned short *)ip)) { | ||
| 66 | if (likely(m_pos[2] == ip[2])) | ||
| 67 | goto match; | ||
| 68 | } | ||
| 69 | |||
| 70 | literal: | ||
| 71 | dict[dindex] = ip; | ||
| 72 | ++ip; | ||
| 73 | if (unlikely(ip >= ip_end)) | ||
| 74 | break; | ||
| 75 | continue; | ||
| 76 | |||
| 77 | match: | ||
| 78 | dict[dindex] = ip; | ||
| 79 | if (ip != ii) { | ||
| 80 | size_t t = ip - ii; | ||
| 81 | |||
| 82 | if (t <= 3) { | ||
| 83 | op[-2] |= t; | ||
| 84 | } else if (t <= 18) { | ||
| 85 | *op++ = (t - 3); | ||
| 86 | } else { | ||
| 87 | size_t tt = t - 18; | ||
| 88 | |||
| 89 | *op++ = 0; | ||
| 90 | while (tt > 255) { | ||
| 91 | tt -= 255; | ||
| 92 | *op++ = 0; | ||
| 93 | } | ||
| 94 | *op++ = tt; | ||
| 95 | } | ||
| 96 | do { | ||
| 97 | *op++ = *ii++; | ||
| 98 | } while (--t > 0); | ||
| 99 | } | ||
| 100 | |||
| 101 | ip += 3; | ||
| 102 | if (m_pos[3] != *ip++ || m_pos[4] != *ip++ | ||
| 103 | || m_pos[5] != *ip++ || m_pos[6] != *ip++ | ||
| 104 | || m_pos[7] != *ip++ || m_pos[8] != *ip++) { | ||
| 105 | --ip; | ||
| 106 | m_len = ip - ii; | ||
| 107 | |||
| 108 | if (m_off <= M2_MAX_OFFSET) { | ||
| 109 | m_off -= 1; | ||
| 110 | *op++ = (((m_len - 1) << 5) | ||
| 111 | | ((m_off & 7) << 2)); | ||
| 112 | *op++ = (m_off >> 3); | ||
| 113 | } else if (m_off <= M3_MAX_OFFSET) { | ||
| 114 | m_off -= 1; | ||
| 115 | *op++ = (M3_MARKER | (m_len - 2)); | ||
| 116 | goto m3_m4_offset; | ||
| 117 | } else { | ||
| 118 | m_off -= 0x4000; | ||
| 119 | |||
| 120 | *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11) | ||
| 121 | | (m_len - 2)); | ||
| 122 | goto m3_m4_offset; | ||
| 123 | } | ||
| 124 | } else { | ||
| 125 | end = in_end; | ||
| 126 | m = m_pos + M2_MAX_LEN + 1; | ||
| 127 | |||
| 128 | while (ip < end && *m == *ip) { | ||
| 129 | m++; | ||
| 130 | ip++; | ||
| 131 | } | ||
| 132 | m_len = ip - ii; | ||
| 133 | |||
| 134 | if (m_off <= M3_MAX_OFFSET) { | ||
| 135 | m_off -= 1; | ||
| 136 | if (m_len <= 33) { | ||
| 137 | *op++ = (M3_MARKER | (m_len - 2)); | ||
| 138 | } else { | ||
| 139 | m_len -= 33; | ||
| 140 | *op++ = M3_MARKER | 0; | ||
| 141 | goto m3_m4_len; | ||
| 142 | } | ||
| 143 | } else { | ||
| 144 | m_off -= 0x4000; | ||
| 145 | if (m_len <= M4_MAX_LEN) { | ||
| 146 | *op++ = (M4_MARKER | ||
| 147 | | ((m_off & 0x4000) >> 11) | ||
| 148 | | (m_len - 2)); | ||
| 149 | } else { | ||
| 150 | m_len -= M4_MAX_LEN; | ||
| 151 | *op++ = (M4_MARKER | ||
| 152 | | ((m_off & 0x4000) >> 11)); | ||
| 153 | m3_m4_len: | ||
| 154 | while (m_len > 255) { | ||
| 155 | m_len -= 255; | ||
| 156 | *op++ = 0; | ||
| 157 | } | ||
| 158 | |||
| 159 | *op++ = (m_len); | ||
| 160 | } | ||
| 161 | } | ||
| 162 | m3_m4_offset: | ||
| 163 | *op++ = ((m_off & 63) << 2); | ||
| 164 | *op++ = (m_off >> 6); | ||
| 165 | } | ||
| 166 | |||
| 167 | ii = ip; | ||
| 168 | if (unlikely(ip >= ip_end)) | ||
| 169 | break; | ||
| 170 | } | ||
| 171 | |||
| 172 | *out_len = op - out; | ||
| 173 | return in_end - ii; | ||
| 174 | } | ||
| 175 | |||
| 176 | int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, | ||
| 177 | size_t *out_len, void *wrkmem) | ||
| 178 | { | ||
| 179 | const unsigned char *ii; | ||
| 180 | unsigned char *op = out; | ||
| 181 | size_t t; | ||
| 182 | |||
| 183 | if (unlikely(in_len <= M2_MAX_LEN + 5)) { | ||
| 184 | t = in_len; | ||
| 185 | } else { | ||
| 186 | t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem); | ||
| 187 | op += *out_len; | ||
| 188 | } | ||
| 189 | |||
| 190 | if (t > 0) { | ||
| 191 | ii = in + in_len - t; | ||
| 192 | |||
| 193 | if (op == out && t <= 238) { | ||
| 194 | *op++ = (17 + t); | ||
| 195 | } else if (t <= 3) { | ||
| 196 | op[-2] |= t; | ||
| 197 | } else if (t <= 18) { | ||
| 198 | *op++ = (t - 3); | ||
| 199 | } else { | ||
| 200 | size_t tt = t - 18; | ||
| 201 | |||
| 202 | *op++ = 0; | ||
| 203 | while (tt > 255) { | ||
| 204 | tt -= 255; | ||
| 205 | *op++ = 0; | ||
| 206 | } | ||
| 207 | |||
| 208 | *op++ = tt; | ||
| 209 | } | ||
| 210 | do { | ||
| 211 | *op++ = *ii++; | ||
| 212 | } while (--t > 0); | ||
| 213 | } | ||
| 214 | |||
| 215 | *op++ = M4_MARKER | 1; | ||
| 216 | *op++ = 0; | ||
| 217 | *op++ = 0; | ||
| 218 | |||
| 219 | *out_len = op - out; | ||
| 220 | return LZO_E_OK; | ||
| 221 | } | ||
| 222 | EXPORT_SYMBOL_GPL(lzo1x_1_compress); | ||
| 223 | |||
| 224 | MODULE_LICENSE("GPL"); | ||
| 225 | MODULE_DESCRIPTION("LZO1X-1 Compressor"); | ||
| 226 | |||
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c new file mode 100644 index 000000000000..9dc7056e5520 --- /dev/null +++ b/lib/lzo/lzo1x_decompress.c | |||
| @@ -0,0 +1,254 @@ | |||
| 1 | /* | ||
| 2 | * LZO1X Decompressor from MiniLZO | ||
| 3 | * | ||
| 4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
| 5 | * | ||
| 6 | * The full LZO package can be found at: | ||
| 7 | * http://www.oberhumer.com/opensource/lzo/ | ||
| 8 | * | ||
| 9 | * Changed for kernel use by: | ||
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/lzo.h> | ||
| 17 | #include <asm/byteorder.h> | ||
| 18 | #include <asm/unaligned.h> | ||
| 19 | #include "lzodefs.h" | ||
| 20 | |||
| 21 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) | ||
| 22 | #define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x)) | ||
| 23 | #define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op) | ||
| 24 | |||
| 25 | #define COPY4(dst, src) \ | ||
| 26 | put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) | ||
| 27 | |||
| 28 | int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | ||
| 29 | unsigned char *out, size_t *out_len) | ||
| 30 | { | ||
| 31 | const unsigned char * const ip_end = in + in_len; | ||
| 32 | unsigned char * const op_end = out + *out_len; | ||
| 33 | const unsigned char *ip = in, *m_pos; | ||
| 34 | unsigned char *op = out; | ||
| 35 | size_t t; | ||
| 36 | |||
| 37 | *out_len = 0; | ||
| 38 | |||
| 39 | if (*ip > 17) { | ||
| 40 | t = *ip++ - 17; | ||
| 41 | if (t < 4) | ||
| 42 | goto match_next; | ||
| 43 | if (HAVE_OP(t, op_end, op)) | ||
| 44 | goto output_overrun; | ||
| 45 | if (HAVE_IP(t + 1, ip_end, ip)) | ||
| 46 | goto input_overrun; | ||
| 47 | do { | ||
| 48 | *op++ = *ip++; | ||
| 49 | } while (--t > 0); | ||
| 50 | goto first_literal_run; | ||
| 51 | } | ||
| 52 | |||
| 53 | while ((ip < ip_end)) { | ||
| 54 | t = *ip++; | ||
| 55 | if (t >= 16) | ||
| 56 | goto match; | ||
| 57 | if (t == 0) { | ||
| 58 | if (HAVE_IP(1, ip_end, ip)) | ||
| 59 | goto input_overrun; | ||
| 60 | while (*ip == 0) { | ||
| 61 | t += 255; | ||
| 62 | ip++; | ||
| 63 | if (HAVE_IP(1, ip_end, ip)) | ||
| 64 | goto input_overrun; | ||
| 65 | } | ||
| 66 | t += 15 + *ip++; | ||
| 67 | } | ||
| 68 | if (HAVE_OP(t + 3, op_end, op)) | ||
| 69 | goto output_overrun; | ||
| 70 | if (HAVE_IP(t + 4, ip_end, ip)) | ||
| 71 | goto input_overrun; | ||
| 72 | |||
| 73 | COPY4(op, ip); | ||
| 74 | op += 4; | ||
| 75 | ip += 4; | ||
| 76 | if (--t > 0) { | ||
| 77 | if (t >= 4) { | ||
| 78 | do { | ||
| 79 | COPY4(op, ip); | ||
| 80 | op += 4; | ||
| 81 | ip += 4; | ||
| 82 | t -= 4; | ||
| 83 | } while (t >= 4); | ||
| 84 | if (t > 0) { | ||
| 85 | do { | ||
| 86 | *op++ = *ip++; | ||
| 87 | } while (--t > 0); | ||
| 88 | } | ||
| 89 | } else { | ||
| 90 | do { | ||
| 91 | *op++ = *ip++; | ||
| 92 | } while (--t > 0); | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | first_literal_run: | ||
| 97 | t = *ip++; | ||
| 98 | if (t >= 16) | ||
| 99 | goto match; | ||
| 100 | m_pos = op - (1 + M2_MAX_OFFSET); | ||
| 101 | m_pos -= t >> 2; | ||
| 102 | m_pos -= *ip++ << 2; | ||
| 103 | |||
| 104 | if (HAVE_LB(m_pos, out, op)) | ||
| 105 | goto lookbehind_overrun; | ||
| 106 | |||
| 107 | if (HAVE_OP(3, op_end, op)) | ||
| 108 | goto output_overrun; | ||
| 109 | *op++ = *m_pos++; | ||
| 110 | *op++ = *m_pos++; | ||
| 111 | *op++ = *m_pos; | ||
| 112 | |||
| 113 | goto match_done; | ||
| 114 | |||
| 115 | do { | ||
| 116 | match: | ||
| 117 | if (t >= 64) { | ||
| 118 | m_pos = op - 1; | ||
| 119 | m_pos -= (t >> 2) & 7; | ||
| 120 | m_pos -= *ip++ << 3; | ||
| 121 | t = (t >> 5) - 1; | ||
| 122 | if (HAVE_LB(m_pos, out, op)) | ||
| 123 | goto lookbehind_overrun; | ||
| 124 | if (HAVE_OP(t + 3 - 1, op_end, op)) | ||
| 125 | goto output_overrun; | ||
| 126 | goto copy_match; | ||
| 127 | } else if (t >= 32) { | ||
| 128 | t &= 31; | ||
| 129 | if (t == 0) { | ||
| 130 | if (HAVE_IP(1, ip_end, ip)) | ||
| 131 | goto input_overrun; | ||
| 132 | while (*ip == 0) { | ||
| 133 | t += 255; | ||
| 134 | ip++; | ||
| 135 | if (HAVE_IP(1, ip_end, ip)) | ||
| 136 | goto input_overrun; | ||
| 137 | } | ||
| 138 | t += 31 + *ip++; | ||
| 139 | } | ||
| 140 | m_pos = op - 1; | ||
| 141 | m_pos -= le16_to_cpu(get_unaligned( | ||
| 142 | (const unsigned short *)ip)) >> 2; | ||
| 143 | ip += 2; | ||
| 144 | } else if (t >= 16) { | ||
| 145 | m_pos = op; | ||
| 146 | m_pos -= (t & 8) << 11; | ||
| 147 | |||
| 148 | t &= 7; | ||
| 149 | if (t == 0) { | ||
| 150 | if (HAVE_IP(1, ip_end, ip)) | ||
| 151 | goto input_overrun; | ||
| 152 | while (*ip == 0) { | ||
| 153 | t += 255; | ||
| 154 | ip++; | ||
| 155 | if (HAVE_IP(1, ip_end, ip)) | ||
| 156 | goto input_overrun; | ||
| 157 | } | ||
| 158 | t += 7 + *ip++; | ||
| 159 | } | ||
| 160 | m_pos -= le16_to_cpu(get_unaligned( | ||
| 161 | (const unsigned short *)ip) >> 2); | ||
| 162 | ip += 2; | ||
| 163 | if (m_pos == op) | ||
| 164 | goto eof_found; | ||
| 165 | m_pos -= 0x4000; | ||
| 166 | } else { | ||
| 167 | m_pos = op - 1; | ||
| 168 | m_pos -= t >> 2; | ||
| 169 | m_pos -= *ip++ << 2; | ||
| 170 | |||
| 171 | if (HAVE_LB(m_pos, out, op)) | ||
| 172 | goto lookbehind_overrun; | ||
| 173 | if (HAVE_OP(2, op_end, op)) | ||
| 174 | goto output_overrun; | ||
| 175 | |||
| 176 | *op++ = *m_pos++; | ||
| 177 | *op++ = *m_pos; | ||
| 178 | goto match_done; | ||
| 179 | } | ||
| 180 | |||
| 181 | if (HAVE_LB(m_pos, out, op)) | ||
| 182 | goto lookbehind_overrun; | ||
| 183 | if (HAVE_OP(t + 3 - 1, op_end, op)) | ||
| 184 | goto output_overrun; | ||
| 185 | |||
| 186 | if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { | ||
| 187 | COPY4(op, m_pos); | ||
| 188 | op += 4; | ||
| 189 | m_pos += 4; | ||
| 190 | t -= 4 - (3 - 1); | ||
| 191 | do { | ||
| 192 | COPY4(op, m_pos); | ||
| 193 | op += 4; | ||
| 194 | m_pos += 4; | ||
| 195 | t -= 4; | ||
| 196 | } while (t >= 4); | ||
| 197 | if (t > 0) | ||
| 198 | do { | ||
| 199 | *op++ = *m_pos++; | ||
| 200 | } while (--t > 0); | ||
| 201 | } else { | ||
| 202 | copy_match: | ||
| 203 | *op++ = *m_pos++; | ||
| 204 | *op++ = *m_pos++; | ||
| 205 | do { | ||
| 206 | *op++ = *m_pos++; | ||
| 207 | } while (--t > 0); | ||
| 208 | } | ||
| 209 | match_done: | ||
| 210 | t = ip[-2] & 3; | ||
| 211 | if (t == 0) | ||
| 212 | break; | ||
| 213 | match_next: | ||
| 214 | if (HAVE_OP(t, op_end, op)) | ||
| 215 | goto output_overrun; | ||
| 216 | if (HAVE_IP(t + 1, ip_end, ip)) | ||
| 217 | goto input_overrun; | ||
| 218 | |||
| 219 | *op++ = *ip++; | ||
| 220 | if (t > 1) { | ||
| 221 | *op++ = *ip++; | ||
| 222 | if (t > 2) | ||
| 223 | *op++ = *ip++; | ||
| 224 | } | ||
| 225 | |||
| 226 | t = *ip++; | ||
| 227 | } while (ip < ip_end); | ||
| 228 | } | ||
| 229 | |||
| 230 | *out_len = op - out; | ||
| 231 | return LZO_E_EOF_NOT_FOUND; | ||
| 232 | |||
| 233 | eof_found: | ||
| 234 | *out_len = op - out; | ||
| 235 | return (ip == ip_end ? LZO_E_OK : | ||
| 236 | (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); | ||
| 237 | input_overrun: | ||
| 238 | *out_len = op - out; | ||
| 239 | return LZO_E_INPUT_OVERRUN; | ||
| 240 | |||
| 241 | output_overrun: | ||
| 242 | *out_len = op - out; | ||
| 243 | return LZO_E_OUTPUT_OVERRUN; | ||
| 244 | |||
| 245 | lookbehind_overrun: | ||
| 246 | *out_len = op - out; | ||
| 247 | return LZO_E_LOOKBEHIND_OVERRUN; | ||
| 248 | } | ||
| 249 | |||
| 250 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | ||
| 251 | |||
| 252 | MODULE_LICENSE("GPL"); | ||
| 253 | MODULE_DESCRIPTION("LZO1X Decompressor"); | ||
| 254 | |||
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h new file mode 100644 index 000000000000..b6d482c492ef --- /dev/null +++ b/lib/lzo/lzodefs.h | |||
| @@ -0,0 +1,43 @@ | |||
| 1 | /* | ||
| 2 | * lzodefs.h -- architecture, OS and compiler specific defines | ||
| 3 | * | ||
| 4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
| 5 | * | ||
| 6 | * The full LZO package can be found at: | ||
| 7 | * http://www.oberhumer.com/opensource/lzo/ | ||
| 8 | * | ||
| 9 | * Changed for kernel use by: | ||
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | ||
| 12 | */ | ||
| 13 | |||
| 14 | #define LZO_VERSION 0x2020 | ||
| 15 | #define LZO_VERSION_STRING "2.02" | ||
| 16 | #define LZO_VERSION_DATE "Oct 17 2005" | ||
| 17 | |||
| 18 | #define M1_MAX_OFFSET 0x0400 | ||
| 19 | #define M2_MAX_OFFSET 0x0800 | ||
| 20 | #define M3_MAX_OFFSET 0x4000 | ||
| 21 | #define M4_MAX_OFFSET 0xbfff | ||
| 22 | |||
| 23 | #define M1_MIN_LEN 2 | ||
| 24 | #define M1_MAX_LEN 2 | ||
| 25 | #define M2_MIN_LEN 3 | ||
| 26 | #define M2_MAX_LEN 8 | ||
| 27 | #define M3_MIN_LEN 3 | ||
| 28 | #define M3_MAX_LEN 33 | ||
| 29 | #define M4_MIN_LEN 3 | ||
| 30 | #define M4_MAX_LEN 9 | ||
| 31 | |||
| 32 | #define M1_MARKER 0 | ||
| 33 | #define M2_MARKER 64 | ||
| 34 | #define M3_MARKER 32 | ||
| 35 | #define M4_MARKER 16 | ||
| 36 | |||
| 37 | #define D_BITS 14 | ||
| 38 | #define D_MASK ((1u << D_BITS) - 1) | ||
| 39 | #define D_HIGH ((D_MASK >> 1) + 1) | ||
| 40 | |||
| 41 | #define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \ | ||
| 42 | << (s1)) ^ (p)[0]) | ||
| 43 | #define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0]) | ||
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 402eb4eb6b23..9927cca14cb7 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -151,6 +151,7 @@ int radix_tree_preload(gfp_t gfp_mask) | |||
| 151 | out: | 151 | out: |
| 152 | return ret; | 152 | return ret; |
| 153 | } | 153 | } |
| 154 | EXPORT_SYMBOL(radix_tree_preload); | ||
| 154 | 155 | ||
| 155 | static inline void tag_set(struct radix_tree_node *node, unsigned int tag, | 156 | static inline void tag_set(struct radix_tree_node *node, unsigned int tag, |
| 156 | int offset) | 157 | int offset) |
