From 2ef9481e666b4654159ac9f847e6963809e3c470 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Mon, 23 Jan 2006 10:58:20 -0600 Subject: [PATCH] powerpc: trivial: modify comments to refer to new location of files This patch removes all self references and fixes references to files in the now defunct arch/ppc64 tree. I think this accomplishes everything wanted, though there might be a few references I missed. Signed-off-by: Jon Mason Signed-off-by: Paul Mackerras --- lib/extable.c | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/extable.c b/lib/extable.c index 18df57c029df..01c08b5836f5 100644 --- a/lib/extable.c +++ b/lib/extable.c @@ -1,5 +1,4 @@ /* - * lib/extable.c * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c. * * Copyright (C) 2004 Paul Mackerras, IBM Corp. -- cgit v1.2.2 From 51107301b629640f9ab76fe23bf385e187b9ac29 Mon Sep 17 00:00:00 2001 From: Jun'ichi Nomura Date: Wed, 15 Mar 2006 08:28:55 -0500 Subject: [PATCH] kobject: fix build error if CONFIG_SYSFS=n Moving uevent_seqnum and uevent_helper to kobject_uevent.c because they are used even if CONFIG_SYSFS=n while kernel/ksysfs.c is built only if CONFIG_SYSFS=y. Signed-off-by: Jun'ichi Nomura Signed-off-by: Greg Kroah-Hartman --- lib/kobject_uevent.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib') diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 086a0c6e888e..982226daf939 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -26,6 +26,8 @@ #define NUM_ENVP 32 /* number of env pointers */ #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET) +u64 uevent_seqnum; +char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug"; static DEFINE_SPINLOCK(sequence_lock); static struct sock *uevent_sock; -- cgit v1.2.2 From 8b5536bbee53620f8d5f367987e5727ba36d886d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 30 Jan 2006 06:19:35 +0100 Subject: [PATCH] kref: avoid an atomic operation in kref_put() Avoid an atomic operation in kref_put() when the last reference is dropped. On most platforms, atomic_read() is a plain read of the counter and involves no atomic at all. Signed-off-by: Eric Dumazet Signed-off-by: Greg Kroah-Hartman --- lib/kref.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kref.c b/lib/kref.c index 0d07cc31c818..4a467faf1367 100644 --- a/lib/kref.c +++ b/lib/kref.c @@ -52,7 +52,12 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref)) WARN_ON(release == NULL); WARN_ON(release == (void (*)(struct kref *))kfree); - if (atomic_dec_and_test(&kref->refcount)) { + /* + * if current count is one, we are the last user and can release object + * right now, avoiding an atomic operation on 'refcount' + */ + if ((atomic_read(&kref->refcount) == 1) || + (atomic_dec_and_test(&kref->refcount))) { release(kref); return 1; } -- cgit v1.2.2 From dcd0da002122a70fe1c625c0ca9f58c95aa33ebe Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 20 Mar 2006 13:17:13 -0800 Subject: [PATCH] Kobject: provide better warning messages when people do stupid things Now that kobject_add() is used more than kobject_register() the kernel wasn't always letting people know that they were doing something wrong. This change fixes this.
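As a hedged illustration of the situation these messages target (this sketch is not part of the patch; the kobject names and the helper function are invented for the example): registering two kobjects with the same name under the same parent makes the second kobject_add() return -EEXIST, which the kernel now reports together with a stack dump instead of failing silently.

#include <linux/kobject.h>

/* Hypothetical misuse: the second kobject_add() fails with -EEXIST
 * because "widget0" already exists under the same parent; with this
 * patch the failure is logged and dump_stack() is called. */
static struct kobject a, b;

static int register_two_widgets(struct kobject *parent)
{
        int error;

        kobject_init(&a);
        kobject_set_name(&a, "widget0");
        a.parent = parent;
        error = kobject_add(&a);
        if (error)
                return error;

        kobject_init(&b);
        kobject_set_name(&b, "widget0");        /* duplicate name */
        b.parent = parent;
        return kobject_add(&b);                 /* now warns loudly */
}
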
Signed-off-by: Greg Kroah-Hartman --- lib/kobject.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/kobject.c b/lib/kobject.c index efe67fa96a71..36668c8c3ea1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -194,6 +194,17 @@ int kobject_add(struct kobject * kobj) unlink(kobj); if (parent) kobject_put(parent); + + /* be noisy on error issues */ + if (error == -EEXIST) + printk("kobject_add failed for %s with -EEXIST, " + "don't try to register things with the " + "same name in the same directory.\n", + kobject_name(kobj)); + else + printk("kobject_add failed for %s (%d)\n", + kobject_name(kobj), error); + dump_stack(); } return error; @@ -207,18 +218,13 @@ int kobject_add(struct kobject * kobj) int kobject_register(struct kobject * kobj) { - int error = 0; + int error = -EINVAL; if (kobj) { kobject_init(kobj); error = kobject_add(kobj); - if (error) { - printk("kobject_register failed for %s (%d)\n", - kobject_name(kobj),error); - dump_stack(); - } else + if (!error) kobject_uevent(kobj, KOBJ_ADD); - } else - error = -EINVAL; + } return error; } -- cgit v1.2.2 From 7423172a50968de1905a61413c52bb070a62f5ce Mon Sep 17 00:00:00 2001 From: Jun'ichi Nomura Date: Mon, 13 Mar 2006 17:14:25 -0500 Subject: [PATCH] kobject_add_dir Adding kobject_add_dir() function which creates a subdirectory for a given kobject. Signed-off-by: Jun'ichi Nomura Signed-off-by: Greg Kroah-Hartman --- lib/kobject.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'lib') diff --git a/lib/kobject.c b/lib/kobject.c index 36668c8c3ea1..25204a41a9b0 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -385,6 +385,44 @@ void kobject_put(struct kobject * kobj) } +static void dir_release(struct kobject *kobj) +{ + kfree(kobj); +} + +static struct kobj_type dir_ktype = { + .release = dir_release, + .sysfs_ops = NULL, + .default_attrs = NULL, +}; + +/** + * kobject_add_dir - add sub directory of object. + * @parent: object in which a directory is created. + * @name: directory name. + * + * Add a plain directory object as child of given object. + */ +struct kobject *kobject_add_dir(struct kobject *parent, const char *name) +{ + struct kobject *k; + + if (!parent) + return NULL; + + k = kzalloc(sizeof(*k), GFP_KERNEL); + if (!k) + return NULL; + + k->parent = parent; + k->ktype = &dir_ktype; + kobject_set_name(k, name); + kobject_register(k); + + return k; +} +EXPORT_SYMBOL_GPL(kobject_add_dir); + /** * kset_init - initialize a kset for use * @k: kset -- cgit v1.2.2 From f4a641d66c6e135dcfc861521e8008faed2411e1 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 22 Mar 2006 00:07:46 -0800 Subject: [PATCH] multiple exports of strpbrk Sam's tree includes a new check, which found that we're exporting strpbrk() multiple times. It seems that the convention is that this is exported from the arch files, so remove the lib/string.c export.
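For readers who have not seen the convention the changelog refers to: the generic routine in lib/string.c is only compiled when the architecture has not claimed the function, so the EXPORT_SYMBOL belongs with whichever side actually builds the code. The following is a simplified sketch of that pattern, not a verbatim copy of the kernel sources:

/* In the architecture's asm/string.h (sketch): claim the function. */
#define __HAVE_ARCH_STRPBRK
extern char *strpbrk(const char *cs, const char *ct);

/* The arch's own string code then defines and exports its optimized
 * strpbrk(); the generic fallback below is guarded so the two never
 * clash, and after this patch the export lives only with the arch: */
#ifndef __HAVE_ARCH_STRPBRK
char *strpbrk(const char *cs, const char *ct)
{
        const char *sc1, *sc2;

        for (sc1 = cs; *sc1 != '\0'; ++sc1)
                for (sc2 = ct; *sc2 != '\0'; ++sc2)
                        if (*sc1 == *sc2)
                                return (char *)sc1;
        return NULL;
}
#endif
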
Cc: Sam Ravnborg Cc: Yoshinori Sato Cc: David Howells Cc: Greg Ungerer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/string.c | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/string.c b/lib/string.c index 037a48acedbb..b3c28a3f6332 100644 --- a/lib/string.c +++ b/lib/string.c @@ -403,7 +403,6 @@ char *strpbrk(const char *cs, const char *ct) } return NULL; } -EXPORT_SYMBOL(strpbrk); #endif #ifndef __HAVE_ARCH_STRSEP -- cgit v1.2.2 From 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Thu, 23 Mar 2006 03:00:24 -0800 Subject: [PATCH] sem2mutex: kernel/ Semaphore to mutex conversion. The conversion was generated via scripts, and the result was validated automatically via a script as well. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/reed_solomon/reed_solomon.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index f5fef948a415..f8ac9fa95de1 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c @@ -44,12 +44,13 @@ #include #include #include +#include #include /* This list holds all currently allocated rs control structures */ static LIST_HEAD (rslist); /* Protection for the list */ -static DECLARE_MUTEX(rslistlock); +static DEFINE_MUTEX(rslistlock); /** * rs_init - Initialize a Reed-Solomon codec @@ -161,7 +162,7 @@ errrs: */ void free_rs(struct rs_control *rs) { - down(&rslistlock); + mutex_lock(&rslistlock); rs->users--; if(!rs->users) { list_del(&rs->list); @@ -170,7 +171,7 @@ void free_rs(struct rs_control *rs) kfree(rs->genpoly); kfree(rs); } - up(&rslistlock); + mutex_unlock(&rslistlock); } /** @@ -201,7 +202,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, if (nroots < 0 || nroots >= (1<list, &rslist); } out: - up(&rslistlock); + mutex_unlock(&rslistlock); return rs; } -- cgit v1.2.2 From 87e24802586333fa861861f6493c76039872755b Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Fri, 24 Mar 2006 03:15:44 -0800 Subject: [PATCH] bitmap: region cleanup Paul Mundt says: This patch set implements a number of patches to clean up and restructure the bitmap region code, in addition to extending the interface to support multiword spanning allocations. The current implementation (before this patch set) is limited by only being able to allocate pages <= BITS_PER_LONG, as noted by the strategically positioned BUG_ON() at lib/bitmap.c:752: /* We don't do regions of pages > BITS_PER_LONG. The * algorithm would be a simple look for multiple zeros in the * array, but there's no driver today that needs this. If you * trip this BUG(), you get to code it... */ BUG_ON(pages > BITS_PER_LONG); As I seem to have been the first person to trigger this, the result ends up being the following patch set with the help of Paul Jackson. The final patch in the series eliminates quite a bit of code duplication, so the bitmap code size ends up being smaller than the current implementation as an added bonus. After these are applied, it should already be possible to do multiword allocations with dma_alloc_coherent() out of ranges established by dma_declare_coherent_memory() on x86 without having to change any of the code, and the SH store queue API will follow up on this as the other user that needs support for this. 
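Before the cleanup details, a quick sketch of how a caller drives the bitmap_*_region() interface; the pool size and order below are made up for illustration, and the return-value convention (bit offset on success, negative errno on failure) is the one documented later in this series.

#include <linux/bitmap.h>

#define POOL_BITS 256

static DECLARE_BITMAP(pool, POOL_BITS);

static int use_region(void)
{
        int pos;

        /* allocate an aligned run of 1 << 3 = 8 bits */
        pos = bitmap_find_free_region(pool, POOL_BITS, 3);
        if (pos < 0)
                return pos;             /* e.g. -ENOMEM: nothing free */

        /* ... bits pos .. pos+7 are now reserved for this caller ... */

        bitmap_release_region(pool, pos, 3);
        return 0;
}
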
This patch: Some code cleanup on the lib/bitmap.c bitmap_*_region() routines: * spacing * variable names * comments Has no change to code function. Signed-off-by: Paul Mundt Signed-off-by: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 64 ++++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 26 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 48e708381d44..3fab1ce9ac65 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -677,39 +677,38 @@ int bitmap_bitremap(int oldbit, const unsigned long *old, EXPORT_SYMBOL(bitmap_bitremap); /** - * bitmap_find_free_region - find a contiguous aligned mem region + * bitmap_find_free_region - find a contiguous aligned mem region * @bitmap: an array of unsigned longs corresponding to the bitmap * @bits: number of bits in the bitmap * @order: region size to find (size is actually 1< BITS_PER_LONG) + if (nbits > BITS_PER_LONG) return -EINVAL; /* make a mask of the order */ - mask = (1ul << (pages - 1)); + mask = (1UL << (nbits - 1)); mask += mask - 1; - /* run up the bitmap pages bits at a time */ - for (i = 0; i < bits; i += pages) { - int index = i/BITS_PER_LONG; + /* run up the bitmap nbits at a time */ + for (i = 0; i < bits; i += nbits) { + int index = i / BITS_PER_LONG; int offset = i - (index * BITS_PER_LONG); - if((bitmap[index] & (mask << offset)) == 0) { - /* set region in bimap */ + if ((bitmap[index] & (mask << offset)) == 0) { + /* set region in bitmap */ bitmap[index] |= (mask << offset); return i; } @@ -719,7 +718,7 @@ int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) EXPORT_SYMBOL(bitmap_find_free_region); /** - * bitmap_release_region - release allocated bitmap region + * bitmap_release_region - release allocated bitmap region * @bitmap: a pointer to the bitmap * @pos: the beginning of the region * @order: the order of the bits to release (number is 1< BITS_PER_LONG. The + /* + * We don't do regions of nbits > BITS_PER_LONG. The * algorithm would be a simple look for multiple zeros in the * array, but there's no driver today that needs this. If you - * trip this BUG(), you get to code it... */ - BUG_ON(pages > BITS_PER_LONG); + * trip this BUG(), you get to code it... + */ + BUG_ON(nbits > BITS_PER_LONG); mask += mask - 1; if (bitmap[index] & (mask << offset)) return -EBUSY; -- cgit v1.2.2 From 74373c6acc52450ced28780d5fece60f1d7d20aa Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 24 Mar 2006 03:15:45 -0800 Subject: [PATCH] bitmap: region multiword spanning support Add support to the lib/bitmap.c bitmap_*_region() routines For bitmap regions larger than one word (nbits > BITS_PER_LONG). This removes a BUG_ON() in lib bitmap. I have an updated store queue API for SH that is currently using this with relative success, and at first glance, it seems like this could be useful for x86 (arch/i386/kernel/pci-dma.c) as well. Particularly for anything using dma_declare_coherent_memory() on large areas and that attempts to allocate large buffers from that space. Paul Jackson also did some cleanup to this patch. 
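Concretely, the new capability is an order larger than log2(BITS_PER_LONG). A hypothetical sketch (bitmap size and device wording invented for the example): on a 32-bit machine an order-7 request is 128 bits, i.e. four bitmap words, which previously hit the BUG_ON() and can now be allocated and released as a single region.

static DECLARE_BITMAP(dma_pool, 1024);

static int grab_big_region(void)
{
        int pos;

        pos = bitmap_find_free_region(dma_pool, 1024, 7); /* 128 bits */
        if (pos < 0)
                return pos;

        /* ... hand bits pos .. pos+127 to the hardware ... */

        bitmap_release_region(dma_pool, pos, 7);
        return 0;
}
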
Signed-off-by: Paul Mundt Signed-off-by: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 110 +++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 76 insertions(+), 34 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 3fab1ce9ac65..f49eabe09271 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -692,26 +692,44 @@ EXPORT_SYMBOL(bitmap_bitremap); */ int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) { - unsigned long mask; - int nbits = 1 << order; - int i; - - if (nbits > BITS_PER_LONG) - return -EINVAL; + int nbits; /* number of bits in region */ + int nlongs; /* num longs spanned by region in bitmap */ + int nbitsinlong; /* num bits of region in each spanned long */ + unsigned long mask; /* bitmask of bits [0 .. nbitsinlong-1] */ + int i; /* scans bitmap by longs */ + + nbits = 1 << order; + nlongs = (nbits + (BITS_PER_LONG - 1)) / BITS_PER_LONG; + nbitsinlong = nbits; + if (nbitsinlong > BITS_PER_LONG) + nbitsinlong = BITS_PER_LONG; /* make a mask of the order */ - mask = (1UL << (nbits - 1)); + mask = (1UL << (nbitsinlong - 1)); mask += mask - 1; - /* run up the bitmap nbits at a time */ - for (i = 0; i < bits; i += nbits) { + /* run up the bitmap nbitsinlong at a time */ + for (i = 0; i < bits; i += nbitsinlong) { int index = i / BITS_PER_LONG; int offset = i - (index * BITS_PER_LONG); - if ((bitmap[index] & (mask << offset)) == 0) { + int j, space = 1; + + /* find space in the bitmap */ + for (j = 0; j < nlongs; j++) + if ((bitmap[index + j] & (mask << offset))) { + space = 0; + break; + } + + /* keep looking */ + if (unlikely(!space)) + continue; + + for (j = 0; j < nlongs; j++) /* set region in bitmap */ - bitmap[index] |= (mask << offset); - return i; - } + bitmap[index + j] |= (mask << offset); + + return i; } return -ENOMEM; } @@ -728,13 +746,28 @@ EXPORT_SYMBOL(bitmap_find_free_region); */ void bitmap_release_region(unsigned long *bitmap, int pos, int order) { - int nbits = 1 << order; - unsigned long mask = (1UL << (nbits - 1)); - int index = pos / BITS_PER_LONG; - int offset = pos - (index * BITS_PER_LONG); - + int nbits; /* number of bits in region */ + int nlongs; /* num longs spanned by region in bitmap */ + int index; /* index first long of region in bitmap */ + int offset; /* bit offset region in bitmap[index] */ + int nbitsinlong; /* num bits of region in each spanned long */ + unsigned long mask; /* bitmask of bits [0 .. nbitsinlong-1] */ + int i; /* scans bitmap by longs */ + + nbits = 1 << order; + nlongs = (nbits + (BITS_PER_LONG - 1)) / BITS_PER_LONG; + index = pos / BITS_PER_LONG; + offset = pos - (index * BITS_PER_LONG); + + nbitsinlong = nbits; + if (nbitsinlong > BITS_PER_LONG) + nbitsinlong = BITS_PER_LONG; + + mask = (1UL << (nbitsinlong - 1)); mask += mask - 1; - bitmap[index] &= ~(mask << offset); + + for (i = 0; i < nlongs; i++) + bitmap[index + i] &= ~(mask << offset); } EXPORT_SYMBOL(bitmap_release_region); @@ -750,22 +783,31 @@ EXPORT_SYMBOL(bitmap_release_region); */ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) { - int nbits = 1 << order; - unsigned long mask = (1UL << (nbits - 1)); - int index = pos / BITS_PER_LONG; - int offset = pos - (index * BITS_PER_LONG); - - /* - * We don't do regions of nbits > BITS_PER_LONG. The - * algorithm would be a simple look for multiple zeros in the - * array, but there's no driver today that needs this. If you - * trip this BUG(), you get to code it... 
- */ - BUG_ON(nbits > BITS_PER_LONG); + int nbits; /* number of bits in region */ + int nlongs; /* num longs spanned by region in bitmap */ + int index; /* index first long of region in bitmap */ + int offset; /* bit offset region in bitmap[index] */ + int nbitsinlong; /* num bits of region in each spanned long */ + unsigned long mask; /* bitmask of bits [0 .. nbitsinlong-1] */ + int i; /* scans bitmap by longs */ + + nbits = 1 << order; + nlongs = (nbits + (BITS_PER_LONG - 1)) / BITS_PER_LONG; + index = pos / BITS_PER_LONG; + offset = pos - (index * BITS_PER_LONG); + + nbitsinlong = nbits; + if (nbitsinlong > BITS_PER_LONG) + nbitsinlong = BITS_PER_LONG; + + mask = (1UL << (nbitsinlong - 1)); mask += mask - 1; - if (bitmap[index] & (mask << offset)) - return -EBUSY; - bitmap[index] |= (mask << offset); + + for (i = 0; i < nlongs; i++) + if (bitmap[index + i] & (mask << offset)) + return -EBUSY; + for (i = 0; i < nlongs; i++) + bitmap[index + i] |= (mask << offset); return 0; } EXPORT_SYMBOL(bitmap_allocate_region); -- cgit v1.2.2 From 3cf64b933c90ba701cfdc7188431104c646d7c9e Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Fri, 24 Mar 2006 03:15:46 -0800 Subject: [PATCH] bitmap: region restructuring Restructure the bitmap_*_region() operations, to avoid code duplication. Also reduces binary text size by about 100 bytes (ia64 arch). The original Bottomley bitmap_*_region patch added about 1000 bytes of compiled kernel text (ia64). The Mundt multiword extension added another 600 bytes, and this restructuring patch gets back about 100 bytes. But the real motivation was the reduced amount of duplicated code. Tested by Paul Mundt using <= BITS_PER_LONG as well as power of 2 aligned multiword spanning allocations. Signed-off-by: Paul Mundt Signed-off-by: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 199 ++++++++++++++++++++++++++++++----------------------------- 1 file changed, 102 insertions(+), 97 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index f49eabe09271..8acab0e176ef 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -676,138 +676,143 @@ int bitmap_bitremap(int oldbit, const unsigned long *old, } EXPORT_SYMBOL(bitmap_bitremap); -/** - * bitmap_find_free_region - find a contiguous aligned mem region - * @bitmap: an array of unsigned longs corresponding to the bitmap - * @bits: number of bits in the bitmap - * @order: region size to find (size is actually 1< BITS_PER_LONG) - nbitsinlong = BITS_PER_LONG; + /* + * Either nlongs_reg == 1 (for small orders that fit in one long) + * or (offset == 0 && mask == ~0UL) (for larger multiword orders.) + */ + nbits_reg = 1 << order; + index = pos / BITS_PER_LONG; + offset = pos - (index * BITS_PER_LONG); + nlongs_reg = BITS_TO_LONGS(nbits_reg); + nbitsinlong = min(nbits_reg, BITS_PER_LONG); - /* make a mask of the order */ + /* + * Can't do "mask = (1UL << nbitsinlong) - 1", as that + * overflows if nbitsinlong == BITS_PER_LONG. 
+ */ mask = (1UL << (nbitsinlong - 1)); mask += mask - 1; + mask <<= offset; - /* run up the bitmap nbitsinlong at a time */ - for (i = 0; i < bits; i += nbitsinlong) { - int index = i / BITS_PER_LONG; - int offset = i - (index * BITS_PER_LONG); - int j, space = 1; - - /* find space in the bitmap */ - for (j = 0; j < nlongs; j++) - if ((bitmap[index + j] & (mask << offset))) { - space = 0; - break; - } - - /* keep looking */ - if (unlikely(!space)) - continue; - - for (j = 0; j < nlongs; j++) - /* set region in bitmap */ - bitmap[index + j] |= (mask << offset); - - return i; + switch (reg_op) { + case REG_OP_ISFREE: + for (i = 0; i < nlongs_reg; i++) { + if (bitmap[index + i] & mask) + goto done; + } + ret = 1; /* all bits in region free (zero) */ + break; + + case REG_OP_ALLOC: + for (i = 0; i < nlongs_reg; i++) + bitmap[index + i] |= mask; + break; + + case REG_OP_RELEASE: + for (i = 0; i < nlongs_reg; i++) + bitmap[index + i] &= ~mask; + break; } - return -ENOMEM; +done: + return ret; +} + +/** + * bitmap_find_free_region - find a contiguous aligned mem region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @bits: number of bits in the bitmap + * @order: region size (log base 2 of number of bits) to find + * + * Find a region of free (zero) bits in a @bitmap of @bits bits and + * allocate them (set them to one). Only consider regions of length + * a power (@order) of two, aligned to that power of two, which + * makes the search algorithm much faster. + * + * Return the bit offset in bitmap of the allocated region, + * or -errno on failure. + */ +int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) +{ + int pos; /* scans bitmap by regions of size order */ + + for (pos = 0; pos < bits; pos += (1 << order)) + if (__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + break; + if (pos == bits) + return -ENOMEM; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return pos; } EXPORT_SYMBOL(bitmap_find_free_region); /** * bitmap_release_region - release allocated bitmap region - * @bitmap: a pointer to the bitmap - * @pos: the beginning of the region - * @order: the order of the bits to release (number is 1< BITS_PER_LONG) - nbitsinlong = BITS_PER_LONG; - - mask = (1UL << (nbitsinlong - 1)); - mask += mask - 1; - - for (i = 0; i < nlongs; i++) - bitmap[index + i] &= ~(mask << offset); + __reg_op(bitmap, pos, order, REG_OP_RELEASE); } EXPORT_SYMBOL(bitmap_release_region); /** * bitmap_allocate_region - allocate bitmap region - * @bitmap: a pointer to the bitmap - * @pos: the beginning of the region - * @order: the order of the bits to allocate (number is 1< BITS_PER_LONG) - nbitsinlong = BITS_PER_LONG; - - mask = (1UL << (nbitsinlong - 1)); - mask += mask - 1; - - for (i = 0; i < nlongs; i++) - if (bitmap[index + i] & (mask << offset)) - return -EBUSY; - for (i = 0; i < nlongs; i++) - bitmap[index + i] |= (mask << offset); + if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + return -EBUSY; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); return 0; } EXPORT_SYMBOL(bitmap_allocate_region); -- cgit v1.2.2 From 604bf5a216e7f2d97cdf62614ca1281921531040 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 24 Mar 2006 03:16:19 -0800 Subject: [PATCH] CONFIG_UNWIND_INFO As a foundation for reliable stack unwinding, this adds a config option (available to all architectures except IA64 and those where the module loader might have problems with the resulting relocations) to enable the generation of frame unwind information. 
Signed-off-by: Jan Beulich Cc: Miles Bader Cc: "Luck, Tony" Cc: Ralf Baechle Cc: Kyle McMartin Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: "David S. Miller" Cc: Paul Mundt , Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a314e663d517..f2618e1c2b93 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -195,6 +195,17 @@ config FRAME_POINTER some architectures or if you use external debuggers. If you don't debug the kernel, you can say N. +config UNWIND_INFO + bool "Compile the kernel with frame unwind information" + depends on !IA64 + depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850) + default DEBUG_KERNEL + help + If you say Y here the resulting kernel image will be slightly larger + but not slower, and it will give very useful debugging information. + If you don't debug the kernel, you can say N, but we may not be able + to solve problems without frame unwind information or frame pointers. + config FORCED_INLINING bool "Force gcc to inline functions marked 'inline'" depends on DEBUG_KERNEL -- cgit v1.2.2 From 34814545890db603b7648ea2ea477d1f83b61297 Mon Sep 17 00:00:00 2001 From: Eric Sesterhenn Date: Fri, 24 Mar 2006 18:47:11 +0100 Subject: BUG_ON() Conversion in lib/swiotlb.c this changes if() BUG(); constructs to BUG_ON() which is cleaner, contains unlikely() and can better optimized away. Signed-off-by: Eric Sesterhenn Signed-off-by: Adrian Bunk --- lib/swiotlb.c | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 0af497b6b9a8..10625785eefd 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -296,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir) else stride = 1; - if (!nslots) - BUG(); + BUG_ON(!nslots); /* * Find suitable number of IO TLB entries size that will fit this @@ -416,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, case SYNC_FOR_CPU: if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) memcpy(buffer, dma_addr, size); - else if (dir != DMA_TO_DEVICE) - BUG(); + else + BUG_ON(dir != DMA_TO_DEVICE); break; case SYNC_FOR_DEVICE: if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) memcpy(dma_addr, buffer, size); - else if (dir != DMA_FROM_DEVICE) - BUG(); + else + BUG_ON(dir != DMA_FROM_DEVICE); break; default: BUG(); @@ -529,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) unsigned long dev_addr = virt_to_phys(ptr); void *map; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); /* * If the pointer passed in happens to be in the device's DMA window, * we can safely return the device addr and not worry about bounce @@ -592,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, { char *dma_addr = phys_to_virt(dev_addr); - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) unmap_single(hwdev, dma_addr, size, dir); else if (dir == DMA_FROM_DEVICE) @@ -616,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, { char *dma_addr = phys_to_virt(dev_addr); - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) sync_single(hwdev, dma_addr, size, dir, target); else if (dir == 
DMA_FROM_DEVICE) @@ -648,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, { char *dma_addr = phys_to_virt(dev_addr) + offset; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) sync_single(hwdev, dma_addr, size, dir, target); else if (dir == DMA_FROM_DEVICE) @@ -696,8 +691,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, unsigned long dev_addr; int i; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); for (i = 0; i < nelems; i++, sg++) { addr = SG_ENT_VIRT_ADDRESS(sg); @@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems, { int i; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); for (i = 0; i < nelems; i++, sg++) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) @@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, { int i; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); for (i = 0; i < nelems; i++, sg++) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) -- cgit v1.2.2 From 871751e25d956ad24f129ca972b7851feaa61d53 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Mar 2006 03:06:39 -0800 Subject: [PATCH] slab: implement /proc/slab_allocators Implement /proc/slab_allocators. It produces output like: idr_layer_cache: 80 idr_pre_get+0x33/0x4e buffer_head: 2555 alloc_buffer_head+0x20/0x75 mm_struct: 9 mm_alloc+0x1e/0x42 mm_struct: 20 dup_mm+0x36/0x370 vm_area_struct: 384 dup_mm+0x18f/0x370 vm_area_struct: 151 do_mmap_pgoff+0x2e0/0x7c3 vm_area_struct: 1 split_vma+0x5a/0x10e vm_area_struct: 11 do_brk+0x206/0x2e2 vm_area_struct: 2 copy_vma+0xda/0x142 vm_area_struct: 9 setup_arg_pages+0x99/0x214 fs_cache: 8 copy_fs_struct+0x21/0x133 fs_cache: 29 copy_process+0xf38/0x10e3 files_cache: 30 alloc_files+0x1b/0xcf signal_cache: 81 copy_process+0xbaa/0x10e3 sighand_cache: 77 copy_process+0xe65/0x10e3 sighand_cache: 1 de_thread+0x4d/0x5f8 anon_vma: 241 anon_vma_prepare+0xd9/0xf3 size-2048: 1 add_sect_attrs+0x5f/0x145 size-2048: 2 journal_init_revoke+0x99/0x302 size-2048: 2 journal_init_revoke+0x137/0x302 size-2048: 2 journal_init_inode+0xf9/0x1c4 Cc: Manfred Spraul Cc: Alexander Nyberg Cc: Pekka Enberg Cc: Christoph Lameter Cc: Ravikiran Thirumalai Signed-off-by: Al Viro DESC slab-leaks3-locking-fix EDESC From: Andrew Morton Update for slab-remove-cachep-spinlock.patch Cc: Al Viro Cc: Manfred Spraul Cc: Alexander Nyberg Cc: Pekka Enberg Cc: Christoph Lameter Cc: Ravikiran Thirumalai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f2618e1c2b93..1fe3f897145f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -85,6 +85,10 @@ config DEBUG_SLAB allocation as well as poisoning memory on free to catch use of freed memory. This can make kmalloc/kfree-intensive workloads much slower. +config DEBUG_SLAB_LEAK + bool "Memory leak debugging" + depends on DEBUG_SLAB + config DEBUG_PREEMPT bool "Debug preemptible kernel" depends on DEBUG_KERNEL && PREEMPT -- cgit v1.2.2 From 4a2f0acf0f951599fd9e4af95cf9483449970c26 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:07:22 -0800 Subject: [PATCH] kconfig: clarify memory debug options The Kconfig text for CONFIG_DEBUG_SLAB and CONFIG_DEBUG_PAGEALLOC have always seemed a bit confusing. 
Change them to: CONFIG_DEBUG_SLAB: "Debug slab memory allocations" CONFIG_DEBUG_PAGEALLOC: "Debug page memory allocations" Cc: "David S. Miller" Cc: Hirokazu Takata Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1fe3f897145f..0bda3c5259f7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -78,7 +78,7 @@ config SCHEDSTATS this adds. config DEBUG_SLAB - bool "Debug memory allocations" + bool "Debug slab memory allocations" depends on DEBUG_KERNEL && SLAB help Say Y here to have the kernel do limited verification on memory -- cgit v1.2.2 From daff89f324755f87a060d5125a205c0755811ea9 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Sat, 25 Mar 2006 03:08:05 -0800 Subject: [PATCH] radix-tree documentation cleanups Documentation changes to help radix tree users avoid overrunning the tags array. RADIX_TREE_TAGS moves to linux/radix-tree.h and is now known as RADIX_TREE_MAX_TAGS (Nick Piggin's idea). Tag parameters are changed to unsigned, and some comments are updated. Signed-off-by: Jonathan Corbet Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 49 +++++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 22 deletions(-) (limited to 'lib') diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 1e5b17dc7e3d..7097bb239e40 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -37,7 +37,6 @@ #else #define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ #endif -#define RADIX_TREE_TAGS 2 #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) @@ -48,7 +47,7 @@ struct radix_tree_node { unsigned int count; void *slots[RADIX_TREE_MAP_SIZE]; - unsigned long tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS]; + unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; struct radix_tree_path { @@ -135,17 +134,20 @@ out: return ret; } -static inline void tag_set(struct radix_tree_node *node, int tag, int offset) +static inline void tag_set(struct radix_tree_node *node, unsigned int tag, + int offset) { __set_bit(offset, node->tags[tag]); } -static inline void tag_clear(struct radix_tree_node *node, int tag, int offset) +static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, + int offset) { __clear_bit(offset, node->tags[tag]); } -static inline int tag_get(struct radix_tree_node *node, int tag, int offset) +static inline int tag_get(struct radix_tree_node *node, unsigned int tag, + int offset) { return test_bit(offset, node->tags[tag]); } @@ -154,7 +156,7 @@ static inline int tag_get(struct radix_tree_node *node, int tag, int offset) * Returns 1 if any slot in the node has this tag set. * Otherwise returns 0. */ -static inline int any_tag_set(struct radix_tree_node *node, int tag) +static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) { int idx; for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { @@ -180,7 +182,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) { struct radix_tree_node *node; unsigned int height; - char tags[RADIX_TREE_TAGS]; + char tags[RADIX_TREE_MAX_TAGS]; int tag; /* Figure out what the height should be. 
*/ @@ -197,7 +199,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) * Prepare the tag status of the top-level node for propagation * into the newly-pushed top-level node(s) */ - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { tags[tag] = 0; if (any_tag_set(root->rnode, tag)) tags[tag] = 1; @@ -211,7 +213,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) node->slots[0] = root->rnode; /* Propagate the aggregated tag info into the new root */ - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { if (tags[tag]) tag_set(node, tag, 0); } @@ -349,14 +351,15 @@ EXPORT_SYMBOL(radix_tree_lookup); * @index: index key * @tag: tag index * - * Set the search tag corresponging to @index in the radix tree. From + * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) + * corresponding to @index in the radix tree. From * the root all the way down to the leaf node. * * Returns the address of the tagged item. Setting a tag on a not-present * item is a bug. */ void *radix_tree_tag_set(struct radix_tree_root *root, - unsigned long index, int tag) + unsigned long index, unsigned int tag) { unsigned int height, shift; struct radix_tree_node *slot; @@ -390,7 +393,8 @@ EXPORT_SYMBOL(radix_tree_tag_set); * @index: index key * @tag: tag index * - * Clear the search tag corresponging to @index in the radix tree. If + * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) + * corresponding to @index in the radix tree. If * this causes the leaf node to have no tags set then clear the tag in the * next-to-leaf node, etc. * @@ -398,7 +402,7 @@ EXPORT_SYMBOL(radix_tree_tag_set); * has the same return value and semantics as radix_tree_lookup(). */ void *radix_tree_tag_clear(struct radix_tree_root *root, - unsigned long index, int tag) + unsigned long index, unsigned int tag) { struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; struct radix_tree_node *slot; @@ -450,7 +454,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root * @index: index key - * @tag: tag index + * @tag: tag index (< RADIX_TREE_MAX_TAGS) * * Return values: * @@ -459,7 +463,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * -1: tag present, unset */ int radix_tree_tag_get(struct radix_tree_root *root, - unsigned long index, int tag) + unsigned long index, unsigned int tag) { unsigned int height, shift; struct radix_tree_node *slot; @@ -592,7 +596,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); */ static unsigned int __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index, - unsigned int max_items, unsigned long *next_index, int tag) + unsigned int max_items, unsigned long *next_index, unsigned int tag) { unsigned int nr_found = 0; unsigned int shift; @@ -646,7 +650,7 @@ out: * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results - * @tag: the tag index + * @tag: the tag index (< RADIX_TREE_MAX_TAGS) * * Performs an index-ascending scan of the tree for present items which * have the tag indexed by @tag set. 
Places the items at *@results and @@ -654,7 +658,8 @@ out: */ unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, int tag) + unsigned long first_index, unsigned int max_items, + unsigned int tag) { const unsigned long max_index = radix_tree_maxindex(root->height); unsigned long cur_index = first_index; @@ -716,7 +721,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) struct radix_tree_node *slot; unsigned int height, shift; void *ret = NULL; - char tags[RADIX_TREE_TAGS]; + char tags[RADIX_TREE_MAX_TAGS]; int nr_cleared_tags; int tag; int offset; @@ -751,7 +756,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) * Clear all tags associated with the just-deleted item */ nr_cleared_tags = 0; - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { tags[tag] = 1; if (tag_get(pathp->node, tag, pathp->offset)) { tag_clear(pathp->node, tag, pathp->offset); @@ -763,7 +768,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) } for (pathp--; nr_cleared_tags && pathp->node; pathp--) { - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { if (tags[tag]) continue; @@ -801,7 +806,7 @@ EXPORT_SYMBOL(radix_tree_delete); * @root: radix tree root * @tag: tag to test */ -int radix_tree_tagged(struct radix_tree_root *root, int tag) +int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) { struct radix_tree_node *rnode; rnode = root->rnode; -- cgit v1.2.2 From ccb46000f4bb459777686611157ac0eac928704e Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:08 -0800 Subject: [PATCH] cpumask: uninline first_cpu() text data bss dec hex filename before: 3490577 1322408 360000 5172985 4eeef9 vmlinux after: 3488027 1322496 360128 5170651 4ee5db vmlinux Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Makefile | 2 ++ lib/cpumask.c | 11 +++++++++++ 2 files changed, 13 insertions(+) create mode 100644 lib/cpumask.c (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index 648b2c1242fd..f827e3c24ec0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -7,6 +7,8 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \ idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ sha1.o +lib-$(CONFIG_SMP) += cpumask.o + lib-y += kobject.o kref.o kobject_uevent.o klist.o obj-y += sort.o parser.o halfmd4.o iomap_copy.o diff --git a/lib/cpumask.c b/lib/cpumask.c new file mode 100644 index 000000000000..1560d97390dd --- /dev/null +++ b/lib/cpumask.c @@ -0,0 +1,11 @@ +#include +#include +#include +#include + +int __first_cpu(const cpumask_t *srcp) +{ + return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); +} +EXPORT_SYMBOL(__first_cpu); + -- cgit v1.2.2 From 3d18bd74a22d0bed3bc81fc64c4ba6344a10f155 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:09 -0800 Subject: [PATCH] cpumask: uninline next_cpu() text data bss dec hex filename before: 3488027 1322496 360128 5170651 4ee5db vmlinux after: 3485112 1322480 359968 5167560 4ed9c8 vmlinux 2931 bytes saved Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/cpumask.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'lib') diff --git a/lib/cpumask.c b/lib/cpumask.c index 1560d97390dd..ba2f8543052c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -9,3 +9,8 @@ int __first_cpu(const 
cpumask_t *srcp) } EXPORT_SYMBOL(__first_cpu); +int __next_cpu(int n, const cpumask_t *srcp) +{ + return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); +} +EXPORT_SYMBOL(__next_cpu); -- cgit v1.2.2 From 8630282070b4a52b12cfa514ba8558e2f3d56360 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:09 -0800 Subject: [PATCH] cpumask: uninline highest_possible_processor_id() Shrinks the only caller (net/bridge/netfilter/ebtables.c) by 174 bytes. Also, optimise highest_possible_processor_id() out of existence on CONFIG_SMP=n. Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/cpumask.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'lib') diff --git a/lib/cpumask.c b/lib/cpumask.c index ba2f8543052c..ea25a034276c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -14,3 +14,20 @@ int __next_cpu(int n, const cpumask_t *srcp) return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); + +/* + * Find the highest possible smp_processor_id() + * + * Note: if we're prepared to assume that cpu_possible_map never changes + * (reasonable) then this function should cache its return value. + */ +int highest_possible_processor_id(void) +{ + unsigned int cpu; + unsigned highest = 0; + + for_each_cpu_mask(cpu, cpu_possible_map) + highest = cpu; + return highest; +} +EXPORT_SYMBOL(highest_possible_processor_id); -- cgit v1.2.2 From 96a9b4d31eba4722ba7aad2cc15118a7799f499f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:10 -0800 Subject: [PATCH] cpumask: uninline any_online_cpu() text data bss dec hex filename before: 3605597 1363528 363328 5332453 515de5 vmlinux after: 3605295 1363612 363200 5332107 515c8b vmlinux 218 bytes saved. Also, optimise any_online_cpu() out of existence on CONFIG_SMP=n. This function seems inefficient. Can't we simply AND the two masks, then use find_first_bit()? Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/cpumask.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'lib') diff --git a/lib/cpumask.c b/lib/cpumask.c index ea25a034276c..3a67dc5ada7d 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -31,3 +31,15 @@ int highest_possible_processor_id(void) return highest; } EXPORT_SYMBOL(highest_possible_processor_id); + +int __any_online_cpu(const cpumask_t *mask) +{ + int cpu; + + for_each_cpu_mask(cpu, *mask) { + if (cpu_online(cpu)) + break; + } + return cpu; +} +EXPORT_SYMBOL(__any_online_cpu); -- cgit v1.2.2 From 6a0f03e0d35c10e07f1160ca75fc9a367931e38b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 25 Mar 2006 16:32:01 +0100 Subject: [PATCH] x86_64: Don't enable CONFIG_UNWIND_INFO by default for DEBUG_KERNEL DEBUG_KERNEL is often enabled just for sysrq, but this doesn't mean the user wants more heavyweight debugging information. Cc: jbeulich@novell.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0bda3c5259f7..7e70ab13e191 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -203,7 +203,6 @@ config UNWIND_INFO bool "Compile the kernel with frame unwind information" depends on !IA64 depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850) - default DEBUG_KERNEL help If you say Y here the resulting kernel image will be slightly larger but not slower, and it will give very useful debugging information. 
-- cgit v1.2.2 From c7f612cdf091def01454e7e132c7d7a3f419fbc4 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:11 -0800 Subject: [PATCH] bitops: generic find_{next,first}{,_zero}_bit() This patch introduces the C-language equivalents of the functions below: unsigned logn find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); unsigned long find_first_bit(const unsigned long *addr, unsigned long size); In include/asm-generic/bitops/find.h This code largely copied from: arch/powerpc/lib/bitops.c Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/find_next_bit.c | 112 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 81 insertions(+), 31 deletions(-) (limited to 'lib') diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index c05b4b19cf6c..9c90853b4472 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -11,48 +11,98 @@ #include #include +#include -int find_next_bit(const unsigned long *addr, int size, int offset) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) { - const unsigned long *base; - const int NBITS = sizeof(*addr) * 8; + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; - base = addr; + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; if (offset) { - int suboffset; - - addr += offset / NBITS; - - suboffset = offset % NBITS; - if (suboffset) { - tmp = *addr; - tmp >>= suboffset; - if (tmp) - goto finish; - } - - addr++; + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; } + if (!size) + return result; + tmp = *p; - while ((tmp = *addr) == 0) - addr++; +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} - offset = (addr - base) * NBITS; +EXPORT_SYMBOL(find_next_bit); - finish: - /* count the remaining bits without using __ffs() since that takes a 32-bit arg */ - while (!(tmp & 0xff)) { - offset += 8; - tmp >>= 8; - } +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus' asm-alpha/bitops.h. 
+ */ +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; - while (!(tmp & 1)) { - offset++; - tmp >>= 1; + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; } + if (!size) + return result; + tmp = *p; - return offset; +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); } -EXPORT_SYMBOL(find_next_bit); +EXPORT_SYMBOL(find_next_zero_bit); -- cgit v1.2.2 From 3b9ed1a5d2d121f32d2cb4f2b05f1fc57c99c946 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:13 -0800 Subject: [PATCH] bitops: generic hweight{64,32,16,8}() This patch introduces the C-language equivalents of the functions below: unsigned int hweight32(unsigned int w); unsigned int hweight16(unsigned int w); unsigned int hweight8(unsigned int w); unsigned long hweight64(__u64 w); In include/asm-generic/bitops/hweight.h This code largely copied from: include/linux/bitops.h Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Makefile | 1 + lib/hweight.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 lib/hweight.c (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index f827e3c24ec0..b830c9a15541 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -23,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o +lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o diff --git a/lib/hweight.c b/lib/hweight.c new file mode 100644 index 000000000000..721a4b8b4fb8 --- /dev/null +++ b/lib/hweight.c @@ -0,0 +1,54 @@ +#include +#include + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. 
+ */ + +unsigned int hweight32(unsigned int w) +{ + unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); + res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); + return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); +} +EXPORT_SYMBOL(hweight32); + +unsigned int hweight16(unsigned int w) +{ + unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); + res = (res & 0x3333) + ((res >> 2) & 0x3333); + res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); + return (res & 0x00FF) + ((res >> 8) & 0x00FF); +} +EXPORT_SYMBOL(hweight16); + +unsigned int hweight8(unsigned int w) +{ + unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res & 0x0F) + ((res >> 4) & 0x0F); +} +EXPORT_SYMBOL(hweight8); + +unsigned long hweight64(__u64 w) +{ +#if BITS_PER_LONG == 32 + return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); +#elif BITS_PER_LONG == 64 + u64 res; + res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); + res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); + res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); + res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); + return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); +#else +#error BITS_PER_LONG not defined +#endif +} +EXPORT_SYMBOL(hweight64); -- cgit v1.2.2 From 930ae745f50088279fdc06057a429f16495b53a2 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:15 -0800 Subject: [PATCH] bitops: generic ext2_{set,clear,test,find_first_zero,find_next_zero}_bit() This patch introduces the C-language equivalents of the functions below: int ext2_set_bit(int nr, volatile unsigned long *addr); int ext2_clear_bit(int nr, volatile unsigned long *addr); int ext2_test_bit(int nr, const volatile unsigned long *addr); unsigned long ext2_find_first_zero_bit(const unsigned long *addr, unsigned long size); unsinged long ext2_find_next_zero_bit(const unsigned long *addr, unsigned long size); In include/asm-generic/bitops/ext2-non-atomic.h This code largely copied from: include/asm-powerpc/bitops.h include/asm-parisc/bitops.h Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/find_next_bit.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'lib') diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 9c90853b4472..bda0d71a2514 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -12,6 +12,7 @@ #include #include #include +#include #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) @@ -106,3 +107,75 @@ found_middle: } EXPORT_SYMBOL(find_next_zero_bit); + +#ifdef __BIG_ENDIAN + +/* include/linux/byteorder does not support "unsigned long" type */ +static inline unsigned long ext2_swabp(const unsigned long * x) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64p((u64 *) x); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32p((u32 *) x); +#else +#error BITS_PER_LONG not defined +#endif +} + +/* include/linux/byteorder doesn't support "unsigned long" type */ +static inline unsigned long ext2_swab(const unsigned long y) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64((u64) y); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32((u32) y); +#else 
+#error BITS_PER_LONG not defined +#endif +} + +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp |= (~0UL >> (BITS_PER_LONG - offset)); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + if (~(tmp = *(p++))) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. Skip ffz */ +found_middle: + return result + ffz(tmp); + +found_middle_swap: + return result + ffz(ext2_swab(tmp)); +} + +EXPORT_SYMBOL(generic_find_next_zero_le_bit); + +#endif /* __BIG_ENDIAN */ -- cgit v1.2.2 From 37d54111c133bea05fbae9dfe6d3d61a1b19c09b Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:56 -0800 Subject: [PATCH] bitops: hweight() related cleanup By defining generic hweight*() routines - hweight64() will be defined on all architectures - hweight_long() will use architecture optimized hweight32() or hweight64() I found two possible cleanups by these reasons. Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 8acab0e176ef..ed2ae3b0cd06 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1, } EXPORT_SYMBOL(__bitmap_subset); -#if BITS_PER_LONG == 32 int __bitmap_weight(const unsigned long *bitmap, int bits) { int k, w = 0, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; k++) - w += hweight32(bitmap[k]); + w += hweight_long(bitmap[k]); if (bits % BITS_PER_LONG) - w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); return w; } -#else -int __bitmap_weight(const unsigned long *bitmap, int bits) -{ - int k, w = 0, lim = bits/BITS_PER_LONG; - - for (k = 0; k < lim; k++) - w += hweight64(bitmap[k]); - - if (bits % BITS_PER_LONG) - w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); - - return w; -} -#endif EXPORT_SYMBOL(__bitmap_weight); /* -- cgit v1.2.2 From f9b4192923fa6e38331e88214b1fe5fc21583fcc Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:40:00 -0800 Subject: [PATCH] bitops: hweight() speedup wrote: This is an extremely well-known technique. You can see a similar version that uses a multiply for the last few steps at http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel whch refers to "Software Optimization Guide for AMD Athlon 64 and Opteron Processors" http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/25112.PDF It's section 8.6, "Efficient Implementation of Population-Count Function in 32-bit Mode", pages 179-180. It uses the name that I am more familiar with, "popcount" (population count), although "Hamming weight" also makes sense. 
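As an aside that is not part of the quoted message: the obvious reference implementation counts one set bit per loop iteration, which is what the parallel version argued below replaces with a fixed number of shifts, masks and adds. A minimal sketch of such a reference count:

/* Naive reference popcount: clear the lowest set bit until none remain. */
static unsigned int hweight32_ref(unsigned int w)
{
        unsigned int count = 0;

        while (w) {
                w &= w - 1;     /* drop the lowest set bit */
                count++;
        }
        return count;
}
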
Anyway, the proof of correctness proceeds as follows:

	b = a - ((a >> 1) & 0x55555555);
	c = (b & 0x33333333) + ((b >> 2) & 0x33333333);
	d = (c + (c >> 4)) & 0x0f0f0f0f;
#if SLOW_MULTIPLY
	e = d + (d >> 8);
	f = e + (e >> 16);
	return f & 63;
#else
	/* Useful if multiply takes at most 4 cycles */
	return (d * 0x01010101) >> 24;
#endif

The input value a can be thought of as 32 1-bit fields each holding their own hamming weight. Now look at it as 16 2-bit fields. Each 2-bit field a1..a0 has the value 2*a1 + a0. This can be converted into the hamming weight of the 2-bit field a1+a0 by subtracting a1. That's what the (a >> 1) & mask subtraction does. Since there can be no borrows, you can just do it all at once. Enumerating the 4 possible cases:

	0b00 = 0 -> 0 - 0 = 0
	0b01 = 1 -> 1 - 0 = 1
	0b10 = 2 -> 2 - 1 = 1
	0b11 = 3 -> 3 - 1 = 2

The next step consists of breaking up b (made of 16 2-bit fields) into even and odd halves and adding them into 4-bit fields. Since the largest possible sum is 2+2 = 4,
which will not fit into a 4-bit field, the 2-bit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"which will not fit into a 2-bit field"
fields have to be masked before they are added.

After this point, the masking can be delayed. Each 4-bit field holds a population count from 0..4, taking at most 3 bits. These numbers can be added without overflowing a 4-bit field, so we can compute c + (c >> 4), and only then mask off the unwanted bits. This produces d, a number of 4 8-bit fields, each in the range 0..8. From this point, we can shift and add d multiple times without overflowing an 8-bit field, and only do a final mask at the end. The number to mask with has to be at least 63 (so that 32 won't be truncated), but can also be 128 or 255. The x86 has a special encoding for signed immediate byte values -128..127, so the value of 255 is slower. On other processors, a special "sign extend byte" instruction might be faster.

On a processor with fast integer multiplies (Athlon but not P4), you can reduce the final few serially dependent instructions to a single integer multiply. Consider d to be 4 8-bit values d3, d2, d1 and d0, each in the range 0..8. The multiply forms the partial products:

           d3 d2 d1 d0
        d3 d2 d1 d0
     d3 d2 d1 d0
+ d3 d2 d1 d0
----------------------
           e3 e2 e1 e0

Where e3 = d3 + d2 + d1 + d0. e2, e1 and e0 obviously cannot generate any carries.
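To make the argument concrete, here is a small self-check (not part of the patch) that can be built as an ordinary userspace program; it runs the patched hweight32() steps, transcribed from the diff that follows, against a naive count for one sample value, 0xDEADBEEF, which has 24 bits set.

#include <stdio.h>

static unsigned int hweight32(unsigned int w)
{
        unsigned int res = w - ((w >> 1) & 0x55555555);

        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
}

static unsigned int naive(unsigned int w)
{
        unsigned int n = 0;

        for (; w; w >>= 1)
                n += w & 1;
        return n;
}

int main(void)
{
        unsigned int w = 0xDEADBEEF;

        /* both lines report 24 */
        printf("parallel: %u\n", hweight32(w));
        printf("naive:    %u\n", naive(w));
        return 0;
}
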
Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/hweight.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'lib') diff --git a/lib/hweight.c b/lib/hweight.c index 721a4b8b4fb8..438257671708 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -10,28 +10,28 @@ unsigned int hweight32(unsigned int w) { - unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); + unsigned int res = w - ((w >> 1) & 0x55555555); res = (res & 0x33333333) + ((res >> 2) & 0x33333333); - res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); - res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); - return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; } EXPORT_SYMBOL(hweight32); unsigned int hweight16(unsigned int w) { - unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); + unsigned int res = w - ((w >> 1) & 0x5555); res = (res & 0x3333) + ((res >> 2) & 0x3333); - res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); - return (res & 0x00FF) + ((res >> 8) & 0x00FF); + res = (res + (res >> 4)) & 0x0F0F; + return (res + (res >> 8)) & 0x00FF; } EXPORT_SYMBOL(hweight16); unsigned int hweight8(unsigned int w) { - unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); + unsigned int res = w - ((w >> 1) & 0x55); res = (res & 0x33) + ((res >> 2) & 0x33); - return (res & 0x0F) + ((res >> 4) & 0x0F); + return (res + (res >> 4)) & 0x0F; } EXPORT_SYMBOL(hweight8); @@ -40,13 +40,12 @@ unsigned long hweight64(__u64 w) #if BITS_PER_LONG == 32 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 - u64 res; - res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); + __u64 res = w - ((w >> 1) & 0x5555555555555555ul); res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); - res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); - res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); - res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); - return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); + res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; + res = res + (res >> 8); + res = res + (res >> 16); + return (res + (res >> 32)) & 0x00000000000000FFul; #else #error BITS_PER_LONG not defined #endif -- cgit v1.2.2 From ae36b883d29e53b6083ed3d1d44f254cee7507d3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 26 Mar 2006 14:38:54 +0200 Subject: [PATCH] Don't make debugfs depend on DEBUG_KERNEL We use it generally now, at least blktrace isn't a specific debug kernel feature. Signed-off-by: Jens Axboe --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7e70ab13e191..6e8a60f67c7a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -172,7 +172,7 @@ config DEBUG_IOREMAP config DEBUG_FS bool "Debug Filesystem" - depends on DEBUG_KERNEL && SYSFS + depends on SYSFS help debugfs is a virtual file system that kernel developers use to put debugging files into. Enable this option to be able to read and -- cgit v1.2.2 From a41d3862dfd44a1b09a0f6243bb34773061fd9a2 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 23 Mar 2006 01:07:00 -0700 Subject: [PARISC] Remove obsolete CONFIG_DEBUG_IOREMAP Remove CONFIG_DEBUG_IOREMAP, it's now obsolete and won't work anyway. 
Remove it from lib/KConfig since it was only available on parisc. Signed-off-by: Helge Deller Signed-off-by: Kyle McMartin --- lib/Kconfig.debug | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6e8a60f67c7a..d57fd9181b18 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -157,19 +157,6 @@ config DEBUG_INFO If unsure, say N. -config DEBUG_IOREMAP - bool "Enable ioremap() debugging" - depends on DEBUG_KERNEL && PARISC - help - Enabling this option will cause the kernel to distinguish between - ioremapped and physical addresses. It will print a backtrace (at - most one every 10 seconds), hopefully allowing you to see which - drivers need work. Fixing all these problems is a prerequisite - for turning on USE_HPPA_IOREMAP. The warnings are harmless; - the kernel has enough information to fix the broken drivers - automatically, but we'd like to make it more efficient by not - having to do that. - config DEBUG_FS bool "Debug Filesystem" depends on SYSFS -- cgit v1.2.2