Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  98
-rw-r--r--  lib/Makefile         |   3
-rw-r--r--  lib/find_next_bit.c  |  43
-rw-r--r--  lib/kernel_lock.c    | 123
-rw-r--r--  lib/kobject.c        | 742
-rw-r--r--  lib/kobject_uevent.c |  38
-rw-r--r--  lib/kref.c           |  15
-rw-r--r--  lib/pcounter.c       |  58
-rw-r--r--  lib/proportions.c    |  37
-rw-r--r--  lib/rwsem.c          |  10
-rw-r--r--  lib/scatterlist.c    | 294
11 files changed, 1016 insertions, 445 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a60109307d32..89f4035b526c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -79,6 +79,38 @@ config HEADERS_CHECK | |||
| 79 | exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in | 79 | exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in |
| 80 | your build tree), to make sure they're suitable. | 80 | your build tree), to make sure they're suitable. |
| 81 | 81 | ||
| 82 | config DEBUG_SECTION_MISMATCH | ||
| 83 | bool "Enable full Section mismatch analysis" | ||
| 84 | default n | ||
| 85 | help | ||
| 86 | The section mismatch analysis checks if there are illegal | ||
| 87 | references from one section to another section. | ||
| 88 | Linux will, at link time or at runtime, drop some sections | ||
| 89 | and any use of code/data previously in these sections will | ||
| 90 | most likely result in an oops. | ||
| 91 | In the code functions and variables are annotated with | ||
| 92 | __init, __devinit etc. (see full list in include/linux/init.h) | ||
| 93 | which result in the code/data being placed in specific sections. | ||
| 94 | The section mismatch analysis is always done after a full | ||
| 95 | kernel build, but enabling this option will in addition | ||
| 96 | do the following: | ||
| 97 | - Add the option -fno-inline-functions-called-once to gcc | ||
| 98 | When inlining a function annotated __init in a non-init | ||
| 99 | function we would lose the section information and thus | ||
| 100 | the analysis would not catch the illegal reference. | ||
| 101 | This option tells gcc to inline less but will also | ||
| 102 | result in a larger kernel. | ||
| 103 | - Run the section mismatch analysis for each module/built-in.o | ||
| 104 | When we run the section mismatch analysis on vmlinux.o we | ||
| 105 | lose valuable information about where the mismatch was | ||
| 106 | introduced. | ||
| 107 | Running the analysis for each module/built-in.o file | ||
| 108 | will tell where the mismatch happens much closer to the | ||
| 109 | source. The drawback is that we will report the same | ||
| 110 | mismatch at least twice. | ||
| 111 | - Enable verbose reporting from modpost to help solve | ||
| 112 | the section mismatches reported. | ||
| 113 | |||
| 82 | config DEBUG_KERNEL | 114 | config DEBUG_KERNEL |
| 83 | bool "Kernel debugging" | 115 | bool "Kernel debugging" |
| 84 | help | 116 | help |
| @@ -462,6 +494,30 @@ config RCU_TORTURE_TEST | |||
| 462 | Say M if you want the RCU torture tests to build as a module. | 494 | Say M if you want the RCU torture tests to build as a module. |
| 463 | Say N if you are unsure. | 495 | Say N if you are unsure. |
| 464 | 496 | ||
| 497 | config KPROBES_SANITY_TEST | ||
| 498 | bool "Kprobes sanity tests" | ||
| 499 | depends on DEBUG_KERNEL | ||
| 500 | depends on KPROBES | ||
| 501 | default n | ||
| 502 | help | ||
| 503 | This option provides for testing basic kprobes functionality on | ||
| 504 | boot. A sample kprobe, jprobe and kretprobe are inserted and | ||
| 505 | verified for functionality. | ||
| 506 | |||
| 507 | Say N if you are unsure. | ||
| 508 | |||
| 509 | config BACKTRACE_SELF_TEST | ||
| 510 | tristate "Self test for the backtrace code" | ||
| 511 | depends on DEBUG_KERNEL | ||
| 512 | default n | ||
| 513 | help | ||
| 514 | This option provides a kernel module that can be used to test | ||
| 515 | the kernel stack backtrace code. This option is not useful | ||
| 516 | for distributions or general kernels, but only for kernel | ||
| 517 | developers working on architecture code. | ||
| 518 | |||
| 519 | Say N if you are unsure. | ||
| 520 | |||
| 465 | config LKDTM | 521 | config LKDTM |
| 466 | tristate "Linux Kernel Dump Test Tool Module" | 522 | tristate "Linux Kernel Dump Test Tool Module" |
| 467 | depends on DEBUG_KERNEL | 523 | depends on DEBUG_KERNEL |
| @@ -517,4 +573,46 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
| 517 | help | 573 | help |
| 518 | Provide stacktrace filter for fault-injection capabilities | 574 | Provide stacktrace filter for fault-injection capabilities |
| 519 | 575 | ||
| 576 | config LATENCYTOP | ||
| 577 | bool "Latency measuring infrastructure" | ||
| 578 | select FRAME_POINTER if !MIPS | ||
| 579 | select KALLSYMS | ||
| 580 | select KALLSYMS_ALL | ||
| 581 | select STACKTRACE | ||
| 582 | select SCHEDSTATS | ||
| 583 | select SCHED_DEBUG | ||
| 584 | depends on X86 || X86_64 | ||
| 585 | help | ||
| 586 | Enable this option if you want to use the LatencyTOP tool | ||
| 587 | to find out which userspace processes block on which kernel operations. | ||
| 588 | |||
| 589 | config PROVIDE_OHCI1394_DMA_INIT | ||
| 590 | bool "Provide code for enabling DMA over FireWire early on boot" | ||
| 591 | depends on PCI && X86 | ||
| 592 | help | ||
| 593 | If you want to debug problems which hang or crash the kernel early | ||
| 594 | on boot and the crashing machine has a FireWire port, you can use | ||
| 595 | this feature to remotely access the memory of the crashed machine | ||
| 596 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
| 597 | specification, which is now the standard for FireWire controllers. | ||
| 598 | |||
| 599 | With remote DMA, you can monitor the printk buffer remotely using | ||
| 600 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
| 601 | Even controlling a kernel debugger is possible using remote DMA. | ||
| 602 | |||
| 603 | Usage: | ||
| 604 | |||
| 605 | If ohci1394_dma=early is used as a boot parameter, it will initialize | ||
| 606 | all OHCI1394 controllers which are found in the PCI config space. | ||
| 607 | |||
| 608 | As all changes to the FireWire bus such as enabling and disabling | ||
| 609 | devices cause a bus reset and thereby disable remote DMA for all | ||
| 610 | devices, be sure to have the cable plugged and FireWire enabled on | ||
| 611 | the debugging host before booting the debug target for debugging. | ||
| 612 | |||
| 613 | This code (~1k) is freed after boot. By then, the firewire stack | ||
| 614 | in charge of the OHCI-1394 controllers should be used instead. | ||
| 615 | |||
| 616 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
| 617 | |||
| 520 | source "samples/Kconfig" | 618 | source "samples/Kconfig" |
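To make the new DEBUG_SECTION_MISMATCH help text concrete, here is a hedged sketch (not part of this patch; all names are hypothetical) of the kind of illegal cross-section reference that modpost reports: a regular .text function calling an __init function whose section is discarded after boot.

    #include <linux/init.h>

    /* placed in .init.text and freed once boot completes */
    static int __init probe_board(void)
    {
            return 0;
    }

    /* ordinary .text; may run long after .init.text is gone */
    int rescan_board(void)
    {
            /*
             * Section mismatch: a reference from .text to .init.text.
             * With -fno-inline-functions-called-once (added by this
             * option), gcc keeps probe_board() out of line, so the
             * bogus reference stays visible to the analysis.
             */
            return probe_board();
    }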
diff --git a/lib/Makefile b/lib/Makefile
index b6793ed28d84..543f2502b60a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
| 6 | rbtree.o radix-tree.o dump_stack.o \ | 6 | rbtree.o radix-tree.o dump_stack.o \ |
| 7 | idr.o int_sqrt.o extable.o prio_tree.o \ | 7 | idr.o int_sqrt.o extable.o prio_tree.o \ |
| 8 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 8 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
| 9 | proportions.o prio_heap.o | 9 | proportions.o prio_heap.o scatterlist.o |
| 10 | 10 | ||
| 11 | lib-$(CONFIG_MMU) += ioremap.o | 11 | lib-$(CONFIG_MMU) += ioremap.o |
| 12 | lib-$(CONFIG_SMP) += cpumask.o | 12 | lib-$(CONFIG_SMP) += cpumask.o |
| @@ -61,6 +61,7 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o | |||
| 61 | obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o | 61 | obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o |
| 62 | obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o | 62 | obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o |
| 63 | obj-$(CONFIG_SMP) += percpu_counter.o | 63 | obj-$(CONFIG_SMP) += percpu_counter.o |
| 64 | obj-$(CONFIG_SMP) += pcounter.o | ||
| 64 | obj-$(CONFIG_AUDIT_GENERIC) += audit.o | 65 | obj-$(CONFIG_AUDIT_GENERIC) += audit.o |
| 65 | 66 | ||
| 66 | obj-$(CONFIG_SWIOTLB) += swiotlb.o | 67 | obj-$(CONFIG_SWIOTLB) += swiotlb.o |
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index bda0d71a2514..78ccd73a8841 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
| @@ -178,4 +178,47 @@ found_middle_swap: | |||
| 178 | 178 | ||
| 179 | EXPORT_SYMBOL(generic_find_next_zero_le_bit); | 179 | EXPORT_SYMBOL(generic_find_next_zero_le_bit); |
| 180 | 180 | ||
| 181 | unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned | ||
| 182 | long size, unsigned long offset) | ||
| 183 | { | ||
| 184 | const unsigned long *p = addr + BITOP_WORD(offset); | ||
| 185 | unsigned long result = offset & ~(BITS_PER_LONG - 1); | ||
| 186 | unsigned long tmp; | ||
| 187 | |||
| 188 | if (offset >= size) | ||
| 189 | return size; | ||
| 190 | size -= result; | ||
| 191 | offset &= (BITS_PER_LONG - 1UL); | ||
| 192 | if (offset) { | ||
| 193 | tmp = ext2_swabp(p++); | ||
| 194 | tmp &= (~0UL << offset); | ||
| 195 | if (size < BITS_PER_LONG) | ||
| 196 | goto found_first; | ||
| 197 | if (tmp) | ||
| 198 | goto found_middle; | ||
| 199 | size -= BITS_PER_LONG; | ||
| 200 | result += BITS_PER_LONG; | ||
| 201 | } | ||
| 202 | |||
| 203 | while (size & ~(BITS_PER_LONG - 1)) { | ||
| 204 | tmp = *(p++); | ||
| 205 | if (tmp) | ||
| 206 | goto found_middle_swap; | ||
| 207 | result += BITS_PER_LONG; | ||
| 208 | size -= BITS_PER_LONG; | ||
| 209 | } | ||
| 210 | if (!size) | ||
| 211 | return result; | ||
| 212 | tmp = ext2_swabp(p); | ||
| 213 | found_first: | ||
| 214 | tmp &= (~0UL >> (BITS_PER_LONG - size)); | ||
| 215 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 216 | return result + size; /* Nope. */ | ||
| 217 | found_middle: | ||
| 218 | return result + __ffs(tmp); | ||
| 219 | |||
| 220 | found_middle_swap: | ||
| 221 | return result + __ffs(ext2_swab(tmp)); | ||
| 222 | } | ||
| 223 | EXPORT_SYMBOL(generic_find_next_le_bit); | ||
| 181 | #endif /* __BIG_ENDIAN */ | 224 | #endif /* __BIG_ENDIAN */ |
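A hedged usage sketch for the new generic_find_next_le_bit() (the bitmap and nbits variables are hypothetical): together with the existing generic_find_next_zero_le_bit(), it lets big-endian machines scan little-endian on-disk bitmaps, such as ext2 block bitmaps.

    /* walk every set bit in a little-endian bitmap of nbits bits */
    unsigned long bit = generic_find_next_le_bit(bitmap, nbits, 0);

    while (bit < nbits) {
            printk(KERN_DEBUG "bit %lu is set\n", bit);
            bit = generic_find_next_le_bit(bitmap, nbits, bit + 1);
    }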
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index f73e2f8c308f..812dbf00844b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
| @@ -9,7 +9,6 @@ | |||
| 9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 10 | #include <linux/kallsyms.h> | 10 | #include <linux/kallsyms.h> |
| 11 | 11 | ||
| 12 | #ifdef CONFIG_PREEMPT_BKL | ||
| 13 | /* | 12 | /* |
| 14 | * The 'big kernel semaphore' | 13 | * The 'big kernel semaphore' |
| 15 | * | 14 | * |
| @@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void) | |||
| 86 | up(&kernel_sem); | 85 | up(&kernel_sem); |
| 87 | } | 86 | } |
| 88 | 87 | ||
| 89 | #else | ||
| 90 | |||
| 91 | /* | ||
| 92 | * The 'big kernel lock' | ||
| 93 | * | ||
| 94 | * This spinlock is taken and released recursively by lock_kernel() | ||
| 95 | * and unlock_kernel(). It is transparently dropped and reacquired | ||
| 96 | * over schedule(). It is used to protect legacy code that hasn't | ||
| 97 | * been migrated to a proper locking design yet. | ||
| 98 | * | ||
| 99 | * Don't use in new code. | ||
| 100 | */ | ||
| 101 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | ||
| 102 | |||
| 103 | |||
| 104 | /* | ||
| 105 | * Acquire/release the underlying lock from the scheduler. | ||
| 106 | * | ||
| 107 | * This is called with preemption disabled, and should | ||
| 108 | * return an error value if it cannot get the lock and | ||
| 109 | * TIF_NEED_RESCHED gets set. | ||
| 110 | * | ||
| 111 | * If it successfully gets the lock, it should increment | ||
| 112 | * the preemption count like any spinlock does. | ||
| 113 | * | ||
| 114 | * (This works on UP too - _raw_spin_trylock will never | ||
| 115 | * return false in that case) | ||
| 116 | */ | ||
| 117 | int __lockfunc __reacquire_kernel_lock(void) | ||
| 118 | { | ||
| 119 | while (!_raw_spin_trylock(&kernel_flag)) { | ||
| 120 | if (test_thread_flag(TIF_NEED_RESCHED)) | ||
| 121 | return -EAGAIN; | ||
| 122 | cpu_relax(); | ||
| 123 | } | ||
| 124 | preempt_disable(); | ||
| 125 | return 0; | ||
| 126 | } | ||
| 127 | |||
| 128 | void __lockfunc __release_kernel_lock(void) | ||
| 129 | { | ||
| 130 | _raw_spin_unlock(&kernel_flag); | ||
| 131 | preempt_enable_no_resched(); | ||
| 132 | } | ||
| 133 | |||
| 134 | /* | ||
| 135 | * These are the BKL spinlocks - we try to be polite about preemption. | ||
| 136 | * If SMP is not on (ie UP preemption), this all goes away because the | ||
| 137 | * _raw_spin_trylock() will always succeed. | ||
| 138 | */ | ||
| 139 | #ifdef CONFIG_PREEMPT | ||
| 140 | static inline void __lock_kernel(void) | ||
| 141 | { | ||
| 142 | preempt_disable(); | ||
| 143 | if (unlikely(!_raw_spin_trylock(&kernel_flag))) { | ||
| 144 | /* | ||
| 145 | * If preemption was disabled even before this | ||
| 146 | * was called, there's nothing we can be polite | ||
| 147 | * about - just spin. | ||
| 148 | */ | ||
| 149 | if (preempt_count() > 1) { | ||
| 150 | _raw_spin_lock(&kernel_flag); | ||
| 151 | return; | ||
| 152 | } | ||
| 153 | |||
| 154 | /* | ||
| 155 | * Otherwise, let's wait for the kernel lock | ||
| 156 | * with preemption enabled.. | ||
| 157 | */ | ||
| 158 | do { | ||
| 159 | preempt_enable(); | ||
| 160 | while (spin_is_locked(&kernel_flag)) | ||
| 161 | cpu_relax(); | ||
| 162 | preempt_disable(); | ||
| 163 | } while (!_raw_spin_trylock(&kernel_flag)); | ||
| 164 | } | ||
| 165 | } | ||
| 166 | |||
| 167 | #else | ||
| 168 | |||
| 169 | /* | ||
| 170 | * Non-preemption case - just get the spinlock | ||
| 171 | */ | ||
| 172 | static inline void __lock_kernel(void) | ||
| 173 | { | ||
| 174 | _raw_spin_lock(&kernel_flag); | ||
| 175 | } | ||
| 176 | #endif | ||
| 177 | |||
| 178 | static inline void __unlock_kernel(void) | ||
| 179 | { | ||
| 180 | /* | ||
| 181 | * the BKL is not covered by lockdep, so we open-code the | ||
| 182 | * unlocking sequence (and thus avoid the dep-chain ops): | ||
| 183 | */ | ||
| 184 | _raw_spin_unlock(&kernel_flag); | ||
| 185 | preempt_enable(); | ||
| 186 | } | ||
| 187 | |||
| 188 | /* | ||
| 189 | * Getting the big kernel lock. | ||
| 190 | * | ||
| 191 | * This cannot happen asynchronously, so we only need to | ||
| 192 | * worry about other CPU's. | ||
| 193 | */ | ||
| 194 | void __lockfunc lock_kernel(void) | ||
| 195 | { | ||
| 196 | int depth = current->lock_depth+1; | ||
| 197 | if (likely(!depth)) | ||
| 198 | __lock_kernel(); | ||
| 199 | current->lock_depth = depth; | ||
| 200 | } | ||
| 201 | |||
| 202 | void __lockfunc unlock_kernel(void) | ||
| 203 | { | ||
| 204 | BUG_ON(current->lock_depth < 0); | ||
| 205 | if (likely(--current->lock_depth < 0)) | ||
| 206 | __unlock_kernel(); | ||
| 207 | } | ||
| 208 | |||
| 209 | #endif | ||
| 210 | |||
| 211 | EXPORT_SYMBOL(lock_kernel); | 88 | EXPORT_SYMBOL(lock_kernel); |
| 212 | EXPORT_SYMBOL(unlock_kernel); | 89 | EXPORT_SYMBOL(unlock_kernel); |
| 213 | 90 | ||
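With the spinlock variant removed above, the semaphore implementation is the only BKL left, and callers are unchanged. A hedged sketch of the legacy pattern the removed comments describe (do_legacy_ioctl() is a hypothetical helper; the BKL must not be used in new code):

    static int legacy_ioctl(struct inode *inode, struct file *file,
                            unsigned int cmd, unsigned long arg)
    {
            int ret;

            lock_kernel();          /* nests via current->lock_depth */
            ret = do_legacy_ioctl(cmd, arg);
            unlock_kernel();        /* transparently dropped and
                                       reacquired across schedule() */
            return ret;
    }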
diff --git a/lib/kobject.c b/lib/kobject.c
index b52e9f4ef371..1d63ead1815e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
| @@ -18,58 +18,57 @@ | |||
| 18 | #include <linux/stat.h> | 18 | #include <linux/stat.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | 20 | ||
| 21 | /** | 21 | /* |
| 22 | * populate_dir - populate directory with attributes. | 22 | * populate_dir - populate directory with attributes. |
| 23 | * @kobj: object we're working on. | 23 | * @kobj: object we're working on. |
| 24 | * | ||
| 25 | * Most subsystems have a set of default attributes that | ||
| 26 | * are associated with an object that registers with them. | ||
| 27 | * This is a helper called during object registration that | ||
| 28 | * loops through the default attributes of the subsystem | ||
| 29 | * and creates attributes files for them in sysfs. | ||
| 30 | * | 24 | * |
| 25 | * Most subsystems have a set of default attributes that are associated | ||
| 26 | * with an object that registers with them. This is a helper called during | ||
| 27 | * object registration that loops through the default attributes of the | ||
| 28 | * subsystem and creates attributes files for them in sysfs. | ||
| 31 | */ | 29 | */ |
| 32 | 30 | static int populate_dir(struct kobject *kobj) | |
| 33 | static int populate_dir(struct kobject * kobj) | ||
| 34 | { | 31 | { |
| 35 | struct kobj_type * t = get_ktype(kobj); | 32 | struct kobj_type *t = get_ktype(kobj); |
| 36 | struct attribute * attr; | 33 | struct attribute *attr; |
| 37 | int error = 0; | 34 | int error = 0; |
| 38 | int i; | 35 | int i; |
| 39 | 36 | ||
| 40 | if (t && t->default_attrs) { | 37 | if (t && t->default_attrs) { |
| 41 | for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) { | 38 | for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) { |
| 42 | if ((error = sysfs_create_file(kobj,attr))) | 39 | error = sysfs_create_file(kobj, attr); |
| 40 | if (error) | ||
| 43 | break; | 41 | break; |
| 44 | } | 42 | } |
| 45 | } | 43 | } |
| 46 | return error; | 44 | return error; |
| 47 | } | 45 | } |
| 48 | 46 | ||
| 49 | static int create_dir(struct kobject * kobj) | 47 | static int create_dir(struct kobject *kobj) |
| 50 | { | 48 | { |
| 51 | int error = 0; | 49 | int error = 0; |
| 52 | if (kobject_name(kobj)) { | 50 | if (kobject_name(kobj)) { |
| 53 | error = sysfs_create_dir(kobj); | 51 | error = sysfs_create_dir(kobj); |
| 54 | if (!error) { | 52 | if (!error) { |
| 55 | if ((error = populate_dir(kobj))) | 53 | error = populate_dir(kobj); |
| 54 | if (error) | ||
| 56 | sysfs_remove_dir(kobj); | 55 | sysfs_remove_dir(kobj); |
| 57 | } | 56 | } |
| 58 | } | 57 | } |
| 59 | return error; | 58 | return error; |
| 60 | } | 59 | } |
| 61 | 60 | ||
| 62 | static inline struct kobject * to_kobj(struct list_head * entry) | 61 | static inline struct kobject *to_kobj(struct list_head *entry) |
| 63 | { | 62 | { |
| 64 | return container_of(entry,struct kobject,entry); | 63 | return container_of(entry, struct kobject, entry); |
| 65 | } | 64 | } |
| 66 | 65 | ||
| 67 | static int get_kobj_path_length(struct kobject *kobj) | 66 | static int get_kobj_path_length(struct kobject *kobj) |
| 68 | { | 67 | { |
| 69 | int length = 1; | 68 | int length = 1; |
| 70 | struct kobject * parent = kobj; | 69 | struct kobject *parent = kobj; |
| 71 | 70 | ||
| 72 | /* walk up the ancestors until we hit the one pointing to the | 71 | /* walk up the ancestors until we hit the one pointing to the |
| 73 | * root. | 72 | * root. |
| 74 | * Add 1 to strlen for leading '/' of each level. | 73 | * Add 1 to strlen for leading '/' of each level. |
| 75 | */ | 74 | */ |
| @@ -84,18 +83,19 @@ static int get_kobj_path_length(struct kobject *kobj) | |||
| 84 | 83 | ||
| 85 | static void fill_kobj_path(struct kobject *kobj, char *path, int length) | 84 | static void fill_kobj_path(struct kobject *kobj, char *path, int length) |
| 86 | { | 85 | { |
| 87 | struct kobject * parent; | 86 | struct kobject *parent; |
| 88 | 87 | ||
| 89 | --length; | 88 | --length; |
| 90 | for (parent = kobj; parent; parent = parent->parent) { | 89 | for (parent = kobj; parent; parent = parent->parent) { |
| 91 | int cur = strlen(kobject_name(parent)); | 90 | int cur = strlen(kobject_name(parent)); |
| 92 | /* back up enough to print this name with '/' */ | 91 | /* back up enough to print this name with '/' */ |
| 93 | length -= cur; | 92 | length -= cur; |
| 94 | strncpy (path + length, kobject_name(parent), cur); | 93 | strncpy(path + length, kobject_name(parent), cur); |
| 95 | *(path + --length) = '/'; | 94 | *(path + --length) = '/'; |
| 96 | } | 95 | } |
| 97 | 96 | ||
| 98 | pr_debug("%s: path = '%s'\n",__FUNCTION__,path); | 97 | pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), |
| 98 | kobj, __FUNCTION__, path); | ||
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | /** | 101 | /** |
| @@ -123,179 +123,286 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) | |||
| 123 | } | 123 | } |
| 124 | EXPORT_SYMBOL_GPL(kobject_get_path); | 124 | EXPORT_SYMBOL_GPL(kobject_get_path); |
| 125 | 125 | ||
| 126 | /** | 126 | /* add the kobject to its kset's list */ |
| 127 | * kobject_init - initialize object. | 127 | static void kobj_kset_join(struct kobject *kobj) |
| 128 | * @kobj: object in question. | ||
| 129 | */ | ||
| 130 | void kobject_init(struct kobject * kobj) | ||
| 131 | { | 128 | { |
| 132 | if (!kobj) | 129 | if (!kobj->kset) |
| 133 | return; | 130 | return; |
| 134 | kref_init(&kobj->kref); | 131 | |
| 135 | INIT_LIST_HEAD(&kobj->entry); | 132 | kset_get(kobj->kset); |
| 136 | kobj->kset = kset_get(kobj->kset); | 133 | spin_lock(&kobj->kset->list_lock); |
| 134 | list_add_tail(&kobj->entry, &kobj->kset->list); | ||
| 135 | spin_unlock(&kobj->kset->list_lock); | ||
| 137 | } | 136 | } |
| 138 | 137 | ||
| 138 | /* remove the kobject from its kset's list */ | ||
| 139 | static void kobj_kset_leave(struct kobject *kobj) | ||
| 140 | { | ||
| 141 | if (!kobj->kset) | ||
| 142 | return; | ||
| 139 | 143 | ||
| 140 | /** | 144 | spin_lock(&kobj->kset->list_lock); |
| 141 | * unlink - remove kobject from kset list. | 145 | list_del_init(&kobj->entry); |
| 142 | * @kobj: kobject. | 146 | spin_unlock(&kobj->kset->list_lock); |
| 143 | * | 147 | kset_put(kobj->kset); |
| 144 | * Remove the kobject from the kset list and decrement | 148 | } |
| 145 | * its parent's refcount. | ||
| 146 | * This is separated out, so we can use it in both | ||
| 147 | * kobject_del() and kobject_add() on error. | ||
| 148 | */ | ||
| 149 | 149 | ||
| 150 | static void unlink(struct kobject * kobj) | 150 | static void kobject_init_internal(struct kobject *kobj) |
| 151 | { | 151 | { |
| 152 | if (kobj->kset) { | 152 | if (!kobj) |
| 153 | spin_lock(&kobj->kset->list_lock); | 153 | return; |
| 154 | list_del_init(&kobj->entry); | 154 | kref_init(&kobj->kref); |
| 155 | spin_unlock(&kobj->kset->list_lock); | 155 | INIT_LIST_HEAD(&kobj->entry); |
| 156 | } | ||
| 157 | kobject_put(kobj); | ||
| 158 | } | 156 | } |
| 159 | 157 | ||
| 160 | /** | ||
| 161 | * kobject_add - add an object to the hierarchy. | ||
| 162 | * @kobj: object. | ||
| 163 | */ | ||
| 164 | 158 | ||
| 165 | int kobject_add(struct kobject * kobj) | 159 | static int kobject_add_internal(struct kobject *kobj) |
| 166 | { | 160 | { |
| 167 | int error = 0; | 161 | int error = 0; |
| 168 | struct kobject * parent; | 162 | struct kobject *parent; |
| 169 | 163 | ||
| 170 | if (!(kobj = kobject_get(kobj))) | 164 | if (!kobj) |
| 171 | return -ENOENT; | 165 | return -ENOENT; |
| 172 | if (!kobj->k_name) | 166 | |
| 173 | kobject_set_name(kobj, "NO_NAME"); | 167 | if (!kobj->name || !kobj->name[0]) { |
| 174 | if (!*kobj->k_name) { | 168 | pr_debug("kobject: (%p): attempted to be registered with empty " |
| 175 | pr_debug("kobject attempted to be registered with no name!\n"); | 169 | "name!\n", kobj); |
| 176 | WARN_ON(1); | 170 | WARN_ON(1); |
| 177 | kobject_put(kobj); | ||
| 178 | return -EINVAL; | 171 | return -EINVAL; |
| 179 | } | 172 | } |
| 180 | parent = kobject_get(kobj->parent); | ||
| 181 | 173 | ||
| 182 | pr_debug("kobject %s: registering. parent: %s, set: %s\n", | 174 | parent = kobject_get(kobj->parent); |
| 183 | kobject_name(kobj), parent ? kobject_name(parent) : "<NULL>", | ||
| 184 | kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>" ); | ||
| 185 | 175 | ||
| 176 | /* join kset if set, use it as parent if we do not already have one */ | ||
| 186 | if (kobj->kset) { | 177 | if (kobj->kset) { |
| 187 | spin_lock(&kobj->kset->list_lock); | ||
| 188 | |||
| 189 | if (!parent) | 178 | if (!parent) |
| 190 | parent = kobject_get(&kobj->kset->kobj); | 179 | parent = kobject_get(&kobj->kset->kobj); |
| 191 | 180 | kobj_kset_join(kobj); | |
| 192 | list_add_tail(&kobj->entry,&kobj->kset->list); | ||
| 193 | spin_unlock(&kobj->kset->list_lock); | ||
| 194 | kobj->parent = parent; | 181 | kobj->parent = parent; |
| 195 | } | 182 | } |
| 196 | 183 | ||
| 184 | pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", | ||
| 185 | kobject_name(kobj), kobj, __FUNCTION__, | ||
| 186 | parent ? kobject_name(parent) : "<NULL>", | ||
| 187 | kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); | ||
| 188 | |||
| 197 | error = create_dir(kobj); | 189 | error = create_dir(kobj); |
| 198 | if (error) { | 190 | if (error) { |
| 199 | /* unlink does the kobject_put() for us */ | 191 | kobj_kset_leave(kobj); |
| 200 | unlink(kobj); | ||
| 201 | kobject_put(parent); | 192 | kobject_put(parent); |
| 193 | kobj->parent = NULL; | ||
| 202 | 194 | ||
| 203 | /* be noisy on error issues */ | 195 | /* be noisy on error issues */ |
| 204 | if (error == -EEXIST) | 196 | if (error == -EEXIST) |
| 205 | printk(KERN_ERR "kobject_add failed for %s with " | 197 | printk(KERN_ERR "%s failed for %s with " |
| 206 | "-EEXIST, don't try to register things with " | 198 | "-EEXIST, don't try to register things with " |
| 207 | "the same name in the same directory.\n", | 199 | "the same name in the same directory.\n", |
| 208 | kobject_name(kobj)); | 200 | __FUNCTION__, kobject_name(kobj)); |
| 209 | else | 201 | else |
| 210 | printk(KERN_ERR "kobject_add failed for %s (%d)\n", | 202 | printk(KERN_ERR "%s failed for %s (%d)\n", |
| 211 | kobject_name(kobj), error); | 203 | __FUNCTION__, kobject_name(kobj), error); |
| 212 | dump_stack(); | 204 | dump_stack(); |
| 213 | } | 205 | } else |
| 206 | kobj->state_in_sysfs = 1; | ||
| 214 | 207 | ||
| 215 | return error; | 208 | return error; |
| 216 | } | 209 | } |
| 217 | 210 | ||
| 218 | /** | 211 | /** |
| 219 | * kobject_register - initialize and add an object. | 212 | * kobject_set_name_vargs - Set the name of a kobject |
| 220 | * @kobj: object in question. | 213 | * @kobj: struct kobject to set the name of |
| 214 | * @fmt: format string used to build the name | ||
| 215 | * @vargs: vargs to format the string. | ||
| 221 | */ | 216 | */ |
| 217 | static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | ||
| 218 | va_list vargs) | ||
| 219 | { | ||
| 220 | va_list aq; | ||
| 221 | char *name; | ||
| 222 | |||
| 223 | va_copy(aq, vargs); | ||
| 224 | name = kvasprintf(GFP_KERNEL, fmt, aq); | ||
| 225 | va_end(aq); | ||
| 226 | |||
| 227 | if (!name) | ||
| 228 | return -ENOMEM; | ||
| 222 | 229 | ||
| 223 | int kobject_register(struct kobject * kobj) | 230 | /* Free the old name, if necessary. */ |
| 231 | kfree(kobj->name); | ||
| 232 | |||
| 233 | /* Now, set the new name */ | ||
| 234 | kobj->name = name; | ||
| 235 | |||
| 236 | return 0; | ||
| 237 | } | ||
| 238 | |||
| 239 | /** | ||
| 240 | * kobject_set_name - Set the name of a kobject | ||
| 241 | * @kobj: struct kobject to set the name of | ||
| 242 | * @fmt: format string used to build the name | ||
| 243 | * | ||
| 244 | * This sets the name of the kobject. If you have already added the | ||
| 245 | * kobject to the system, you must call kobject_rename() in order to | ||
| 246 | * change the name of the kobject. | ||
| 247 | */ | ||
| 248 | int kobject_set_name(struct kobject *kobj, const char *fmt, ...) | ||
| 224 | { | 249 | { |
| 225 | int error = -EINVAL; | 250 | va_list args; |
| 226 | if (kobj) { | 251 | int retval; |
| 227 | kobject_init(kobj); | 252 | |
| 228 | error = kobject_add(kobj); | 253 | va_start(args, fmt); |
| 229 | if (!error) | 254 | retval = kobject_set_name_vargs(kobj, fmt, args); |
| 230 | kobject_uevent(kobj, KOBJ_ADD); | 255 | va_end(args); |
| 256 | |||
| 257 | return retval; | ||
| 258 | } | ||
| 259 | EXPORT_SYMBOL(kobject_set_name); | ||
| 260 | |||
| 261 | /** | ||
| 262 | * kobject_init - initialize a kobject structure | ||
| 263 | * @kobj: pointer to the kobject to initialize | ||
| 264 | * @ktype: pointer to the ktype for this kobject. | ||
| 265 | * | ||
| 266 | * This function will properly initialize a kobject such that it can then | ||
| 267 | * be passed to the kobject_add() call. | ||
| 268 | * | ||
| 269 | * After this function is called, the kobject MUST be cleaned up by a call | ||
| 270 | * to kobject_put(), not by a call to kfree directly to ensure that all of | ||
| 271 | * the memory is cleaned up properly. | ||
| 272 | */ | ||
| 273 | void kobject_init(struct kobject *kobj, struct kobj_type *ktype) | ||
| 274 | { | ||
| 275 | char *err_str; | ||
| 276 | |||
| 277 | if (!kobj) { | ||
| 278 | err_str = "invalid kobject pointer!"; | ||
| 279 | goto error; | ||
| 231 | } | 280 | } |
| 232 | return error; | 281 | if (!ktype) { |
| 282 | err_str = "must have a ktype to be initialized properly!"; | ||
| 283 | goto error; | ||
| 284 | } | ||
| 285 | if (kobj->state_initialized) { | ||
| 286 | /* do not error out as sometimes we can recover */ | ||
| 287 | printk(KERN_ERR "kobject (%p): tried to init an initialized " | ||
| 288 | "object, something is seriously wrong.\n", kobj); | ||
| 289 | dump_stack(); | ||
| 290 | } | ||
| 291 | |||
| 292 | kref_init(&kobj->kref); | ||
| 293 | INIT_LIST_HEAD(&kobj->entry); | ||
| 294 | kobj->ktype = ktype; | ||
| 295 | kobj->state_in_sysfs = 0; | ||
| 296 | kobj->state_add_uevent_sent = 0; | ||
| 297 | kobj->state_remove_uevent_sent = 0; | ||
| 298 | kobj->state_initialized = 1; | ||
| 299 | return; | ||
| 300 | |||
| 301 | error: | ||
| 302 | printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str); | ||
| 303 | dump_stack(); | ||
| 233 | } | 304 | } |
| 305 | EXPORT_SYMBOL(kobject_init); | ||
| 234 | 306 | ||
| 307 | static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, | ||
| 308 | const char *fmt, va_list vargs) | ||
| 309 | { | ||
| 310 | va_list aq; | ||
| 311 | int retval; | ||
| 312 | |||
| 313 | va_copy(aq, vargs); | ||
| 314 | retval = kobject_set_name_vargs(kobj, fmt, aq); | ||
| 315 | va_end(aq); | ||
| 316 | if (retval) { | ||
| 317 | printk(KERN_ERR "kobject: can not set name properly!\n"); | ||
| 318 | return retval; | ||
| 319 | } | ||
| 320 | kobj->parent = parent; | ||
| 321 | return kobject_add_internal(kobj); | ||
| 322 | } | ||
| 235 | 323 | ||
| 236 | /** | 324 | /** |
| 237 | * kobject_set_name - Set the name of an object | 325 | * kobject_add - the main kobject add function |
| 238 | * @kobj: object. | 326 | * @kobj: the kobject to add |
| 239 | * @fmt: format string used to build the name | 327 | * @parent: pointer to the parent of the kobject. |
| 328 | * @fmt: format to name the kobject with. | ||
| 329 | * | ||
| 330 | * The kobject name is set and added to the kobject hierarchy in this | ||
| 331 | * function. | ||
| 332 | * | ||
| 333 | * If @parent is set, then the parent of the @kobj will be set to it. | ||
| 334 | * If @parent is NULL, then the parent of the @kobj will be set to the | ||
| 335 | * kobject associated with the kset assigned to this kobject. If no kset | ||
| 336 | * is assigned to the kobject, then the kobject will be located in the | ||
| 337 | * root of the sysfs tree. | ||
| 240 | * | 338 | * |
| 241 | * If strlen(name) >= KOBJ_NAME_LEN, then use a dynamically allocated | 339 | * If this function returns an error, kobject_put() must be called to |
| 242 | * string that @kobj->k_name points to. Otherwise, use the static | 340 | * properly clean up the memory associated with the object. |
| 243 | * @kobj->name array. | 341 | * Under no circumstances should the kobject that is passed to this function |
| 342 | * be directly freed with a call to kfree(); that can leak memory. | ||
| 343 | * | ||
| 344 | * Note, no "add" uevent will be created with this call, the caller should set | ||
| 345 | * up all of the necessary sysfs files for the object and then call | ||
| 346 | * kobject_uevent() with the KOBJ_ADD action to ensure that | ||
| 347 | * userspace is properly notified of this kobject's creation. | ||
| 244 | */ | 348 | */ |
| 245 | int kobject_set_name(struct kobject * kobj, const char * fmt, ...) | 349 | int kobject_add(struct kobject *kobj, struct kobject *parent, |
| 350 | const char *fmt, ...) | ||
| 246 | { | 351 | { |
| 247 | int error = 0; | ||
| 248 | int limit; | ||
| 249 | int need; | ||
| 250 | va_list args; | 352 | va_list args; |
| 251 | char *name; | 353 | int retval; |
| 252 | 354 | ||
| 253 | /* find out how big a buffer we need */ | 355 | if (!kobj) |
| 254 | name = kmalloc(1024, GFP_KERNEL); | 356 | return -EINVAL; |
| 255 | if (!name) { | ||
| 256 | error = -ENOMEM; | ||
| 257 | goto done; | ||
| 258 | } | ||
| 259 | va_start(args, fmt); | ||
| 260 | need = vsnprintf(name, 1024, fmt, args); | ||
| 261 | va_end(args); | ||
| 262 | kfree(name); | ||
| 263 | 357 | ||
| 264 | /* Allocate the new space and copy the string in */ | 358 | if (!kobj->state_initialized) { |
| 265 | limit = need + 1; | 359 | printk(KERN_ERR "kobject '%s' (%p): tried to add an " |
| 266 | name = kmalloc(limit, GFP_KERNEL); | 360 | "uninitialized object, something is seriously wrong.\n", |
| 267 | if (!name) { | 361 | kobject_name(kobj), kobj); |
| 268 | error = -ENOMEM; | 362 | dump_stack(); |
| 269 | goto done; | 363 | return -EINVAL; |
| 270 | } | 364 | } |
| 271 | va_start(args, fmt); | 365 | va_start(args, fmt); |
| 272 | need = vsnprintf(name, limit, fmt, args); | 366 | retval = kobject_add_varg(kobj, parent, fmt, args); |
| 273 | va_end(args); | 367 | va_end(args); |
| 274 | 368 | ||
| 275 | /* something wrong with the string we copied? */ | 369 | return retval; |
| 276 | if (need >= limit) { | 370 | } |
| 277 | kfree(name); | 371 | EXPORT_SYMBOL(kobject_add); |
| 278 | error = -EFAULT; | ||
| 279 | goto done; | ||
| 280 | } | ||
| 281 | 372 | ||
| 282 | /* Free the old name, if necessary. */ | 373 | /** |
| 283 | kfree(kobj->k_name); | 374 | * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy |
| 375 | * @kobj: pointer to the kobject to initialize | ||
| 376 | * @ktype: pointer to the ktype for this kobject. | ||
| 377 | * @parent: pointer to the parent of this kobject. | ||
| 378 | * @fmt: the name of the kobject. | ||
| 379 | * | ||
| 380 | * This function combines the call to kobject_init() and | ||
| 381 | * kobject_add(). The error handling after a call to | ||
| 382 | * kobject_add() and the kobject lifetime rules are the same here. | ||
| 383 | */ | ||
| 384 | int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, | ||
| 385 | struct kobject *parent, const char *fmt, ...) | ||
| 386 | { | ||
| 387 | va_list args; | ||
| 388 | int retval; | ||
| 284 | 389 | ||
| 285 | /* Now, set the new name */ | 390 | kobject_init(kobj, ktype); |
| 286 | kobj->k_name = name; | 391 | |
| 287 | done: | 392 | va_start(args, fmt); |
| 288 | return error; | 393 | retval = kobject_add_varg(kobj, parent, fmt, args); |
| 394 | va_end(args); | ||
| 395 | |||
| 396 | return retval; | ||
| 289 | } | 397 | } |
| 290 | EXPORT_SYMBOL(kobject_set_name); | 398 | EXPORT_SYMBOL_GPL(kobject_init_and_add); |
| 291 | 399 | ||
| 292 | /** | 400 | /** |
| 293 | * kobject_rename - change the name of an object | 401 | * kobject_rename - change the name of an object |
| 294 | * @kobj: object in question. | 402 | * @kobj: object in question. |
| 295 | * @new_name: object's new name | 403 | * @new_name: object's new name |
| 296 | */ | 404 | */ |
| 297 | 405 | int kobject_rename(struct kobject *kobj, const char *new_name) | |
| 298 | int kobject_rename(struct kobject * kobj, const char *new_name) | ||
| 299 | { | 406 | { |
| 300 | int error = 0; | 407 | int error = 0; |
| 301 | const char *devpath = NULL; | 408 | const char *devpath = NULL; |
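A hedged sketch of the two-step lifecycle documented above (struct foo and foo_release() are hypothetical); kobject_init_and_add() collapses the two calls into one:

    struct foo {
            struct kobject kobj;
            int value;
    };

    static void foo_release(struct kobject *kobj)
    {
            kfree(container_of(kobj, struct foo, kobj));
    }

    static struct kobj_type foo_ktype = {
            .release = foo_release,
    };

    static struct foo *create_foo(struct kobject *parent, int value)
    {
            struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return NULL;
            kobject_init(&foo->kobj, &foo_ktype);
            if (kobject_add(&foo->kobj, parent, "foo-%d", value)) {
                    /* never kfree() an initialized kobject directly */
                    kobject_put(&foo->kobj);
                    return NULL;
            }
            /* caller creates sysfs files, then announces the object */
            kobject_uevent(&foo->kobj, KOBJ_ADD);
            return foo;
    }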
| @@ -334,8 +441,6 @@ int kobject_rename(struct kobject * kobj, const char *new_name) | |||
| 334 | sprintf(devpath_string, "DEVPATH_OLD=%s", devpath); | 441 | sprintf(devpath_string, "DEVPATH_OLD=%s", devpath); |
| 335 | envp[0] = devpath_string; | 442 | envp[0] = devpath_string; |
| 336 | envp[1] = NULL; | 443 | envp[1] = NULL; |
| 337 | /* Note : if we want to send the new name alone, not the full path, | ||
| 338 | * we could probably use kobject_name(kobj); */ | ||
| 339 | 444 | ||
| 340 | error = sysfs_rename_dir(kobj, new_name); | 445 | error = sysfs_rename_dir(kobj, new_name); |
| 341 | 446 | ||
| @@ -354,11 +459,10 @@ out: | |||
| 354 | } | 459 | } |
| 355 | 460 | ||
| 356 | /** | 461 | /** |
| 357 | * kobject_move - move object to another parent | 462 | * kobject_move - move object to another parent |
| 358 | * @kobj: object in question. | 463 | * @kobj: object in question. |
| 359 | * @new_parent: object's new parent (can be NULL) | 464 | * @new_parent: object's new parent (can be NULL) |
| 360 | */ | 465 | */ |
| 361 | |||
| 362 | int kobject_move(struct kobject *kobj, struct kobject *new_parent) | 466 | int kobject_move(struct kobject *kobj, struct kobject *new_parent) |
| 363 | { | 467 | { |
| 364 | int error; | 468 | int error; |
| @@ -406,68 +510,74 @@ out: | |||
| 406 | } | 510 | } |
| 407 | 511 | ||
| 408 | /** | 512 | /** |
| 409 | * kobject_del - unlink kobject from hierarchy. | 513 | * kobject_del - unlink kobject from hierarchy. |
| 410 | * @kobj: object. | 514 | * @kobj: object. |
| 411 | */ | 515 | */ |
| 412 | 516 | void kobject_del(struct kobject *kobj) | |
| 413 | void kobject_del(struct kobject * kobj) | ||
| 414 | { | 517 | { |
| 415 | if (!kobj) | 518 | if (!kobj) |
| 416 | return; | 519 | return; |
| 417 | sysfs_remove_dir(kobj); | ||
| 418 | unlink(kobj); | ||
| 419 | } | ||
| 420 | |||
| 421 | /** | ||
| 422 | * kobject_unregister - remove object from hierarchy and decrement refcount. | ||
| 423 | * @kobj: object going away. | ||
| 424 | */ | ||
| 425 | 520 | ||
| 426 | void kobject_unregister(struct kobject * kobj) | 521 | sysfs_remove_dir(kobj); |
| 427 | { | 522 | kobj->state_in_sysfs = 0; |
| 428 | if (!kobj) | 523 | kobj_kset_leave(kobj); |
| 429 | return; | 524 | kobject_put(kobj->parent); |
| 430 | pr_debug("kobject %s: unregistering\n",kobject_name(kobj)); | 525 | kobj->parent = NULL; |
| 431 | kobject_uevent(kobj, KOBJ_REMOVE); | ||
| 432 | kobject_del(kobj); | ||
| 433 | kobject_put(kobj); | ||
| 434 | } | 526 | } |
| 435 | 527 | ||
| 436 | /** | 528 | /** |
| 437 | * kobject_get - increment refcount for object. | 529 | * kobject_get - increment refcount for object. |
| 438 | * @kobj: object. | 530 | * @kobj: object. |
| 439 | */ | 531 | */ |
| 440 | 532 | struct kobject *kobject_get(struct kobject *kobj) | |
| 441 | struct kobject * kobject_get(struct kobject * kobj) | ||
| 442 | { | 533 | { |
| 443 | if (kobj) | 534 | if (kobj) |
| 444 | kref_get(&kobj->kref); | 535 | kref_get(&kobj->kref); |
| 445 | return kobj; | 536 | return kobj; |
| 446 | } | 537 | } |
| 447 | 538 | ||
| 448 | /** | 539 | /* |
| 449 | * kobject_cleanup - free kobject resources. | 540 | * kobject_cleanup - free kobject resources. |
| 450 | * @kobj: object. | 541 | * @kobj: object to cleanup |
| 451 | */ | 542 | */ |
| 452 | 543 | static void kobject_cleanup(struct kobject *kobj) | |
| 453 | void kobject_cleanup(struct kobject * kobj) | ||
| 454 | { | 544 | { |
| 455 | struct kobj_type * t = get_ktype(kobj); | 545 | struct kobj_type *t = get_ktype(kobj); |
| 456 | struct kset * s = kobj->kset; | 546 | const char *name = kobj->name; |
| 457 | struct kobject * parent = kobj->parent; | 547 | |
| 458 | const char *name = kobj->k_name; | 548 | pr_debug("kobject: '%s' (%p): %s\n", |
| 549 | kobject_name(kobj), kobj, __FUNCTION__); | ||
| 550 | |||
| 551 | if (t && !t->release) | ||
| 552 | pr_debug("kobject: '%s' (%p): does not have a release() " | ||
| 553 | "function, it is broken and must be fixed.\n", | ||
| 554 | kobject_name(kobj), kobj); | ||
| 555 | |||
| 556 | /* send "remove" if the caller did not do it but sent "add" */ | ||
| 557 | if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { | ||
| 558 | pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n", | ||
| 559 | kobject_name(kobj), kobj); | ||
| 560 | kobject_uevent(kobj, KOBJ_REMOVE); | ||
| 561 | } | ||
| 562 | |||
| 563 | /* remove from sysfs if the caller did not do it */ | ||
| 564 | if (kobj->state_in_sysfs) { | ||
| 565 | pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n", | ||
| 566 | kobject_name(kobj), kobj); | ||
| 567 | kobject_del(kobj); | ||
| 568 | } | ||
| 459 | 569 | ||
| 460 | pr_debug("kobject %s: cleaning up\n",kobject_name(kobj)); | ||
| 461 | if (t && t->release) { | 570 | if (t && t->release) { |
| 571 | pr_debug("kobject: '%s' (%p): calling ktype release\n", | ||
| 572 | kobject_name(kobj), kobj); | ||
| 462 | t->release(kobj); | 573 | t->release(kobj); |
| 463 | /* If we have a release function, we can guess that this was | 574 | } |
| 464 | * not a statically allocated kobject, so we should be safe to | 575 | |
| 465 | * free the name */ | 576 | /* free name if we allocated it */ |
| 577 | if (name) { | ||
| 578 | pr_debug("kobject: '%s': free name\n", name); | ||
| 466 | kfree(name); | 579 | kfree(name); |
| 467 | } | 580 | } |
| 468 | if (s) | ||
| 469 | kset_put(s); | ||
| 470 | kobject_put(parent); | ||
| 471 | } | 581 | } |
| 472 | 582 | ||
| 473 | static void kobject_release(struct kref *kref) | 583 | static void kobject_release(struct kref *kref) |
| @@ -476,107 +586,130 @@ static void kobject_release(struct kref *kref) | |||
| 476 | } | 586 | } |
| 477 | 587 | ||
| 478 | /** | 588 | /** |
| 479 | * kobject_put - decrement refcount for object. | 589 | * kobject_put - decrement refcount for object. |
| 480 | * @kobj: object. | 590 | * @kobj: object. |
| 481 | * | 591 | * |
| 482 | * Decrement the refcount, and if 0, call kobject_cleanup(). | 592 | * Decrement the refcount, and if 0, call kobject_cleanup(). |
| 483 | */ | 593 | */ |
| 484 | void kobject_put(struct kobject * kobj) | 594 | void kobject_put(struct kobject *kobj) |
| 485 | { | 595 | { |
| 486 | if (kobj) | 596 | if (kobj) |
| 487 | kref_put(&kobj->kref, kobject_release); | 597 | kref_put(&kobj->kref, kobject_release); |
| 488 | } | 598 | } |
| 489 | 599 | ||
| 490 | 600 | static void dynamic_kobj_release(struct kobject *kobj) | |
| 491 | static void dir_release(struct kobject *kobj) | ||
| 492 | { | 601 | { |
| 602 | pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__); | ||
| 493 | kfree(kobj); | 603 | kfree(kobj); |
| 494 | } | 604 | } |
| 495 | 605 | ||
| 496 | static struct kobj_type dir_ktype = { | 606 | static struct kobj_type dynamic_kobj_ktype = { |
| 497 | .release = dir_release, | 607 | .release = dynamic_kobj_release, |
| 498 | .sysfs_ops = NULL, | 608 | .sysfs_ops = &kobj_sysfs_ops, |
| 499 | .default_attrs = NULL, | ||
| 500 | }; | 609 | }; |
| 501 | 610 | ||
| 502 | /** | 611 | /** |
| 503 | * kobject_kset_add_dir - add sub directory of object. | 612 | * kobject_create - create a struct kobject dynamically |
| 504 | * @kset: kset the directory is belongs to. | 613 | * |
| 505 | * @parent: object in which a directory is created. | 614 | * This function creates a kobject structure dynamically and sets it up |
| 506 | * @name: directory name. | 615 | * to be a "dynamic" kobject with a default release function set up. |
| 507 | * | 616 | * |
| 508 | * Add a plain directory object as child of given object. | 617 | * If the kobject was not able to be created, NULL will be returned. |
| 618 | * The kobject structure returned from here must be cleaned up with a | ||
| 619 | * call to kobject_put() and not kfree(), as kobject_init() has | ||
| 620 | * already been called on this structure. | ||
| 509 | */ | 621 | */ |
| 510 | struct kobject *kobject_kset_add_dir(struct kset *kset, | 622 | struct kobject *kobject_create(void) |
| 511 | struct kobject *parent, const char *name) | ||
| 512 | { | 623 | { |
| 513 | struct kobject *k; | 624 | struct kobject *kobj; |
| 514 | int ret; | ||
| 515 | 625 | ||
| 516 | if (!parent) | 626 | kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); |
| 517 | return NULL; | 627 | if (!kobj) |
| 518 | |||
| 519 | k = kzalloc(sizeof(*k), GFP_KERNEL); | ||
| 520 | if (!k) | ||
| 521 | return NULL; | 628 | return NULL; |
| 522 | 629 | ||
| 523 | k->kset = kset; | 630 | kobject_init(kobj, &dynamic_kobj_ktype); |
| 524 | k->parent = parent; | 631 | return kobj; |
| 525 | k->ktype = &dir_ktype; | ||
| 526 | kobject_set_name(k, name); | ||
| 527 | ret = kobject_register(k); | ||
| 528 | if (ret < 0) { | ||
| 529 | printk(KERN_WARNING "%s: kobject_register error: %d\n", | ||
| 530 | __func__, ret); | ||
| 531 | kobject_del(k); | ||
| 532 | return NULL; | ||
| 533 | } | ||
| 534 | |||
| 535 | return k; | ||
| 536 | } | 632 | } |
| 537 | 633 | ||
| 538 | /** | 634 | /** |
| 539 | * kobject_add_dir - add sub directory of object. | 635 | * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs |
| 540 | * @parent: object in which a directory is created. | 636 | * |
| 541 | * @name: directory name. | 637 | * @name: the name for the kobject |
| 638 | * @parent: the parent kobject of this kobject, if any. | ||
| 542 | * | 639 | * |
| 543 | * Add a plain directory object as child of given object. | 640 | * This function creates a kobject structure dynamically and registers it |
| 641 | * with sysfs. When you are finished with this structure, call | ||
| 642 | * kobject_put() and the structure will be dynamically freed when | ||
| 643 | * it is no longer being used. | ||
| 644 | * | ||
| 645 | * If the kobject was not able to be created, NULL will be returned. | ||
| 544 | */ | 646 | */ |
| 545 | struct kobject *kobject_add_dir(struct kobject *parent, const char *name) | 647 | struct kobject *kobject_create_and_add(const char *name, struct kobject *parent) |
| 546 | { | 648 | { |
| 547 | return kobject_kset_add_dir(NULL, parent, name); | 649 | struct kobject *kobj; |
| 650 | int retval; | ||
| 651 | |||
| 652 | kobj = kobject_create(); | ||
| 653 | if (!kobj) | ||
| 654 | return NULL; | ||
| 655 | |||
| 656 | retval = kobject_add(kobj, parent, "%s", name); | ||
| 657 | if (retval) { | ||
| 658 | printk(KERN_WARNING "%s: kobject_add error: %d\n", | ||
| 659 | __FUNCTION__, retval); | ||
| 660 | kobject_put(kobj); | ||
| 661 | kobj = NULL; | ||
| 662 | } | ||
| 663 | return kobj; | ||
| 548 | } | 664 | } |
| 665 | EXPORT_SYMBOL_GPL(kobject_create_and_add); | ||
| 549 | 666 | ||
| 550 | /** | 667 | /** |
| 551 | * kset_init - initialize a kset for use | 668 | * kset_init - initialize a kset for use |
| 552 | * @k: kset | 669 | * @k: kset |
| 553 | */ | 670 | */ |
| 554 | 671 | void kset_init(struct kset *k) | |
| 555 | void kset_init(struct kset * k) | ||
| 556 | { | 672 | { |
| 557 | kobject_init(&k->kobj); | 673 | kobject_init_internal(&k->kobj); |
| 558 | INIT_LIST_HEAD(&k->list); | 674 | INIT_LIST_HEAD(&k->list); |
| 559 | spin_lock_init(&k->list_lock); | 675 | spin_lock_init(&k->list_lock); |
| 560 | } | 676 | } |
| 561 | 677 | ||
| 678 | /* default kobject attribute operations */ | ||
| 679 | static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr, | ||
| 680 | char *buf) | ||
| 681 | { | ||
| 682 | struct kobj_attribute *kattr; | ||
| 683 | ssize_t ret = -EIO; | ||
| 562 | 684 | ||
| 563 | /** | 685 | kattr = container_of(attr, struct kobj_attribute, attr); |
| 564 | * kset_add - add a kset object to the hierarchy. | 686 | if (kattr->show) |
| 565 | * @k: kset. | 687 | ret = kattr->show(kobj, kattr, buf); |
| 566 | */ | 688 | return ret; |
| 689 | } | ||
| 567 | 690 | ||
| 568 | int kset_add(struct kset * k) | 691 | static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, |
| 692 | const char *buf, size_t count) | ||
| 569 | { | 693 | { |
| 570 | return kobject_add(&k->kobj); | 694 | struct kobj_attribute *kattr; |
| 695 | ssize_t ret = -EIO; | ||
| 696 | |||
| 697 | kattr = container_of(attr, struct kobj_attribute, attr); | ||
| 698 | if (kattr->store) | ||
| 699 | ret = kattr->store(kobj, kattr, buf, count); | ||
| 700 | return ret; | ||
| 571 | } | 701 | } |
| 572 | 702 | ||
| 703 | struct sysfs_ops kobj_sysfs_ops = { | ||
| 704 | .show = kobj_attr_show, | ||
| 705 | .store = kobj_attr_store, | ||
| 706 | }; | ||
| 573 | 707 | ||
| 574 | /** | 708 | /** |
| 575 | * kset_register - initialize and add a kset. | 709 | * kset_register - initialize and add a kset. |
| 576 | * @k: kset. | 710 | * @k: kset. |
| 577 | */ | 711 | */ |
| 578 | 712 | int kset_register(struct kset *k) | |
| 579 | int kset_register(struct kset * k) | ||
| 580 | { | 713 | { |
| 581 | int err; | 714 | int err; |
| 582 | 715 | ||
| @@ -584,46 +717,42 @@ int kset_register(struct kset * k) | |||
| 584 | return -EINVAL; | 717 | return -EINVAL; |
| 585 | 718 | ||
| 586 | kset_init(k); | 719 | kset_init(k); |
| 587 | err = kset_add(k); | 720 | err = kobject_add_internal(&k->kobj); |
| 588 | if (err) | 721 | if (err) |
| 589 | return err; | 722 | return err; |
| 590 | kobject_uevent(&k->kobj, KOBJ_ADD); | 723 | kobject_uevent(&k->kobj, KOBJ_ADD); |
| 591 | return 0; | 724 | return 0; |
| 592 | } | 725 | } |
| 593 | 726 | ||
| 594 | |||
| 595 | /** | 727 | /** |
| 596 | * kset_unregister - remove a kset. | 728 | * kset_unregister - remove a kset. |
| 597 | * @k: kset. | 729 | * @k: kset. |
| 598 | */ | 730 | */ |
| 599 | 731 | void kset_unregister(struct kset *k) | |
| 600 | void kset_unregister(struct kset * k) | ||
| 601 | { | 732 | { |
| 602 | if (!k) | 733 | if (!k) |
| 603 | return; | 734 | return; |
| 604 | kobject_unregister(&k->kobj); | 735 | kobject_put(&k->kobj); |
| 605 | } | 736 | } |
| 606 | 737 | ||
| 607 | |||
| 608 | /** | 738 | /** |
| 609 | * kset_find_obj - search for object in kset. | 739 | * kset_find_obj - search for object in kset. |
| 610 | * @kset: kset we're looking in. | 740 | * @kset: kset we're looking in. |
| 611 | * @name: object's name. | 741 | * @name: object's name. |
| 612 | * | 742 | * |
| 613 | * Lock kset via @kset->subsys, and iterate over @kset->list, | 743 | * Lock kset via @kset->subsys, and iterate over @kset->list, |
| 614 | * looking for a matching kobject. If matching object is found | 744 | * looking for a matching kobject. If matching object is found |
| 615 | * take a reference and return the object. | 745 | * take a reference and return the object. |
| 616 | */ | 746 | */ |
| 617 | 747 | struct kobject *kset_find_obj(struct kset *kset, const char *name) | |
| 618 | struct kobject * kset_find_obj(struct kset * kset, const char * name) | ||
| 619 | { | 748 | { |
| 620 | struct list_head * entry; | 749 | struct list_head *entry; |
| 621 | struct kobject * ret = NULL; | 750 | struct kobject *ret = NULL; |
| 622 | 751 | ||
| 623 | spin_lock(&kset->list_lock); | 752 | spin_lock(&kset->list_lock); |
| 624 | list_for_each(entry,&kset->list) { | 753 | list_for_each(entry, &kset->list) { |
| 625 | struct kobject * k = to_kobj(entry); | 754 | struct kobject *k = to_kobj(entry); |
| 626 | if (kobject_name(k) && !strcmp(kobject_name(k),name)) { | 755 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { |
| 627 | ret = kobject_get(k); | 756 | ret = kobject_get(k); |
| 628 | break; | 757 | break; |
| 629 | } | 758 | } |
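The default kobj_sysfs_ops introduced above make dynamically created kobjects usable with plain struct kobj_attribute entries. A hedged sketch (names are hypothetical; kernel_kobj is assumed to be the /sys/kernel kobject):

    static ssize_t hello_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "hello\n");
    }

    static struct kobj_attribute hello_attr = __ATTR_RO(hello);

    static struct kobject *example_kobj;

    static int __init example_init(void)
    {
            /* shows up as /sys/kernel/example/hello */
            example_kobj = kobject_create_and_add("example", kernel_kobj);
            if (!example_kobj)
                    return -ENOMEM;
            return sysfs_create_file(example_kobj, &hello_attr.attr);
    }

    static void __exit example_exit(void)
    {
            kobject_put(example_kobj);  /* dynamic_kobj_release() frees it */
    }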
| @@ -632,47 +761,94 @@ struct kobject * kset_find_obj(struct kset * kset, const char * name) | |||
| 632 | return ret; | 761 | return ret; |
| 633 | } | 762 | } |
| 634 | 763 | ||
| 635 | int subsystem_register(struct kset *s) | 764 | static void kset_release(struct kobject *kobj) |
| 636 | { | 765 | { |
| 637 | return kset_register(s); | 766 | struct kset *kset = container_of(kobj, struct kset, kobj); |
| 767 | pr_debug("kobject: '%s' (%p): %s\n", | ||
| 768 | kobject_name(kobj), kobj, __FUNCTION__); | ||
| 769 | kfree(kset); | ||
| 638 | } | 770 | } |
| 639 | 771 | ||
| 640 | void subsystem_unregister(struct kset *s) | 772 | static struct kobj_type kset_ktype = { |
| 773 | .sysfs_ops = &kobj_sysfs_ops, | ||
| 774 | .release = kset_release, | ||
| 775 | }; | ||
| 776 | |||
| 777 | /** | ||
| 778 | * kset_create - create a struct kset dynamically | ||
| 779 | * | ||
| 780 | * @name: the name for the kset | ||
| 781 | * @uevent_ops: a struct kset_uevent_ops for the kset | ||
| 782 | * @parent_kobj: the parent kobject of this kset, if any. | ||
| 783 | * | ||
| 784 | * This function creates a kset structure dynamically. This structure can | ||
| 785 | * then be registered with the system and show up in sysfs with a call to | ||
| 786 | * kset_register(). When you are finished with this structure, if | ||
| 787 | * kset_register() has been called, call kset_unregister() and the | ||
| 788 | * structure will be dynamically freed when it is no longer being used. | ||
| 789 | * | ||
| 790 | * If the kset was not able to be created, NULL will be returned. | ||
| 791 | */ | ||
| 792 | static struct kset *kset_create(const char *name, | ||
| 793 | struct kset_uevent_ops *uevent_ops, | ||
| 794 | struct kobject *parent_kobj) | ||
| 641 | { | 795 | { |
| 642 | kset_unregister(s); | 796 | struct kset *kset; |
| 797 | |||
| 798 | kset = kzalloc(sizeof(*kset), GFP_KERNEL); | ||
| 799 | if (!kset) | ||
| 800 | return NULL; | ||
| 801 | kobject_set_name(&kset->kobj, name); | ||
| 802 | kset->uevent_ops = uevent_ops; | ||
| 803 | kset->kobj.parent = parent_kobj; | ||
| 804 | |||
| 805 | /* | ||
| 806 | * The kobject of this kset will have a type of kset_ktype and belong to | ||
| 807 | * no kset itself. That way we can properly free it when it is | ||
| 808 | * finished being used. | ||
| 809 | */ | ||
| 810 | kset->kobj.ktype = &kset_ktype; | ||
| 811 | kset->kobj.kset = NULL; | ||
| 812 | |||
| 813 | return kset; | ||
| 643 | } | 814 | } |
| 644 | 815 | ||
| 645 | /** | 816 | /** |
| 646 | * subsystem_create_file - export sysfs attribute file. | 817 | * kset_create_and_add - create a struct kset dynamically and add it to sysfs |
| 647 | * @s: subsystem. | 818 | * |
| 648 | * @a: subsystem attribute descriptor. | 819 | * @name: the name for the kset |
| 820 | * @uevent_ops: a struct kset_uevent_ops for the kset | ||
| 821 | * @parent_kobj: the parent kobject of this kset, if any. | ||
| 822 | * | ||
| 823 | * This function creates a kset structure dynamically and registers it | ||
| 824 | * with sysfs. When you are finished with this structure, call | ||
| 825 | * kset_unregister() and the structure will be dynamically freed when it | ||
| 826 | * is no longer being used. | ||
| 827 | * | ||
| 828 | * If the kset was not able to be created, NULL will be returned. | ||
| 649 | */ | 829 | */ |
| 650 | 830 | struct kset *kset_create_and_add(const char *name, | |
| 651 | int subsys_create_file(struct kset *s, struct subsys_attribute *a) | 831 | struct kset_uevent_ops *uevent_ops, |
| 832 | struct kobject *parent_kobj) | ||
| 652 | { | 833 | { |
| 653 | int error = 0; | 834 | struct kset *kset; |
| 654 | 835 | int error; | |
| 655 | if (!s || !a) | ||
| 656 | return -EINVAL; | ||
| 657 | 836 | ||
| 658 | if (kset_get(s)) { | 837 | kset = kset_create(name, uevent_ops, parent_kobj); |
| 659 | error = sysfs_create_file(&s->kobj, &a->attr); | 838 | if (!kset) |
| 660 | kset_put(s); | 839 | return NULL; |
| 840 | error = kset_register(kset); | ||
| 841 | if (error) { | ||
| 842 | kfree(kset); | ||
| 843 | return NULL; | ||
| 661 | } | 844 | } |
| 662 | return error; | 845 | return kset; |
| 663 | } | 846 | } |
| 847 | EXPORT_SYMBOL_GPL(kset_create_and_add); | ||
| 664 | 848 | ||
| 665 | EXPORT_SYMBOL(kobject_init); | ||
| 666 | EXPORT_SYMBOL(kobject_register); | ||
| 667 | EXPORT_SYMBOL(kobject_unregister); | ||
| 668 | EXPORT_SYMBOL(kobject_get); | 849 | EXPORT_SYMBOL(kobject_get); |
| 669 | EXPORT_SYMBOL(kobject_put); | 850 | EXPORT_SYMBOL(kobject_put); |
| 670 | EXPORT_SYMBOL(kobject_add); | ||
| 671 | EXPORT_SYMBOL(kobject_del); | 851 | EXPORT_SYMBOL(kobject_del); |
| 672 | 852 | ||
| 673 | EXPORT_SYMBOL(kset_register); | 853 | EXPORT_SYMBOL(kset_register); |
| 674 | EXPORT_SYMBOL(kset_unregister); | 854 | EXPORT_SYMBOL(kset_unregister); |
| 675 | |||
| 676 | EXPORT_SYMBOL(subsystem_register); | ||
| 677 | EXPORT_SYMBOL(subsystem_unregister); | ||
| 678 | EXPORT_SYMBOL(subsys_create_file); | ||
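Closing out kobject.c, a hedged sketch of the new kset API (names hypothetical): create the kset, then point member kobjects at it before kobject_add() so they join its list and inherit its uevent handling.

    static struct kset *example_kset;

    static int __init example_kset_init(void)
    {
            /* appears as /sys/kernel/example_kset; no uevent ops here */
            example_kset = kset_create_and_add("example_kset", NULL,
                                               kernel_kobj);
            if (!example_kset)
                    return -ENOMEM;
            return 0;
    }

    /* kobj must already be initialized; with a NULL parent it will
     * be parented to the kset's own kobject by kobject_add_internal() */
    static int add_member(struct kobject *kobj, const char *name)
    {
            kobj->kset = example_kset;
            return kobject_add(kobj, NULL, "%s", name);
    }

    static void __exit example_kset_exit(void)
    {
            kset_unregister(example_kset);  /* kset_release() frees it */
    }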
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5886147252d0..5a402e2982af 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
| @@ -98,7 +98,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 98 | int i = 0; | 98 | int i = 0; |
| 99 | int retval = 0; | 99 | int retval = 0; |
| 100 | 100 | ||
| 101 | pr_debug("%s\n", __FUNCTION__); | 101 | pr_debug("kobject: '%s' (%p): %s\n", |
| 102 | kobject_name(kobj), kobj, __FUNCTION__); | ||
| 102 | 103 | ||
| 103 | /* search the kset we belong to */ | 104 | /* search the kset we belong to */ |
| 104 | top_kobj = kobj; | 105 | top_kobj = kobj; |
| @@ -106,7 +107,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 106 | top_kobj = top_kobj->parent; | 107 | top_kobj = top_kobj->parent; |
| 107 | 108 | ||
| 108 | if (!top_kobj->kset) { | 109 | if (!top_kobj->kset) { |
| 109 | pr_debug("kobject attempted to send uevent without kset!\n"); | 110 | pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " |
| 111 | "without kset!\n", kobject_name(kobj), kobj, | ||
| 112 | __FUNCTION__); | ||
| 110 | return -EINVAL; | 113 | return -EINVAL; |
| 111 | } | 114 | } |
| 112 | 115 | ||
| @@ -116,7 +119,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 116 | /* skip the event, if the filter returns zero. */ | 119 | /* skip the event, if the filter returns zero. */ |
| 117 | if (uevent_ops && uevent_ops->filter) | 120 | if (uevent_ops && uevent_ops->filter) |
| 118 | if (!uevent_ops->filter(kset, kobj)) { | 121 | if (!uevent_ops->filter(kset, kobj)) { |
| 119 | pr_debug("kobject filter function caused the event to drop!\n"); | 122 | pr_debug("kobject: '%s' (%p): %s: filter function " |
| 123 | "caused the event to drop!\n", | ||
| 124 | kobject_name(kobj), kobj, __FUNCTION__); | ||
| 120 | return 0; | 125 | return 0; |
| 121 | } | 126 | } |
| 122 | 127 | ||
| @@ -126,7 +131,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 126 | else | 131 | else |
| 127 | subsystem = kobject_name(&kset->kobj); | 132 | subsystem = kobject_name(&kset->kobj); |
| 128 | if (!subsystem) { | 133 | if (!subsystem) { |
| 129 | pr_debug("unset subsystem caused the event to drop!\n"); | 134 | pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " |
| 135 | "event to drop!\n", kobject_name(kobj), kobj, | ||
| 136 | __FUNCTION__); | ||
| 130 | return 0; | 137 | return 0; |
| 131 | } | 138 | } |
| 132 | 139 | ||
| @@ -166,12 +173,24 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 166 | if (uevent_ops && uevent_ops->uevent) { | 173 | if (uevent_ops && uevent_ops->uevent) { |
| 167 | retval = uevent_ops->uevent(kset, kobj, env); | 174 | retval = uevent_ops->uevent(kset, kobj, env); |
| 168 | if (retval) { | 175 | if (retval) { |
| 169 | pr_debug ("%s - uevent() returned %d\n", | 176 | pr_debug("kobject: '%s' (%p): %s: uevent() returned " |
| 170 | __FUNCTION__, retval); | 177 | "%d\n", kobject_name(kobj), kobj, |
| 178 | __FUNCTION__, retval); | ||
| 171 | goto exit; | 179 | goto exit; |
| 172 | } | 180 | } |
| 173 | } | 181 | } |
| 174 | 182 | ||
| 183 | /* | ||
| 184 | * Mark "add" and "remove" events in the object to ensure proper | ||
| 185 | * events to userspace during automatic cleanup. If the object did | ||
| 186 | * send an "add" event, "remove" will automatically generated by | ||
| 187 | * the core, if not already done by the caller. | ||
| 188 | */ | ||
| 189 | if (action == KOBJ_ADD) | ||
| 190 | kobj->state_add_uevent_sent = 1; | ||
| 191 | else if (action == KOBJ_REMOVE) | ||
| 192 | kobj->state_remove_uevent_sent = 1; | ||
| 193 | |||
| 175 | /* we will send an event, so request a new sequence number */ | 194 | /* we will send an event, so request a new sequence number */ |
| 176 | spin_lock(&sequence_lock); | 195 | spin_lock(&sequence_lock); |
| 177 | seq = ++uevent_seqnum; | 196 | seq = ++uevent_seqnum; |
| @@ -219,11 +238,12 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 219 | retval = add_uevent_var(env, "HOME=/"); | 238 | retval = add_uevent_var(env, "HOME=/"); |
| 220 | if (retval) | 239 | if (retval) |
| 221 | goto exit; | 240 | goto exit; |
| 222 | retval = add_uevent_var(env, "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); | 241 | retval = add_uevent_var(env, |
| 242 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); | ||
| 223 | if (retval) | 243 | if (retval) |
| 224 | goto exit; | 244 | goto exit; |
| 225 | 245 | ||
| 226 | call_usermodehelper (argv[0], argv, env->envp, UMH_WAIT_EXEC); | 246 | call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC); |
| 227 | } | 247 | } |
| 228 | 248 | ||
| 229 | exit: | 249 | exit: |
| @@ -231,7 +251,6 @@ exit: | |||
| 231 | kfree(env); | 251 | kfree(env); |
| 232 | return retval; | 252 | return retval; |
| 233 | } | 253 | } |
| 234 | |||
| 235 | EXPORT_SYMBOL_GPL(kobject_uevent_env); | 254 | EXPORT_SYMBOL_GPL(kobject_uevent_env); |
| 236 | 255 | ||
| 237 | /** | 256 | /** |
| @@ -247,7 +266,6 @@ int kobject_uevent(struct kobject *kobj, enum kobject_action action) | |||
| 247 | { | 266 | { |
| 248 | return kobject_uevent_env(kobj, action, NULL); | 267 | return kobject_uevent_env(kobj, action, NULL); |
| 249 | } | 268 | } |
| 250 | |||
| 251 | EXPORT_SYMBOL_GPL(kobject_uevent); | 269 | EXPORT_SYMBOL_GPL(kobject_uevent); |
| 252 | 270 | ||
| 253 | /** | 271 | /** |
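For orientation, this is the call path the reworked pr_debug() lines trace. A hedged sketch of a caller raising a KOBJ_CHANGE event with one extra environment variable (the helper and the REASON variable are invented for illustration):

#include <linux/kobject.h>

static void notify_state_change(struct kobject *kobj)
{
	char *envp[] = { "REASON=timeout", NULL };

	/*
	 * Walks up to the owning kset, runs the filter/uevent callbacks,
	 * then hands the assembled environment to the uevent helper.
	 */
	kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}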
diff --git a/lib/kref.c b/lib/kref.c index a6dc3ec328e0..9ecd6e865610 100644 --- a/lib/kref.c +++ b/lib/kref.c | |||
| @@ -15,13 +15,23 @@ | |||
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | 16 | ||
| 17 | /** | 17 | /** |
| 18 | * kref_set - initialize object and set refcount to requested number. | ||
| 19 | * @kref: object in question. | ||
| 20 | * @num: initial reference counter | ||
| 21 | */ | ||
| 22 | void kref_set(struct kref *kref, int num) | ||
| 23 | { | ||
| 24 | atomic_set(&kref->refcount, num); | ||
| 25 | smp_mb(); | ||
| 26 | } | ||
| 27 | |||
| 28 | /** | ||
| 18 | * kref_init - initialize object. | 29 | * kref_init - initialize object. |
| 19 | * @kref: object in question. | 30 | * @kref: object in question. |
| 20 | */ | 31 | */ |
| 21 | void kref_init(struct kref *kref) | 32 | void kref_init(struct kref *kref) |
| 22 | { | 33 | { |
| 23 | atomic_set(&kref->refcount,1); | 34 | kref_set(kref, 1); |
| 24 | smp_mb(); | ||
| 25 | } | 35 | } |
| 26 | 36 | ||
| 27 | /** | 37 | /** |
| @@ -61,6 +71,7 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref)) | |||
| 61 | return 0; | 71 | return 0; |
| 62 | } | 72 | } |
| 63 | 73 | ||
| 74 | EXPORT_SYMBOL(kref_set); | ||
| 64 | EXPORT_SYMBOL(kref_init); | 75 | EXPORT_SYMBOL(kref_init); |
| 65 | EXPORT_SYMBOL(kref_get); | 76 | EXPORT_SYMBOL(kref_get); |
| 66 | EXPORT_SYMBOL(kref_put); | 77 | EXPORT_SYMBOL(kref_put); |
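With kref_set() now exported alongside the existing primitives, the usual pattern looks like the sketch below; struct my_obj and its release function are invented for illustration:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref ref;
	/* payload ... */
};

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, ref);

	kfree(obj);	/* runs only when the last reference is dropped */
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->ref);	/* refcount starts at 1 */
	return obj;
}

Callers then pair kref_get(&obj->ref) with kref_put(&obj->ref, my_obj_release); an object born with more than one owner can use the new kref_set(&obj->ref, n) instead of kref_init().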
diff --git a/lib/pcounter.c b/lib/pcounter.c new file mode 100644 index 000000000000..9b56807da93b --- /dev/null +++ b/lib/pcounter.c | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | /* | ||
| 2 | * Define default pcounter functions | ||
| 3 | * Note that frequently used pcounters use dedicated functions to get a speed increase. | ||
| 4 | * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER) | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/module.h> | ||
| 8 | #include <linux/pcounter.h> | ||
| 9 | #include <linux/smp.h> | ||
| 10 | #include <linux/cpumask.h> | ||
| 11 | |||
| 12 | static void pcounter_dyn_add(struct pcounter *self, int inc) | ||
| 13 | { | ||
| 14 | per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc; | ||
| 15 | } | ||
| 16 | |||
| 17 | static int pcounter_dyn_getval(const struct pcounter *self, int cpu) | ||
| 18 | { | ||
| 19 | return per_cpu_ptr(self->per_cpu_values, cpu)[0]; | ||
| 20 | } | ||
| 21 | |||
| 22 | int pcounter_getval(const struct pcounter *self) | ||
| 23 | { | ||
| 24 | int res = 0, cpu; | ||
| 25 | |||
| 26 | for_each_possible_cpu(cpu) | ||
| 27 | res += self->getval(self, cpu); | ||
| 28 | |||
| 29 | return res; | ||
| 30 | } | ||
| 31 | EXPORT_SYMBOL_GPL(pcounter_getval); | ||
| 32 | |||
| 33 | int pcounter_alloc(struct pcounter *self) | ||
| 34 | { | ||
| 35 | int rc = 0; | ||
| 36 | if (self->add == NULL) { | ||
| 37 | self->per_cpu_values = alloc_percpu(int); | ||
| 38 | if (self->per_cpu_values != NULL) { | ||
| 39 | self->add = pcounter_dyn_add; | ||
| 40 | self->getval = pcounter_dyn_getval; | ||
| 41 | } else | ||
| 42 | rc = 1; | ||
| 43 | } | ||
| 44 | return rc; | ||
| 45 | } | ||
| 46 | EXPORT_SYMBOL_GPL(pcounter_alloc); | ||
| 47 | |||
| 48 | void pcounter_free(struct pcounter *self) | ||
| 49 | { | ||
| 50 | if (self->per_cpu_values != NULL) { | ||
| 51 | free_percpu(self->per_cpu_values); | ||
| 52 | self->per_cpu_values = NULL; | ||
| 53 | self->getval = NULL; | ||
| 54 | self->add = NULL; | ||
| 55 | } | ||
| 56 | } | ||
| 57 | EXPORT_SYMBOL_GPL(pcounter_free); | ||
| 58 | |||
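A short usage sketch for the new file (the stats counter and helper names are illustrative). Note that pcounter_dyn_add() indexes by smp_processor_id(), so callers must have preemption disabled around the add:

#include <linux/pcounter.h>

static struct pcounter stats;	/* zero-initialized: ->add is NULL, so alloc goes dynamic */

static int stats_init(void)
{
	if (pcounter_alloc(&stats))	/* returns nonzero on allocation failure */
		return -ENOMEM;
	return 0;
}

static void stats_bump(void)
{
	stats.add(&stats, 1);	/* per-cpu increment via the pointer set by pcounter_alloc() */
}

static int stats_total(void)
{
	return pcounter_getval(&stats);	/* sums the value over all possible CPUs */
}

Statically defined pcounters can instead use the dedicated DEFINE_PCOUNTER helpers mentioned in the header comment and skip the indirect calls.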
diff --git a/lib/proportions.c b/lib/proportions.c index 332d8c58184d..9508d9a7af3e 100644 --- a/lib/proportions.c +++ b/lib/proportions.c | |||
| @@ -190,6 +190,8 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift) | |||
| 190 | * PERCPU | 190 | * PERCPU |
| 191 | */ | 191 | */ |
| 192 | 192 | ||
| 193 | #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) | ||
| 194 | |||
| 193 | int prop_local_init_percpu(struct prop_local_percpu *pl) | 195 | int prop_local_init_percpu(struct prop_local_percpu *pl) |
| 194 | { | 196 | { |
| 195 | spin_lock_init(&pl->lock); | 197 | spin_lock_init(&pl->lock); |
| @@ -230,31 +232,24 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
| 230 | 232 | ||
| 231 | spin_lock_irqsave(&pl->lock, flags); | 233 | spin_lock_irqsave(&pl->lock, flags); |
| 232 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 234 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
| 235 | |||
| 233 | /* | 236 | /* |
| 234 | * For each missed period, we halve the local counter. | 237 | * For each missed period, we halve the local counter. |
| 235 | * basically: | 238 | * basically: |
| 236 | * pl->events >> (global_period - pl->period); | 239 | * pl->events >> (global_period - pl->period); |
| 237 | * | ||
| 238 | * but since the distributed nature of percpu counters makes division | ||
| 239 | * rather hard, use a regular subtraction loop. This is safe, because | ||
| 240 | * the events will only ever be incremented, hence the subtraction | ||
| 241 | * can never result in a negative number. | ||
| 242 | */ | 240 | */ |
| 243 | while (pl->period != global_period) { | 241 | period = (global_period - pl->period) >> (pg->shift - 1); |
| 244 | unsigned long val = percpu_counter_read(&pl->events); | 242 | if (period < BITS_PER_LONG) { |
| 245 | unsigned long half = (val + 1) >> 1; | 243 | s64 val = percpu_counter_read(&pl->events); |
| 246 | 244 | ||
| 247 | /* | 245 | if (val < (nr_cpu_ids * PROP_BATCH)) |
| 248 | * Half of zero won't be much less, break out. | 246 | val = percpu_counter_sum(&pl->events); |
| 249 | * This limits the loop to shift iterations, even | 247 | |
| 250 | * if we missed a million. | 248 | __percpu_counter_add(&pl->events, -val + (val >> period), |
| 251 | */ | 249 | PROP_BATCH); |
| 252 | if (!val) | 250 | } else |
| 253 | break; | 251 | percpu_counter_set(&pl->events, 0); |
| 254 | 252 | ||
| 255 | percpu_counter_add(&pl->events, -half); | ||
| 256 | pl->period += period; | ||
| 257 | } | ||
| 258 | pl->period = global_period; | 253 | pl->period = global_period; |
| 259 | spin_unlock_irqrestore(&pl->lock, flags); | 254 | spin_unlock_irqrestore(&pl->lock, flags); |
| 260 | } | 255 | } |
| @@ -267,7 +262,7 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) | |||
| 267 | struct prop_global *pg = prop_get_global(pd); | 262 | struct prop_global *pg = prop_get_global(pd); |
| 268 | 263 | ||
| 269 | prop_norm_percpu(pg, pl); | 264 | prop_norm_percpu(pg, pl); |
| 270 | percpu_counter_add(&pl->events, 1); | 265 | __percpu_counter_add(&pl->events, 1, PROP_BATCH); |
| 271 | percpu_counter_add(&pg->events, 1); | 266 | percpu_counter_add(&pg->events, 1); |
| 272 | prop_put_global(pd, pg); | 267 | prop_put_global(pd, pg); |
| 273 | } | 268 | } |
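The rewrite above replaces the halving loop with a single shift: decaying the counter once per missed period is, up to rounding, one right shift by the number of missed periods, and anything shifted past BITS_PER_LONG is simply zeroed. A standalone sketch of that decay rule (the helper name is illustrative):

/* decay 'events' over 'missed' whole periods: one halving per period */
static unsigned long decay_events(unsigned long events, unsigned int missed)
{
	if (missed >= BITS_PER_LONG)
		return 0;		/* matches the percpu_counter_set(&pl->events, 0) branch */
	return events >> missed;	/* matches the batched -val + (val >> period) update */
}

The PROP_BATCH batch size, scaled by nr_cpu_ids, lets __percpu_counter_add() buffer updates per CPU before touching the shared count, which is what makes the single batched update cheap.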
diff --git a/lib/rwsem.c b/lib/rwsem.c index cdb4e3d05607..3e3365e5665e 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
| @@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
| 146 | /* | 146 | /* |
| 147 | * wait for a lock to be granted | 147 | * wait for a lock to be granted |
| 148 | */ | 148 | */ |
| 149 | static struct rw_semaphore * | 149 | static struct rw_semaphore __sched * |
| 150 | rwsem_down_failed_common(struct rw_semaphore *sem, | 150 | rwsem_down_failed_common(struct rw_semaphore *sem, |
| 151 | struct rwsem_waiter *waiter, signed long adjustment) | 151 | struct rwsem_waiter *waiter, signed long adjustment) |
| 152 | { | 152 | { |
| @@ -187,7 +187,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
| 187 | /* | 187 | /* |
| 188 | * wait for the read lock to be granted | 188 | * wait for the read lock to be granted |
| 189 | */ | 189 | */ |
| 190 | struct rw_semaphore fastcall __sched * | 190 | asmregparm struct rw_semaphore __sched * |
| 191 | rwsem_down_read_failed(struct rw_semaphore *sem) | 191 | rwsem_down_read_failed(struct rw_semaphore *sem) |
| 192 | { | 192 | { |
| 193 | struct rwsem_waiter waiter; | 193 | struct rwsem_waiter waiter; |
| @@ -201,7 +201,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem) | |||
| 201 | /* | 201 | /* |
| 202 | * wait for the write lock to be granted | 202 | * wait for the write lock to be granted |
| 203 | */ | 203 | */ |
| 204 | struct rw_semaphore fastcall __sched * | 204 | asmregparm struct rw_semaphore __sched * |
| 205 | rwsem_down_write_failed(struct rw_semaphore *sem) | 205 | rwsem_down_write_failed(struct rw_semaphore *sem) |
| 206 | { | 206 | { |
| 207 | struct rwsem_waiter waiter; | 207 | struct rwsem_waiter waiter; |
| @@ -216,7 +216,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem) | |||
| 216 | * handle waking up a waiter on the semaphore | 216 | * handle waking up a waiter on the semaphore |
| 217 | * - up_read/up_write has decremented the active part of count if we come here | 217 | * - up_read/up_write has decremented the active part of count if we come here |
| 218 | */ | 218 | */ |
| 219 | struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) | 219 | asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) |
| 220 | { | 220 | { |
| 221 | unsigned long flags; | 221 | unsigned long flags; |
| 222 | 222 | ||
| @@ -236,7 +236,7 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) | |||
| 236 | * - caller incremented waiting part of count and discovered it still negative | 236 | * - caller incremented waiting part of count and discovered it still negative |
| 237 | * - just wake up any readers at the front of the queue | 237 | * - just wake up any readers at the front of the queue |
| 238 | */ | 238 | */ |
| 239 | struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) | 239 | asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) |
| 240 | { | 240 | { |
| 241 | unsigned long flags; | 241 | unsigned long flags; |
| 242 | 242 | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c new file mode 100644 index 000000000000..acca4901046c --- /dev/null +++ b/lib/scatterlist.c | |||
| @@ -0,0 +1,294 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com> | ||
| 3 | * | ||
| 4 | * Scatterlist handling helpers. | ||
| 5 | * | ||
| 6 | * This source code is licensed under the GNU General Public License, | ||
| 7 | * Version 2. See the file COPYING for more details. | ||
| 8 | */ | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/scatterlist.h> | ||
| 11 | |||
| 12 | /** | ||
| 13 | * sg_next - return the next scatterlist entry in a list | ||
| 14 | * @sg: The current sg entry | ||
| 15 | * | ||
| 16 | * Description: | ||
| 17 | * Usually the next entry will be @sg@ + 1, but if this sg element is part | ||
| 18 | * of a chained scatterlist, it could jump to the start of a new | ||
| 19 | * scatterlist array. | ||
| 20 | * | ||
| 21 | **/ | ||
| 22 | struct scatterlist *sg_next(struct scatterlist *sg) | ||
| 23 | { | ||
| 24 | #ifdef CONFIG_DEBUG_SG | ||
| 25 | BUG_ON(sg->sg_magic != SG_MAGIC); | ||
| 26 | #endif | ||
| 27 | if (sg_is_last(sg)) | ||
| 28 | return NULL; | ||
| 29 | |||
| 30 | sg++; | ||
| 31 | if (unlikely(sg_is_chain(sg))) | ||
| 32 | sg = sg_chain_ptr(sg); | ||
| 33 | |||
| 34 | return sg; | ||
| 35 | } | ||
| 36 | EXPORT_SYMBOL(sg_next); | ||
| 37 | |||
| 38 | /** | ||
| 39 | * sg_last - return the last scatterlist entry in a list | ||
| 40 | * @sgl: First entry in the scatterlist | ||
| 41 | * @nents: Number of entries in the scatterlist | ||
| 42 | * | ||
| 43 | * Description: | ||
| 44 | * Should only be used sparingly; it (currently) scans the entire list | ||
| 45 | * to get the last entry. | ||
| 46 | * | ||
| 47 | * Note that the @sgl@ pointer passed in need not be the first one, | ||
| 48 | * the important bit is that @nents@ denotes the number of entries that | ||
| 49 | * exist from @sgl@. | ||
| 50 | * | ||
| 51 | **/ | ||
| 52 | struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) | ||
| 53 | { | ||
| 54 | #ifndef ARCH_HAS_SG_CHAIN | ||
| 55 | struct scatterlist *ret = &sgl[nents - 1]; | ||
| 56 | #else | ||
| 57 | struct scatterlist *sg, *ret = NULL; | ||
| 58 | unsigned int i; | ||
| 59 | |||
| 60 | for_each_sg(sgl, sg, nents, i) | ||
| 61 | ret = sg; | ||
| 62 | |||
| 63 | #endif | ||
| 64 | #ifdef CONFIG_DEBUG_SG | ||
| 65 | BUG_ON(sgl[0].sg_magic != SG_MAGIC); | ||
| 66 | BUG_ON(!sg_is_last(ret)); | ||
| 67 | #endif | ||
| 68 | return ret; | ||
| 69 | } | ||
| 70 | EXPORT_SYMBOL(sg_last); | ||
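sg_next() is what lets chained lists be walked as if they were flat. A hedged sketch of a traversal (total_length() is an invented helper):

#include <linux/scatterlist.h>

static unsigned int total_length(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int len = 0;

	/* sg_next() follows chain entries and returns NULL after the
	 * element marked with sg_mark_end() */
	for (sg = sgl; sg; sg = sg_next(sg))
		len += sg->length;

	return len;
}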
| 71 | |||
| 72 | /** | ||
| 73 | * sg_init_table - Initialize SG table | ||
| 74 | * @sgl: The SG table | ||
| 75 | * @nents: Number of entries in table | ||
| 76 | * | ||
| 77 | * Notes: | ||
| 78 | * If this is part of a chained sg table, sg_mark_end() should be | ||
| 79 | * used only on the last table part. | ||
| 80 | * | ||
| 81 | **/ | ||
| 82 | void sg_init_table(struct scatterlist *sgl, unsigned int nents) | ||
| 83 | { | ||
| 84 | memset(sgl, 0, sizeof(*sgl) * nents); | ||
| 85 | #ifdef CONFIG_DEBUG_SG | ||
| 86 | { | ||
| 87 | unsigned int i; | ||
| 88 | for (i = 0; i < nents; i++) | ||
| 89 | sgl[i].sg_magic = SG_MAGIC; | ||
| 90 | } | ||
| 91 | #endif | ||
| 92 | sg_mark_end(&sgl[nents - 1]); | ||
| 93 | } | ||
| 94 | EXPORT_SYMBOL(sg_init_table); | ||
| 95 | |||
| 96 | /** | ||
| 97 | * sg_init_one - Initialize a single entry sg list | ||
| 98 | * @sg: SG entry | ||
| 99 | * @buf: Virtual address for IO | ||
| 100 | * @buflen: IO length | ||
| 101 | * | ||
| 102 | **/ | ||
| 103 | void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen) | ||
| 104 | { | ||
| 105 | sg_init_table(sg, 1); | ||
| 106 | sg_set_buf(sg, buf, buflen); | ||
| 107 | } | ||
| 108 | EXPORT_SYMBOL(sg_init_one); | ||
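sg_init_one() covers the common case of wrapping a single linear kernel buffer. A sketch of feeding the result to the DMA API (the device, direction, and error value are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_one_buffer(struct device *dev, void *buf, unsigned int len,
			  struct scatterlist *sg)
{
	sg_init_one(sg, buf, len);	/* initializes and marks the entry as the end */

	/* dma_map_sg() returns the number of mapped segments, 0 on failure */
	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return -EIO;
	return 0;	/* caller later undoes this with dma_unmap_sg(dev, sg, 1, DMA_TO_DEVICE) */
}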
| 109 | |||
| 110 | /* | ||
| 111 | * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree | ||
| 112 | * helpers. | ||
| 113 | */ | ||
| 114 | static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) | ||
| 115 | { | ||
| 116 | if (nents == SG_MAX_SINGLE_ALLOC) | ||
| 117 | return (struct scatterlist *) __get_free_page(gfp_mask); | ||
| 118 | else | ||
| 119 | return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); | ||
| 120 | } | ||
| 121 | |||
| 122 | static void sg_kfree(struct scatterlist *sg, unsigned int nents) | ||
| 123 | { | ||
| 124 | if (nents == SG_MAX_SINGLE_ALLOC) | ||
| 125 | free_page((unsigned long) sg); | ||
| 126 | else | ||
| 127 | kfree(sg); | ||
| 128 | } | ||
| 129 | |||
| 130 | /** | ||
| 131 | * __sg_free_table - Free a previously mapped sg table | ||
| 132 | * @table: The sg table header to use | ||
| 133 | * @max_ents: The maximum number of entries per single scatterlist | ||
| 134 | * @free_fn: Free function | ||
| 135 | * | ||
| 136 | * Description: | ||
| 137 | * Free an sg table previously allocated and setup with | ||
| 138 | * __sg_alloc_table(). The @max_ents value must be identical to | ||
| 139 | * that previously used with __sg_alloc_table(). | ||
| 140 | * | ||
| 141 | **/ | ||
| 142 | void __sg_free_table(struct sg_table *table, unsigned int max_ents, | ||
| 143 | sg_free_fn *free_fn) | ||
| 144 | { | ||
| 145 | struct scatterlist *sgl, *next; | ||
| 146 | |||
| 147 | if (unlikely(!table->sgl)) | ||
| 148 | return; | ||
| 149 | |||
| 150 | sgl = table->sgl; | ||
| 151 | while (table->orig_nents) { | ||
| 152 | unsigned int alloc_size = table->orig_nents; | ||
| 153 | unsigned int sg_size; | ||
| 154 | |||
| 155 | /* | ||
| 156 | * If we have more than max_ents segments left, | ||
| 157 | * then assign 'next' to the sg table after the current one. | ||
| 158 | * sg_size is then one less than alloc size, since the last | ||
| 159 | * element is the chain pointer. | ||
| 160 | */ | ||
| 161 | if (alloc_size > max_ents) { | ||
| 162 | next = sg_chain_ptr(&sgl[max_ents - 1]); | ||
| 163 | alloc_size = max_ents; | ||
| 164 | sg_size = alloc_size - 1; | ||
| 165 | } else { | ||
| 166 | sg_size = alloc_size; | ||
| 167 | next = NULL; | ||
| 168 | } | ||
| 169 | |||
| 170 | table->orig_nents -= sg_size; | ||
| 171 | free_fn(sgl, alloc_size); | ||
| 172 | sgl = next; | ||
| 173 | } | ||
| 174 | |||
| 175 | table->sgl = NULL; | ||
| 176 | } | ||
| 177 | EXPORT_SYMBOL(__sg_free_table); | ||
| 178 | |||
| 179 | /** | ||
| 180 | * sg_free_table - Free a previously allocated sg table | ||
| 181 | * @table: The mapped sg table header | ||
| 182 | * | ||
| 183 | **/ | ||
| 184 | void sg_free_table(struct sg_table *table) | ||
| 185 | { | ||
| 186 | __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree); | ||
| 187 | } | ||
| 188 | EXPORT_SYMBOL(sg_free_table); | ||
| 189 | |||
| 190 | /** | ||
| 191 | * __sg_alloc_table - Allocate and initialize an sg table with given allocator | ||
| 192 | * @table: The sg table header to use | ||
| 193 | * @nents: Number of entries in sg list | ||
| 194 | * @max_ents: The maximum number of entries the allocator returns per call | ||
| 195 | * @gfp_mask: GFP allocation mask | ||
| 196 | * @alloc_fn: Allocator to use | ||
| 197 | * | ||
| 198 | * Description: | ||
| 199 | * This function returns a @table that is @nents entries long. The allocator is | ||
| 200 | * defined to return scatterlist chunks of maximum size @max_ents. | ||
| 201 | * Thus if @nents is bigger than @max_ents, the scatterlists will be | ||
| 202 | * chained in units of @max_ents. | ||
| 203 | * | ||
| 204 | * Notes: | ||
| 205 | * If this function returns non-0 (i.e. failure), the caller must call | ||
| 206 | * __sg_free_table() to clean up any leftover allocations. | ||
| 207 | * | ||
| 208 | **/ | ||
| 209 | int __sg_alloc_table(struct sg_table *table, unsigned int nents, | ||
| 210 | unsigned int max_ents, gfp_t gfp_mask, | ||
| 211 | sg_alloc_fn *alloc_fn) | ||
| 212 | { | ||
| 213 | struct scatterlist *sg, *prv; | ||
| 214 | unsigned int left; | ||
| 215 | |||
| 216 | #ifndef ARCH_HAS_SG_CHAIN | ||
| 217 | BUG_ON(nents > max_ents); | ||
| 218 | #endif | ||
| 219 | |||
| 220 | memset(table, 0, sizeof(*table)); | ||
| 221 | |||
| 222 | left = nents; | ||
| 223 | prv = NULL; | ||
| 224 | do { | ||
| 225 | unsigned int sg_size, alloc_size = left; | ||
| 226 | |||
| 227 | if (alloc_size > max_ents) { | ||
| 228 | alloc_size = max_ents; | ||
| 229 | sg_size = alloc_size - 1; | ||
| 230 | } else | ||
| 231 | sg_size = alloc_size; | ||
| 232 | |||
| 233 | left -= sg_size; | ||
| 234 | |||
| 235 | sg = alloc_fn(alloc_size, gfp_mask); | ||
| 236 | if (unlikely(!sg)) | ||
| 237 | return -ENOMEM; | ||
| 238 | |||
| 239 | sg_init_table(sg, alloc_size); | ||
| 240 | table->nents = table->orig_nents += sg_size; | ||
| 241 | |||
| 242 | /* | ||
| 243 | * If this is the first mapping, assign the sg table header. | ||
| 244 | * If this is not the first mapping, chain previous part. | ||
| 245 | */ | ||
| 246 | if (prv) | ||
| 247 | sg_chain(prv, max_ents, sg); | ||
| 248 | else | ||
| 249 | table->sgl = sg; | ||
| 250 | |||
| 251 | /* | ||
| 252 | * If no more entries after this one, mark the end | ||
| 253 | */ | ||
| 254 | if (!left) | ||
| 255 | sg_mark_end(&sg[sg_size - 1]); | ||
| 256 | |||
| 257 | /* | ||
| 258 | * only really needed for mempool backed sg allocations (like | ||
| 259 | * SCSI), a possible improvement here would be to pass the | ||
| 260 | * table pointer into the allocator and let that clear these | ||
| 261 | * flags | ||
| 262 | */ | ||
| 263 | gfp_mask &= ~__GFP_WAIT; | ||
| 264 | gfp_mask |= __GFP_HIGH; | ||
| 265 | prv = sg; | ||
| 266 | } while (left); | ||
| 267 | |||
| 268 | return 0; | ||
| 269 | } | ||
| 270 | EXPORT_SYMBOL(__sg_alloc_table); | ||
| 271 | |||
| 272 | /** | ||
| 273 | * sg_alloc_table - Allocate and initialize an sg table | ||
| 274 | * @table: The sg table header to use | ||
| 275 | * @nents: Number of entries in sg list | ||
| 276 | * @gfp_mask: GFP allocation mask | ||
| 277 | * | ||
| 278 | * Description: | ||
| 279 | * Allocate and initialize an sg table. If @nents@ is larger than | ||
| 280 | * SG_MAX_SINGLE_ALLOC, a chained sg table will be set up. | ||
| 281 | * | ||
| 282 | **/ | ||
| 283 | int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | ||
| 284 | { | ||
| 285 | int ret; | ||
| 286 | |||
| 287 | ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, | ||
| 288 | gfp_mask, sg_kmalloc); | ||
| 289 | if (unlikely(ret)) | ||
| 290 | __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree); | ||
| 291 | |||
| 292 | return ret; | ||
| 293 | } | ||
| 294 | EXPORT_SYMBOL(sg_alloc_table); | ||
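Putting the allocation side together: a sketch that builds a table over an array of already-allocated pages and tears it down again (fill_table() and the page array are illustrative):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int fill_table(struct sg_table *table, struct page **pages,
		      unsigned int npages)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	/* chains transparently once npages exceeds SG_MAX_SINGLE_ALLOC */
	ret = sg_alloc_table(table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	return 0;
}

When the mapping is no longer needed, a matching sg_free_table(table) releases every chunk, chained or not.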
