Diffstat (limited to 'arch/ia64')
 -rw-r--r--  arch/ia64/ia32/ia32_signal.c |   2
 -rw-r--r--  arch/ia64/kernel/acpi.c      |   2
 -rw-r--r--  arch/ia64/kernel/irq.c       |   4
 -rw-r--r--  arch/ia64/kernel/palinfo.c   |   2
 -rw-r--r--  arch/ia64/kernel/perfmon.c   | 199
 -rw-r--r--  arch/ia64/kernel/signal.c    |  15
 -rw-r--r--  arch/ia64/kernel/smp.c       |  68
 -rw-r--r--  arch/ia64/kernel/time.c      |   5
 -rw-r--r--  arch/ia64/kernel/topology.c  |  16
 9 files changed, 145 insertions, 168 deletions
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 256a7faeda07..b763ca19ef17 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -463,7 +463,7 @@ sys32_sigsuspend (int history0, int history1, old_sigset_t mask)
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
+	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
 
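Note: set_restore_sigmask() replaces the open-coded TIF_RESTORE_SIGMASK bit twiddling. A minimal sketch of what the ia64 helper is expected to expand to, assuming it mirrors the x86 TS_RESTORE_SIGMASK scheme (not copied from this patch):

    static inline void set_restore_sigmask(void)
    {
    	struct thread_info *ti = current_thread_info();

    	/* remember that saved_sigmask must be restored on return to user */
    	ti->status |= TS_RESTORE_SIGMASK;
    	/* make sure the signal delivery path runs and notices the flag */
    	set_bit(TIF_SIGPENDING, &ti->flags);
    }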
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c7467f863c7a..19709a079635 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -966,7 +966,7 @@ acpi_map_iosapics (void)
 fs_initcall(acpi_map_iosapics);
 #endif				/* CONFIG_ACPI_NUMA */
 
-int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
+int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
 	int err;
 
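Note: __ref here (and __refdata on the notifier block in palinfo.c below, plus __ref in topology.c) tells modpost that the reference into an __init/__cpuinit section is intentional, so the section-mismatch warning is suppressed. An illustrative pattern, with example_* names that are not part of this patch:

    static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
    					      unsigned long action, void *hcpu)
    {
    	/* only runs around CPU hotplug events */
    	return NOTIFY_OK;
    }

    /* __refdata: this object legitimately points at __cpuinit text */
    static struct notifier_block __refdata example_cpu_notifier = {
    	.notifier_call = example_cpu_callback,
    };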
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 6dee579f205f..7fd18f54c056 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -183,10 +183,10 @@ void fixup_irqs(void)
 {
 	unsigned int irq;
 	extern void ia64_process_pending_intr(void);
-	extern void ia64_disable_timer(void);
 	extern volatile int time_keeper_id;
 
-	ia64_disable_timer();
+	/* Mask ITV to disable timer */
+	ia64_set_itv(1 << 16);
 
 	/*
 	 * Find a new timesync master
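Note: the open-coded ia64_set_itv(1 << 16) does exactly what the removed ia64_disable_timer() helper did; bit 16 of cr.itv is the mask (m) bit, so setting it stops delivery of the interval-timer interrupt on the local CPU. A sketch with a hypothetical wrapper name, for illustration only:

    /* hypothetical wrapper, equivalent to the inlined call in fixup_irqs() */
    static inline void example_mask_itv(void)
    {
    	ia64_set_itv(1 << 16);		/* bit 16 = mask bit of cr.itv */
    }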
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 396004e8cd14..4547a2092af9 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -1053,7 +1053,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block palinfo_cpu_notifier __cpuinitdata =
+static struct notifier_block __refdata palinfo_cpu_notifier =
 {
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7fbb51e10bbe..c1ad27de2dd2 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -867,7 +867,7 @@ pfm_rvfree(void *mem, unsigned long size)
 }
 
 static pfm_context_t *
-pfm_context_alloc(void)
+pfm_context_alloc(int ctx_flags)
 {
 	pfm_context_t *ctx;
 
@@ -878,6 +878,46 @@ pfm_context_alloc(void)
 	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
 	if (ctx) {
 		DPRINT(("alloc ctx @%p\n", ctx));
+
+		/*
+		 * init context protection lock
+		 */
+		spin_lock_init(&ctx->ctx_lock);
+
+		/*
+		 * context is unloaded
+		 */
+		ctx->ctx_state = PFM_CTX_UNLOADED;
+
+		/*
+		 * initialization of context's flags
+		 */
+		ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
+		ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
+		ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
+		/*
+		 * will move to set properties
+		 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
+		 */
+
+		/*
+		 * init restart semaphore to locked
+		 */
+		init_completion(&ctx->ctx_restart_done);
+
+		/*
+		 * activation is used in SMP only
+		 */
+		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
+		SET_LAST_CPU(ctx, -1);
+
+		/*
+		 * initialize notification message queue
+		 */
+		ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
+		init_waitqueue_head(&ctx->ctx_msgq_wait);
+		init_waitqueue_head(&ctx->ctx_zombieq);
+
 	}
 	return ctx;
 }
@@ -2165,28 +2205,21 @@ static struct dentry_operations pfmfs_dentry_operations = {
 };
 
 
-static int
-pfm_alloc_fd(struct file **cfile)
+static struct file *
+pfm_alloc_file(pfm_context_t *ctx)
 {
-	int fd, ret = 0;
-	struct file *file = NULL;
-	struct inode * inode;
+	struct file *file;
+	struct inode *inode;
+	struct dentry *dentry;
 	char name[32];
 	struct qstr this;
 
-	fd = get_unused_fd();
-	if (fd < 0) return -ENFILE;
-
-	ret = -ENFILE;
-
-	file = get_empty_filp();
-	if (!file) goto out;
-
 	/*
 	 * allocate a new inode
 	 */
 	inode = new_inode(pfmfs_mnt->mnt_sb);
-	if (!inode) goto out;
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
 
 	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
 
@@ -2199,59 +2232,28 @@ pfm_alloc_fd(struct file **cfile)
 	this.len = strlen(name);
 	this.hash = inode->i_ino;
 
-	ret = -ENOMEM;
-
 	/*
 	 * allocate a new dcache entry
 	 */
-	file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
-	if (!file->f_path.dentry) goto out;
+	dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+	if (!dentry) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
 
-	file->f_path.dentry->d_op = &pfmfs_dentry_operations;
+	dentry->d_op = &pfmfs_dentry_operations;
+	d_add(dentry, inode);
 
-	d_add(file->f_path.dentry, inode);
-	file->f_path.mnt = mntget(pfmfs_mnt);
-	file->f_mapping = inode->i_mapping;
+	file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops);
+	if (!file) {
+		dput(dentry);
+		return ERR_PTR(-ENFILE);
+	}
 
-	file->f_op = &pfm_file_ops;
-	file->f_mode = FMODE_READ;
 	file->f_flags = O_RDONLY;
-	file->f_pos = 0;
-
-	/*
-	 * may have to delay until context is attached?
-	 */
-	fd_install(fd, file);
-
-	/*
-	 * the file structure we will use
-	 */
-	*cfile = file;
-
-	return fd;
-out:
-	if (file) put_filp(file);
-	put_unused_fd(fd);
-	return ret;
-}
-
-static void
-pfm_free_fd(int fd, struct file *file)
-{
-	struct files_struct *files = current->files;
-	struct fdtable *fdt;
+	file->private_data = ctx;
 
-	/*
-	 * there ie no fd_uninstall(), so we do it here
-	 */
-	spin_lock(&files->file_lock);
-	fdt = files_fdtable(files);
-	rcu_assign_pointer(fdt->fd[fd], NULL);
-	spin_unlock(&files->file_lock);
-
-	if (file)
-		put_filp(file);
-	put_unused_fd(fd);
+	return file;
 }
 
 static int
@@ -2475,6 +2477,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
 	/* link buffer format and context */
 	ctx->ctx_buf_fmt = fmt;
+	ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
 
 	/*
 	 * check if buffer format wants to use perfmon buffer allocation/mapping service
@@ -2669,78 +2672,45 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 {
 	pfarg_context_t *req = (pfarg_context_t *)arg;
 	struct file *filp;
+	struct path path;
 	int ctx_flags;
+	int fd;
 	int ret;
 
 	/* let's check the arguments first */
 	ret = pfarg_is_sane(current, req);
-	if (ret < 0) return ret;
+	if (ret < 0)
+		return ret;
 
 	ctx_flags = req->ctx_flags;
 
 	ret = -ENOMEM;
 
-	ctx = pfm_context_alloc();
-	if (!ctx) goto error;
+	fd = get_unused_fd();
+	if (fd < 0)
+		return fd;
 
-	ret = pfm_alloc_fd(&filp);
-	if (ret < 0) goto error_file;
+	ctx = pfm_context_alloc(ctx_flags);
+	if (!ctx)
+		goto error;
 
-	req->ctx_fd = ctx->ctx_fd = ret;
+	filp = pfm_alloc_file(ctx);
+	if (IS_ERR(filp)) {
+		ret = PTR_ERR(filp);
+		goto error_file;
+	}
 
-	/*
-	 * attach context to file
-	 */
-	filp->private_data = ctx;
+	req->ctx_fd = ctx->ctx_fd = fd;
 
 	/*
 	 * does the user want to sample?
 	 */
 	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
 		ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
-		if (ret) goto buffer_error;
+		if (ret)
+			goto buffer_error;
 	}
 
-	/*
-	 * init context protection lock
-	 */
-	spin_lock_init(&ctx->ctx_lock);
-
-	/*
-	 * context is unloaded
-	 */
-	ctx->ctx_state = PFM_CTX_UNLOADED;
-
-	/*
-	 * initialization of context's flags
-	 */
-	ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
-	ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
-	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
-	ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
-	/*
-	 * will move to set properties
-	 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
-	 */
-
-	/*
-	 * init restart semaphore to locked
-	 */
-	init_completion(&ctx->ctx_restart_done);
-
-	/*
-	 * activation is used in SMP only
-	 */
-	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
-	SET_LAST_CPU(ctx, -1);
-
-	/*
-	 * initialize notification message queue
-	 */
-	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
-	init_waitqueue_head(&ctx->ctx_msgq_wait);
-	init_waitqueue_head(&ctx->ctx_zombieq);
-
 	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
 		ctx,
 		ctx_flags,
@@ -2755,10 +2725,14 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	 */
 	pfm_reset_pmu_state(ctx);
 
+	fd_install(fd, filp);
+
 	return 0;
 
 buffer_error:
-	pfm_free_fd(ctx->ctx_fd, filp);
+	path = filp->f_path;
+	put_filp(filp);
+	path_put(&path);
 
 	if (ctx->ctx_buf_fmt) {
 		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
@@ -2767,6 +2741,7 @@ error_file:
 	pfm_context_free(ctx);
 
 error:
+	put_unused_fd(fd);
 	return ret;
 }
 
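Note: the perfmon rework follows the usual two-step descriptor pattern: reserve an fd number, build the struct file privately, and only publish it with fd_install() once nothing can fail any more; an unpublished file can then be torn down on error without the hand-rolled fd_uninstall logic that pfm_free_fd() needed. A generic sketch of the pattern, with example_* names that are not from perfmon:

    int example_create(void)
    {
    	struct file *filp;
    	int fd, ret;

    	fd = get_unused_fd();			/* reserve a descriptor number */
    	if (fd < 0)
    		return fd;

    	filp = example_alloc_file();		/* hypothetical constructor */
    	if (IS_ERR(filp)) {
    		ret = PTR_ERR(filp);
    		goto out_fd;
    	}

    	ret = example_setup(filp);		/* hypothetical; may still fail */
    	if (ret)
    		goto out_file;

    	fd_install(fd, filp);			/* publish: now visible to userspace */
    	return fd;

    out_file:
    	/* never installed; if the constructor took dentry/mnt references,
    	 * drop them as well (cf. path_put() in the patch above) */
    	put_filp(filp);
    out_fd:
    	put_unused_fd(fd);
    	return ret;
    }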
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 5740296c35af..19c5a78636fc 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -464,7 +464,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
 	if (!user_mode(&scr->pt))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
 		oldset = &current->saved_sigmask;
 	else
 		oldset = &current->blocked;
@@ -530,12 +530,13 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
 		 * continue to iterate in this loop so we can deliver the SIGSEGV...
 		 */
 		if (handle_signal(signr, &ka, &info, oldset, scr)) {
-			/* a signal was successfully delivered; the saved
+			/*
+			 * A signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
 			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
+			 * clear the TS_RESTORE_SIGMASK flag.
+			 */
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 			return;
 		}
 	}
@@ -566,8 +567,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
 
 	/* if there's no signal to deliver, we just put the saved sigmask
 	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
 }
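Note: moving from the TIF_RESTORE_SIGMASK bit in thread_info->flags to TS_RESTORE_SIGMASK in thread_info->status works because ->status is only ever touched by the owning task, so plain loads and stores suffice; ->flags is set from other CPUs (TIF_SIGPENDING and friends) and therefore needs atomic bitops. Side by side, with hypothetical helper names:

    /* old style: atomic bitop on the shared flags word */
    static inline void example_clear_restore_sigmask_old(void)
    {
    	clear_thread_flag(TIF_RESTORE_SIGMASK);	/* clear_bit() underneath */
    }

    /* new style: ->status is task-private, no atomics needed */
    static inline void example_clear_restore_sigmask_new(void)
    {
    	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
    }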
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9a9d4c489330..983296f1c813 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -98,8 +98,33 @@ unlock_ipi_calllock(void)
 	spin_unlock_irq(&call_lock);
 }
 
+static inline void
+handle_call_data(void)
+{
+	struct call_data_struct *data;
+	void (*func)(void *info);
+	void *info;
+	int wait;
+
+	/* release the 'pointer lock' */
+	data = (struct call_data_struct *)call_data;
+	func = data->func;
+	info = data->info;
+	wait = data->wait;
+
+	mb();
+	atomic_inc(&data->started);
+	/* At this point the structure may be gone unless wait is true. */
+	(*func)(info);
+
+	/* Notify the sending CPU that the task is done. */
+	mb();
+	if (wait)
+		atomic_inc(&data->finished);
+}
+
 static void
-stop_this_cpu (void)
+stop_this_cpu(void)
 {
 	/*
 	 * Remove this CPU:
@@ -138,44 +163,21 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
 			case IPI_CALL_FUNC:
-			{
-				struct call_data_struct *data;
-				void (*func)(void *info);
-				void *info;
-				int wait;
-
-				/* release the 'pointer lock' */
-				data = (struct call_data_struct *) call_data;
-				func = data->func;
-				info = data->info;
-				wait = data->wait;
-
-				mb();
-				atomic_inc(&data->started);
-				/*
-				 * At this point the structure may be gone unless
-				 * wait is true.
-				 */
-				(*func)(info);
-
-				/* Notify the sending CPU that the task is done. */
-				mb();
-				if (wait)
-					atomic_inc(&data->finished);
-			}
-			break;
-
-			case IPI_CPU_STOP:
+				handle_call_data();
+				break;
+
+			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
 				break;
 #endif
 			default:
-				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
+					this_cpu, which);
 				break;
 			}
 		} while (ops);
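Note: handle_call_data() is a pure extraction of the old IPI_CALL_FUNC case; its mb()/atomic_inc() ordering only makes sense next to the sender. A rough sketch of the sending side, assuming the classic call_data/started/finished protocol this file used at the time (simplified, locking omitted, illustrative names, not copied from the patch):

    static void example_call_on_other_cpus(void (*func)(void *), void *info, int wait)
    {
    	struct call_data_struct data = {
    		.func = func,
    		.info = info,
    		.wait = wait,
    	};

    	atomic_set(&data.started, 0);
    	atomic_set(&data.finished, 0);

    	call_data = &data;	/* the 'pointer lock' the receivers read */
    	mb();			/* publish data before raising the IPI */
    	/* send IPI_CALL_FUNC to all other CPUs here */

    	/* every receiver has copied call_data out of the shared pointer */
    	while (atomic_read(&data.started) != num_online_cpus() - 1)
    		cpu_relax();

    	if (wait) {
    		/* every receiver has finished running func() */
    		while (atomic_read(&data.finished) != num_online_cpus() - 1)
    			cpu_relax();
    	}
    }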
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 48e15a51782f..8c73643f2d66 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -379,11 +379,6 @@ static struct irqaction timer_irqaction = {
 	.name =		"timer"
 };
 
-void __devinit ia64_disable_timer(void)
-{
-	ia64_set_itv(1 << 16);
-}
-
 void __init
 time_init (void)
 {
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index abb17a613b17..26228e2d01ae 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,9 +36,11 @@ void arch_fix_phys_package_id(int num, u32 slot)
 }
 EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);
 
-int arch_register_cpu(int num)
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __ref arch_register_cpu(int num)
 {
-#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
+#ifdef CONFIG_ACPI
 	/*
 	 * If CPEI can be re-targetted or if this is not
 	 * CPEI target, then it is hotpluggable
@@ -47,19 +49,21 @@ int arch_register_cpu(int num)
 		sysfs_cpus[num].cpu.hotpluggable = 1;
 	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
-
 	return register_cpu(&sysfs_cpus[num].cpu, num);
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
+EXPORT_SYMBOL(arch_register_cpu);
 
 void arch_unregister_cpu(int num)
 {
 	unregister_cpu(&sysfs_cpus[num].cpu);
 	unmap_cpu_from_node(num, cpu_to_node(num));
 }
-EXPORT_SYMBOL(arch_register_cpu);
 EXPORT_SYMBOL(arch_unregister_cpu);
+#else
+static int __init arch_register_cpu(int num)
+{
+	return register_cpu(&sysfs_cpus[num].cpu, num);
+}
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 