diff options
author | Dean Nelson <dcn@sgi.com> | 2008-07-30 01:34:09 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-30 12:41:49 -0400 |
commit | 6e41017aad9ed175ca51e4828eabc8c5cf5910be (patch) | |
tree | 388f0bd12f15d8c3d4a45d53ce99c24c33454e3a /drivers/misc/sgi-xp/xpc_main.c | |
parent | 97bf1aa1e1bb18de9bb1987c6eb9ad751bf08aab (diff) |
sgi-xp: isolate activate IRQ's hardware specific components
Isolate architecture-specific code related to XPC's activate IRQ.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r-- | drivers/misc/sgi-xp/xpc_main.c | 96 |
1 file changed, 31 insertions, 65 deletions
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index aae90f5933b5..8780d5d00f62 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c | |||
@@ -147,11 +147,11 @@ static struct ctl_table_header *xpc_sysctl; | |||
147 | /* non-zero if any remote partition disengage request was timed out */ | 147 | /* non-zero if any remote partition disengage request was timed out */ |
148 | int xpc_disengage_request_timedout; | 148 | int xpc_disengage_request_timedout; |
149 | 149 | ||
150 | /* #of IRQs received */ | 150 | /* #of activate IRQs received */ |
151 | atomic_t xpc_act_IRQ_rcvd; | 151 | atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0); |
152 | 152 | ||
153 | /* IRQ handler notifies this wait queue on receipt of an IRQ */ | 153 | /* IRQ handler notifies this wait queue on receipt of an IRQ */ |
154 | DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); | 154 | DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq); |
155 | 155 | ||
156 | static unsigned long xpc_hb_check_timeout; | 156 | static unsigned long xpc_hb_check_timeout; |
157 | static struct timer_list xpc_hb_timer; | 157 | static struct timer_list xpc_hb_timer; |
@@ -190,7 +190,7 @@ struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); | |||
190 | void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp, | 190 | void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp, |
191 | u64 remote_rp_pa, int nasid); | 191 | u64 remote_rp_pa, int nasid); |
192 | 192 | ||
193 | void (*xpc_process_act_IRQ_rcvd) (int n_IRQs_expected); | 193 | void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected); |
194 | enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part); | 194 | enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part); |
195 | void (*xpc_teardown_infrastructure) (struct xpc_partition *part); | 195 | void (*xpc_teardown_infrastructure) (struct xpc_partition *part); |
196 | 196 | ||
@@ -239,17 +239,6 @@ xpc_timeout_partition_disengage_request(unsigned long data) | |||
239 | } | 239 | } |
240 | 240 | ||
241 | /* | 241 | /* |
242 | * Notify the heartbeat check thread that an IRQ has been received. | ||
243 | */ | ||
244 | static irqreturn_t | ||
245 | xpc_act_IRQ_handler(int irq, void *dev_id) | ||
246 | { | ||
247 | atomic_inc(&xpc_act_IRQ_rcvd); | ||
248 | wake_up_interruptible(&xpc_act_IRQ_wq); | ||
249 | return IRQ_HANDLED; | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Timer to produce the heartbeat. The timer structures function is | 242 | * Timer to produce the heartbeat. The timer structures function is |
254 | * already set when this is initially called. A tunable is used to | 243 | * already set when this is initially called. A tunable is used to |
255 | * specify when the next timeout should occur. | 244 | * specify when the next timeout should occur. |
@@ -260,7 +249,7 @@ xpc_hb_beater(unsigned long dummy) | |||
260 | xpc_increment_heartbeat(); | 249 | xpc_increment_heartbeat(); |
261 | 250 | ||
262 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) | 251 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) |
263 | wake_up_interruptible(&xpc_act_IRQ_wq); | 252 | wake_up_interruptible(&xpc_activate_IRQ_wq); |
264 | 253 | ||
265 | xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); | 254 | xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); |
266 | add_timer(&xpc_hb_timer); | 255 | add_timer(&xpc_hb_timer); |
@@ -306,7 +295,7 @@ xpc_hb_checker(void *ignore) | |||
306 | dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " | 295 | dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " |
307 | "been received\n", | 296 | "been received\n", |
308 | (int)(xpc_hb_check_timeout - jiffies), | 297 | (int)(xpc_hb_check_timeout - jiffies), |
309 | atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); | 298 | atomic_read(&xpc_activate_IRQ_rcvd) - last_IRQ_count); |
310 | 299 | ||
311 | /* checking of remote heartbeats is skewed by IRQ handling */ | 300 | /* checking of remote heartbeats is skewed by IRQ handling */ |
312 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) { | 301 | if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) { |
@@ -322,15 +311,15 @@ xpc_hb_checker(void *ignore) | |||
322 | } | 311 | } |
323 | 312 | ||
324 | /* check for outstanding IRQs */ | 313 | /* check for outstanding IRQs */ |
325 | new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); | 314 | new_IRQ_count = atomic_read(&xpc_activate_IRQ_rcvd); |
326 | if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { | 315 | if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { |
327 | force_IRQ = 0; | 316 | force_IRQ = 0; |
328 | 317 | ||
329 | dev_dbg(xpc_part, "found an IRQ to process; will be " | 318 | dev_dbg(xpc_part, "found an IRQ to process; will be " |
330 | "resetting xpc_hb_check_timeout\n"); | 319 | "resetting xpc_hb_check_timeout\n"); |
331 | 320 | ||
332 | xpc_process_act_IRQ_rcvd(new_IRQ_count - | 321 | xpc_process_activate_IRQ_rcvd(new_IRQ_count - |
333 | last_IRQ_count); | 322 | last_IRQ_count); |
334 | last_IRQ_count = new_IRQ_count; | 323 | last_IRQ_count = new_IRQ_count; |
335 | 324 | ||
336 | xpc_hb_check_timeout = jiffies + | 325 | xpc_hb_check_timeout = jiffies + |
@@ -338,9 +327,9 @@ xpc_hb_checker(void *ignore) | |||
338 | } | 327 | } |
339 | 328 | ||
340 | /* wait for IRQ or timeout */ | 329 | /* wait for IRQ or timeout */ |
341 | (void)wait_event_interruptible(xpc_act_IRQ_wq, | 330 | (void)wait_event_interruptible(xpc_activate_IRQ_wq, |
342 | (last_IRQ_count < | 331 | (last_IRQ_count < atomic_read( |
343 | atomic_read(&xpc_act_IRQ_rcvd) | 332 | &xpc_activate_IRQ_rcvd) |
344 | || time_is_before_eq_jiffies( | 333 | || time_is_before_eq_jiffies( |
345 | xpc_hb_check_timeout) || | 334 | xpc_hb_check_timeout) || |
346 | xpc_exiting)); | 335 | xpc_exiting)); |
@@ -884,10 +873,7 @@ xpc_do_exit(enum xp_retval reason) | |||
884 | * the heartbeat checker thread in case it's sleeping. | 873 | * the heartbeat checker thread in case it's sleeping. |
885 | */ | 874 | */ |
886 | xpc_exiting = 1; | 875 | xpc_exiting = 1; |
887 | wake_up_interruptible(&xpc_act_IRQ_wq); | 876 | wake_up_interruptible(&xpc_activate_IRQ_wq); |
888 | |||
889 | /* ignore all incoming interrupts */ | ||
890 | free_irq(SGI_XPC_ACTIVATE, NULL); | ||
891 | 877 | ||
892 | /* wait for the discovery thread to exit */ | 878 | /* wait for the discovery thread to exit */ |
893 | wait_for_completion(&xpc_discovery_exited); | 879 | wait_for_completion(&xpc_discovery_exited); |
@@ -968,9 +954,6 @@ xpc_do_exit(enum xp_retval reason) | |||
968 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); | 954 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
969 | } | 955 | } |
970 | 956 | ||
971 | /* close down protections for IPI operations */ | ||
972 | xpc_restrict_IPI_ops(); | ||
973 | |||
974 | /* clear the interface to XPC's functions */ | 957 | /* clear the interface to XPC's functions */ |
975 | xpc_clear_interface(); | 958 | xpc_clear_interface(); |
976 | 959 | ||
@@ -979,6 +962,11 @@ xpc_do_exit(enum xp_retval reason) | |||
979 | 962 | ||
980 | kfree(xpc_partitions); | 963 | kfree(xpc_partitions); |
981 | kfree(xpc_remote_copy_buffer_base); | 964 | kfree(xpc_remote_copy_buffer_base); |
965 | |||
966 | if (is_shub()) | ||
967 | xpc_exit_sn2(); | ||
968 | else | ||
969 | xpc_exit_uv(); | ||
982 | } | 970 | } |
983 | 971 | ||
984 | /* | 972 | /* |
@@ -1144,7 +1132,9 @@ xpc_init(void) | |||
1144 | if (xp_max_npartitions != 64) | 1132 | if (xp_max_npartitions != 64) |
1145 | return -EINVAL; | 1133 | return -EINVAL; |
1146 | 1134 | ||
1147 | xpc_init_sn2(); | 1135 | ret = xpc_init_sn2(); |
1136 | if (ret != 0) | ||
1137 | return ret; | ||
1148 | 1138 | ||
1149 | } else if (is_uv()) { | 1139 | } else if (is_uv()) { |
1150 | xpc_init_uv(); | 1140 | xpc_init_uv(); |
@@ -1163,7 +1153,8 @@ xpc_init(void) | |||
1163 | &xpc_remote_copy_buffer_base); | 1153 | &xpc_remote_copy_buffer_base); |
1164 | if (xpc_remote_copy_buffer == NULL) { | 1154 | if (xpc_remote_copy_buffer == NULL) { |
1165 | dev_err(xpc_part, "can't get memory for remote copy buffer\n"); | 1155 | dev_err(xpc_part, "can't get memory for remote copy buffer\n"); |
1166 | return -ENOMEM; | 1156 | ret = -ENOMEM; |
1157 | goto out_1; | ||
1167 | } | 1158 | } |
1168 | 1159 | ||
1169 | xpc_partitions = kzalloc(sizeof(struct xpc_partition) * | 1160 | xpc_partitions = kzalloc(sizeof(struct xpc_partition) * |
@@ -1171,7 +1162,7 @@ xpc_init(void) | |||
1171 | if (xpc_partitions == NULL) { | 1162 | if (xpc_partitions == NULL) { |
1172 | dev_err(xpc_part, "can't get memory for partition structure\n"); | 1163 | dev_err(xpc_part, "can't get memory for partition structure\n"); |
1173 | ret = -ENOMEM; | 1164 | ret = -ENOMEM; |
1174 | goto out_1; | 1165 | goto out_2; |
1175 | } | 1166 | } |
1176 | 1167 | ||
1177 | /* | 1168 | /* |
@@ -1187,7 +1178,7 @@ xpc_init(void) | |||
1187 | 1178 | ||
1188 | DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); | 1179 | DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); |
1189 | 1180 | ||
1190 | part->act_IRQ_rcvd = 0; | 1181 | part->activate_IRQ_rcvd = 0; |
1191 | spin_lock_init(&part->act_lock); | 1182 | spin_lock_init(&part->act_lock); |
1192 | part->act_state = XPC_P_INACTIVE; | 1183 | part->act_state = XPC_P_INACTIVE; |
1193 | XPC_SET_REASON(part, 0, 0); | 1184 | XPC_SET_REASON(part, 0, 0); |
@@ -1205,33 +1196,6 @@ xpc_init(void) | |||
1205 | xpc_sysctl = register_sysctl_table(xpc_sys_dir); | 1196 | xpc_sysctl = register_sysctl_table(xpc_sys_dir); |
1206 | 1197 | ||
1207 | /* | 1198 | /* |
1208 | * Open up protections for IPI operations (and AMO operations on | ||
1209 | * Shub 1.1 systems). | ||
1210 | */ | ||
1211 | xpc_allow_IPI_ops(); | ||
1212 | |||
1213 | /* | ||
1214 | * Interrupts being processed will increment this atomic variable and | ||
1215 | * awaken the heartbeat thread which will process the interrupts. | ||
1216 | */ | ||
1217 | atomic_set(&xpc_act_IRQ_rcvd, 0); | ||
1218 | |||
1219 | /* | ||
1220 | * This is safe to do before the xpc_hb_checker thread has started | ||
1221 | * because the handler releases a wait queue. If an interrupt is | ||
1222 | * received before the thread is waiting, it will not go to sleep, | ||
1223 | * but rather immediately process the interrupt. | ||
1224 | */ | ||
1225 | ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, | ||
1226 | "xpc hb", NULL); | ||
1227 | if (ret != 0) { | ||
1228 | dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " | ||
1229 | "errno=%d\n", -ret); | ||
1230 | ret = -EBUSY; | ||
1231 | goto out_2; | ||
1232 | } | ||
1233 | |||
1234 | /* | ||
1235 | * Fill the partition reserved page with the information needed by | 1199 | * Fill the partition reserved page with the information needed by |
1236 | * other partitions to discover we are alive and establish initial | 1200 | * other partitions to discover we are alive and establish initial |
1237 | * communications. | 1201 | * communications. |
@@ -1296,14 +1260,16 @@ out_4: | |||
1296 | (void)unregister_die_notifier(&xpc_die_notifier); | 1260 | (void)unregister_die_notifier(&xpc_die_notifier); |
1297 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); | 1261 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
1298 | out_3: | 1262 | out_3: |
1299 | free_irq(SGI_XPC_ACTIVATE, NULL); | ||
1300 | out_2: | ||
1301 | xpc_restrict_IPI_ops(); | ||
1302 | if (xpc_sysctl) | 1263 | if (xpc_sysctl) |
1303 | unregister_sysctl_table(xpc_sysctl); | 1264 | unregister_sysctl_table(xpc_sysctl); |
1304 | kfree(xpc_partitions); | 1265 | kfree(xpc_partitions); |
1305 | out_1: | 1266 | out_2: |
1306 | kfree(xpc_remote_copy_buffer_base); | 1267 | kfree(xpc_remote_copy_buffer_base); |
1268 | out_1: | ||
1269 | if (is_shub()) | ||
1270 | xpc_exit_sn2(); | ||
1271 | else | ||
1272 | xpc_exit_uv(); | ||
1307 | return ret; | 1273 | return ret; |
1308 | } | 1274 | } |
1309 | 1275 | ||