diff options
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_uv.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 52 ++++++++++++++++++++++++++++++++------------
 1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index c76677afda1b..1f59ee2226ca 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/slab.h> | ||
22 | #include <asm/uv/uv_hub.h> | 23 | #include <asm/uv/uv_hub.h> |
23 | #if defined CONFIG_X86_64 | 24 | #if defined CONFIG_X86_64 |
24 | #include <asm/uv/bios.h> | 25 | #include <asm/uv/bios.h> |
@@ -106,7 +107,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) | |||
106 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | 107 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); |
107 | 108 | ||
108 | #if defined CONFIG_X86_64 | 109 | #if defined CONFIG_X86_64 |
109 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); | 110 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, |
111 | UV_AFFINITY_CPU); | ||
110 | if (mq->irq < 0) { | 112 | if (mq->irq < 0) { |
111 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", | 113 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", |
112 | -mq->irq); | 114 | -mq->irq); |
@@ -136,7 +138,7 @@ static void | |||
136 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) | 138 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) |
137 | { | 139 | { |
138 | #if defined CONFIG_X86_64 | 140 | #if defined CONFIG_X86_64 |
139 | uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); | 141 | uv_teardown_irq(mq->irq); |
140 | 142 | ||
141 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | 143 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
142 | int mmr_pnode; | 144 | int mmr_pnode; |
@@ -156,22 +158,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) | |||
156 | { | 158 | { |
157 | int ret; | 159 | int ret; |
158 | 160 | ||
159 | #if defined CONFIG_X86_64 | 161 | #if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
160 | ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), | 162 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); |
161 | mq->order, &mq->mmr_offset); | 163 | |
162 | if (ret < 0) { | 164 | ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address), |
163 | dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " | ||
164 | "ret=%d\n", ret); | ||
165 | return ret; | ||
166 | } | ||
167 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | ||
168 | ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address), | ||
169 | mq->order, &mq->mmr_offset); | 165 | mq->order, &mq->mmr_offset); |
170 | if (ret < 0) { | 166 | if (ret < 0) { |
171 | dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", | 167 | dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", |
172 | ret); | 168 | ret); |
173 | return -EBUSY; | 169 | return -EBUSY; |
174 | } | 170 | } |
171 | #elif defined CONFIG_X86_64 | ||
172 | ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address), | ||
173 | mq->order, &mq->mmr_offset); | ||
174 | if (ret < 0) { | ||
175 | dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " | ||
176 | "ret=%d\n", ret); | ||
177 | return ret; | ||
178 | } | ||
175 | #else | 179 | #else |
176 | #error not a supported configuration | 180 | #error not a supported configuration |
177 | #endif | 181 | #endif |
@@ -184,12 +188,13 @@ static void | |||
184 | xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) | 188 | xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) |
185 | { | 189 | { |
186 | int ret; | 190 | int ret; |
191 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | ||
187 | 192 | ||
188 | #if defined CONFIG_X86_64 | 193 | #if defined CONFIG_X86_64 |
189 | ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); | 194 | ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num); |
190 | BUG_ON(ret != BIOS_STATUS_SUCCESS); | 195 | BUG_ON(ret != BIOS_STATUS_SUCCESS); |
191 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | 196 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
192 | ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num); | 197 | ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num); |
193 | BUG_ON(ret != SALRET_OK); | 198 | BUG_ON(ret != SALRET_OK); |
194 | #else | 199 | #else |
195 | #error not a supported configuration | 200 | #error not a supported configuration |
@@ -203,6 +208,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | |||
203 | enum xp_retval xp_ret; | 208 | enum xp_retval xp_ret; |
204 | int ret; | 209 | int ret; |
205 | int nid; | 210 | int nid; |
211 | int nasid; | ||
206 | int pg_order; | 212 | int pg_order; |
207 | struct page *page; | 213 | struct page *page; |
208 | struct xpc_gru_mq_uv *mq; | 214 | struct xpc_gru_mq_uv *mq; |
@@ -258,9 +264,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | |||
258 | goto out_5; | 264 | goto out_5; |
259 | } | 265 | } |
260 | 266 | ||
267 | nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu)); | ||
268 | |||
261 | mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; | 269 | mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; |
262 | ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, | 270 | ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, |
263 | nid, mmr_value->vector, mmr_value->dest); | 271 | nasid, mmr_value->vector, mmr_value->dest); |
264 | if (ret != 0) { | 272 | if (ret != 0) { |
265 | dev_err(xpc_part, "gru_create_message_queue() returned " | 273 | dev_err(xpc_part, "gru_create_message_queue() returned " |
266 | "error=%d\n", ret); | 274 | "error=%d\n", ret); |
@@ -945,11 +953,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) | |||
945 | head->first = first->next; | 953 | head->first = first->next; |
946 | if (head->first == NULL) | 954 | if (head->first == NULL) |
947 | head->last = NULL; | 955 | head->last = NULL; |
956 | |||
957 | head->n_entries--; | ||
958 | BUG_ON(head->n_entries < 0); | ||
959 | |||
960 | first->next = NULL; | ||
948 | } | 961 | } |
949 | head->n_entries--; | ||
950 | BUG_ON(head->n_entries < 0); | ||
951 | spin_unlock_irqrestore(&head->lock, irq_flags); | 962 | spin_unlock_irqrestore(&head->lock, irq_flags); |
952 | first->next = NULL; | ||
953 | return first; | 963 | return first; |
954 | } | 964 | } |
955 | 965 | ||
@@ -1018,7 +1028,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part) | |||
1018 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), | 1028 | xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), |
1019 | XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); | 1029 | XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); |
1020 | 1030 | ||
1021 | while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) { | 1031 | while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) || |
1032 | (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) { | ||
1022 | 1033 | ||
1023 | dev_dbg(xpc_part, "waiting to make first contact with " | 1034 | dev_dbg(xpc_part, "waiting to make first contact with " |
1024 | "partition %d\n", XPC_PARTID(part)); | 1035 | "partition %d\n", XPC_PARTID(part)); |
@@ -1421,7 +1432,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, | |||
1421 | msg_slot = ch_uv->recv_msg_slots + | 1432 | msg_slot = ch_uv->recv_msg_slots + |
1422 | (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; | 1433 | (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; |
1423 | 1434 | ||
1424 | BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number); | ||
1425 | BUG_ON(msg_slot->hdr.size != 0); | 1435 | BUG_ON(msg_slot->hdr.size != 0); |
1426 | 1436 | ||
1427 | memcpy(msg_slot, msg, msg->hdr.size); | 1437 | memcpy(msg_slot, msg, msg->hdr.size); |
@@ -1645,8 +1655,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) | |||
1645 | sizeof(struct xpc_notify_mq_msghdr_uv)); | 1655 | sizeof(struct xpc_notify_mq_msghdr_uv)); |
1646 | if (ret != xpSuccess) | 1656 | if (ret != xpSuccess) |
1647 | XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); | 1657 | XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); |
1648 | |||
1649 | msg->hdr.msg_slot_number += ch->remote_nentries; | ||
1650 | } | 1658 | } |
1651 | 1659 | ||
1652 | static struct xpc_arch_operations xpc_arch_ops_uv = { | 1660 | static struct xpc_arch_operations xpc_arch_ops_uv = { |