author      David S. Miller <davem@sunset.davemloft.net>   2007-07-16 19:50:36 -0400
committer   David S. Miller <davem@sunset.davemloft.net>   2007-07-16 20:11:59 -0400
commit      778feeb4757daef5d7118feab2319142367910dd (patch)
tree        e09a08c13bbd5a2de881dfcb04d987034564f1b3 /arch/sparc64
parent      3ac66e33eaf0b2313f7e3c8c023aa0420577a0c2 (diff)
[SPARC64]: Fix race between MD update and dr-cpu add.
We need to make sure the MD update occurs before we try to process dr-cpu configure requests. Because the MD update and the dr-cpu requests were handled by separate threads, that ordering occasionally did not hold. Fix this by processing all domain services data packets from a single thread, in order. This will help simplify some other things as well.

Signed-off-by: David S. Miller <davem@davemloft.net>
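The mechanism behind the fix is a simple queue-plus-worker pattern: the LDC data callback only copies each incoming packet onto a list under ds_lock and wakes a wait queue, while a single kernel thread drains that list and dispatches the packets in arrival order, so the MD update is always handled before any dr-cpu request that follows it. A minimal kernel-style sketch of that shape follows; it is illustrative only -- the names (demo_rx, demo_thread, struct demo_entry) are made up, and the actual ds.c code in this patch uses prepare_to_wait()/finish_wait() and per-capability dispatch rather than this simplified loop.

/* Illustrative sketch only, not the actual ds.c code: a receive
 * callback running in atomic (interrupt) context queues each packet,
 * and one kthread consumes the queue in FIFO order, so no two packets
 * are ever processed concurrently or out of order.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

static LIST_HEAD(demo_work_list);
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

struct demo_entry {
        struct list_head        list;
        int                     len;
        u8                      data[];         /* copy of the received packet */
};

/* Producer: runs in interrupt context, hence GFP_ATOMIC. */
static void demo_rx(const void *buf, int len)
{
        struct demo_entry *qp = kmalloc(sizeof(*qp) + len, GFP_ATOMIC);
        unsigned long flags;

        if (!qp)
                return;         /* the real code NACKs the sender instead */

        qp->len = len;
        memcpy(qp->data, buf, len);

        spin_lock_irqsave(&demo_lock, flags);
        list_add_tail(&qp->list, &demo_work_list);
        spin_unlock_irqrestore(&demo_lock, flags);

        wake_up(&demo_wait);
}

/* Consumer: a single thread, so packets are handled strictly in order. */
static int demo_thread(void *unused)
{
        while (!kthread_should_stop()) {
                struct demo_entry *qp, *tmp;
                unsigned long flags;
                LIST_HEAD(todo);

                wait_event_interruptible(demo_wait,
                                         !list_empty(&demo_work_list) ||
                                         kthread_should_stop());

                spin_lock_irqsave(&demo_lock, flags);
                list_splice_init(&demo_work_list, &todo);
                spin_unlock_irqrestore(&demo_lock, flags);

                list_for_each_entry_safe(qp, tmp, &todo, list) {
                        /* dispatch qp->data / qp->len here, in order */
                        list_del(&qp->list);
                        kfree(qp);
                }
        }
        return 0;
}

The consumer thread would be started once at init time, e.g. kthread_run(demo_thread, NULL, "demo_worker"), which corresponds to the single "kldomd" thread this patch creates in ds_init().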
Diffstat (limited to 'arch/sparc64')
-rw-r--r--   arch/sparc64/kernel/ds.c      270
-rw-r--r--   arch/sparc64/kernel/mdesc.c    20
2 files changed, 147 insertions(+), 143 deletions(-)
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index 1c587107cef0..ba01533f4e03 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -228,7 +228,7 @@ static struct ds_cap_state *find_cap_by_string(const char *name)
         return NULL;
 }
 
-static int ds_send(struct ldc_channel *lp, void *data, int len)
+static int __ds_send(struct ldc_channel *lp, void *data, int len)
 {
         int err, limit = 1000;
 
@@ -243,6 +243,18 @@ static int ds_send(struct ldc_channel *lp, void *data, int len)
         return err;
 }
 
+static int ds_send(struct ldc_channel *lp, void *data, int len)
+{
+        unsigned long flags;
+        int err;
+
+        spin_lock_irqsave(&ds_lock, flags);
+        err = __ds_send(lp, data, len);
+        spin_unlock_irqrestore(&ds_lock, flags);
+
+        return err;
+}
+
 struct ds_md_update_req {
         __u64                   req_num;
 };
@@ -267,6 +279,8 @@ static void md_update_data(struct ldc_channel *lp,
 
         printk(KERN_INFO PFX "Machine description update.\n");
 
+        mdesc_update();
+
         memset(&pkt, 0, sizeof(pkt));
         pkt.data.tag.type = DS_DATA;
         pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
@@ -275,8 +289,6 @@ static void md_update_data(struct ldc_channel *lp,
         pkt.res.result = DS_OK;
 
         ds_send(lp, &pkt, sizeof(pkt));
-
-        mdesc_update();
 }
 
 struct ds_shutdown_req {
@@ -391,18 +403,6 @@ struct dr_cpu_resp_entry {
         __u32                   str_off;
 };
 
-/* DR cpu requests get queued onto the work list by the
- * dr_cpu_data() callback.  The list is protected by
- * ds_lock, and processed by dr_cpu_process() in order.
- */
-static LIST_HEAD(dr_cpu_work_list);
-static DECLARE_WAIT_QUEUE_HEAD(dr_cpu_wait);
-
-struct dr_cpu_queue_entry {
-        struct list_head        list;
-        char                    req[0];
-};
-
 static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
 {
         struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
@@ -425,7 +425,7 @@ static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
 
         pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);
 
-        ds_send(dp->lp, &pkt, msg_len);
+        __ds_send(dp->lp, &pkt, msg_len);
 }
 
 static void dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data)
@@ -555,7 +555,7 @@ static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num,
         }
 
         spin_lock_irqsave(&ds_lock, flags);
-        ds_send(ds_info->lp, resp, resp_len);
+        __ds_send(ds_info->lp, resp, resp_len);
         spin_unlock_irqrestore(&ds_lock, flags);
 
         kfree(resp);
@@ -596,7 +596,7 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
         }
 
         spin_lock_irqsave(&ds_lock, flags);
-        ds_send(ds_info->lp, resp, resp_len);
+        __ds_send(ds_info->lp, resp, resp_len);
         spin_unlock_irqrestore(&ds_lock, flags);
 
         kfree(resp);
@@ -604,107 +604,49 @@ static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num,
         return 0;
 }
 
-static void process_dr_cpu_list(struct ds_cap_state *cp)
-{
-        struct dr_cpu_queue_entry *qp, *tmp;
-        unsigned long flags;
-        LIST_HEAD(todo);
-        cpumask_t mask;
-
-        spin_lock_irqsave(&ds_lock, flags);
-        list_splice(&dr_cpu_work_list, &todo);
-        INIT_LIST_HEAD(&dr_cpu_work_list);
-        spin_unlock_irqrestore(&ds_lock, flags);
-
-        list_for_each_entry_safe(qp, tmp, &todo, list) {
-                struct ds_data *data = (struct ds_data *) qp->req;
-                struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
-                u32 *cpu_list = (u32 *) (tag + 1);
-                u64 req_num = tag->req_num;
-                unsigned int i;
-                int err;
-
-                switch (tag->type) {
-                case DR_CPU_CONFIGURE:
-                case DR_CPU_UNCONFIGURE:
-                case DR_CPU_FORCE_UNCONFIGURE:
-                        break;
-
-                default:
-                        dr_cpu_send_error(cp, data);
-                        goto next;
-                }
-
-                purge_dups(cpu_list, tag->num_records);
-
-                cpus_clear(mask);
-                for (i = 0; i < tag->num_records; i++) {
-                        if (cpu_list[i] == CPU_SENTINEL)
-                                continue;
-
-                        if (cpu_list[i] < NR_CPUS)
-                                cpu_set(cpu_list[i], mask);
-                }
-
-                if (tag->type == DR_CPU_CONFIGURE)
-                        err = dr_cpu_configure(cp, req_num, &mask);
-                else
-                        err = dr_cpu_unconfigure(cp, req_num, &mask);
-
-                if (err)
-                        dr_cpu_send_error(cp, data);
-
-next:
-                list_del(&qp->list);
-                kfree(qp);
-        }
-}
-
-static int dr_cpu_thread(void *__unused)
-{
-        struct ds_cap_state *cp;
-        DEFINE_WAIT(wait);
-
-        cp = find_cap_by_string("dr-cpu");
-
-        while (1) {
-                prepare_to_wait(&dr_cpu_wait, &wait, TASK_INTERRUPTIBLE);
-                if (list_empty(&dr_cpu_work_list))
-                        schedule();
-                finish_wait(&dr_cpu_wait, &wait);
-
-                if (kthread_should_stop())
-                        break;
-
-                process_dr_cpu_list(cp);
-        }
-
-        return 0;
-}
-
-static void dr_cpu_data(struct ldc_channel *lp,
-                        struct ds_cap_state *dp,
-                        void *buf, int len)
-{
-        struct dr_cpu_queue_entry *qp;
-        struct ds_data *dpkt = buf;
-        struct dr_cpu_tag *rp;
-
-        rp = (struct dr_cpu_tag *) (dpkt + 1);
-
-        qp = kmalloc(sizeof(struct dr_cpu_queue_entry) + len, GFP_ATOMIC);
-        if (!qp) {
-                struct ds_cap_state *cp;
-
-                cp = find_cap_by_string("dr-cpu");
-                __dr_cpu_send_error(cp, dpkt);
-        } else {
-                memcpy(&qp->req, buf, len);
-                list_add_tail(&qp->list, &dr_cpu_work_list);
-                wake_up(&dr_cpu_wait);
-        }
-}
-#endif
+static void dr_cpu_data(struct ldc_channel *lp,
+                        struct ds_cap_state *cp,
+                        void *buf, int len)
+{
+        struct ds_data *data = buf;
+        struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
+        u32 *cpu_list = (u32 *) (tag + 1);
+        u64 req_num = tag->req_num;
+        cpumask_t mask;
+        unsigned int i;
+        int err;
+
+        switch (tag->type) {
+        case DR_CPU_CONFIGURE:
+        case DR_CPU_UNCONFIGURE:
+        case DR_CPU_FORCE_UNCONFIGURE:
+                break;
+
+        default:
+                dr_cpu_send_error(cp, data);
+                return;
+        }
+
+        purge_dups(cpu_list, tag->num_records);
+
+        cpus_clear(mask);
+        for (i = 0; i < tag->num_records; i++) {
+                if (cpu_list[i] == CPU_SENTINEL)
+                        continue;
+
+                if (cpu_list[i] < NR_CPUS)
+                        cpu_set(cpu_list[i], mask);
+        }
+
+        if (tag->type == DR_CPU_CONFIGURE)
+                err = dr_cpu_configure(cp, req_num, &mask);
+        else
+                err = dr_cpu_unconfigure(cp, req_num, &mask);
+
+        if (err)
+                dr_cpu_send_error(cp, data);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
 
 struct ds_pri_msg {
         __u64                   req_num;
@@ -820,7 +762,7 @@ void ldom_set_var(const char *var, const char *value)
         ds_var_doorbell = 0;
         ds_var_response = -1;
 
-        ds_send(dp->lp, &pkt, msg_len);
+        __ds_send(dp->lp, &pkt, msg_len);
         spin_unlock_irqrestore(&ds_lock, flags);
 
         loops = 1000;
@@ -904,7 +846,7 @@ static int register_services(struct ds_info *dp)
                 pbuf.req.minor = 0;
                 strcpy(pbuf.req.svc_id, cp->service_id);
 
-                err = ds_send(lp, &pbuf, msg_len);
+                err = __ds_send(lp, &pbuf, msg_len);
                 if (err > 0)
                         cp->state = CAP_STATE_REG_SENT;
         }
@@ -960,27 +902,97 @@ conn_reset:
         return -ECONNRESET;
 }
 
+static void __send_ds_nack(struct ds_info *dp, u64 handle)
+{
+        struct ds_data_nack nack = {
+                .tag = {
+                        .type = DS_NACK,
+                        .len = (sizeof(struct ds_data_nack) -
+                                sizeof(struct ds_msg_tag)),
+                },
+                .handle = handle,
+                .result = DS_INV_HDL,
+        };
+
+        __ds_send(dp->lp, &nack, sizeof(nack));
+}
+
+static LIST_HEAD(ds_work_list);
+static DECLARE_WAIT_QUEUE_HEAD(ds_wait);
+
+struct ds_queue_entry {
+        struct list_head        list;
+        int                     req_len;
+        int                     __pad;
+        u64                     req[0];
+};
+
+static void process_ds_work(void)
+{
+        struct ds_queue_entry *qp, *tmp;
+        static struct ds_info *dp;
+        unsigned long flags;
+        LIST_HEAD(todo);
+
+        spin_lock_irqsave(&ds_lock, flags);
+        list_splice(&ds_work_list, &todo);
+        INIT_LIST_HEAD(&ds_work_list);
+        spin_unlock_irqrestore(&ds_lock, flags);
+
+        dp = ds_info;
+
+        list_for_each_entry_safe(qp, tmp, &todo, list) {
+                struct ds_data *dpkt = (struct ds_data *) qp->req;
+                struct ds_cap_state *cp = find_cap(dpkt->handle);
+                int req_len = qp->req_len;
+
+                if (!cp) {
+                        printk(KERN_ERR PFX "Data for unknown handle %lu\n",
+                               dpkt->handle);
+
+                        spin_lock_irqsave(&ds_lock, flags);
+                        __send_ds_nack(dp, dpkt->handle);
+                        spin_unlock_irqrestore(&ds_lock, flags);
+                } else {
+                        cp->data(dp->lp, cp, dpkt, req_len);
+                }
+
+                list_del(&qp->list);
+                kfree(qp);
+        }
+}
+
+static int ds_thread(void *__unused)
+{
+        DEFINE_WAIT(wait);
+
+        while (1) {
+                prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
+                if (list_empty(&ds_work_list))
+                        schedule();
+                finish_wait(&ds_wait, &wait);
+
+                if (kthread_should_stop())
+                        break;
+
+                process_ds_work();
+        }
+
+        return 0;
+}
+
 static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
 {
         struct ds_data *dpkt = (struct ds_data *) pkt;
-        struct ds_cap_state *cp = find_cap(dpkt->handle);
+        struct ds_queue_entry *qp;
 
-        if (!cp) {
-                struct ds_data_nack nack = {
-                        .tag = {
-                                .type = DS_NACK,
-                                .len = (sizeof(struct ds_data_nack) -
-                                        sizeof(struct ds_msg_tag)),
-                        },
-                        .handle = dpkt->handle,
-                        .result = DS_INV_HDL,
-                };
-
-                printk(KERN_ERR PFX "Data for unknown handle %lu\n",
-                       dpkt->handle);
-                ds_send(dp->lp, &nack, sizeof(nack));
+        qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
+        if (!qp) {
+                __send_ds_nack(dp, dpkt->handle);
         } else {
-                cp->data(dp->lp, cp, dpkt, len);
+                memcpy(&qp->req, pkt, len);
+                list_add_tail(&qp->list, &ds_work_list);
+                wake_up(&ds_wait);
         }
         return 0;
 }
@@ -996,7 +1008,7 @@ static void ds_up(struct ds_info *dp)
         req.ver.major = 1;
         req.ver.minor = 0;
 
-        err = ds_send(lp, &req, sizeof(req));
+        err = __ds_send(lp, &req, sizeof(req));
         if (err > 0)
                 dp->hs_state = DS_HS_START;
 }
@@ -1148,9 +1160,7 @@ static int __init ds_init(void)
         for (i = 0; i < ARRAY_SIZE(ds_states); i++)
                 ds_states[i].handle = ((u64)i << 32);
 
-#ifdef CONFIG_HOTPLUG_CPU
-        kthread_run(dr_cpu_thread, NULL, "kdrcpud");
-#endif
+        kthread_run(ds_thread, NULL, "kldomd");
 
         return vio_register_driver(&ds_driver);
 }
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 62a389793949..de5310ffdb48 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -214,7 +214,7 @@ void mdesc_release(struct mdesc_handle *hp)
 }
 EXPORT_SYMBOL(mdesc_release);
 
-static void do_mdesc_update(struct work_struct *work)
+void mdesc_update(void)
 {
         unsigned long len, real_len, status;
         struct mdesc_handle *hp, *orig_hp;
@@ -248,13 +248,6 @@ static void do_mdesc_update(struct work_struct *work)
         spin_unlock_irqrestore(&mdesc_lock, flags);
 }
 
-static DECLARE_WORK(mdesc_update_work, do_mdesc_update);
-
-void mdesc_update(void)
-{
-        schedule_work(&mdesc_update_work);
-}
-
 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
         return (struct mdesc_elem *) (mdesc + 1);
@@ -278,13 +271,14 @@ u64 mdesc_node_by_name(struct mdesc_handle *hp,
         u64 last_node = hp->mdesc.node_sz / 16;
         u64 ret;
 
-        if (from_node == MDESC_NODE_NULL)
-                from_node = 0;
-
-        if (from_node >= last_node)
+        if (from_node == MDESC_NODE_NULL) {
+                ret = from_node = 0;
+        } else if (from_node >= last_node) {
                 return MDESC_NODE_NULL;
+        } else {
+                ret = ep[from_node].d.val;
+        }
 
-        ret = ep[from_node].d.val;
         while (ret < last_node) {
                 if (ep[ret].tag != MD_NODE)
                         return MDESC_NODE_NULL;