author     David Fries <David@Fries.net>  2014-01-15 23:29:18 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2014-02-07 18:40:17 -0500
commit     9fcbbac5ded489c3a4e121343db999dd51cd6c75
tree       1207602c8d7cc2da54784114a5d32d9ed6924b58
parent     70b34d2ed807b722413894975a8c60617defb887
w1: process w1 netlink commands in w1_process thread
Netlink is a socket interface and is expected to be asynchronous. Clients can now make w1 requests without blocking by making use of the w1_master thread, which was previously used only for the automatic bus search, to process netlink commands.

Signed-off-by: David Fries <David@Fries.net>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/w1/w1.c         | 180
-rw-r--r--  drivers/w1/w1.h         |  32
-rw-r--r--  drivers/w1/w1_int.c     |  17
-rw-r--r--  drivers/w1/w1_netlink.c | 166
4 files changed, 300 insertions(+), 95 deletions(-)
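Because the kernel side now queues netlink work to the w1_master thread instead of executing it inline, a userspace client can fire a request and carry on. The sketch below is illustrative only and is not taken from this patch: w1_request_master_list is a made-up name, the w1_netlink_msg layout and the W1_LIST_MASTERS value are copied from drivers/w1/w1_netlink.h as they stood at the time, and reply reception plus error handling are left out.

/*
 * Illustrative userspace sketch only -- not part of the patch.  Sends one
 * W1_LIST_MASTERS request over the connector socket and returns as soon as
 * the datagram is queued; with this patch the kernel hands the work to the
 * w1_master thread, so nothing here waits on the bus.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/connector.h>

struct w1_netlink_msg {			/* mirrors drivers/w1/w1_netlink.h */
	__u8	type;			/* W1_LIST_MASTERS, W1_MASTER_CMD, ... */
	__u8	status;
	__u16	len;
	union {
		__u8	id[8];
		struct { __u32 id; __u32 res; } mst;
	} id;
	__u8	data[0];
};

#define W1_LIST_MASTERS	6		/* value from the same header */

int w1_request_master_list(void)
{
	unsigned char buf[NLMSG_SPACE(sizeof(struct cn_msg) +
			sizeof(struct w1_netlink_msg))]
			__attribute__((aligned(NLMSG_ALIGNTO))) = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *cn = NLMSG_DATA(nlh);
	struct w1_netlink_msg *w1m = (struct w1_netlink_msg *)cn->data;
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
	int s, err;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*cn) + sizeof(*w1m));
	nlh->nlmsg_type = NLMSG_DONE;

	cn->id.idx = CN_W1_IDX;		/* route the message to the w1 core */
	cn->id.val = CN_W1_VAL;
	cn->len = sizeof(*w1m);

	w1m->type = W1_LIST_MASTERS;	/* no payload needed for this request */

	s = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (s < 0)
		return -1;
	err = bind(s, (struct sockaddr *)&addr, sizeof(addr));
	if (!err)
		/* Returns once the request is queued; the answer comes back
		 * asynchronously on a connector socket (reception not shown). */
		err = send(s, nlh, nlh->nlmsg_len, 0) < 0 ? -1 : 0;
	close(s);
	return err;
}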
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 97b35cb8b6da..53846c7f24ff 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -79,19 +79,10 @@ static void w1_slave_release(struct device *dev)
79{ 79{
80 struct w1_slave *sl = dev_to_w1_slave(dev); 80 struct w1_slave *sl = dev_to_w1_slave(dev);
81 81
82 dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name); 82 dev_dbg(dev, "%s: Releasing %s [%p]\n", __func__, sl->name, sl);
83
84 while (atomic_read(&sl->refcnt)) {
85 dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n",
86 sl->name, atomic_read(&sl->refcnt));
87 if (msleep_interruptible(1000))
88 flush_signals(current);
89 }
90 83
91 w1_family_put(sl->family); 84 w1_family_put(sl->family);
92 sl->master->slave_count--; 85 sl->master->slave_count--;
93
94 complete(&sl->released);
95} 86}
96 87
97static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) 88static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -277,7 +268,6 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
277 mutex_lock(&md->mutex); 268 mutex_lock(&md->mutex);
278 md->enable_pullup = tmp; 269 md->enable_pullup = tmp;
279 mutex_unlock(&md->mutex); 270 mutex_unlock(&md->mutex);
280 wake_up_process(md->thread);
281 271
282 return count; 272 return count;
283} 273}
@@ -370,23 +360,20 @@ static ssize_t w1_master_attribute_show_slaves(struct device *dev,
370{ 360{
371 struct w1_master *md = dev_to_w1_master(dev); 361 struct w1_master *md = dev_to_w1_master(dev);
372 int c = PAGE_SIZE; 362 int c = PAGE_SIZE;
363 struct list_head *ent, *n;
364 struct w1_slave *sl = NULL;
373 365
374 mutex_lock(&md->mutex); 366 mutex_lock(&md->list_mutex);
375
376 if (md->slave_count == 0)
377 c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
378 else {
379 struct list_head *ent, *n;
380 struct w1_slave *sl;
381 367
382 list_for_each_safe(ent, n, &md->slist) { 368 list_for_each_safe(ent, n, &md->slist) {
383 sl = list_entry(ent, struct w1_slave, w1_slave_entry); 369 sl = list_entry(ent, struct w1_slave, w1_slave_entry);
384 370
385 c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); 371 c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name);
386 }
387 } 372 }
373 if (!sl)
374 c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n");
388 375
389 mutex_unlock(&md->mutex); 376 mutex_unlock(&md->list_mutex);
390 377
391 return PAGE_SIZE - c; 378 return PAGE_SIZE - c;
392} 379}
@@ -440,19 +427,22 @@ static int w1_atoreg_num(struct device *dev, const char *buf, size_t count,
440} 427}
441 428
442/* Searches the slaves in the w1_master and returns a pointer or NULL. 429/* Searches the slaves in the w1_master and returns a pointer or NULL.
443 * Note: must hold the mutex 430 * Note: must not hold list_mutex
444 */ 431 */
445struct w1_slave *w1_slave_search_device(struct w1_master *dev, 432struct w1_slave *w1_slave_search_device(struct w1_master *dev,
446 struct w1_reg_num *rn) 433 struct w1_reg_num *rn)
447{ 434{
448 struct w1_slave *sl; 435 struct w1_slave *sl;
436 mutex_lock(&dev->list_mutex);
449 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 437 list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
450 if (sl->reg_num.family == rn->family && 438 if (sl->reg_num.family == rn->family &&
451 sl->reg_num.id == rn->id && 439 sl->reg_num.id == rn->id &&
452 sl->reg_num.crc == rn->crc) { 440 sl->reg_num.crc == rn->crc) {
441 mutex_unlock(&dev->list_mutex);
453 return sl; 442 return sl;
454 } 443 }
455 } 444 }
445 mutex_unlock(&dev->list_mutex);
456 return NULL; 446 return NULL;
457} 447}
458 448
@@ -509,7 +499,10 @@ static ssize_t w1_master_attribute_store_remove(struct device *dev,
509 mutex_lock(&md->mutex); 499 mutex_lock(&md->mutex);
510 sl = w1_slave_search_device(md, &rn); 500 sl = w1_slave_search_device(md, &rn);
511 if (sl) { 501 if (sl) {
512 w1_slave_detach(sl); 502 result = w1_slave_detach(sl);
503 /* refcnt 0 means it was detached in the call */
504 if (result == 0)
505 result = count;
513 } else { 506 } else {
514 dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family, 507 dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family,
515 (unsigned long long)rn.id); 508 (unsigned long long)rn.id);
@@ -704,7 +697,9 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
704 dev_set_uevent_suppress(&sl->dev, false); 697 dev_set_uevent_suppress(&sl->dev, false);
705 kobject_uevent(&sl->dev.kobj, KOBJ_ADD); 698 kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
706 699
700 mutex_lock(&sl->master->list_mutex);
707 list_add_tail(&sl->w1_slave_entry, &sl->master->slist); 701 list_add_tail(&sl->w1_slave_entry, &sl->master->slist);
702 mutex_unlock(&sl->master->list_mutex);
708 703
709 return 0; 704 return 0;
710} 705}
@@ -731,8 +726,8 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
731 726
732 memset(&msg, 0, sizeof(msg)); 727 memset(&msg, 0, sizeof(msg));
733 memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); 728 memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
734 atomic_set(&sl->refcnt, 0); 729 atomic_set(&sl->refcnt, 1);
735 init_completion(&sl->released); 730 atomic_inc(&sl->master->refcnt);
736 731
737 /* slave modules need to be loaded in a context with unlocked mutex */ 732 /* slave modules need to be loaded in a context with unlocked mutex */
738 mutex_unlock(&dev->mutex); 733 mutex_unlock(&dev->mutex);
@@ -772,23 +767,48 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
772 return 0; 767 return 0;
773} 768}
774 769
775void w1_slave_detach(struct w1_slave *sl) 770int w1_unref_slave(struct w1_slave *sl)
776{ 771{
777 struct w1_netlink_msg msg; 772 struct w1_master *dev = sl->master;
778 773 int refcnt;
779 dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl); 774 mutex_lock(&dev->list_mutex);
780 775 refcnt = atomic_sub_return(1, &sl->refcnt);
781 list_del(&sl->w1_slave_entry); 776 if (refcnt == 0) {
782 777 struct w1_netlink_msg msg;
783 memset(&msg, 0, sizeof(msg)); 778
784 memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id)); 779 dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__,
785 msg.type = W1_SLAVE_REMOVE; 780 sl->name, sl);
786 w1_netlink_send(sl->master, &msg); 781
787 782 list_del(&sl->w1_slave_entry);
788 device_unregister(&sl->dev); 783
784 memset(&msg, 0, sizeof(msg));
785 memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
786 msg.type = W1_SLAVE_REMOVE;
787 w1_netlink_send(sl->master, &msg);
788
789 device_unregister(&sl->dev);
790 #ifdef DEBUG
791 memset(sl, 0, sizeof(*sl));
792 #endif
793 kfree(sl);
794 }
795 atomic_dec(&dev->refcnt);
796 mutex_unlock(&dev->list_mutex);
797 return refcnt;
798}
789 799
790 wait_for_completion(&sl->released); 800int w1_slave_detach(struct w1_slave *sl)
791 kfree(sl); 801{
802 /* Only detach a slave once as it decreases the refcnt each time. */
803 int destroy_now;
804 mutex_lock(&sl->master->list_mutex);
805 destroy_now = !test_bit(W1_SLAVE_DETACH, &sl->flags);
806 set_bit(W1_SLAVE_DETACH, &sl->flags);
807 mutex_unlock(&sl->master->list_mutex);
808
809 if (destroy_now)
810 destroy_now = !w1_unref_slave(sl);
811 return destroy_now ? 0 : -EBUSY;
792} 812}
793 813
794struct w1_master *w1_search_master_id(u32 id) 814struct w1_master *w1_search_master_id(u32 id)
@@ -817,7 +837,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
817 837
818 mutex_lock(&w1_mlock); 838 mutex_lock(&w1_mlock);
819 list_for_each_entry(dev, &w1_masters, w1_master_entry) { 839 list_for_each_entry(dev, &w1_masters, w1_master_entry) {
820 mutex_lock(&dev->mutex); 840 mutex_lock(&dev->list_mutex);
821 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 841 list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
822 if (sl->reg_num.family == id->family && 842 if (sl->reg_num.family == id->family &&
823 sl->reg_num.id == id->id && 843 sl->reg_num.id == id->id &&
@@ -828,7 +848,7 @@ struct w1_slave *w1_search_slave(struct w1_reg_num *id)
828 break; 848 break;
829 } 849 }
830 } 850 }
831 mutex_unlock(&dev->mutex); 851 mutex_unlock(&dev->list_mutex);
832 852
833 if (found) 853 if (found)
834 break; 854 break;
@@ -848,6 +868,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
848 dev_dbg(&dev->dev, "Reconnecting slaves in device %s " 868 dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
849 "for family %02x.\n", dev->name, f->fid); 869 "for family %02x.\n", dev->name, f->fid);
850 mutex_lock(&dev->mutex); 870 mutex_lock(&dev->mutex);
871 mutex_lock(&dev->list_mutex);
851 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 872 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
852 /* If it is a new family, slaves with the default 873 /* If it is a new family, slaves with the default
853 * family driver and are that family will be 874 * family driver and are that family will be
@@ -859,14 +880,19 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
859 (!attach && sl->family->fid == f->fid)) { 880 (!attach && sl->family->fid == f->fid)) {
860 struct w1_reg_num rn; 881 struct w1_reg_num rn;
861 882
883 mutex_unlock(&dev->list_mutex);
862 memcpy(&rn, &sl->reg_num, sizeof(rn)); 884 memcpy(&rn, &sl->reg_num, sizeof(rn));
863 w1_slave_detach(sl); 885 /* If it was already in use let the automatic
864 886 * scan pick it up again later.
865 w1_attach_slave_device(dev, &rn); 887 */
888 if (!w1_slave_detach(sl))
889 w1_attach_slave_device(dev, &rn);
890 mutex_lock(&dev->list_mutex);
866 } 891 }
867 } 892 }
868 dev_dbg(&dev->dev, "Reconnecting slaves in device %s " 893 dev_dbg(&dev->dev, "Reconnecting slaves in device %s "
869 "has been finished.\n", dev->name); 894 "has been finished.\n", dev->name);
895 mutex_unlock(&dev->list_mutex);
870 mutex_unlock(&dev->mutex); 896 mutex_unlock(&dev->mutex);
871 } 897 }
872 mutex_unlock(&w1_mlock); 898 mutex_unlock(&w1_mlock);
@@ -1020,17 +1046,24 @@ void w1_search_process_cb(struct w1_master *dev, u8 search_type,
1020{ 1046{
1021 struct w1_slave *sl, *sln; 1047 struct w1_slave *sl, *sln;
1022 1048
1049 mutex_lock(&dev->list_mutex);
1023 list_for_each_entry(sl, &dev->slist, w1_slave_entry) 1050 list_for_each_entry(sl, &dev->slist, w1_slave_entry)
1024 clear_bit(W1_SLAVE_ACTIVE, &sl->flags); 1051 clear_bit(W1_SLAVE_ACTIVE, &sl->flags);
1052 mutex_unlock(&dev->list_mutex);
1025 1053
1026 w1_search_devices(dev, search_type, cb); 1054 w1_search_devices(dev, search_type, cb);
1027 1055
1056 mutex_lock(&dev->list_mutex);
1028 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 1057 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
1029 if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) 1058 if (!test_bit(W1_SLAVE_ACTIVE, &sl->flags) && !--sl->ttl) {
1059 mutex_unlock(&dev->list_mutex);
1030 w1_slave_detach(sl); 1060 w1_slave_detach(sl);
1061 mutex_lock(&dev->list_mutex);
1062 }
1031 else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags)) 1063 else if (test_bit(W1_SLAVE_ACTIVE, &sl->flags))
1032 sl->ttl = dev->slave_ttl; 1064 sl->ttl = dev->slave_ttl;
1033 } 1065 }
1066 mutex_unlock(&dev->list_mutex);
1034 1067
1035 if (dev->search_count > 0) 1068 if (dev->search_count > 0)
1036 dev->search_count--; 1069 dev->search_count--;
@@ -1041,6 +1074,26 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
1041 w1_search_process_cb(dev, search_type, w1_slave_found); 1074 w1_search_process_cb(dev, search_type, w1_slave_found);
1042} 1075}
1043 1076
1077int w1_process_callbacks(struct w1_master *dev)
1078{
1079 int ret = 0;
1080 struct w1_async_cmd *async_cmd, *async_n;
1081
1082 /* The list can be added to in another thread, loop until it is empty */
1083 while (!list_empty(&dev->async_list)) {
1084 list_for_each_entry_safe(async_cmd, async_n, &dev->async_list,
1085 async_entry) {
1086 /* drop the lock, if it is a search it can take a long
1087 * time */
1088 mutex_unlock(&dev->list_mutex);
1089 async_cmd->cb(dev, async_cmd);
1090 ret = 1;
1091 mutex_lock(&dev->list_mutex);
1092 }
1093 }
1094 return ret;
1095}
1096
1044int w1_process(void *data) 1097int w1_process(void *data)
1045{ 1098{
1046 struct w1_master *dev = (struct w1_master *) data; 1099 struct w1_master *dev = (struct w1_master *) data;
@@ -1048,23 +1101,46 @@ int w1_process(void *data)
1048 * time can be calculated in jiffies once. 1101 * time can be calculated in jiffies once.
1049 */ 1102 */
1050 const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000); 1103 const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000);
1104 /* remainder if it woke up early */
1105 unsigned long jremain = 0;
1051 1106
1052 while (!kthread_should_stop()) { 1107 for (;;) {
1053 if (dev->search_count) { 1108
1109 if (!jremain && dev->search_count) {
1054 mutex_lock(&dev->mutex); 1110 mutex_lock(&dev->mutex);
1055 w1_search_process(dev, W1_SEARCH); 1111 w1_search_process(dev, W1_SEARCH);
1056 mutex_unlock(&dev->mutex); 1112 mutex_unlock(&dev->mutex);
1057 } 1113 }
1058 1114
1115 mutex_lock(&dev->list_mutex);
1116 /* Note, w1_process_callback drops the lock while processing,
1117 * but locks it again before returning.
1118 */
1119 if (!w1_process_callbacks(dev) && jremain) {
1120 /* a wake up is either to stop the thread, process
1121 * callbacks, or search, it isn't process callbacks, so
1122 * schedule a search.
1123 */
1124 jremain = 1;
1125 }
1126
1059 try_to_freeze(); 1127 try_to_freeze();
1060 __set_current_state(TASK_INTERRUPTIBLE); 1128 __set_current_state(TASK_INTERRUPTIBLE);
1061 1129
1130 /* hold list_mutex until after interruptible to prevent loosing
1131 * the wakeup signal when async_cmd is added.
1132 */
1133 mutex_unlock(&dev->list_mutex);
1134
1062 if (kthread_should_stop()) 1135 if (kthread_should_stop())
1063 break; 1136 break;
1064 1137
1065 /* Only sleep when the search is active. */ 1138 /* Only sleep when the search is active. */
1066 if (dev->search_count) 1139 if (dev->search_count) {
1067 schedule_timeout(jtime); 1140 if (!jremain)
1141 jremain = jtime;
1142 jremain = schedule_timeout(jremain);
1143 }
1068 else 1144 else
1069 schedule(); 1145 schedule();
1070 } 1146 }
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 3376bfbb10f4..a096ef40119e 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -58,6 +58,7 @@ struct w1_reg_num
58#define W1_RESUME_CMD 0xA5 58#define W1_RESUME_CMD 0xA5
59 59
60#define W1_SLAVE_ACTIVE 0 60#define W1_SLAVE_ACTIVE 0
61#define W1_SLAVE_DETACH 1
61 62
62struct w1_slave 63struct w1_slave
63{ 64{
@@ -74,7 +75,6 @@ struct w1_slave
74 struct w1_family *family; 75 struct w1_family *family;
75 void *family_data; 76 void *family_data;
76 struct device dev; 77 struct device dev;
77 struct completion released;
78}; 78};
79 79
80typedef void (*w1_slave_found_callback)(struct w1_master *, u64); 80typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
@@ -171,7 +171,14 @@ struct w1_master
171 struct list_head w1_master_entry; 171 struct list_head w1_master_entry;
172 struct module *owner; 172 struct module *owner;
173 unsigned char name[W1_MAXNAMELEN]; 173 unsigned char name[W1_MAXNAMELEN];
174 /* list_mutex protects just slist and async_list so slaves can be
175 * searched for and async commands added while the master has
176 * w1_master.mutex locked and is operating on the bus.
177 * lock order w1_mlock, w1_master.mutex, w1_master_list_mutex
178 */
179 struct mutex list_mutex;
174 struct list_head slist; 180 struct list_head slist;
181 struct list_head async_list;
175 int max_slave_count, slave_count; 182 int max_slave_count, slave_count;
176 unsigned long attempts; 183 unsigned long attempts;
177 int slave_ttl; 184 int slave_ttl;
@@ -205,11 +212,29 @@ struct w1_master
205 u32 seq; 212 u32 seq;
206}; 213};
207 214
215/**
216 * struct w1_async_cmd - execute callback from the w1_process kthread
217 * @async_entry: link entry
218 * @cb: callback function, must list_del and destroy this list before
219 * returning
220 *
221 * When inserted into the w1_master async_list, w1_process will execute
222 * the callback. Embed this into the structure with the command details.
223 */
224struct w1_async_cmd {
225 struct list_head async_entry;
226 void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
227};
228
208int w1_create_master_attributes(struct w1_master *); 229int w1_create_master_attributes(struct w1_master *);
209void w1_destroy_master_attributes(struct w1_master *master); 230void w1_destroy_master_attributes(struct w1_master *master);
210void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 231void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
211void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 232void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
233/* call w1_unref_slave to release the reference counts w1_search_slave added */
212struct w1_slave *w1_search_slave(struct w1_reg_num *id); 234struct w1_slave *w1_search_slave(struct w1_reg_num *id);
235/* decrements the reference on sl->master and sl, and cleans up if zero
236 * returns the reference count after it has been decremented */
237int w1_unref_slave(struct w1_slave *sl);
213void w1_slave_found(struct w1_master *dev, u64 rn); 238void w1_slave_found(struct w1_master *dev, u64 rn);
214void w1_search_process_cb(struct w1_master *dev, u8 search_type, 239void w1_search_process_cb(struct w1_master *dev, u8 search_type,
215 w1_slave_found_callback cb); 240 w1_slave_found_callback cb);
@@ -224,7 +249,8 @@ struct w1_master *w1_search_master_id(u32 id);
224 */ 249 */
225void w1_reconnect_slaves(struct w1_family *f, int attach); 250void w1_reconnect_slaves(struct w1_family *f, int attach);
226int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn); 251int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
227void w1_slave_detach(struct w1_slave *sl); 252/* 0 success, otherwise EBUSY */
253int w1_slave_detach(struct w1_slave *sl);
228 254
229u8 w1_triplet(struct w1_master *dev, int bdir); 255u8 w1_triplet(struct w1_master *dev, int bdir);
230void w1_write_8(struct w1_master *, u8); 256void w1_write_8(struct w1_master *, u8);
@@ -260,6 +286,8 @@ extern int w1_max_slave_ttl;
260extern struct list_head w1_masters; 286extern struct list_head w1_masters;
261extern struct mutex w1_mlock; 287extern struct mutex w1_mlock;
262 288
289/* returns 1 if there were commands to executed 0 otherwise */
290extern int w1_process_callbacks(struct w1_master *dev);
263extern int w1_process(void *); 291extern int w1_process(void *);
264 292
265#endif /* __KERNEL__ */ 293#endif /* __KERNEL__ */
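The w1.h hunk above introduces struct w1_async_cmd and list_mutex. As a condensed illustration of the pattern the rest of the patch follows (w1_netlink.c queues its w1_cb_node this way), a hypothetical user of the hook would look roughly like the sketch below; my_w1_work, my_w1_queue_work and my_w1_execute are made-up names and not part of the patch.

/*
 * Illustrative only -- a hypothetical user of the new deferred-execution
 * hook, condensed from how w1_netlink.c queues work later in this patch.
 */
#include <linux/slab.h>
#include <linux/sched.h>
#include "w1.h"

struct my_w1_work {
	struct w1_async_cmd async;	/* embedded hook, cb set when queued */
	/* ...whatever the callback needs to act on... */
};

/* Runs in the w1_process kthread.  w1_process_callbacks() drops list_mutex
 * around this call, so the master mutex can be taken for bus traffic. */
static void my_w1_execute(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct my_w1_work *work = container_of(async_cmd, struct my_w1_work,
					       async);

	mutex_lock(&dev->mutex);
	/* ...perform the bus operation described by *work... */
	mutex_unlock(&dev->mutex);

	/* Per the w1_async_cmd comment, the callback unlinks its own entry. */
	mutex_lock(&dev->list_mutex);
	list_del(&async_cmd->async_entry);
	mutex_unlock(&dev->list_mutex);
	kfree(work);
}

/* Called from a context that must not block on the bus itself. */
static int my_w1_queue_work(struct w1_master *dev)
{
	struct my_w1_work *work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return -ENOMEM;
	work->async.cb = my_w1_execute;

	/* Hold list_mutex while waking the thread, as w1_cn_callback does,
	 * so w1_process() cannot miss the new entry. */
	mutex_lock(&dev->list_mutex);
	list_add_tail(&work->async.async_entry, &dev->async_list);
	wake_up_process(dev->thread);
	mutex_unlock(&dev->list_mutex);
	return 0;
}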
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 423f3c2b9e7d..66b2caae48f3 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -75,8 +75,10 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
75 atomic_set(&dev->refcnt, 2); 75 atomic_set(&dev->refcnt, 2);
76 76
77 INIT_LIST_HEAD(&dev->slist); 77 INIT_LIST_HEAD(&dev->slist);
78 INIT_LIST_HEAD(&dev->async_list);
78 mutex_init(&dev->mutex); 79 mutex_init(&dev->mutex);
79 mutex_init(&dev->bus_mutex); 80 mutex_init(&dev->bus_mutex);
81 mutex_init(&dev->list_mutex);
80 82
81 memcpy(&dev->dev, device, sizeof(struct device)); 83 memcpy(&dev->dev, device, sizeof(struct device));
82 dev_set_name(&dev->dev, "w1_bus_master%u", dev->id); 84 dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
@@ -188,17 +190,22 @@ void __w1_remove_master_device(struct w1_master *dev)
188 struct w1_netlink_msg msg; 190 struct w1_netlink_msg msg;
189 struct w1_slave *sl, *sln; 191 struct w1_slave *sl, *sln;
190 192
191 set_bit(W1_ABORT_SEARCH, &dev->flags);
192 kthread_stop(dev->thread);
193
194 mutex_lock(&w1_mlock); 193 mutex_lock(&w1_mlock);
195 list_del(&dev->w1_master_entry); 194 list_del(&dev->w1_master_entry);
196 mutex_unlock(&w1_mlock); 195 mutex_unlock(&w1_mlock);
197 196
197 set_bit(W1_ABORT_SEARCH, &dev->flags);
198 kthread_stop(dev->thread);
199
198 mutex_lock(&dev->mutex); 200 mutex_lock(&dev->mutex);
199 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) 201 mutex_lock(&dev->list_mutex);
202 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
203 mutex_unlock(&dev->list_mutex);
200 w1_slave_detach(sl); 204 w1_slave_detach(sl);
205 mutex_lock(&dev->list_mutex);
206 }
201 w1_destroy_master_attributes(dev); 207 w1_destroy_master_attributes(dev);
208 mutex_unlock(&dev->list_mutex);
202 mutex_unlock(&dev->mutex); 209 mutex_unlock(&dev->mutex);
203 atomic_dec(&dev->refcnt); 210 atomic_dec(&dev->refcnt);
204 211
@@ -208,7 +215,9 @@ void __w1_remove_master_device(struct w1_master *dev)
208 215
209 if (msleep_interruptible(1000)) 216 if (msleep_interruptible(1000))
210 flush_signals(current); 217 flush_signals(current);
218 w1_process_callbacks(dev);
211 } 219 }
220 w1_process_callbacks(dev);
212 221
213 memset(&msg, 0, sizeof(msg)); 222 memset(&msg, 0, sizeof(msg));
214 msg.id.mst.id = dev->id; 223 msg.id.mst.id = dev->id;
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 747174be7b50..06d614af1166 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -119,10 +119,12 @@ static int w1_get_slaves(struct w1_master *dev,
119 119
120 if (req_cmd->cmd == W1_CMD_LIST_SLAVES) { 120 if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
121 __u64 rn; 121 __u64 rn;
122 mutex_lock(&dev->list_mutex);
122 list_for_each_entry(sl, &dev->slist, w1_slave_entry) { 123 list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
123 memcpy(&rn, &sl->reg_num, sizeof(rn)); 124 memcpy(&rn, &sl->reg_num, sizeof(rn));
124 w1_send_slave(dev, rn); 125 w1_send_slave(dev, rn);
125 } 126 }
127 mutex_unlock(&dev->list_mutex);
126 } else { 128 } else {
127 w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ? 129 w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ?
128 W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave); 130 W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
@@ -368,29 +370,134 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
368 return error; 370 return error;
369} 371}
370 372
373/* Bundle together a reference count, the full message, and broken out
374 * commands to be executed on each w1 master kthread in one memory allocation.
375 */
376struct w1_cb_block {
377 atomic_t refcnt;
378 struct cn_msg msg;
379 /* cn_msg data */
380 /* one or more variable length struct w1_cb_node */
381};
382struct w1_cb_node {
383 struct w1_async_cmd async;
384 /* pointers within w1_cb_block and msg data */
385 struct w1_cb_block *block;
386 struct w1_netlink_msg *m;
387 struct w1_slave *sl;
388 struct w1_master *dev;
389};
390
391static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
392{
393 struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
394 async);
395 u16 mlen = node->m->len;
396 u8 *cmd_data = node->m->data;
397 int err = 0;
398 struct w1_slave *sl = node->sl;
399 struct w1_netlink_cmd *cmd = NULL;
400
401 mutex_lock(&dev->mutex);
402 if (sl && w1_reset_select_slave(sl))
403 err = -ENODEV;
404
405 while (mlen && !err) {
406 cmd = (struct w1_netlink_cmd *)cmd_data;
407
408 if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
409 err = -E2BIG;
410 break;
411 }
412
413 if (sl)
414 err = w1_process_command_slave(sl, &node->block->msg,
415 node->m, cmd);
416 else
417 err = w1_process_command_master(dev, &node->block->msg,
418 node->m, cmd);
419
420 w1_netlink_send_error(&node->block->msg, node->m, cmd, err);
421 err = 0;
422
423 cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
424 mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
425 }
426
427 if (!cmd || err)
428 w1_netlink_send_error(&node->block->msg, node->m, cmd, err);
429
430 if (sl)
431 w1_unref_slave(sl);
432 else
433 atomic_dec(&dev->refcnt);
434 mutex_unlock(&dev->mutex);
435
436 mutex_lock(&dev->list_mutex);
437 list_del(&async_cmd->async_entry);
438 mutex_unlock(&dev->list_mutex);
439
440 if (atomic_sub_return(1, &node->block->refcnt) == 0)
441 kfree(node->block);
442}
443
371static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) 444static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
372{ 445{
373 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); 446 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
374 struct w1_netlink_cmd *cmd;
375 struct w1_slave *sl; 447 struct w1_slave *sl;
376 struct w1_master *dev; 448 struct w1_master *dev;
449 u16 msg_len;
377 int err = 0; 450 int err = 0;
451 struct w1_cb_block *block = NULL;
452 struct w1_cb_node *node = NULL;
453 int node_count = 0;
454
455 /* Count the number of master or slave commands there are to allocate
456 * space for one cb_node each.
457 */
458 msg_len = msg->len;
459 while (msg_len && !err) {
460 if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
461 err = -E2BIG;
462 break;
463 }
464
465 if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD)
466 ++node_count;
467
468 msg_len -= sizeof(struct w1_netlink_msg) + m->len;
469 m = (struct w1_netlink_msg *)(((u8 *)m) +
470 sizeof(struct w1_netlink_msg) + m->len);
471 }
472 m = (struct w1_netlink_msg *)(msg + 1);
473 if (node_count) {
474 /* msg->len doesn't include itself */
475 long size = sizeof(struct w1_cb_block) + msg->len +
476 node_count*sizeof(struct w1_cb_node);
477 block = kmalloc(size, GFP_KERNEL);
478 if (!block) {
479 w1_netlink_send_error(msg, m, NULL, -ENOMEM);
480 return;
481 }
482 atomic_set(&block->refcnt, 1);
483 memcpy(&block->msg, msg, sizeof(*msg) + msg->len);
484 node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len);
485 }
378 486
379 while (msg->len && !err) { 487 msg_len = msg->len;
488 while (msg_len && !err) {
380 struct w1_reg_num id; 489 struct w1_reg_num id;
381 u16 mlen = m->len; 490 u16 mlen = m->len;
382 u8 *cmd_data = m->data;
383 491
384 dev = NULL; 492 dev = NULL;
385 sl = NULL; 493 sl = NULL;
386 cmd = NULL;
387 494
388 memcpy(&id, m->id.id, sizeof(id)); 495 memcpy(&id, m->id.id, sizeof(id));
389#if 0 496#if 0
390 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n", 497 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n",
391 __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len); 498 __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len);
392#endif 499#endif
393 if (m->len + sizeof(struct w1_netlink_msg) > msg->len) { 500 if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
394 err = -E2BIG; 501 err = -E2BIG;
395 break; 502 break;
396 } 503 }
@@ -415,41 +522,24 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
415 if (!mlen) 522 if (!mlen)
416 goto out_cont; 523 goto out_cont;
417 524
418 mutex_lock(&dev->mutex); 525 atomic_inc(&block->refcnt);
526 node->async.cb = w1_process_cb;
527 node->block = block;
528 node->m = (struct w1_netlink_msg *)((u8 *)&block->msg +
529 (size_t)((u8 *)m - (u8 *)msg));
530 node->sl = sl;
531 node->dev = dev;
419 532
420 if (sl && w1_reset_select_slave(sl)) { 533 mutex_lock(&dev->list_mutex);
421 err = -ENODEV; 534 list_add_tail(&node->async.async_entry, &dev->async_list);
422 goto out_up; 535 wake_up_process(dev->thread);
423 } 536 mutex_unlock(&dev->list_mutex);
424 537 ++node;
425 while (mlen) {
426 cmd = (struct w1_netlink_cmd *)cmd_data;
427
428 if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
429 err = -E2BIG;
430 break;
431 }
432
433 if (sl)
434 err = w1_process_command_slave(sl, msg, m, cmd);
435 else
436 err = w1_process_command_master(dev, msg, m, cmd);
437 538
438 w1_netlink_send_error(msg, m, cmd, err);
439 err = 0;
440
441 cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
442 mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
443 }
444out_up:
445 atomic_dec(&dev->refcnt);
446 if (sl)
447 atomic_dec(&sl->refcnt);
448 mutex_unlock(&dev->mutex);
449out_cont: 539out_cont:
450 if (!cmd || err) 540 if (err)
451 w1_netlink_send_error(msg, m, cmd, err); 541 w1_netlink_send_error(msg, m, NULL, err);
452 msg->len -= sizeof(struct w1_netlink_msg) + m->len; 542 msg_len -= sizeof(struct w1_netlink_msg) + m->len;
453 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); 543 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
454 544
455 /* 545 /*
@@ -458,6 +548,8 @@ out_cont:
458 if (err == -ENODEV) 548 if (err == -ENODEV)
459 err = 0; 549 err = 0;
460 } 550 }
551 if (block && atomic_sub_return(1, &block->refcnt) == 0)
552 kfree(block);
461} 553}
462 554
463int w1_init_netlink(void) 555int w1_init_netlink(void)