-rw-r--r--  drivers/base/core.c                18
-rw-r--r--  drivers/base/devres.c              42
-rw-r--r--  drivers/base/firmware_class.c     761
-rw-r--r--  drivers/base/platform.c            38
-rw-r--r--  drivers/base/power/main.c          22
-rw-r--r--  drivers/extcon/Makefile             4
-rw-r--r--  drivers/extcon/extcon-arizona.c    72
-rw-r--r--  drivers/extcon/extcon-class.c (renamed from drivers/extcon/extcon_class.c)   4
-rw-r--r--  drivers/extcon/extcon-gpio.c (renamed from drivers/extcon/extcon_gpio.c)     0
-rw-r--r--  drivers/hv/hv.c                    34
-rw-r--r--  drivers/hv/hv_kvp.c               251
-rw-r--r--  drivers/hv/hv_util.c                4
-rw-r--r--  drivers/hv/hyperv_vmbus.h          47
-rw-r--r--  drivers/hv/vmbus_drv.c             63
-rw-r--r--  fs/debugfs/inode.c                  6
-rw-r--r--  include/linux/device.h              4
-rw-r--r--  include/linux/firmware.h           15
-rw-r--r--  include/linux/hyperv.h             99
-rw-r--r--  include/linux/platform_device.h     4
-rw-r--r--  include/linux/pm.h                  5
-rw-r--r--  tools/hv/hv_kvp_daemon.c          337
21 files changed, 1508 insertions, 322 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5e6e00bc1652..91478bd35418 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -184,6 +184,17 @@ static void device_release(struct kobject *kobj)
184 struct device *dev = kobj_to_dev(kobj); 184 struct device *dev = kobj_to_dev(kobj);
185 struct device_private *p = dev->p; 185 struct device_private *p = dev->p;
186 186
187 /*
188 * Some platform devices are driven without driver attached
189 * and managed resources may have been acquired. Make sure
190 * all resources are released.
191 *
 192 * Drivers can still add resources to the device after it has
 193 * been deleted but is still alive, so release devres here to
 194 * avoid a possible memory leak.
195 */
196 devres_release_all(dev);
197
187 if (dev->release) 198 if (dev->release)
188 dev->release(dev); 199 dev->release(dev);
189 else if (dev->type && dev->type->release) 200 else if (dev->type && dev->type->release)
@@ -1196,13 +1207,6 @@ void device_del(struct device *dev)
1196 bus_remove_device(dev); 1207 bus_remove_device(dev);
1197 driver_deferred_probe_del(dev); 1208 driver_deferred_probe_del(dev);
1198 1209
1199 /*
1200 * Some platform devices are driven without driver attached
1201 * and managed resources may have been acquired. Make sure
1202 * all resources are released.
1203 */
1204 devres_release_all(dev);
1205
1206 /* Notify the platform of the removal, in case they 1210 /* Notify the platform of the removal, in case they
1207 * need to do anything... 1211 * need to do anything...
1208 */ 1212 */
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 2360adb7a58f..8731979d668a 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -144,6 +144,48 @@ EXPORT_SYMBOL_GPL(devres_alloc);
144#endif 144#endif
145 145
146/** 146/**
147 * devres_for_each_res - Resource iterator
148 * @dev: Device to iterate resource from
149 * @release: Look for resources associated with this release function
150 * @match: Match function (optional)
151 * @match_data: Data for the match function
152 * @fn: Function to be called for each matched resource.
153 * @data: Data for @fn, the 3rd parameter of @fn
154 *
155 * Call @fn for each devres of @dev which is associated with @release
156 * and for which @match returns 1.
157 *
158 * RETURNS:
159 * void
160 */
161void devres_for_each_res(struct device *dev, dr_release_t release,
162 dr_match_t match, void *match_data,
163 void (*fn)(struct device *, void *, void *),
164 void *data)
165{
166 struct devres_node *node;
167 struct devres_node *tmp;
168 unsigned long flags;
169
170 if (!fn)
171 return;
172
173 spin_lock_irqsave(&dev->devres_lock, flags);
174 list_for_each_entry_safe_reverse(node, tmp,
175 &dev->devres_head, entry) {
176 struct devres *dr = container_of(node, struct devres, node);
177
178 if (node->release != release)
179 continue;
180 if (match && !match(dev, dr->data, match_data))
181 continue;
182 fn(dev, dr->data, data);
183 }
184 spin_unlock_irqrestore(&dev->devres_lock, flags);
185}
186EXPORT_SYMBOL_GPL(devres_for_each_res);
187
188/**
147 * devres_free - Free device resource data 189 * devres_free - Free device resource data
148 * @res: Pointer to devres data to free 190 * @res: Pointer to devres data to free
149 * 191 *
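
For orientation, the new iterator is consumed later in this series by the firmware caching code (dev_cache_fw_image). A minimal, illustrative caller might look like the sketch below; my_release, count_res and count_my_resources are hypothetical names and are not part of the patch. Note that @fn runs with dev->devres_lock held, so it must not sleep.

#include <linux/device.h>

/* release callback used when the entries were originally added */
static void my_release(struct device *dev, void *res)
{
        /* the devres core frees the block itself; nothing extra to do */
}

/* called once per matched entry, under dev->devres_lock */
static void count_res(struct device *dev, void *res, void *data)
{
        (*(int *)data)++;
}

static int count_my_resources(struct device *dev)
{
        int n = 0;

        /* no match function, so every entry added with my_release is visited */
        devres_for_each_res(dev, my_release, NULL, NULL, count_res, &n);
        return n;
}
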
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 803cfc1597a9..ed0510a912c8 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -21,6 +21,12 @@
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/list.h>
25#include <linux/async.h>
26#include <linux/pm.h>
27#include <linux/suspend.h>
28
29#include "base.h"
24 30
25MODULE_AUTHOR("Manuel Estrada Sainz"); 31MODULE_AUTHOR("Manuel Estrada Sainz");
26MODULE_DESCRIPTION("Multi purpose firmware loading support"); 32MODULE_DESCRIPTION("Multi purpose firmware loading support");
@@ -85,23 +91,160 @@ static inline long firmware_loading_timeout(void)
85 return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT; 91 return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
86} 92}
87 93
88/* fw_lock could be moved to 'struct firmware_priv' but since it is just 94struct firmware_cache {
89 * guarding for corner cases a global lock should be OK */ 95 /* firmware_buf instance will be added into the below list */
90static DEFINE_MUTEX(fw_lock); 96 spinlock_t lock;
97 struct list_head head;
98
99 /*
 100 * Names of firmware images which have been cached successfully
 101 * will be added into the list below so that the device uncache
 102 * helper can track which firmware images have been cached
 103 * before.
104 */
105 spinlock_t name_lock;
106 struct list_head fw_names;
107
108 wait_queue_head_t wait_queue;
109 int cnt;
110 struct delayed_work work;
111
112 struct notifier_block pm_notify;
113};
91 114
92struct firmware_priv { 115struct firmware_buf {
116 struct kref ref;
117 struct list_head list;
93 struct completion completion; 118 struct completion completion;
94 struct firmware *fw; 119 struct firmware_cache *fwc;
95 unsigned long status; 120 unsigned long status;
121 void *data;
122 size_t size;
96 struct page **pages; 123 struct page **pages;
97 int nr_pages; 124 int nr_pages;
98 int page_array_size; 125 int page_array_size;
126 char fw_id[];
127};
128
129struct fw_cache_entry {
130 struct list_head list;
131 char name[];
132};
133
134struct firmware_priv {
99 struct timer_list timeout; 135 struct timer_list timeout;
100 struct device dev;
101 bool nowait; 136 bool nowait;
102 char fw_id[]; 137 struct device dev;
138 struct firmware_buf *buf;
139 struct firmware *fw;
140};
141
142struct fw_name_devm {
143 unsigned long magic;
144 char name[];
103}; 145};
104 146
147#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
148
149/* fw_lock could be moved to 'struct firmware_priv' but since it is just
150 * guarding for corner cases a global lock should be OK */
151static DEFINE_MUTEX(fw_lock);
152
153static struct firmware_cache fw_cache;
154
155static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
156 struct firmware_cache *fwc)
157{
158 struct firmware_buf *buf;
159
160 buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1 , GFP_ATOMIC);
161
162 if (!buf)
163 return buf;
164
165 kref_init(&buf->ref);
166 strcpy(buf->fw_id, fw_name);
167 buf->fwc = fwc;
168 init_completion(&buf->completion);
169
170 pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
171
172 return buf;
173}
174
175static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
176{
177 struct firmware_buf *tmp;
178 struct firmware_cache *fwc = &fw_cache;
179
180 list_for_each_entry(tmp, &fwc->head, list)
181 if (!strcmp(tmp->fw_id, fw_name))
182 return tmp;
183 return NULL;
184}
185
186static int fw_lookup_and_allocate_buf(const char *fw_name,
187 struct firmware_cache *fwc,
188 struct firmware_buf **buf)
189{
190 struct firmware_buf *tmp;
191
192 spin_lock(&fwc->lock);
193 tmp = __fw_lookup_buf(fw_name);
194 if (tmp) {
195 kref_get(&tmp->ref);
196 spin_unlock(&fwc->lock);
197 *buf = tmp;
198 return 1;
199 }
200 tmp = __allocate_fw_buf(fw_name, fwc);
201 if (tmp)
202 list_add(&tmp->list, &fwc->head);
203 spin_unlock(&fwc->lock);
204
205 *buf = tmp;
206
207 return tmp ? 0 : -ENOMEM;
208}
209
210static struct firmware_buf *fw_lookup_buf(const char *fw_name)
211{
212 struct firmware_buf *tmp;
213 struct firmware_cache *fwc = &fw_cache;
214
215 spin_lock(&fwc->lock);
216 tmp = __fw_lookup_buf(fw_name);
217 spin_unlock(&fwc->lock);
218
219 return tmp;
220}
221
222static void __fw_free_buf(struct kref *ref)
223{
224 struct firmware_buf *buf = to_fwbuf(ref);
225 struct firmware_cache *fwc = buf->fwc;
226 int i;
227
228 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
229 __func__, buf->fw_id, buf, buf->data,
230 (unsigned int)buf->size);
231
232 spin_lock(&fwc->lock);
233 list_del(&buf->list);
234 spin_unlock(&fwc->lock);
235
236 vunmap(buf->data);
237 for (i = 0; i < buf->nr_pages; i++)
238 __free_page(buf->pages[i]);
239 kfree(buf->pages);
240 kfree(buf);
241}
242
243static void fw_free_buf(struct firmware_buf *buf)
244{
245 kref_put(&buf->ref, __fw_free_buf);
246}
247
105static struct firmware_priv *to_firmware_priv(struct device *dev) 248static struct firmware_priv *to_firmware_priv(struct device *dev)
106{ 249{
107 return container_of(dev, struct firmware_priv, dev); 250 return container_of(dev, struct firmware_priv, dev);
@@ -109,9 +252,10 @@ static struct firmware_priv *to_firmware_priv(struct device *dev)
109 252
110static void fw_load_abort(struct firmware_priv *fw_priv) 253static void fw_load_abort(struct firmware_priv *fw_priv)
111{ 254{
112 set_bit(FW_STATUS_ABORT, &fw_priv->status); 255 struct firmware_buf *buf = fw_priv->buf;
113 wmb(); 256
114 complete(&fw_priv->completion); 257 set_bit(FW_STATUS_ABORT, &buf->status);
258 complete_all(&buf->completion);
115} 259}
116 260
117static ssize_t firmware_timeout_show(struct class *class, 261static ssize_t firmware_timeout_show(struct class *class,
@@ -154,11 +298,7 @@ static struct class_attribute firmware_class_attrs[] = {
154static void fw_dev_release(struct device *dev) 298static void fw_dev_release(struct device *dev)
155{ 299{
156 struct firmware_priv *fw_priv = to_firmware_priv(dev); 300 struct firmware_priv *fw_priv = to_firmware_priv(dev);
157 int i;
158 301
159 for (i = 0; i < fw_priv->nr_pages; i++)
160 __free_page(fw_priv->pages[i]);
161 kfree(fw_priv->pages);
162 kfree(fw_priv); 302 kfree(fw_priv);
163 303
164 module_put(THIS_MODULE); 304 module_put(THIS_MODULE);
@@ -168,7 +308,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
168{ 308{
169 struct firmware_priv *fw_priv = to_firmware_priv(dev); 309 struct firmware_priv *fw_priv = to_firmware_priv(dev);
170 310
171 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id)) 311 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
172 return -ENOMEM; 312 return -ENOMEM;
173 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) 313 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
174 return -ENOMEM; 314 return -ENOMEM;
@@ -189,20 +329,16 @@ static ssize_t firmware_loading_show(struct device *dev,
189 struct device_attribute *attr, char *buf) 329 struct device_attribute *attr, char *buf)
190{ 330{
191 struct firmware_priv *fw_priv = to_firmware_priv(dev); 331 struct firmware_priv *fw_priv = to_firmware_priv(dev);
192 int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status); 332 int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
193 333
194 return sprintf(buf, "%d\n", loading); 334 return sprintf(buf, "%d\n", loading);
195} 335}
196 336
337/* firmware holds the ownership of pages */
197static void firmware_free_data(const struct firmware *fw) 338static void firmware_free_data(const struct firmware *fw)
198{ 339{
199 int i; 340 WARN_ON(!fw->priv);
200 vunmap(fw->data); 341 fw_free_buf(fw->priv);
201 if (fw->pages) {
202 for (i = 0; i < PFN_UP(fw->size); i++)
203 __free_page(fw->pages[i]);
204 kfree(fw->pages);
205 }
206} 342}
207 343
208/* Some architectures don't have PAGE_KERNEL_RO */ 344/* Some architectures don't have PAGE_KERNEL_RO */
@@ -227,45 +363,33 @@ static ssize_t firmware_loading_store(struct device *dev,
227 const char *buf, size_t count) 363 const char *buf, size_t count)
228{ 364{
229 struct firmware_priv *fw_priv = to_firmware_priv(dev); 365 struct firmware_priv *fw_priv = to_firmware_priv(dev);
366 struct firmware_buf *fw_buf = fw_priv->buf;
230 int loading = simple_strtol(buf, NULL, 10); 367 int loading = simple_strtol(buf, NULL, 10);
231 int i; 368 int i;
232 369
233 mutex_lock(&fw_lock); 370 mutex_lock(&fw_lock);
234 371
235 if (!fw_priv->fw) 372 if (!fw_buf)
236 goto out; 373 goto out;
237 374
238 switch (loading) { 375 switch (loading) {
239 case 1: 376 case 1:
240 firmware_free_data(fw_priv->fw); 377 /* discarding any previous partial load */
241 memset(fw_priv->fw, 0, sizeof(struct firmware)); 378 if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
242 /* If the pages are not owned by 'struct firmware' */ 379 for (i = 0; i < fw_buf->nr_pages; i++)
243 for (i = 0; i < fw_priv->nr_pages; i++) 380 __free_page(fw_buf->pages[i]);
244 __free_page(fw_priv->pages[i]); 381 kfree(fw_buf->pages);
245 kfree(fw_priv->pages); 382 fw_buf->pages = NULL;
246 fw_priv->pages = NULL; 383 fw_buf->page_array_size = 0;
247 fw_priv->page_array_size = 0; 384 fw_buf->nr_pages = 0;
248 fw_priv->nr_pages = 0; 385 set_bit(FW_STATUS_LOADING, &fw_buf->status);
249 set_bit(FW_STATUS_LOADING, &fw_priv->status); 386 }
250 break; 387 break;
251 case 0: 388 case 0:
252 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { 389 if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
253 vunmap(fw_priv->fw->data); 390 set_bit(FW_STATUS_DONE, &fw_buf->status);
254 fw_priv->fw->data = vmap(fw_priv->pages, 391 clear_bit(FW_STATUS_LOADING, &fw_buf->status);
255 fw_priv->nr_pages, 392 complete_all(&fw_buf->completion);
256 0, PAGE_KERNEL_RO);
257 if (!fw_priv->fw->data) {
258 dev_err(dev, "%s: vmap() failed\n", __func__);
259 goto err;
260 }
261 /* Pages are now owned by 'struct firmware' */
262 fw_priv->fw->pages = fw_priv->pages;
263 fw_priv->pages = NULL;
264
265 fw_priv->page_array_size = 0;
266 fw_priv->nr_pages = 0;
267 complete(&fw_priv->completion);
268 clear_bit(FW_STATUS_LOADING, &fw_priv->status);
269 break; 393 break;
270 } 394 }
271 /* fallthrough */ 395 /* fallthrough */
@@ -273,7 +397,6 @@ static ssize_t firmware_loading_store(struct device *dev,
273 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); 397 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
274 /* fallthrough */ 398 /* fallthrough */
275 case -1: 399 case -1:
276 err:
277 fw_load_abort(fw_priv); 400 fw_load_abort(fw_priv);
278 break; 401 break;
279 } 402 }
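
The loading attribute above implements the usual three-step firmware handshake: user space writes 1 to start (discarding any partial load), streams the image into the data attribute, then writes 0 to commit (or -1 to abort). A hedged user-space sketch follows; the sysfs directory passed in is an assumption here, in practice the udev helper derives it from $DEVPATH in the firmware uevent.

#include <stdio.h>

static int upload_firmware(const char *sysdir, const char *image, long len)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/loading", sysdir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs("1\n", f);                /* begin: kernel drops any partial load */
        fclose(f);

        snprintf(path, sizeof(path), "%s/data", sysdir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fwrite(image, 1, len, f);       /* stream the image into the page buffer */
        fclose(f);

        snprintf(path, sizeof(path), "%s/loading", sysdir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs("0\n", f);                /* commit: FW_STATUS_DONE, waiters complete */
        fclose(f);
        return 0;
}
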
@@ -290,21 +413,21 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
290{ 413{
291 struct device *dev = kobj_to_dev(kobj); 414 struct device *dev = kobj_to_dev(kobj);
292 struct firmware_priv *fw_priv = to_firmware_priv(dev); 415 struct firmware_priv *fw_priv = to_firmware_priv(dev);
293 struct firmware *fw; 416 struct firmware_buf *buf;
294 ssize_t ret_count; 417 ssize_t ret_count;
295 418
296 mutex_lock(&fw_lock); 419 mutex_lock(&fw_lock);
297 fw = fw_priv->fw; 420 buf = fw_priv->buf;
298 if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { 421 if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
299 ret_count = -ENODEV; 422 ret_count = -ENODEV;
300 goto out; 423 goto out;
301 } 424 }
302 if (offset > fw->size) { 425 if (offset > buf->size) {
303 ret_count = 0; 426 ret_count = 0;
304 goto out; 427 goto out;
305 } 428 }
306 if (count > fw->size - offset) 429 if (count > buf->size - offset)
307 count = fw->size - offset; 430 count = buf->size - offset;
308 431
309 ret_count = count; 432 ret_count = count;
310 433
@@ -314,11 +437,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
314 int page_ofs = offset & (PAGE_SIZE-1); 437 int page_ofs = offset & (PAGE_SIZE-1);
315 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); 438 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
316 439
317 page_data = kmap(fw_priv->pages[page_nr]); 440 page_data = kmap(buf->pages[page_nr]);
318 441
319 memcpy(buffer, page_data + page_ofs, page_cnt); 442 memcpy(buffer, page_data + page_ofs, page_cnt);
320 443
321 kunmap(fw_priv->pages[page_nr]); 444 kunmap(buf->pages[page_nr]);
322 buffer += page_cnt; 445 buffer += page_cnt;
323 offset += page_cnt; 446 offset += page_cnt;
324 count -= page_cnt; 447 count -= page_cnt;
@@ -330,12 +453,13 @@ out:
330 453
331static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) 454static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
332{ 455{
456 struct firmware_buf *buf = fw_priv->buf;
333 int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; 457 int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
334 458
335 /* If the array of pages is too small, grow it... */ 459 /* If the array of pages is too small, grow it... */
336 if (fw_priv->page_array_size < pages_needed) { 460 if (buf->page_array_size < pages_needed) {
337 int new_array_size = max(pages_needed, 461 int new_array_size = max(pages_needed,
338 fw_priv->page_array_size * 2); 462 buf->page_array_size * 2);
339 struct page **new_pages; 463 struct page **new_pages;
340 464
341 new_pages = kmalloc(new_array_size * sizeof(void *), 465 new_pages = kmalloc(new_array_size * sizeof(void *),
@@ -344,24 +468,24 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
344 fw_load_abort(fw_priv); 468 fw_load_abort(fw_priv);
345 return -ENOMEM; 469 return -ENOMEM;
346 } 470 }
347 memcpy(new_pages, fw_priv->pages, 471 memcpy(new_pages, buf->pages,
348 fw_priv->page_array_size * sizeof(void *)); 472 buf->page_array_size * sizeof(void *));
349 memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * 473 memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
350 (new_array_size - fw_priv->page_array_size)); 474 (new_array_size - buf->page_array_size));
351 kfree(fw_priv->pages); 475 kfree(buf->pages);
352 fw_priv->pages = new_pages; 476 buf->pages = new_pages;
353 fw_priv->page_array_size = new_array_size; 477 buf->page_array_size = new_array_size;
354 } 478 }
355 479
356 while (fw_priv->nr_pages < pages_needed) { 480 while (buf->nr_pages < pages_needed) {
357 fw_priv->pages[fw_priv->nr_pages] = 481 buf->pages[buf->nr_pages] =
358 alloc_page(GFP_KERNEL | __GFP_HIGHMEM); 482 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
359 483
360 if (!fw_priv->pages[fw_priv->nr_pages]) { 484 if (!buf->pages[buf->nr_pages]) {
361 fw_load_abort(fw_priv); 485 fw_load_abort(fw_priv);
362 return -ENOMEM; 486 return -ENOMEM;
363 } 487 }
364 fw_priv->nr_pages++; 488 buf->nr_pages++;
365 } 489 }
366 return 0; 490 return 0;
367} 491}
@@ -384,18 +508,19 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
384{ 508{
385 struct device *dev = kobj_to_dev(kobj); 509 struct device *dev = kobj_to_dev(kobj);
386 struct firmware_priv *fw_priv = to_firmware_priv(dev); 510 struct firmware_priv *fw_priv = to_firmware_priv(dev);
387 struct firmware *fw; 511 struct firmware_buf *buf;
388 ssize_t retval; 512 ssize_t retval;
389 513
390 if (!capable(CAP_SYS_RAWIO)) 514 if (!capable(CAP_SYS_RAWIO))
391 return -EPERM; 515 return -EPERM;
392 516
393 mutex_lock(&fw_lock); 517 mutex_lock(&fw_lock);
394 fw = fw_priv->fw; 518 buf = fw_priv->buf;
395 if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { 519 if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
396 retval = -ENODEV; 520 retval = -ENODEV;
397 goto out; 521 goto out;
398 } 522 }
523
399 retval = fw_realloc_buffer(fw_priv, offset + count); 524 retval = fw_realloc_buffer(fw_priv, offset + count);
400 if (retval) 525 if (retval)
401 goto out; 526 goto out;
@@ -408,17 +533,17 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
408 int page_ofs = offset & (PAGE_SIZE - 1); 533 int page_ofs = offset & (PAGE_SIZE - 1);
409 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); 534 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
410 535
411 page_data = kmap(fw_priv->pages[page_nr]); 536 page_data = kmap(buf->pages[page_nr]);
412 537
413 memcpy(page_data + page_ofs, buffer, page_cnt); 538 memcpy(page_data + page_ofs, buffer, page_cnt);
414 539
415 kunmap(fw_priv->pages[page_nr]); 540 kunmap(buf->pages[page_nr]);
416 buffer += page_cnt; 541 buffer += page_cnt;
417 offset += page_cnt; 542 offset += page_cnt;
418 count -= page_cnt; 543 count -= page_cnt;
419 } 544 }
420 545
421 fw->size = max_t(size_t, offset, fw->size); 546 buf->size = max_t(size_t, offset, buf->size);
422out: 547out:
423 mutex_unlock(&fw_lock); 548 mutex_unlock(&fw_lock);
424 return retval; 549 return retval;
@@ -445,35 +570,113 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
445 struct firmware_priv *fw_priv; 570 struct firmware_priv *fw_priv;
446 struct device *f_dev; 571 struct device *f_dev;
447 572
448 fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL); 573 fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
449 if (!fw_priv) { 574 if (!fw_priv) {
450 dev_err(device, "%s: kmalloc failed\n", __func__); 575 dev_err(device, "%s: kmalloc failed\n", __func__);
451 return ERR_PTR(-ENOMEM); 576 fw_priv = ERR_PTR(-ENOMEM);
577 goto exit;
452 } 578 }
453 579
454 fw_priv->fw = firmware;
455 fw_priv->nowait = nowait; 580 fw_priv->nowait = nowait;
456 strcpy(fw_priv->fw_id, fw_name); 581 fw_priv->fw = firmware;
457 init_completion(&fw_priv->completion);
458 setup_timer(&fw_priv->timeout, 582 setup_timer(&fw_priv->timeout,
459 firmware_class_timeout, (u_long) fw_priv); 583 firmware_class_timeout, (u_long) fw_priv);
460 584
461 f_dev = &fw_priv->dev; 585 f_dev = &fw_priv->dev;
462 586
463 device_initialize(f_dev); 587 device_initialize(f_dev);
464 dev_set_name(f_dev, "%s", dev_name(device)); 588 dev_set_name(f_dev, "%s", fw_name);
465 f_dev->parent = device; 589 f_dev->parent = device;
466 f_dev->class = &firmware_class; 590 f_dev->class = &firmware_class;
467 591exit:
468 return fw_priv; 592 return fw_priv;
469} 593}
470 594
 595/* a pages buffer is mapped/unmapped only once */
596static int fw_map_pages_buf(struct firmware_buf *buf)
597{
598 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
599 if (!buf->data)
600 return -ENOMEM;
601 return 0;
602}
603
 604/* store the pages buffer info into firmware from buf */
605static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
606{
607 fw->priv = buf;
608 fw->pages = buf->pages;
609 fw->size = buf->size;
610 fw->data = buf->data;
611
612 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
613 __func__, buf->fw_id, buf, buf->data,
614 (unsigned int)buf->size);
615}
616
617static void fw_name_devm_release(struct device *dev, void *res)
618{
619 struct fw_name_devm *fwn = res;
620
621 if (fwn->magic == (unsigned long)&fw_cache)
622 pr_debug("%s: fw_name-%s devm-%p released\n",
623 __func__, fwn->name, res);
624}
625
626static int fw_devm_match(struct device *dev, void *res,
627 void *match_data)
628{
629 struct fw_name_devm *fwn = res;
630
631 return (fwn->magic == (unsigned long)&fw_cache) &&
632 !strcmp(fwn->name, match_data);
633}
634
635static struct fw_name_devm *fw_find_devm_name(struct device *dev,
636 const char *name)
637{
638 struct fw_name_devm *fwn;
639
640 fwn = devres_find(dev, fw_name_devm_release,
641 fw_devm_match, (void *)name);
642 return fwn;
643}
644
645/* add firmware name into devres list */
646static int fw_add_devm_name(struct device *dev, const char *name)
647{
648 struct fw_name_devm *fwn;
649
650 fwn = fw_find_devm_name(dev, name);
651 if (fwn)
652 return 1;
653
654 fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
655 strlen(name) + 1, GFP_KERNEL);
656 if (!fwn)
657 return -ENOMEM;
658
659 fwn->magic = (unsigned long)&fw_cache;
660 strcpy(fwn->name, name);
661 devres_add(dev, fwn);
662
663 return 0;
664}
665
666static void _request_firmware_cleanup(const struct firmware **firmware_p)
667{
668 release_firmware(*firmware_p);
669 *firmware_p = NULL;
670}
671
471static struct firmware_priv * 672static struct firmware_priv *
472_request_firmware_prepare(const struct firmware **firmware_p, const char *name, 673_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
473 struct device *device, bool uevent, bool nowait) 674 struct device *device, bool uevent, bool nowait)
474{ 675{
475 struct firmware *firmware; 676 struct firmware *firmware;
476 struct firmware_priv *fw_priv; 677 struct firmware_priv *fw_priv = NULL;
678 struct firmware_buf *buf;
679 int ret;
477 680
478 if (!firmware_p) 681 if (!firmware_p)
479 return ERR_PTR(-EINVAL); 682 return ERR_PTR(-EINVAL);
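
fw_add_devm_name()/fw_find_devm_name() in this hunk follow the standard devres tagging idiom: allocate a small record with devres_alloc(), key it with a magic value plus a match callback, and attach it with devres_add(). A generic sketch of the same idiom, with hypothetical names and assuming <linux/device.h> and <linux/string.h>:

struct tag_devm {
        unsigned long magic;
        char name[];
};

static void tag_devm_release(struct device *dev, void *res)
{
        /* nothing to free; the devres core releases the block itself */
}

static int tag_devm_match(struct device *dev, void *res, void *match_data)
{
        struct tag_devm *tag = res;

        return tag->magic == (unsigned long)&tag_devm_release &&
               !strcmp(tag->name, match_data);
}

static int tag_device(struct device *dev, const char *name)
{
        struct tag_devm *tag;

        if (devres_find(dev, tag_devm_release, tag_devm_match, (void *)name))
                return 0;       /* already tagged with this name */

        tag = devres_alloc(tag_devm_release, sizeof(*tag) + strlen(name) + 1,
                           GFP_KERNEL);
        if (!tag)
                return -ENOMEM;

        tag->magic = (unsigned long)&tag_devm_release;
        strcpy(tag->name, name);
        devres_add(dev, tag);
        return 0;
}
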
@@ -490,18 +693,45 @@ _request_firmware_prepare(const struct firmware **firmware_p, const char *name,
490 return NULL; 693 return NULL;
491 } 694 }
492 695
493 fw_priv = fw_create_instance(firmware, name, device, uevent, nowait); 696 ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
494 if (IS_ERR(fw_priv)) { 697 if (!ret)
495 release_firmware(firmware); 698 fw_priv = fw_create_instance(firmware, name, device,
699 uevent, nowait);
700
701 if (IS_ERR(fw_priv) || ret < 0) {
702 kfree(firmware);
496 *firmware_p = NULL; 703 *firmware_p = NULL;
704 return ERR_PTR(-ENOMEM);
705 } else if (fw_priv) {
706 fw_priv->buf = buf;
707
708 /*
709 * bind with 'buf' now to avoid warning in failure path
710 * of requesting firmware.
711 */
712 firmware->priv = buf;
713 return fw_priv;
497 } 714 }
498 return fw_priv;
499}
500 715
 501static void _request_firmware_cleanup(const struct firmware **firmware_p) 716 /* share the cached buf, which is in progress or completed */
502{ 717 check_status:
503 release_firmware(*firmware_p); 718 mutex_lock(&fw_lock);
504 *firmware_p = NULL; 719 if (test_bit(FW_STATUS_ABORT, &buf->status)) {
720 fw_priv = ERR_PTR(-ENOENT);
721 _request_firmware_cleanup(firmware_p);
722 goto exit;
723 } else if (test_bit(FW_STATUS_DONE, &buf->status)) {
724 fw_priv = NULL;
725 fw_set_page_data(buf, firmware);
726 goto exit;
727 }
728 mutex_unlock(&fw_lock);
729 wait_for_completion(&buf->completion);
730 goto check_status;
731
732exit:
733 mutex_unlock(&fw_lock);
734 return fw_priv;
505} 735}
506 736
507static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, 737static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
@@ -509,6 +739,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
509{ 739{
510 int retval = 0; 740 int retval = 0;
511 struct device *f_dev = &fw_priv->dev; 741 struct device *f_dev = &fw_priv->dev;
742 struct firmware_buf *buf = fw_priv->buf;
512 743
513 dev_set_uevent_suppress(f_dev, true); 744 dev_set_uevent_suppress(f_dev, true);
514 745
@@ -535,7 +766,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
535 766
536 if (uevent) { 767 if (uevent) {
537 dev_set_uevent_suppress(f_dev, false); 768 dev_set_uevent_suppress(f_dev, false);
538 dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id); 769 dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
539 if (timeout != MAX_SCHEDULE_TIMEOUT) 770 if (timeout != MAX_SCHEDULE_TIMEOUT)
540 mod_timer(&fw_priv->timeout, 771 mod_timer(&fw_priv->timeout,
541 round_jiffies_up(jiffies + timeout)); 772 round_jiffies_up(jiffies + timeout));
@@ -543,15 +774,31 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
543 kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); 774 kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
544 } 775 }
545 776
546 wait_for_completion(&fw_priv->completion); 777 wait_for_completion(&buf->completion);
547 778
548 set_bit(FW_STATUS_DONE, &fw_priv->status);
549 del_timer_sync(&fw_priv->timeout); 779 del_timer_sync(&fw_priv->timeout);
550 780
551 mutex_lock(&fw_lock); 781 mutex_lock(&fw_lock);
552 if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) 782 if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
553 retval = -ENOENT; 783 retval = -ENOENT;
554 fw_priv->fw = NULL; 784
785 /*
786 * add firmware name into devres list so that we can auto cache
787 * and uncache firmware for device.
788 *
 789 * f_dev->parent may have been deleted already, but the problem
790 * should be fixed in devres or driver core.
791 */
792 if (!retval && f_dev->parent)
793 fw_add_devm_name(f_dev->parent, buf->fw_id);
794
795 if (!retval)
796 retval = fw_map_pages_buf(buf);
797
798 /* pass the pages buffer to driver at the last minute */
799 fw_set_page_data(buf, fw_priv->fw);
800
801 fw_priv->buf = NULL;
555 mutex_unlock(&fw_lock); 802 mutex_unlock(&fw_lock);
556 803
557 device_remove_file(f_dev, &dev_attr_loading); 804 device_remove_file(f_dev, &dev_attr_loading);
@@ -578,6 +825,8 @@ err_put_dev:
578 * @name will be used as $FIRMWARE in the uevent environment and 825 * @name will be used as $FIRMWARE in the uevent environment and
579 * should be distinctive enough not to be confused with any other 826 * should be distinctive enough not to be confused with any other
580 * firmware image for this or any other device. 827 * firmware image for this or any other device.
828 *
829 * Caller must hold the reference count of @device.
581 **/ 830 **/
582int 831int
583request_firmware(const struct firmware **firmware_p, const char *name, 832request_firmware(const struct firmware **firmware_p, const char *name,
@@ -659,6 +908,7 @@ static void request_firmware_work_func(struct work_struct *work)
659 908
660 out: 909 out:
661 fw_work->cont(fw, fw_work->context); 910 fw_work->cont(fw, fw_work->context);
911 put_device(fw_work->device);
662 912
663 module_put(fw_work->module); 913 module_put(fw_work->module);
664 kfree(fw_work); 914 kfree(fw_work);
@@ -677,9 +927,15 @@ static void request_firmware_work_func(struct work_struct *work)
677 * @cont: function will be called asynchronously when the firmware 927 * @cont: function will be called asynchronously when the firmware
678 * request is over. 928 * request is over.
679 * 929 *
680 * Asynchronous variant of request_firmware() for user contexts where 930 * Caller must hold the reference count of @device.
681 * it is not possible to sleep for long time. It can't be called 931 *
682 * in atomic contexts. 932 * Asynchronous variant of request_firmware() for user contexts:
 933 * - sleeps for as short a period as possible, since it may
 934 * otherwise increase the kernel boot time of built-in device
 935 * drivers requesting firmware in their ->probe() methods, if
 936 * @gfp is GFP_KERNEL.
937 *
938 * - can't sleep at all if @gfp is GFP_ATOMIC.
683 **/ 939 **/
684int 940int
685request_firmware_nowait( 941request_firmware_nowait(
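
Since the asynchronous path now pins the device with get_device() until the completion callback has run, a caller only has to guarantee that its own reference is valid at submission time. A rough caller sketch, not part of the patch; my_probe, my_fw_cont and "vendor/image.bin" are placeholders:

static void my_fw_cont(const struct firmware *fw, void *context)
{
        struct platform_device *pdev = context;

        if (!fw) {
                dev_err(&pdev->dev, "firmware not available\n");
                return;
        }
        /* ... program fw->data / fw->size into the hardware ... */
        release_firmware(fw);
}

static int my_probe(struct platform_device *pdev)
{
        /* the core takes its own device reference until my_fw_cont() runs */
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       "vendor/image.bin", &pdev->dev,
                                       GFP_KERNEL, pdev, my_fw_cont);
}
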
@@ -705,18 +961,313 @@ request_firmware_nowait(
705 return -EFAULT; 961 return -EFAULT;
706 } 962 }
707 963
964 get_device(fw_work->device);
708 INIT_WORK(&fw_work->work, request_firmware_work_func); 965 INIT_WORK(&fw_work->work, request_firmware_work_func);
709 schedule_work(&fw_work->work); 966 schedule_work(&fw_work->work);
710 return 0; 967 return 0;
711} 968}
712 969
970/**
971 * cache_firmware - cache one firmware image in kernel memory space
972 * @fw_name: the firmware image name
973 *
974 * Cache firmware in kernel memory so that drivers can use it when
 975 * the system isn't ready for them to request firmware images from userspace.
 976 * Once it returns successfully, drivers can use request_firmware or its
 977 * nowait version to get the cached firmware without any interaction
 978 * with userspace.
979 *
980 * Return 0 if the firmware image has been cached successfully
981 * Return !0 otherwise
982 *
983 */
984int cache_firmware(const char *fw_name)
985{
986 int ret;
987 const struct firmware *fw;
988
989 pr_debug("%s: %s\n", __func__, fw_name);
990
991 ret = request_firmware(&fw, fw_name, NULL);
992 if (!ret)
993 kfree(fw);
994
995 pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
996
997 return ret;
998}
999
1000/**
1001 * uncache_firmware - remove one cached firmware image
1002 * @fw_name: the firmware image name
1003 *
1004 * Uncache one firmware image which has been cached successfully
1005 * before.
1006 *
1007 * Return 0 if the firmware cache has been removed successfully
1008 * Return !0 otherwise
1009 *
1010 */
1011int uncache_firmware(const char *fw_name)
1012{
1013 struct firmware_buf *buf;
1014 struct firmware fw;
1015
1016 pr_debug("%s: %s\n", __func__, fw_name);
1017
1018 if (fw_get_builtin_firmware(&fw, fw_name))
1019 return 0;
1020
1021 buf = fw_lookup_buf(fw_name);
1022 if (buf) {
1023 fw_free_buf(buf);
1024 return 0;
1025 }
1026
1027 return -EINVAL;
1028}
1029
1030static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1031{
1032 struct fw_cache_entry *fce;
1033
1034 fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
1035 if (!fce)
1036 goto exit;
1037
1038 strcpy(fce->name, name);
1039exit:
1040 return fce;
1041}
1042
1043static void free_fw_cache_entry(struct fw_cache_entry *fce)
1044{
1045 kfree(fce);
1046}
1047
1048static void __async_dev_cache_fw_image(void *fw_entry,
1049 async_cookie_t cookie)
1050{
1051 struct fw_cache_entry *fce = fw_entry;
1052 struct firmware_cache *fwc = &fw_cache;
1053 int ret;
1054
1055 ret = cache_firmware(fce->name);
1056 if (ret)
1057 goto free;
1058
1059 spin_lock(&fwc->name_lock);
1060 list_add(&fce->list, &fwc->fw_names);
1061 spin_unlock(&fwc->name_lock);
1062 goto drop_ref;
1063
1064free:
1065 free_fw_cache_entry(fce);
1066drop_ref:
1067 spin_lock(&fwc->name_lock);
1068 fwc->cnt--;
1069 spin_unlock(&fwc->name_lock);
1070
1071 wake_up(&fwc->wait_queue);
1072}
1073
1074/* called with dev->devres_lock held */
1075static void dev_create_fw_entry(struct device *dev, void *res,
1076 void *data)
1077{
1078 struct fw_name_devm *fwn = res;
1079 const char *fw_name = fwn->name;
1080 struct list_head *head = data;
1081 struct fw_cache_entry *fce;
1082
1083 fce = alloc_fw_cache_entry(fw_name);
1084 if (fce)
1085 list_add(&fce->list, head);
1086}
1087
1088static int devm_name_match(struct device *dev, void *res,
1089 void *match_data)
1090{
1091 struct fw_name_devm *fwn = res;
1092 return (fwn->magic == (unsigned long)match_data);
1093}
1094
1095static void dev_cache_fw_image(struct device *dev, void *data)
1096{
1097 LIST_HEAD(todo);
1098 struct fw_cache_entry *fce;
1099 struct fw_cache_entry *fce_next;
1100 struct firmware_cache *fwc = &fw_cache;
1101
1102 devres_for_each_res(dev, fw_name_devm_release,
1103 devm_name_match, &fw_cache,
1104 dev_create_fw_entry, &todo);
1105
1106 list_for_each_entry_safe(fce, fce_next, &todo, list) {
1107 list_del(&fce->list);
1108
1109 spin_lock(&fwc->name_lock);
1110 fwc->cnt++;
1111 spin_unlock(&fwc->name_lock);
1112
1113 async_schedule(__async_dev_cache_fw_image, (void *)fce);
1114 }
1115}
1116
1117static void __device_uncache_fw_images(void)
1118{
1119 struct firmware_cache *fwc = &fw_cache;
1120 struct fw_cache_entry *fce;
1121
1122 spin_lock(&fwc->name_lock);
1123 while (!list_empty(&fwc->fw_names)) {
1124 fce = list_entry(fwc->fw_names.next,
1125 struct fw_cache_entry, list);
1126 list_del(&fce->list);
1127 spin_unlock(&fwc->name_lock);
1128
1129 uncache_firmware(fce->name);
1130 free_fw_cache_entry(fce);
1131
1132 spin_lock(&fwc->name_lock);
1133 }
1134 spin_unlock(&fwc->name_lock);
1135}
1136
1137/**
1138 * device_cache_fw_images - cache devices' firmware
1139 *
1140 * If one device called request_firmware or its nowait version
1141 * successfully before, the firmware names are recored into the
1142 * device's devres link list, so device_cache_fw_images can call
1143 * cache_firmware() to cache these firmwares for the device,
1144 * then the device driver can load its firmwares easily at
1145 * time when system is not ready to complete loading firmware.
1146 */
1147static void device_cache_fw_images(void)
1148{
1149 struct firmware_cache *fwc = &fw_cache;
1150 int old_timeout;
1151 DEFINE_WAIT(wait);
1152
1153 pr_debug("%s\n", __func__);
1154
1155 /*
1156 * use small loading timeout for caching devices' firmware
1157 * because all these firmware images have been loaded
1158 * successfully at lease once, also system is ready for
1159 * completing firmware loading now. The maximum size of
1160 * firmware in current distributions is about 2M bytes,
1161 * so 10 secs should be enough.
1162 */
1163 old_timeout = loading_timeout;
1164 loading_timeout = 10;
1165
1166 dpm_for_each_dev(NULL, dev_cache_fw_image);
1167
1168 /* wait for completion of caching firmware for all devices */
1169 spin_lock(&fwc->name_lock);
1170 for (;;) {
1171 prepare_to_wait(&fwc->wait_queue, &wait,
1172 TASK_UNINTERRUPTIBLE);
1173 if (!fwc->cnt)
1174 break;
1175
1176 spin_unlock(&fwc->name_lock);
1177
1178 schedule();
1179
1180 spin_lock(&fwc->name_lock);
1181 }
1182 spin_unlock(&fwc->name_lock);
1183 finish_wait(&fwc->wait_queue, &wait);
1184
1185 loading_timeout = old_timeout;
1186}
1187
1188/**
1189 * device_uncache_fw_images - uncache devices' firmware
1190 *
 1191 * uncache all firmware images which have been cached successfully
 1192 * by device_cache_fw_images earlier
1193 */
1194static void device_uncache_fw_images(void)
1195{
1196 pr_debug("%s\n", __func__);
1197 __device_uncache_fw_images();
1198}
1199
1200static void device_uncache_fw_images_work(struct work_struct *work)
1201{
1202 device_uncache_fw_images();
1203}
1204
1205/**
 1206 * device_uncache_fw_images_delay - uncache devices' firmware
 1207 * @delay: number of milliseconds to delay before uncaching device firmware
 1208 *
 1209 * uncache all devices' firmware images which have been cached successfully
 1210 * by device_cache_fw_images, after @delay milliseconds.
1211 */
1212static void device_uncache_fw_images_delay(unsigned long delay)
1213{
1214 schedule_delayed_work(&fw_cache.work,
1215 msecs_to_jiffies(delay));
1216}
1217
1218#ifdef CONFIG_PM
1219static int fw_pm_notify(struct notifier_block *notify_block,
1220 unsigned long mode, void *unused)
1221{
1222 switch (mode) {
1223 case PM_HIBERNATION_PREPARE:
1224 case PM_SUSPEND_PREPARE:
1225 device_cache_fw_images();
1226 break;
1227
1228 case PM_POST_SUSPEND:
1229 case PM_POST_HIBERNATION:
1230 case PM_POST_RESTORE:
1231 device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
1232 break;
1233 }
1234
1235 return 0;
1236}
1237#else
1238static int fw_pm_notify(struct notifier_block *notify_block,
1239 unsigned long mode, void *unused)
1240{
1241 return 0;
1242}
1243#endif
1244
1245static void __init fw_cache_init(void)
1246{
1247 spin_lock_init(&fw_cache.lock);
1248 INIT_LIST_HEAD(&fw_cache.head);
1249
1250 spin_lock_init(&fw_cache.name_lock);
1251 INIT_LIST_HEAD(&fw_cache.fw_names);
1252 fw_cache.cnt = 0;
1253
1254 init_waitqueue_head(&fw_cache.wait_queue);
1255 INIT_DELAYED_WORK(&fw_cache.work,
1256 device_uncache_fw_images_work);
1257
1258 fw_cache.pm_notify.notifier_call = fw_pm_notify;
1259 register_pm_notifier(&fw_cache.pm_notify);
1260}
1261
713static int __init firmware_class_init(void) 1262static int __init firmware_class_init(void)
714{ 1263{
1264 fw_cache_init();
715 return class_register(&firmware_class); 1265 return class_register(&firmware_class);
716} 1266}
717 1267
718static void __exit firmware_class_exit(void) 1268static void __exit firmware_class_exit(void)
719{ 1269{
1270 unregister_pm_notifier(&fw_cache.pm_notify);
720 class_unregister(&firmware_class); 1271 class_unregister(&firmware_class);
721} 1272}
722 1273
@@ -726,3 +1277,5 @@ module_exit(firmware_class_exit);
726EXPORT_SYMBOL(release_firmware); 1277EXPORT_SYMBOL(release_firmware);
727EXPORT_SYMBOL(request_firmware); 1278EXPORT_SYMBOL(request_firmware);
728EXPORT_SYMBOL(request_firmware_nowait); 1279EXPORT_SYMBOL(request_firmware_nowait);
1280EXPORT_SYMBOL_GPL(cache_firmware);
1281EXPORT_SYMBOL_GPL(uncache_firmware);
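
The two new exports are normally driven from the PM notifier added above, but they can also be exercised directly from process context. A minimal sketch, with "vendor/image.bin" as a placeholder image name:

static int pin_my_firmware(void)
{
        int ret;

        ret = cache_firmware("vendor/image.bin");       /* loads and keeps a reference */
        if (ret)
                return ret;

        /*
         * request_firmware() for "vendor/image.bin" now succeeds from the
         * cache without any user-space help.
         */

        return uncache_firmware("vendor/image.bin");    /* drop the cached copy */
}
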
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a1a722502587..3f8077ce585c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -20,9 +20,13 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/pm_runtime.h> 22#include <linux/pm_runtime.h>
23#include <linux/idr.h>
23 24
24#include "base.h" 25#include "base.h"
25 26
27/* For automatically allocated device IDs */
28static DEFINE_IDA(platform_devid_ida);
29
26#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ 30#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
27 driver)) 31 driver))
28 32
@@ -263,7 +267,7 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
263 */ 267 */
264int platform_device_add(struct platform_device *pdev) 268int platform_device_add(struct platform_device *pdev)
265{ 269{
266 int i, ret = 0; 270 int i, ret;
267 271
268 if (!pdev) 272 if (!pdev)
269 return -EINVAL; 273 return -EINVAL;
@@ -273,10 +277,27 @@ int platform_device_add(struct platform_device *pdev)
273 277
274 pdev->dev.bus = &platform_bus_type; 278 pdev->dev.bus = &platform_bus_type;
275 279
276 if (pdev->id != -1) 280 switch (pdev->id) {
281 default:
277 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); 282 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
278 else 283 break;
284 case PLATFORM_DEVID_NONE:
279 dev_set_name(&pdev->dev, "%s", pdev->name); 285 dev_set_name(&pdev->dev, "%s", pdev->name);
286 break;
287 case PLATFORM_DEVID_AUTO:
288 /*
289 * Automatically allocated device ID. We mark it as such so
290 * that we remember it must be freed, and we append a suffix
291 * to avoid namespace collision with explicit IDs.
292 */
293 ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
294 if (ret < 0)
295 goto err_out;
296 pdev->id = ret;
297 pdev->id_auto = true;
298 dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
299 break;
300 }
280 301
281 for (i = 0; i < pdev->num_resources; i++) { 302 for (i = 0; i < pdev->num_resources; i++) {
282 struct resource *p, *r = &pdev->resource[i]; 303 struct resource *p, *r = &pdev->resource[i];
@@ -309,6 +330,11 @@ int platform_device_add(struct platform_device *pdev)
309 return ret; 330 return ret;
310 331
311 failed: 332 failed:
333 if (pdev->id_auto) {
334 ida_simple_remove(&platform_devid_ida, pdev->id);
335 pdev->id = PLATFORM_DEVID_AUTO;
336 }
337
312 while (--i >= 0) { 338 while (--i >= 0) {
313 struct resource *r = &pdev->resource[i]; 339 struct resource *r = &pdev->resource[i];
314 unsigned long type = resource_type(r); 340 unsigned long type = resource_type(r);
@@ -317,6 +343,7 @@ int platform_device_add(struct platform_device *pdev)
317 release_resource(r); 343 release_resource(r);
318 } 344 }
319 345
346 err_out:
320 return ret; 347 return ret;
321} 348}
322EXPORT_SYMBOL_GPL(platform_device_add); 349EXPORT_SYMBOL_GPL(platform_device_add);
@@ -336,6 +363,11 @@ void platform_device_del(struct platform_device *pdev)
336 if (pdev) { 363 if (pdev) {
337 device_del(&pdev->dev); 364 device_del(&pdev->dev);
338 365
366 if (pdev->id_auto) {
367 ida_simple_remove(&platform_devid_ida, pdev->id);
368 pdev->id = PLATFORM_DEVID_AUTO;
369 }
370
339 for (i = 0; i < pdev->num_resources; i++) { 371 for (i = 0; i < pdev->num_resources; i++) {
340 struct resource *r = &pdev->resource[i]; 372 struct resource *r = &pdev->resource[i];
341 unsigned long type = resource_type(r); 373 unsigned long type = resource_type(r);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0113adc310dc..b0b072a88f5f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1324,3 +1324,25 @@ int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1324 return async_error; 1324 return async_error;
1325} 1325}
1326EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); 1326EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1327
1328/**
1329 * dpm_for_each_dev - device iterator.
1330 * @data: data for the callback.
1331 * @fn: function to be called for each device.
1332 *
1333 * Iterate over devices in dpm_list, and call @fn for each device,
1334 * passing it @data.
1335 */
1336void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1337{
1338 struct device *dev;
1339
1340 if (!fn)
1341 return;
1342
1343 device_pm_lock();
1344 list_for_each_entry(dev, &dpm_list, power.entry)
1345 fn(dev, data);
1346 device_pm_unlock();
1347}
1348EXPORT_SYMBOL_GPL(dpm_for_each_dev);
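
The firmware caching code above is the intended in-tree user of dpm_for_each_dev(), but any caller can walk dpm_list the same way. A minimal sketch; count_dev and count_pm_devices are hypothetical:

/* called once per device, with the dpm list lock held */
static void count_dev(struct device *dev, void *data)
{
        (*(unsigned int *)data)++;
}

static unsigned int count_pm_devices(void)
{
        unsigned int n = 0;

        dpm_for_each_dev(&n, count_dev);
        return n;
}
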
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 88961b332348..9c0682daefb8 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,8 +2,8 @@
2# Makefile for external connector class (extcon) devices 2# Makefile for external connector class (extcon) devices
3# 3#
4 4
5obj-$(CONFIG_EXTCON) += extcon_class.o 5obj-$(CONFIG_EXTCON) += extcon-class.o
6obj-$(CONFIG_EXTCON_GPIO) += extcon_gpio.o 6obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
7obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o 7obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
8obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o 8obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
9obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o 9obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 427a289f32a5..fa2114f1f9ec 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/gpio.h> 23#include <linux/gpio.h>
24#include <linux/input.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
26#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
@@ -30,11 +31,14 @@
30#include <linux/mfd/arizona/pdata.h> 31#include <linux/mfd/arizona/pdata.h>
31#include <linux/mfd/arizona/registers.h> 32#include <linux/mfd/arizona/registers.h>
32 33
34#define ARIZONA_NUM_BUTTONS 6
35
33struct arizona_extcon_info { 36struct arizona_extcon_info {
34 struct device *dev; 37 struct device *dev;
35 struct arizona *arizona; 38 struct arizona *arizona;
36 struct mutex lock; 39 struct mutex lock;
37 struct regulator *micvdd; 40 struct regulator *micvdd;
41 struct input_dev *input;
38 42
39 int micd_mode; 43 int micd_mode;
40 const struct arizona_micd_config *micd_modes; 44 const struct arizona_micd_config *micd_modes;
@@ -54,6 +58,18 @@ static const struct arizona_micd_config micd_default_modes[] = {
54 { 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 }, 58 { 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
55}; 59};
56 60
61static struct {
62 u16 status;
63 int report;
64} arizona_lvl_to_key[ARIZONA_NUM_BUTTONS] = {
65 { 0x1, BTN_0 },
66 { 0x2, BTN_1 },
67 { 0x4, BTN_2 },
68 { 0x8, BTN_3 },
69 { 0x10, BTN_4 },
70 { 0x20, BTN_5 },
71};
72
57#define ARIZONA_CABLE_MECHANICAL 0 73#define ARIZONA_CABLE_MECHANICAL 0
58#define ARIZONA_CABLE_MICROPHONE 1 74#define ARIZONA_CABLE_MICROPHONE 1
59#define ARIZONA_CABLE_HEADPHONE 2 75#define ARIZONA_CABLE_HEADPHONE 2
@@ -133,6 +149,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
133 149
134 if (change) { 150 if (change) {
135 regulator_disable(info->micvdd); 151 regulator_disable(info->micvdd);
152 pm_runtime_mark_last_busy(info->dev);
136 pm_runtime_put_autosuspend(info->dev); 153 pm_runtime_put_autosuspend(info->dev);
137 } 154 }
138} 155}
@@ -141,8 +158,8 @@ static irqreturn_t arizona_micdet(int irq, void *data)
141{ 158{
142 struct arizona_extcon_info *info = data; 159 struct arizona_extcon_info *info = data;
143 struct arizona *arizona = info->arizona; 160 struct arizona *arizona = info->arizona;
144 unsigned int val; 161 unsigned int val, lvl;
145 int ret; 162 int ret, i;
146 163
147 mutex_lock(&info->lock); 164 mutex_lock(&info->lock);
148 165
@@ -219,13 +236,22 @@ static irqreturn_t arizona_micdet(int irq, void *data)
219 236
220 /* 237 /*
221 * If we're still detecting and we detect a short then we've 238 * If we're still detecting and we detect a short then we've
222 * got a headphone. Otherwise it's a button press, the 239 * got a headphone. Otherwise it's a button press.
223 * button reporting is stubbed out for now.
224 */ 240 */
225 if (val & 0x3fc) { 241 if (val & 0x3fc) {
226 if (info->mic) { 242 if (info->mic) {
227 dev_dbg(arizona->dev, "Mic button detected\n"); 243 dev_dbg(arizona->dev, "Mic button detected\n");
228 244
245 lvl = val & ARIZONA_MICD_LVL_MASK;
246 lvl >>= ARIZONA_MICD_LVL_SHIFT;
247
248 for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
249 if (lvl & arizona_lvl_to_key[i].status)
250 input_report_key(info->input,
251 arizona_lvl_to_key[i].report,
252 1);
253 input_sync(info->input);
254
229 } else if (info->detecting) { 255 } else if (info->detecting) {
230 dev_dbg(arizona->dev, "Headphone detected\n"); 256 dev_dbg(arizona->dev, "Headphone detected\n");
231 info->detecting = false; 257 info->detecting = false;
@@ -244,6 +270,10 @@ static irqreturn_t arizona_micdet(int irq, void *data)
244 } 270 }
245 } else { 271 } else {
246 dev_dbg(arizona->dev, "Mic button released\n"); 272 dev_dbg(arizona->dev, "Mic button released\n");
273 for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
274 input_report_key(info->input,
275 arizona_lvl_to_key[i].report, 0);
276 input_sync(info->input);
247 } 277 }
248 278
249handled: 279handled:
@@ -258,7 +288,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
258 struct arizona_extcon_info *info = data; 288 struct arizona_extcon_info *info = data;
259 struct arizona *arizona = info->arizona; 289 struct arizona *arizona = info->arizona;
260 unsigned int val; 290 unsigned int val;
261 int ret; 291 int ret, i;
262 292
263 pm_runtime_get_sync(info->dev); 293 pm_runtime_get_sync(info->dev);
264 294
@@ -288,6 +318,11 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
288 318
289 arizona_stop_mic(info); 319 arizona_stop_mic(info);
290 320
321 for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
322 input_report_key(info->input,
323 arizona_lvl_to_key[i].report, 0);
324 input_sync(info->input);
325
291 ret = extcon_update_state(&info->edev, 0xffffffff, 0); 326 ret = extcon_update_state(&info->edev, 0xffffffff, 0);
292 if (ret != 0) 327 if (ret != 0)
293 dev_err(arizona->dev, "Removal report failed: %d\n", 328 dev_err(arizona->dev, "Removal report failed: %d\n",
@@ -307,7 +342,7 @@ static int __devinit arizona_extcon_probe(struct platform_device *pdev)
307 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); 342 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
308 struct arizona_pdata *pdata; 343 struct arizona_pdata *pdata;
309 struct arizona_extcon_info *info; 344 struct arizona_extcon_info *info;
310 int ret, mode; 345 int ret, mode, i;
311 346
312 pdata = dev_get_platdata(arizona->dev); 347 pdata = dev_get_platdata(arizona->dev);
313 348
@@ -382,6 +417,20 @@ static int __devinit arizona_extcon_probe(struct platform_device *pdev)
382 417
383 arizona_extcon_set_mode(info, 0); 418 arizona_extcon_set_mode(info, 0);
384 419
420 info->input = input_allocate_device();
421 if (!info->input) {
422 dev_err(arizona->dev, "Can't allocate input dev\n");
423 ret = -ENOMEM;
424 goto err_register;
425 }
426
427 for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
428 input_set_capability(info->input, EV_KEY,
429 arizona_lvl_to_key[i].report);
430 info->input->name = "Headset";
431 info->input->phys = "arizona/extcon";
432 info->input->dev.parent = &pdev->dev;
433
385 pm_runtime_enable(&pdev->dev); 434 pm_runtime_enable(&pdev->dev);
386 pm_runtime_idle(&pdev->dev); 435 pm_runtime_idle(&pdev->dev);
387 pm_runtime_get_sync(&pdev->dev); 436 pm_runtime_get_sync(&pdev->dev);
@@ -391,7 +440,7 @@ static int __devinit arizona_extcon_probe(struct platform_device *pdev)
391 if (ret != 0) { 440 if (ret != 0) {
392 dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n", 441 dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
393 ret); 442 ret);
394 goto err_register; 443 goto err_input;
395 } 444 }
396 445
397 ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1); 446 ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1);
@@ -436,6 +485,12 @@ static int __devinit arizona_extcon_probe(struct platform_device *pdev)
436 485
437 pm_runtime_put(&pdev->dev); 486 pm_runtime_put(&pdev->dev);
438 487
488 ret = input_register_device(info->input);
489 if (ret) {
490 dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
491 goto err_fall_wake;
492 }
493
439 return 0; 494 return 0;
440 495
441err_fall_wake: 496err_fall_wake:
@@ -446,6 +501,8 @@ err_rise_wake:
446 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0); 501 arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
447err_rise: 502err_rise:
448 arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info); 503 arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
504err_input:
505 input_free_device(info->input);
449err_register: 506err_register:
450 pm_runtime_disable(&pdev->dev); 507 pm_runtime_disable(&pdev->dev);
451 extcon_dev_unregister(&info->edev); 508 extcon_dev_unregister(&info->edev);
@@ -468,6 +525,7 @@ static int __devexit arizona_extcon_remove(struct platform_device *pdev)
468 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE, 525 regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
469 ARIZONA_JD1_ENA, 0); 526 ARIZONA_JD1_ENA, 0);
470 arizona_clk32k_disable(arizona); 527 arizona_clk32k_disable(arizona);
528 input_unregister_device(info->input);
471 extcon_dev_unregister(&info->edev); 529 extcon_dev_unregister(&info->edev);
472 530
473 return 0; 531 return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon-class.c
index f6419f9db76c..481cfa0f2118 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon-class.c
@@ -30,6 +30,7 @@
30#include <linux/err.h> 30#include <linux/err.h>
31#include <linux/extcon.h> 31#include <linux/extcon.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/sysfs.h>
33 34
34/* 35/*
35 * extcon_cable_name suggests the standard cable names for commonly used 36 * extcon_cable_name suggests the standard cable names for commonly used
@@ -673,10 +674,12 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
673 cable->attr_g.name = str; 674 cable->attr_g.name = str;
674 cable->attr_g.attrs = cable->attrs; 675 cable->attr_g.attrs = cable->attrs;
675 676
677 sysfs_attr_init(&cable->attr_name.attr);
676 cable->attr_name.attr.name = "name"; 678 cable->attr_name.attr.name = "name";
677 cable->attr_name.attr.mode = 0444; 679 cable->attr_name.attr.mode = 0444;
678 cable->attr_name.show = cable_name_show; 680 cable->attr_name.show = cable_name_show;
679 681
682 sysfs_attr_init(&cable->attr_state.attr);
680 cable->attr_state.attr.name = "state"; 683 cable->attr_state.attr.name = "state";
681 cable->attr_state.attr.mode = 0644; 684 cable->attr_state.attr.mode = 0644;
682 cable->attr_state.show = cable_state_show; 685 cable->attr_state.show = cable_state_show;
@@ -722,6 +725,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
722 goto err_muex; 725 goto err_muex;
723 } 726 }
724 strcpy(name, buf); 727 strcpy(name, buf);
728 sysfs_attr_init(&edev->d_attrs_muex[index].attr);
725 edev->d_attrs_muex[index].attr.name = name; 729 edev->d_attrs_muex[index].attr.name = name;
726 edev->d_attrs_muex[index].attr.mode = 0000; 730 edev->d_attrs_muex[index].attr.mode = 0000;
727 edev->attrs_muex[index] = &edev->d_attrs_muex[index] 731 edev->attrs_muex[index] = &edev->d_attrs_muex[index]
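
The sysfs_attr_init() calls added here give each dynamically allocated attribute its own lockdep key; statically declared attributes are covered automatically, but dynamically allocated ones must initialize the key explicitly. A minimal sketch of the same rule, assuming <linux/device.h>, <linux/slab.h> and <linux/sysfs.h>; my_show and the "status" name are hypothetical:

static ssize_t my_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        return sprintf(buf, "ok\n");
}

static struct device_attribute *alloc_my_attr(void)
{
        struct device_attribute *da = kzalloc(sizeof(*da), GFP_KERNEL);

        if (!da)
                return NULL;
        sysfs_attr_init(&da->attr);     /* required for dynamically allocated attrs */
        da->attr.name = "status";
        da->attr.mode = 0444;
        da->show = my_show;
        return da;
}
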
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon-gpio.c
index 3cc152e690b0..3cc152e690b0 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon-gpio.c
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 86f8885aeb45..3648f8f0f368 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -26,6 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
28#include <linux/hyperv.h> 28#include <linux/hyperv.h>
29#include <linux/version.h>
29#include <asm/hyperv.h> 30#include <asm/hyperv.h>
30#include "hyperv_vmbus.h" 31#include "hyperv_vmbus.h"
31 32
@@ -38,28 +39,6 @@ struct hv_context hv_context = {
38}; 39};
39 40
40/* 41/*
41 * query_hypervisor_presence
42 * - Query the cpuid for presence of windows hypervisor
43 */
44static int query_hypervisor_presence(void)
45{
46 unsigned int eax;
47 unsigned int ebx;
48 unsigned int ecx;
49 unsigned int edx;
50 unsigned int op;
51
52 eax = 0;
53 ebx = 0;
54 ecx = 0;
55 edx = 0;
56 op = HVCPUID_VERSION_FEATURES;
57 cpuid(op, &eax, &ebx, &ecx, &edx);
58
59 return ecx & HV_PRESENT_BIT;
60}
61
62/*
63 * query_hypervisor_info - Get version info of the windows hypervisor 42 * query_hypervisor_info - Get version info of the windows hypervisor
64 */ 43 */
65static int query_hypervisor_info(void) 44static int query_hypervisor_info(void)
@@ -159,14 +138,13 @@ int hv_init(void)
159 memset(hv_context.synic_message_page, 0, 138 memset(hv_context.synic_message_page, 0,
160 sizeof(void *) * NR_CPUS); 139 sizeof(void *) * NR_CPUS);
161 140
162 if (!query_hypervisor_presence())
163 goto cleanup;
164
165 max_leaf = query_hypervisor_info(); 141 max_leaf = query_hypervisor_info();
166 142
167 /* Write our OS info */ 143 /*
168 wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID); 144 * Write our OS ID.
169 hv_context.guestid = HV_LINUX_GUEST_ID; 145 */
146 hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
147 wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
170 148
171 /* See if the hypercall page is already set */ 149 /* See if the hypercall page is already set */
172 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 150 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 0012eed6d872..d9060502b073 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -48,13 +48,24 @@ static struct {
48 void *kvp_context; /* for the channel callback */ 48 void *kvp_context; /* for the channel callback */
49} kvp_transaction; 49} kvp_transaction;
50 50
51/*
52 * Before we can accept KVP messages from the host, we need
53 * to handshake with the user level daemon. This state tracks
54 * if we are in the handshake phase.
55 */
56static bool in_hand_shake = true;
57
58/*
59 * This state maintains the version number registered by the daemon.
60 */
61static int dm_reg_value;
62
51static void kvp_send_key(struct work_struct *dummy); 63static void kvp_send_key(struct work_struct *dummy);
52 64
53#define TIMEOUT_FIRED 1
54 65
55static void kvp_respond_to_host(char *key, char *value, int error); 66static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
56static void kvp_work_func(struct work_struct *dummy); 67static void kvp_work_func(struct work_struct *dummy);
57static void kvp_register(void); 68static void kvp_register(int);
58 69
59static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func); 70static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func);
60static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); 71static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
@@ -68,7 +79,7 @@ static u8 *recv_buffer;
68 */ 79 */
69 80
70static void 81static void
71kvp_register(void) 82kvp_register(int reg_value)
72{ 83{
73 84
74 struct cn_msg *msg; 85 struct cn_msg *msg;
@@ -83,7 +94,7 @@ kvp_register(void)
83 msg->id.idx = CN_KVP_IDX; 94 msg->id.idx = CN_KVP_IDX;
84 msg->id.val = CN_KVP_VAL; 95 msg->id.val = CN_KVP_VAL;
85 96
86 kvp_msg->kvp_hdr.operation = KVP_OP_REGISTER; 97 kvp_msg->kvp_hdr.operation = reg_value;
87 strcpy(version, HV_DRV_VERSION); 98 strcpy(version, HV_DRV_VERSION);
88 msg->len = sizeof(struct hv_kvp_msg); 99 msg->len = sizeof(struct hv_kvp_msg);
89 cn_netlink_send(msg, 0, GFP_ATOMIC); 100 cn_netlink_send(msg, 0, GFP_ATOMIC);
@@ -97,9 +108,43 @@ kvp_work_func(struct work_struct *dummy)
97 * If the timer fires, the user-mode component has not responded; 108 * If the timer fires, the user-mode component has not responded;
98 * process the pending transaction. 109 * process the pending transaction.
99 */ 110 */
100 kvp_respond_to_host("Unknown key", "Guest timed out", TIMEOUT_FIRED); 111 kvp_respond_to_host(NULL, HV_E_FAIL);
112}
113
114static int kvp_handle_handshake(struct hv_kvp_msg *msg)
115{
116 int ret = 1;
117
118 switch (msg->kvp_hdr.operation) {
119 case KVP_OP_REGISTER:
120 dm_reg_value = KVP_OP_REGISTER;
121 pr_info("KVP: IP injection functionality not available\n");
122 pr_info("KVP: Upgrade the KVP daemon\n");
123 break;
124 case KVP_OP_REGISTER1:
125 dm_reg_value = KVP_OP_REGISTER1;
126 break;
127 default:
128 pr_info("KVP: incompatible daemon\n");
129 pr_info("KVP: KVP version: %d, Daemon version: %d\n",
130 KVP_OP_REGISTER1, msg->kvp_hdr.operation);
131 ret = 0;
132 }
133
134 if (ret) {
135 /*
136 * We have a compatible daemon; complete the handshake.
137 */
138 pr_info("KVP: user-mode registering done.\n");
139 kvp_register(dm_reg_value);
140 kvp_transaction.active = false;
141 if (kvp_transaction.kvp_context)
142 hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
143 }
144 return ret;
101} 145}
102 146
147
103/* 148/*
104 * Callback when data is received from user mode. 149 * Callback when data is received from user mode.
105 */ 150 */
@@ -109,29 +154,163 @@ kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
109{ 154{
110 struct hv_kvp_msg *message; 155 struct hv_kvp_msg *message;
111 struct hv_kvp_msg_enumerate *data; 156 struct hv_kvp_msg_enumerate *data;
157 int error = 0;
112 158
113 message = (struct hv_kvp_msg *)msg->data; 159 message = (struct hv_kvp_msg *)msg->data;
114 switch (message->kvp_hdr.operation) { 160
161 /*
162 * If we are negotiating the version information
163 * with the daemon; handle that first.
164 */
165
166 if (in_hand_shake) {
167 if (kvp_handle_handshake(message))
168 in_hand_shake = false;
169 return;
170 }
171
172 /*
173 * Based on the version of the daemon, we propagate errors from the
174 * daemon differently.
175 */
176
177 data = &message->body.kvp_enum_data;
178
179 switch (dm_reg_value) {
115 case KVP_OP_REGISTER: 180 case KVP_OP_REGISTER:
116 pr_info("KVP: user-mode registering done.\n"); 181 /*
117 kvp_register(); 182 * Null string is used to pass back error condition.
118 kvp_transaction.active = false; 183 */
119 hv_kvp_onchannelcallback(kvp_transaction.kvp_context); 184 if (data->data.key[0] == 0)
185 error = HV_S_CONT;
120 break; 186 break;
121 187
122 default: 188 case KVP_OP_REGISTER1:
123 data = &message->body.kvp_enum_data;
124 /* 189 /*
125 * Complete the transaction by forwarding the key value 190 * We use the message header information from
126 * to the host. But first, cancel the timeout. 191 * the user level daemon to transmit errors.
127 */ 192 */
128 if (cancel_delayed_work_sync(&kvp_work)) 193 error = message->error;
129 kvp_respond_to_host(data->data.key, 194 break;
130 data->data.value, 195 }
131 !strlen(data->data.key)); 196
197 /*
198 * Complete the transaction by forwarding the key value
199 * to the host. But first, cancel the timeout.
200 */
201 if (cancel_delayed_work_sync(&kvp_work))
202 kvp_respond_to_host(message, error);
203}
204
205
206static int process_ob_ipinfo(void *in_msg, void *out_msg, int op)
207{
208 struct hv_kvp_msg *in = in_msg;
209 struct hv_kvp_ip_msg *out = out_msg;
210 int len;
211
212 switch (op) {
213 case KVP_OP_GET_IP_INFO:
214 /*
215 * Transform all parameters into utf16 encoding.
216 */
217 len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.ip_addr,
218 strlen((char *)in->body.kvp_ip_val.ip_addr),
219 UTF16_HOST_ENDIAN,
220 (wchar_t *)out->kvp_ip_val.ip_addr,
221 MAX_IP_ADDR_SIZE);
222 if (len < 0)
223 return len;
224
225 len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.sub_net,
226 strlen((char *)in->body.kvp_ip_val.sub_net),
227 UTF16_HOST_ENDIAN,
228 (wchar_t *)out->kvp_ip_val.sub_net,
229 MAX_IP_ADDR_SIZE);
230 if (len < 0)
231 return len;
232
233 len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.gate_way,
234 strlen((char *)in->body.kvp_ip_val.gate_way),
235 UTF16_HOST_ENDIAN,
236 (wchar_t *)out->kvp_ip_val.gate_way,
237 MAX_GATEWAY_SIZE);
238 if (len < 0)
239 return len;
240
241 len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.dns_addr,
242 strlen((char *)in->body.kvp_ip_val.dns_addr),
243 UTF16_HOST_ENDIAN,
244 (wchar_t *)out->kvp_ip_val.dns_addr,
245 MAX_IP_ADDR_SIZE);
246 if (len < 0)
247 return len;
248
249 len = utf8s_to_utf16s((char *)in->body.kvp_ip_val.adapter_id,
250 strlen((char *)in->body.kvp_ip_val.adapter_id),
251 UTF16_HOST_ENDIAN,
252 (wchar_t *)out->kvp_ip_val.adapter_id,
253 MAX_IP_ADDR_SIZE);
254 if (len < 0)
255 return len;
256
257 out->kvp_ip_val.dhcp_enabled =
258 in->body.kvp_ip_val.dhcp_enabled;
259 }
260
261 return 0;
262}
263
264static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
265{
266 struct hv_kvp_ip_msg *in = in_msg;
267 struct hv_kvp_msg *out = out_msg;
268
269 switch (op) {
270 case KVP_OP_SET_IP_INFO:
271 /*
272 * Transform all parameters into utf8 encoding.
273 */
274 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.ip_addr,
275 MAX_IP_ADDR_SIZE,
276 UTF16_LITTLE_ENDIAN,
277 (__u8 *)out->body.kvp_ip_val.ip_addr,
278 MAX_IP_ADDR_SIZE);
279
280 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.sub_net,
281 MAX_IP_ADDR_SIZE,
282 UTF16_LITTLE_ENDIAN,
283 (__u8 *)out->body.kvp_ip_val.sub_net,
284 MAX_IP_ADDR_SIZE);
285
286 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.gate_way,
287 MAX_GATEWAY_SIZE,
288 UTF16_LITTLE_ENDIAN,
289 (__u8 *)out->body.kvp_ip_val.gate_way,
290 MAX_GATEWAY_SIZE);
291
292 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.dns_addr,
293 MAX_IP_ADDR_SIZE,
294 UTF16_LITTLE_ENDIAN,
295 (__u8 *)out->body.kvp_ip_val.dns_addr,
296 MAX_IP_ADDR_SIZE);
297
298 out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
299
300 default:
301 utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
302 MAX_ADAPTER_ID_SIZE,
303 UTF16_LITTLE_ENDIAN,
304 (__u8 *)out->body.kvp_ip_val.adapter_id,
305 MAX_ADAPTER_ID_SIZE);
306
307 out->body.kvp_ip_val.addr_family = in->kvp_ip_val.addr_family;
132 } 308 }
133} 309}
134 310
311
312
313
135static void 314static void
136kvp_send_key(struct work_struct *dummy) 315kvp_send_key(struct work_struct *dummy)
137{ 316{
@@ -167,6 +346,12 @@ kvp_send_key(struct work_struct *dummy)
167 */ 346 */
168 347
169 switch (message->kvp_hdr.operation) { 348 switch (message->kvp_hdr.operation) {
349 case KVP_OP_SET_IP_INFO:
350 process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
351 break;
352 case KVP_OP_GET_IP_INFO:
353 process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
354 break;
170 case KVP_OP_SET: 355 case KVP_OP_SET:
171 switch (in_msg->body.kvp_set.data.value_type) { 356 switch (in_msg->body.kvp_set.data.value_type) {
172 case REG_SZ: 357 case REG_SZ:
@@ -243,17 +428,19 @@ kvp_send_key(struct work_struct *dummy)
243 */ 428 */
244 429
245static void 430static void
246kvp_respond_to_host(char *key, char *value, int error) 431kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
247{ 432{
248 struct hv_kvp_msg *kvp_msg; 433 struct hv_kvp_msg *kvp_msg;
249 struct hv_kvp_exchg_msg_value *kvp_data; 434 struct hv_kvp_exchg_msg_value *kvp_data;
250 char *key_name; 435 char *key_name;
436 char *value;
251 struct icmsg_hdr *icmsghdrp; 437 struct icmsg_hdr *icmsghdrp;
252 int keylen = 0; 438 int keylen = 0;
253 int valuelen = 0; 439 int valuelen = 0;
254 u32 buf_len; 440 u32 buf_len;
255 struct vmbus_channel *channel; 441 struct vmbus_channel *channel;
256 u64 req_id; 442 u64 req_id;
443 int ret;
257 444
258 /* 445 /*
259 * If a transaction is not active; log and return. 446 * If a transaction is not active; log and return.
@@ -287,6 +474,7 @@ kvp_respond_to_host(char *key, char *value, int error)
287 */ 474 */
288 return; 475 return;
289 476
477 icmsghdrp->status = error;
290 478
291 /* 479 /*
292 * If the error parameter is set, terminate the host's enumeration 480 * If the error parameter is set, terminate the host's enumeration
@@ -294,20 +482,27 @@ kvp_respond_to_host(char *key, char *value, int error)
294 */ 482 */
295 if (error) { 483 if (error) {
296 /* 484 /*
297 * Something failed or the we have timedout; 485 * Something failed or we have timed out;
298 * terminate the current host-side iteration. 486 * terminate the current host-side iteration.
299 */ 487 */
300 icmsghdrp->status = HV_S_CONT;
301 goto response_done; 488 goto response_done;
302 } 489 }
303 490
304 icmsghdrp->status = HV_S_OK;
305
306 kvp_msg = (struct hv_kvp_msg *) 491 kvp_msg = (struct hv_kvp_msg *)
307 &recv_buffer[sizeof(struct vmbuspipe_hdr) + 492 &recv_buffer[sizeof(struct vmbuspipe_hdr) +
308 sizeof(struct icmsg_hdr)]; 493 sizeof(struct icmsg_hdr)];
309 494
310 switch (kvp_transaction.kvp_msg->kvp_hdr.operation) { 495 switch (kvp_transaction.kvp_msg->kvp_hdr.operation) {
496 case KVP_OP_GET_IP_INFO:
497 ret = process_ob_ipinfo(msg_to_host,
498 (struct hv_kvp_ip_msg *)kvp_msg,
499 KVP_OP_GET_IP_INFO);
500 if (ret < 0)
501 icmsghdrp->status = HV_E_FAIL;
502
503 goto response_done;
504 case KVP_OP_SET_IP_INFO:
505 goto response_done;
311 case KVP_OP_GET: 506 case KVP_OP_GET:
312 kvp_data = &kvp_msg->body.kvp_get.data; 507 kvp_data = &kvp_msg->body.kvp_get.data;
313 goto copy_value; 508 goto copy_value;
@@ -321,7 +516,7 @@ kvp_respond_to_host(char *key, char *value, int error)
321 } 516 }
322 517
323 kvp_data = &kvp_msg->body.kvp_enum_data.data; 518 kvp_data = &kvp_msg->body.kvp_enum_data.data;
324 key_name = key; 519 key_name = msg_to_host->body.kvp_enum_data.data.key;
325 520
326 /* 521 /*
327 * The windows host expects the key/value pair to be encoded 522 * The windows host expects the key/value pair to be encoded
@@ -335,6 +530,7 @@ kvp_respond_to_host(char *key, char *value, int error)
335 kvp_data->key_size = 2*(keylen + 1); /* utf16 encoding */ 530 kvp_data->key_size = 2*(keylen + 1); /* utf16 encoding */
336 531
337copy_value: 532copy_value:
533 value = msg_to_host->body.kvp_enum_data.data.value;
338 valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN, 534 valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
339 (wchar_t *) kvp_data->value, 535 (wchar_t *) kvp_data->value,
340 (HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2); 536 (HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2);
@@ -387,7 +583,8 @@ void hv_kvp_onchannelcallback(void *context)
387 return; 583 return;
388 } 584 }
389 585
390 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE, &recvlen, &requestid); 586 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
587 &requestid);
391 588
392 if (recvlen > 0) { 589 if (recvlen > 0) {
393 icmsghdrp = (struct icmsg_hdr *)&recv_buffer[ 590 icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
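Taken together, the hv_kvp.c changes above implement the following flow between the driver and the user-mode daemon (condensed from the hunks; function names are the ones in this patch):

/*
 * 1. The daemon registers over the netlink connector:
 *      operation == KVP_OP_REGISTER   -> legacy daemon, no IP injection
 *      operation == KVP_OP_REGISTER1  -> new daemon, IP injection capable
 * 2. kvp_handle_handshake() records the value in dm_reg_value and acks
 *    with kvp_register(dm_reg_value); kvp_cn_callback() then clears
 *    in_hand_shake and normal KVP traffic can start.
 * 3. On every later reply, kvp_cn_callback() derives the status to hand
 *    back to the host:
 *      legacy daemon: an empty key string      -> error = HV_S_CONT
 *      new daemon:    the message->error field -> error = message->error
 * 4. kvp_respond_to_host(msg, error) stamps icmsghdrp->status with that
 *    value and, for KVP_OP_GET_IP_INFO, converts the daemon's utf8
 *    strings back to utf16 via process_ob_ipinfo() before completing
 *    the VMBus transaction.
 */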
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index d3ac6a40118b..a0667de7a04c 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -263,7 +263,7 @@ static int util_probe(struct hv_device *dev,
263 (struct hv_util_service *)dev_id->driver_data; 263 (struct hv_util_service *)dev_id->driver_data;
264 int ret; 264 int ret;
265 265
266 srv->recv_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); 266 srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
267 if (!srv->recv_buffer) 267 if (!srv->recv_buffer)
268 return -ENOMEM; 268 return -ENOMEM;
269 if (srv->util_init) { 269 if (srv->util_init) {
@@ -274,7 +274,7 @@ static int util_probe(struct hv_device *dev,
274 } 274 }
275 } 275 }
276 276
277 ret = vmbus_open(dev->channel, 2 * PAGE_SIZE, 2 * PAGE_SIZE, NULL, 0, 277 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
278 srv->util_cb, dev->channel); 278 srv->util_cb, dev->channel);
279 if (ret) 279 if (ret)
280 goto error; 280 goto error;
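The doubled receive buffer and the 4-page ring buffers follow directly from the size of the new IP-injection payload defined in include/linux/hyperv.h further down; a rough sizing check using the array bounds from that hunk (ignoring padding and the vmbuspipe/icmsg headers):

/*
 * struct hv_kvp_ipaddr_value, approximately:
 *   adapter_id  : MAX_ADAPTER_ID_SIZE * 2 =  256 bytes (utf16)
 *   addr_family + dhcp_enabled           =    2 bytes
 *   ip_addr     : MAX_IP_ADDR_SIZE  * 2  = 2048 bytes
 *   sub_net     : MAX_IP_ADDR_SIZE  * 2  = 2048 bytes
 *   gate_way    : MAX_GATEWAY_SIZE  * 2  = 1024 bytes
 *   dns_addr    : MAX_IP_ADDR_SIZE  * 2  = 2048 bytes
 *                                   total ~ 7426 bytes > PAGE_SIZE
 */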
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0614ff3a7d7e..d8d1fadb398a 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -410,10 +410,49 @@ enum {
410 410
411#define HV_PRESENT_BIT 0x80000000 411#define HV_PRESENT_BIT 0x80000000
412 412
413#define HV_LINUX_GUEST_ID_LO 0x00000000 413/*
414#define HV_LINUX_GUEST_ID_HI 2976579765 414 * The guest OS needs to register the guest ID with the hypervisor.
415#define HV_LINUX_GUEST_ID (((u64)HV_LINUX_GUEST_ID_HI << 32) | \ 415 * The guest ID is a 64 bit entity and the structure of this ID is
416 HV_LINUX_GUEST_ID_LO) 416 * specified in the Hyper-V specification:
417 *
418 * http://msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
419 *
420 * While the current guideline does not specify how Linux guest ID(s)
421 * need to be generated, our plan is to publish the guidelines for
422 * Linux and other guest operating systems that currently are hosted
423 * on Hyper-V. The implementation here conforms to these as yet
424 * unpublished guidelines.
425 *
426 *
427 * Bit(s)
428 * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
429 * 62:56 - OS Type; Linux is 0x100
430 * 55:48 - Distro specific identification
431 * 47:16 - Linux kernel version number
432 * 15:0 - Distro specific identification
433 *
434 *
435 */
436
437#define HV_LINUX_VENDOR_ID 0x8100
438
439/*
440 * Generate the guest ID based on the guideline described above.
441 */
442
443static inline __u64 generate_guest_id(__u8 d_info1, __u32 kernel_version,
444 __u16 d_info2)
445{
446 __u64 guest_id = 0;
447
448 guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
449 guest_id |= (((__u64)(d_info1)) << 48);
450 guest_id |= (((__u64)(kernel_version)) << 16);
451 guest_id |= ((__u64)(d_info2));
452
453 return guest_id;
454}
455
417 456
418#define HV_CPU_POWER_MANAGEMENT (1 << 0) 457#define HV_CPU_POWER_MANAGEMENT (1 << 0)
419#define HV_RECOMMENDATIONS_MAX 4 458#define HV_RECOMMENDATIONS_MAX 4
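As a worked example of the layout described above, assuming a v3.6 kernel (LINUX_VERSION_CODE == KERNEL_VERSION(3, 6, 0) == 0x30600) and no distro-specific information:

/*
 *   generate_guest_id(0, LINUX_VERSION_CODE, 0)
 * = (0x8100ULL << 48) | (0ULL << 48) | (0x30600ULL << 16) | 0
 * = 0x8100000306000000
 *
 * Bit 63 is set (open source), the OS type field in bits 62:56 carries
 * the Linux identifier, and the kernel version sits in bits 47:16; this
 * is the value hv_init() now writes to HV_X64_MSR_GUEST_OS_ID.
 */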
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4748086eaaf2..f40dd57bbec1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -146,43 +146,9 @@ static ssize_t vmbus_show_device_attr(struct device *dev,
146 get_channel_info(hv_dev, device_info); 146 get_channel_info(hv_dev, device_info);
147 147
148 if (!strcmp(dev_attr->attr.name, "class_id")) { 148 if (!strcmp(dev_attr->attr.name, "class_id")) {
149 ret = sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-" 149 ret = sprintf(buf, "{%pUl}\n", device_info->chn_type.b);
150 "%02x%02x%02x%02x%02x%02x%02x%02x}\n",
151 device_info->chn_type.b[3],
152 device_info->chn_type.b[2],
153 device_info->chn_type.b[1],
154 device_info->chn_type.b[0],
155 device_info->chn_type.b[5],
156 device_info->chn_type.b[4],
157 device_info->chn_type.b[7],
158 device_info->chn_type.b[6],
159 device_info->chn_type.b[8],
160 device_info->chn_type.b[9],
161 device_info->chn_type.b[10],
162 device_info->chn_type.b[11],
163 device_info->chn_type.b[12],
164 device_info->chn_type.b[13],
165 device_info->chn_type.b[14],
166 device_info->chn_type.b[15]);
167 } else if (!strcmp(dev_attr->attr.name, "device_id")) { 150 } else if (!strcmp(dev_attr->attr.name, "device_id")) {
168 ret = sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-" 151 ret = sprintf(buf, "{%pUl}\n", device_info->chn_instance.b);
169 "%02x%02x%02x%02x%02x%02x%02x%02x}\n",
170 device_info->chn_instance.b[3],
171 device_info->chn_instance.b[2],
172 device_info->chn_instance.b[1],
173 device_info->chn_instance.b[0],
174 device_info->chn_instance.b[5],
175 device_info->chn_instance.b[4],
176 device_info->chn_instance.b[7],
177 device_info->chn_instance.b[6],
178 device_info->chn_instance.b[8],
179 device_info->chn_instance.b[9],
180 device_info->chn_instance.b[10],
181 device_info->chn_instance.b[11],
182 device_info->chn_instance.b[12],
183 device_info->chn_instance.b[13],
184 device_info->chn_instance.b[14],
185 device_info->chn_instance.b[15]);
186 } else if (!strcmp(dev_attr->attr.name, "modalias")) { 152 } else if (!strcmp(dev_attr->attr.name, "modalias")) {
187 print_alias_name(hv_dev, alias_name); 153 print_alias_name(hv_dev, alias_name);
188 ret = sprintf(buf, "vmbus:%s\n", alias_name); 154 ret = sprintf(buf, "vmbus:%s\n", alias_name);
@@ -753,10 +719,35 @@ static struct acpi_driver vmbus_acpi_driver = {
753 }, 719 },
754}; 720};
755 721
722/*
723 * query_hypervisor_presence
724 * - Query the cpuid for presence of windows hypervisor
725 */
726static int query_hypervisor_presence(void)
727{
728 unsigned int eax;
729 unsigned int ebx;
730 unsigned int ecx;
731 unsigned int edx;
732 unsigned int op;
733
734 eax = 0;
735 ebx = 0;
736 ecx = 0;
737 edx = 0;
738 op = HVCPUID_VERSION_FEATURES;
739 cpuid(op, &eax, &ebx, &ecx, &edx);
740
741 return ecx & HV_PRESENT_BIT;
742}
743
756static int __init hv_acpi_init(void) 744static int __init hv_acpi_init(void)
757{ 745{
758 int ret, t; 746 int ret, t;
759 747
748 if (!query_hypervisor_presence())
749 return -ENODEV;
750
760 init_completion(&probe_event); 751 init_completion(&probe_event);
761 752
762 /* 753 /*
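The %pUl used in the sysfs show routines above is the kernel's printk extension for UUIDs: it prints the 16 bytes in the standard 8-4-4-4-12 form with the first three fields byte-swapped (little-endian), matching the byte order the removed hand-rolled sprintf() calls produced. A small illustration of the two UUID conversions (values are arbitrary):

static void show_uuid_formats(void)
{
	u8 uuid[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
			0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };

	pr_info("%pUb\n", uuid);	/* 01020304-0506-0708-090a-0b0c0d0e0f10 */
	pr_info("%pUl\n", uuid);	/* 04030201-0605-0807-090a-0b0c0d0e0f10 */
}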
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4733eab34a23..2c9fafbe8425 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -291,9 +291,9 @@ static struct file_system_type debug_fs_type = {
291 .kill_sb = kill_litter_super, 291 .kill_sb = kill_litter_super,
292}; 292};
293 293
294struct dentry *__create_file(const char *name, umode_t mode, 294static struct dentry *__create_file(const char *name, umode_t mode,
295 struct dentry *parent, void *data, 295 struct dentry *parent, void *data,
296 const struct file_operations *fops) 296 const struct file_operations *fops)
297{ 297{
298 struct dentry *dentry = NULL; 298 struct dentry *dentry = NULL;
299 int error; 299 int error;
diff --git a/include/linux/device.h b/include/linux/device.h
index 52a5f15a2223..ecd900663726 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -536,6 +536,10 @@ extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
536#else 536#else
537extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); 537extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
538#endif 538#endif
539extern void devres_for_each_res(struct device *dev, dr_release_t release,
540 dr_match_t match, void *match_data,
541 void (*fn)(struct device *, void *, void *),
542 void *data);
539extern void devres_free(void *res); 543extern void devres_free(void *res);
540extern void devres_add(struct device *dev, void *res); 544extern void devres_add(struct device *dev, void *res);
541extern void *devres_find(struct device *dev, dr_release_t release, 545extern void *devres_find(struct device *dev, dr_release_t release,
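devres_for_each_res(), declared above, walks a device's managed-resource list and invokes @fn on every entry whose release function (and optional @match) agrees. A minimal, hypothetical sketch of a caller (my_devm_release, my_print_res and my_dump_resources are made-up names):

static void my_devm_release(struct device *dev, void *res)
{
	/* nothing to tear down in this sketch */
}

static void my_print_res(struct device *dev, void *res, void *data)
{
	dev_info(dev, "%s: devres %p\n", (char *)data, res);
}

static void my_dump_resources(struct device *dev)
{
	/* NULL match/match_data: visit every devres that was allocated
	 * with my_devm_release as its release function. */
	devres_for_each_res(dev, my_devm_release, NULL, NULL,
			    my_print_res, "dump");
}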
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 1e7c01189fa6..e4279fedb93a 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -12,6 +12,9 @@ struct firmware {
12 size_t size; 12 size_t size;
13 const u8 *data; 13 const u8 *data;
14 struct page **pages; 14 struct page **pages;
15
16 /* firmware loader private fields */
17 void *priv;
15}; 18};
16 19
17struct module; 20struct module;
@@ -44,6 +47,8 @@ int request_firmware_nowait(
44 void (*cont)(const struct firmware *fw, void *context)); 47 void (*cont)(const struct firmware *fw, void *context));
45 48
46void release_firmware(const struct firmware *fw); 49void release_firmware(const struct firmware *fw);
50int cache_firmware(const char *name);
51int uncache_firmware(const char *name);
47#else 52#else
48static inline int request_firmware(const struct firmware **fw, 53static inline int request_firmware(const struct firmware **fw,
49 const char *name, 54 const char *name,
@@ -62,6 +67,16 @@ static inline int request_firmware_nowait(
62static inline void release_firmware(const struct firmware *fw) 67static inline void release_firmware(const struct firmware *fw)
63{ 68{
64} 69}
70
71static inline int cache_firmware(const char *name)
72{
73 return -ENOENT;
74}
75
76static inline int uncache_firmware(const char *name)
77{
78 return -EINVAL;
79}
65#endif 80#endif
66 81
67#endif 82#endif
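cache_firmware() looks up an image by name and keeps it in memory so that a later request_firmware() for the same name does not have to reach userspace; uncache_firmware() drops that reference. The stubs above keep !CONFIG_FW_LOADER builds compiling. One plausible usage pattern around suspend/resume (the driver callbacks and the "my_dev.bin" image name are illustrative):

static int my_driver_suspend(struct device *dev)
{
	/* Pin the image now; at resume time userspace may not be ready
	 * to service a firmware request. */
	return cache_firmware("my_dev.bin");
}

static int my_driver_resume(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "my_dev.bin", dev);
	if (err)
		return err;

	/* ... download fw->data / fw->size to the hardware ... */

	release_firmware(fw);
	uncache_firmware("my_dev.bin");
	return 0;
}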
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 68ed7f7e1fc9..7585d5533e43 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -122,12 +122,53 @@
122#define REG_U32 4 122#define REG_U32 4
123#define REG_U64 8 123#define REG_U64 8
124 124
125/*
126 * As we look at expanding the KVP functionality to include
127 * IP injection, we need to maintain binary
128 * compatibility with older daemons.
129 *
130 * The KVP opcodes are defined by the host and it was unfortunate
131 * that I chose to treat the registration operation as part of the
132 * KVP operations defined by the host.
133 * Here is the level of compatibility
134 * (between the user level daemon and the kernel KVP driver) that we
135 * will implement:
136 *
137 * An older daemon will always be supported on a newer driver.
138 * A given user-level daemon will require a minimum version of the
139 * kernel driver.
140 * If we cannot handle the version differences, we will fail gracefully
141 * (this can happen when we have a user-level daemon that is more
142 * advanced than the KVP driver).
143 *
144 * We will use the values exchanged in this handshake to determine whether
145 * we have a workable user-level daemon and kernel driver. We begin by taking
146 * the registration opcode out of the KVP opcode namespace. We will, however,
147 * maintain compatibility with the existing user-level daemon code.
148 */
149
150/*
151 * Daemon code not supporting IP injection (legacy daemon).
152 */
153
154#define KVP_OP_REGISTER 4
155
156/*
157 * Daemon code supporting IP injection.
158 * The KVP opcode field is used to communicate the
159 * registration information, so define a namespace that
160 * will be distinct from the host-defined KVP opcodes.
161 */
162
163#define KVP_OP_REGISTER1 100
164
125enum hv_kvp_exchg_op { 165enum hv_kvp_exchg_op {
126 KVP_OP_GET = 0, 166 KVP_OP_GET = 0,
127 KVP_OP_SET, 167 KVP_OP_SET,
128 KVP_OP_DELETE, 168 KVP_OP_DELETE,
129 KVP_OP_ENUMERATE, 169 KVP_OP_ENUMERATE,
130 KVP_OP_REGISTER, 170 KVP_OP_GET_IP_INFO,
171 KVP_OP_SET_IP_INFO,
131 KVP_OP_COUNT /* Number of operations, must be last. */ 172 KVP_OP_COUNT /* Number of operations, must be last. */
132}; 173};
133 174
@@ -140,6 +181,37 @@ enum hv_kvp_exchg_pool {
140 KVP_POOL_COUNT /* Number of pools, must be last. */ 181 KVP_POOL_COUNT /* Number of pools, must be last. */
141}; 182};
142 183
184/*
185 * Some Hyper-V status codes.
186 */
187
188#define HV_S_OK 0x00000000
189#define HV_E_FAIL 0x80004005
190#define HV_S_CONT 0x80070103
191#define HV_ERROR_NOT_SUPPORTED 0x80070032
192#define HV_ERROR_MACHINE_LOCKED 0x800704F7
193#define HV_ERROR_DEVICE_NOT_CONNECTED 0x8007048F
194
195#define ADDR_FAMILY_NONE 0x00
196#define ADDR_FAMILY_IPV4 0x01
197#define ADDR_FAMILY_IPV6 0x02
198
199#define MAX_ADAPTER_ID_SIZE 128
200#define MAX_IP_ADDR_SIZE 1024
201#define MAX_GATEWAY_SIZE 512
202
203
204struct hv_kvp_ipaddr_value {
205 __u16 adapter_id[MAX_ADAPTER_ID_SIZE];
206 __u8 addr_family;
207 __u8 dhcp_enabled;
208 __u16 ip_addr[MAX_IP_ADDR_SIZE];
209 __u16 sub_net[MAX_IP_ADDR_SIZE];
210 __u16 gate_way[MAX_GATEWAY_SIZE];
211 __u16 dns_addr[MAX_IP_ADDR_SIZE];
212} __attribute__((packed));
213
214
143struct hv_kvp_hdr { 215struct hv_kvp_hdr {
144 __u8 operation; 216 __u8 operation;
145 __u8 pool; 217 __u8 pool;
@@ -181,16 +253,26 @@ struct hv_kvp_register {
181}; 253};
182 254
183struct hv_kvp_msg { 255struct hv_kvp_msg {
184 struct hv_kvp_hdr kvp_hdr; 256 union {
257 struct hv_kvp_hdr kvp_hdr;
258 int error;
259 };
185 union { 260 union {
186 struct hv_kvp_msg_get kvp_get; 261 struct hv_kvp_msg_get kvp_get;
187 struct hv_kvp_msg_set kvp_set; 262 struct hv_kvp_msg_set kvp_set;
188 struct hv_kvp_msg_delete kvp_delete; 263 struct hv_kvp_msg_delete kvp_delete;
189 struct hv_kvp_msg_enumerate kvp_enum_data; 264 struct hv_kvp_msg_enumerate kvp_enum_data;
265 struct hv_kvp_ipaddr_value kvp_ip_val;
190 struct hv_kvp_register kvp_register; 266 struct hv_kvp_register kvp_register;
191 } body; 267 } body;
192} __attribute__((packed)); 268} __attribute__((packed));
193 269
270struct hv_kvp_ip_msg {
271 __u8 operation;
272 __u8 pool;
273 struct hv_kvp_ipaddr_value kvp_ip_val;
274} __attribute__((packed));
275
194#ifdef __KERNEL__ 276#ifdef __KERNEL__
195#include <linux/scatterlist.h> 277#include <linux/scatterlist.h>
196#include <linux/list.h> 278#include <linux/list.h>
@@ -405,7 +487,7 @@ struct vmtransfer_page_range {
405struct vmtransfer_page_packet_header { 487struct vmtransfer_page_packet_header {
406 struct vmpacket_descriptor d; 488 struct vmpacket_descriptor d;
407 u16 xfer_pageset_id; 489 u16 xfer_pageset_id;
408 bool sender_owns_set; 490 u8 sender_owns_set;
409 u8 reserved; 491 u8 reserved;
410 u32 range_cnt; 492 u32 range_cnt;
411 struct vmtransfer_page_range ranges[1]; 493 struct vmtransfer_page_range ranges[1];
@@ -559,7 +641,7 @@ struct vmbus_channel_query_vmbus_version {
559/* VMBus Version Supported parameters */ 641/* VMBus Version Supported parameters */
560struct vmbus_channel_version_supported { 642struct vmbus_channel_version_supported {
561 struct vmbus_channel_message_header header; 643 struct vmbus_channel_message_header header;
562 bool version_supported; 644 u8 version_supported;
563} __packed; 645} __packed;
564 646
565/* Offer Channel parameters */ 647/* Offer Channel parameters */
@@ -568,7 +650,7 @@ struct vmbus_channel_offer_channel {
568 struct vmbus_channel_offer offer; 650 struct vmbus_channel_offer offer;
569 u32 child_relid; 651 u32 child_relid;
570 u8 monitorid; 652 u8 monitorid;
571 bool monitor_allocated; 653 u8 monitor_allocated;
572} __packed; 654} __packed;
573 655
574/* Rescind Offer parameters */ 656/* Rescind Offer parameters */
@@ -704,7 +786,7 @@ struct vmbus_channel_initiate_contact {
704 786
705struct vmbus_channel_version_response { 787struct vmbus_channel_version_response {
706 struct vmbus_channel_message_header header; 788 struct vmbus_channel_message_header header;
707 bool version_supported; 789 u8 version_supported;
708} __packed; 790} __packed;
709 791
710enum vmbus_channel_state { 792enum vmbus_channel_state {
@@ -977,11 +1059,6 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
977#define ICMSGHDRFLAG_REQUEST 2 1059#define ICMSGHDRFLAG_REQUEST 2
978#define ICMSGHDRFLAG_RESPONSE 4 1060#define ICMSGHDRFLAG_RESPONSE 4
979 1061
980#define HV_S_OK 0x00000000
981#define HV_E_FAIL 0x80004005
982#define HV_S_CONT 0x80070103
983#define HV_ERROR_NOT_SUPPORTED 0x80070032
984#define HV_ERROR_MACHINE_LOCKED 0x800704F7
985 1062
986/* 1063/*
987 * While we want to handle util services as regular devices, 1064 * While we want to handle util services as regular devices,
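The Hyper-V status codes relocated above are the values the KVP paths in this series actually exchange; summarizing how the hunks above and below use them:

/*
 *   HV_S_OK    - request satisfied; also the daemon's initial value for
 *                hv_msg->error before it processes an operation.
 *   HV_S_CONT  - the daemon could not satisfy the request (missing key,
 *                end of a pool enumeration); the host stops iterating.
 *   HV_E_FAIL  - hard failure in the driver: the daemon timed out, or a
 *                utf8 -> utf16 conversion failed in process_ob_ipinfo().
 */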
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 60e9994ef405..5711e9525a2a 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -14,11 +14,15 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/mod_devicetable.h> 15#include <linux/mod_devicetable.h>
16 16
17#define PLATFORM_DEVID_NONE (-1)
18#define PLATFORM_DEVID_AUTO (-2)
19
17struct mfd_cell; 20struct mfd_cell;
18 21
19struct platform_device { 22struct platform_device {
20 const char * name; 23 const char * name;
21 int id; 24 int id;
25 bool id_auto;
22 struct device dev; 26 struct device dev;
23 u32 num_resources; 27 u32 num_resources;
24 struct resource * resource; 28 struct resource * resource;
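PLATFORM_DEVID_AUTO tells the platform core to allocate a free id itself, with id_auto recording that the id must be returned when the device goes away; PLATFORM_DEVID_NONE keeps the traditional "no id" behaviour of passing -1. A small hypothetical user (the "demo-dev" name is illustrative):

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	/* Let the core pick a unique id instead of hard-coding one. */
	demo_pdev = platform_device_register_simple("demo-dev",
						    PLATFORM_DEVID_AUTO,
						    NULL, 0);
	return IS_ERR(demo_pdev) ? PTR_ERR(demo_pdev) : 0;
}

static void __exit demo_exit(void)
{
	platform_device_unregister(demo_pdev);
}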
diff --git a/include/linux/pm.h b/include/linux/pm.h
index f067e60a3832..88f034a23f2c 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -638,6 +638,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
638 } while (0) 638 } while (0)
639 639
640extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); 640extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
641extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));
641 642
642extern int pm_generic_prepare(struct device *dev); 643extern int pm_generic_prepare(struct device *dev);
643extern int pm_generic_suspend_late(struct device *dev); 644extern int pm_generic_suspend_late(struct device *dev);
@@ -677,6 +678,10 @@ static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
677 return 0; 678 return 0;
678} 679}
679 680
681static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
682{
683}
684
680#define pm_generic_prepare NULL 685#define pm_generic_prepare NULL
681#define pm_generic_suspend NULL 686#define pm_generic_suspend NULL
682#define pm_generic_resume NULL 687#define pm_generic_resume NULL
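dpm_for_each_dev() iterates over the PM core's device list under the appropriate locking and calls @fn for every device; the empty inline above keeps configurations without sleep support building. A minimal sketch of a caller (count_dev/count_pm_devices are illustrative):

static void count_dev(struct device *dev, void *data)
{
	(*(int *)data)++;
}

static int count_pm_devices(void)
{
	int count = 0;

	dpm_for_each_dev(&count, count_dev);
	return count;
}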
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d9834b362943..65d54c89394e 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -69,15 +69,16 @@ enum key_index {
69}; 69};
70 70
71static char kvp_send_buffer[4096]; 71static char kvp_send_buffer[4096];
72static char kvp_recv_buffer[4096]; 72static char kvp_recv_buffer[4096 * 2];
73static struct sockaddr_nl addr; 73static struct sockaddr_nl addr;
74static int in_hand_shake = 1;
74 75
75static char *os_name = ""; 76static char *os_name = "";
76static char *os_major = ""; 77static char *os_major = "";
77static char *os_minor = ""; 78static char *os_minor = "";
78static char *processor_arch; 79static char *processor_arch;
79static char *os_build; 80static char *os_build;
80static char *lic_version; 81static char *lic_version = "Unknown version";
81static struct utsname uts_buf; 82static struct utsname uts_buf;
82 83
83 84
@@ -394,7 +395,7 @@ static int kvp_get_value(int pool, __u8 *key, int key_size, __u8 *value,
394 return 1; 395 return 1;
395} 396}
396 397
397static void kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, 398static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size,
398 __u8 *value, int value_size) 399 __u8 *value, int value_size)
399{ 400{
400 struct kvp_record *record; 401 struct kvp_record *record;
@@ -406,16 +407,12 @@ static void kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size,
406 record = kvp_file_info[pool].records; 407 record = kvp_file_info[pool].records;
407 408
408 if (index >= kvp_file_info[pool].num_records) { 409 if (index >= kvp_file_info[pool].num_records) {
409 /* 410 return 1;
410 * This is an invalid index; terminate enumeration;
411 * - a NULL value will do the trick.
412 */
413 strcpy(value, "");
414 return;
415 } 411 }
416 412
417 memcpy(key, record[index].key, key_size); 413 memcpy(key, record[index].key, key_size);
418 memcpy(value, record[index].value, value_size); 414 memcpy(value, record[index].value, value_size);
415 return 0;
419} 416}
420 417
421 418
@@ -494,21 +491,141 @@ done:
494 return; 491 return;
495} 492}
496 493
494static void kvp_process_ipconfig_file(char *cmd,
495 char *config_buf, int len,
496 int element_size, int offset)
497{
498 char buf[256];
499 char *p;
500 char *x;
501 FILE *file;
502
503 /*
504 * First execute the command.
505 */
506 file = popen(cmd, "r");
507 if (file == NULL)
508 return;
509
510 if (offset == 0)
511 memset(config_buf, 0, len);
512 while ((p = fgets(buf, sizeof(buf), file)) != NULL) {
513 if ((len - strlen(config_buf)) < (element_size + 1))
514 break;
515
516 x = strchr(p, '\n');
517 *x = '\0';
518 strcat(config_buf, p);
519 strcat(config_buf, ";");
520 }
521 pclose(file);
522}
523
524static void kvp_get_ipconfig_info(char *if_name,
525 struct hv_kvp_ipaddr_value *buffer)
526{
527 char cmd[512];
528
529 /*
530 * Get the address of default gateway (ipv4).
531 */
532 sprintf(cmd, "%s %s", "ip route show dev", if_name);
533 strcat(cmd, " | awk '/default/ {print $3 }'");
534
535 /*
536 * Execute the command to gather gateway info.
537 */
538 kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way,
539 (MAX_GATEWAY_SIZE * 2), INET_ADDRSTRLEN, 0);
540
541 /*
542 * Get the address of default gateway (ipv6).
543 */
544 sprintf(cmd, "%s %s", "ip -f inet6 route show dev", if_name);
545 strcat(cmd, " | awk '/default/ {print $3 }'");
546
547 /*
548 * Execute the command to gather gateway info (ipv6).
549 */
550 kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way,
551 (MAX_GATEWAY_SIZE * 2), INET6_ADDRSTRLEN, 1);
552
553}
554
555
556static unsigned int hweight32(unsigned int *w)
557{
558 unsigned int res = *w - ((*w >> 1) & 0x55555555);
559 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
560 res = (res + (res >> 4)) & 0x0F0F0F0F;
561 res = res + (res >> 8);
562 return (res + (res >> 16)) & 0x000000FF;
563}
564
565static int kvp_process_ip_address(void *addrp,
566 int family, char *buffer,
567 int length, int *offset)
568{
569 struct sockaddr_in *addr;
570 struct sockaddr_in6 *addr6;
571 int addr_length;
572 char tmp[50];
573 const char *str;
574
575 if (family == AF_INET) {
576 addr = (struct sockaddr_in *)addrp;
577 str = inet_ntop(family, &addr->sin_addr, tmp, 50);
578 addr_length = INET_ADDRSTRLEN;
579 } else {
580 addr6 = (struct sockaddr_in6 *)addrp;
581 str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50);
582 addr_length = INET6_ADDRSTRLEN;
583 }
584
585 if ((length - *offset) < addr_length + 1)
586 return 1;
587 if (str == NULL) {
588 strcpy(buffer, "inet_ntop failed\n");
589 return 1;
590 }
591 if (*offset == 0)
592 strcpy(buffer, tmp);
593 else
594 strcat(buffer, tmp);
595 strcat(buffer, ";");
596
597 *offset += strlen(str) + 1;
598 return 0;
599}
600
497static int 601static int
498kvp_get_ip_address(int family, char *buffer, int length) 602kvp_get_ip_address(int family, char *if_name, int op,
603 void *out_buffer, int length)
499{ 604{
500 struct ifaddrs *ifap; 605 struct ifaddrs *ifap;
501 struct ifaddrs *curp; 606 struct ifaddrs *curp;
502 int ipv4_len = strlen("255.255.255.255") + 1;
503 int ipv6_len = strlen("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")+1;
504 int offset = 0; 607 int offset = 0;
505 const char *str; 608 int sn_offset = 0;
506 char tmp[50];
507 int error = 0; 609 int error = 0;
508 610 char *buffer;
611 struct hv_kvp_ipaddr_value *ip_buffer;
612 char cidr_mask[5]; /* /xyz */
613 int weight;
614 int i;
615 unsigned int *w;
616 char *sn_str;
617 struct sockaddr_in6 *addr6;
618
619 if (op == KVP_OP_ENUMERATE) {
620 buffer = out_buffer;
621 } else {
622 ip_buffer = out_buffer;
623 buffer = (char *)ip_buffer->ip_addr;
624 ip_buffer->addr_family = 0;
625 }
509 /* 626 /*
510 * On entry into this function, the buffer is capable of holding the 627 * On entry into this function, the buffer is capable of holding the
511 * maximum key value (2048 bytes). 628 * maximum key value.
512 */ 629 */
513 630
514 if (getifaddrs(&ifap)) { 631 if (getifaddrs(&ifap)) {
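The daemon's local hweight32() above is a plain population count (userspace cannot use the kernel's hweight helpers), and it is what later turns an IPv6 netmask into CIDR notation: the set bits of the four 32-bit words of sin6_addr are summed and printed as "/N". A standalone worked example reusing that helper:

static void cidr_example(void)
{
	/* netmask ffff:ffff:ffff:ffff:: expressed as four 32-bit words */
	unsigned int w[4] = { 0xffffffff, 0xffffffff, 0x0, 0x0 };
	char cidr_mask[5];
	int i, weight = 0;

	for (i = 0; i < 4; i++)
		weight += hweight32(&w[i]);	/* 32 + 32 + 0 + 0 */

	sprintf(cidr_mask, "/%d", weight);	/* "/64" */
}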
@@ -518,58 +635,99 @@ kvp_get_ip_address(int family, char *buffer, int length)
518 635
519 curp = ifap; 636 curp = ifap;
520 while (curp != NULL) { 637 while (curp != NULL) {
521 if ((curp->ifa_addr != NULL) && 638 if (curp->ifa_addr == NULL) {
522 (curp->ifa_addr->sa_family == family)) { 639 curp = curp->ifa_next;
523 if (family == AF_INET) { 640 continue;
524 struct sockaddr_in *addr = 641 }
525 (struct sockaddr_in *) curp->ifa_addr;
526
527 str = inet_ntop(family, &addr->sin_addr,
528 tmp, 50);
529 if (str == NULL) {
530 strcpy(buffer, "inet_ntop failed\n");
531 error = 1;
532 goto getaddr_done;
533 }
534 if (offset == 0)
535 strcpy(buffer, tmp);
536 else
537 strcat(buffer, tmp);
538 strcat(buffer, ";");
539 642
540 offset += strlen(str) + 1; 643 if ((if_name != NULL) &&
541 if ((length - offset) < (ipv4_len + 1)) 644 (strncmp(curp->ifa_name, if_name, strlen(if_name)))) {
542 goto getaddr_done; 645 /*
646 * We want info about a specific interface;
647 * just continue.
648 */
649 curp = curp->ifa_next;
650 continue;
651 }
543 652
544 } else { 653 /*
654 * We only support two address families: AF_INET and AF_INET6.
655 * If a family value of 0 is specified, we collect both
656 * supported address families; if not we gather info on
657 * the specified address family.
658 */
659 if ((family != 0) && (curp->ifa_addr->sa_family != family)) {
660 curp = curp->ifa_next;
661 continue;
662 }
663 if ((curp->ifa_addr->sa_family != AF_INET) &&
664 (curp->ifa_addr->sa_family != AF_INET6)) {
665 curp = curp->ifa_next;
666 continue;
667 }
545 668
669 if (op == KVP_OP_GET_IP_INFO) {
546 /* 670 /*
547 * We only support AF_INET and AF_INET6 671 * Gather info other than the IP address.
548 * and the list of addresses is separated by a ";". 672 * IP address info will be gathered later.
549 */ 673 */
550 struct sockaddr_in6 *addr = 674 if (curp->ifa_addr->sa_family == AF_INET) {
551 (struct sockaddr_in6 *) curp->ifa_addr; 675 ip_buffer->addr_family |= ADDR_FAMILY_IPV4;
552 676 /*
553 str = inet_ntop(family, 677 * Get subnet info.
554 &addr->sin6_addr.s6_addr, 678 */
555 tmp, 50); 679 error = kvp_process_ip_address(
556 if (str == NULL) { 680 curp->ifa_netmask,
557 strcpy(buffer, "inet_ntop failed\n"); 681 AF_INET,
558 error = 1; 682 (char *)
559 goto getaddr_done; 683 ip_buffer->sub_net,
560 } 684 length,
561 if (offset == 0) 685 &sn_offset);
562 strcpy(buffer, tmp); 686 if (error)
563 else 687 goto gather_ipaddr;
564 strcat(buffer, tmp); 688 } else {
565 strcat(buffer, ";"); 689 ip_buffer->addr_family |= ADDR_FAMILY_IPV6;
566 offset += strlen(str) + 1;
567 if ((length - offset) < (ipv6_len + 1))
568 goto getaddr_done;
569 690
691 /*
692 * Get subnet info in CIDR format.
693 */
694 weight = 0;
695 sn_str = (char *)ip_buffer->sub_net;
696 addr6 = (struct sockaddr_in6 *)
697 curp->ifa_netmask;
698 w = addr6->sin6_addr.s6_addr32;
699
700 for (i = 0; i < 4; i++)
701 weight += hweight32(&w[i]);
702
703 sprintf(cidr_mask, "/%d", weight);
704 if ((length - sn_offset) <
705 (strlen(cidr_mask) + 1))
706 goto gather_ipaddr;
707
708 if (sn_offset == 0)
709 strcpy(sn_str, cidr_mask);
710 else
711 strcat(sn_str, cidr_mask);
712 strcat((char *)ip_buffer->sub_net, ";");
713 sn_offset += strlen(sn_str) + 1;
570 } 714 }
571 715
716 /*
717 * Collect other ip related configuration info.
718 */
719
720 kvp_get_ipconfig_info(if_name, ip_buffer);
572 } 721 }
722
723gather_ipaddr:
724 error = kvp_process_ip_address(curp->ifa_addr,
725 curp->ifa_addr->sa_family,
726 buffer,
727 length, &offset);
728 if (error)
729 goto getaddr_done;
730
573 curp = curp->ifa_next; 731 curp = curp->ifa_next;
574 } 732 }
575 733
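After the interface walk above, every list the daemon hands back is a single semicolon-separated string. For a host with one IPv4 and one IPv6 address on the requested interface, the KVP_OP_GET_IP_INFO buffers would end up looking roughly like this (values are illustrative):

/*
 *   ip_addr     = "10.0.0.4;fe80::215:5dff:fe01:2345;"
 *   sub_net     = "255.255.255.0;/64;"
 *   gate_way    = "10.0.0.1;fe80::1;"
 *   addr_family = ADDR_FAMILY_IPV4 | ADDR_FAMILY_IPV6
 */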
@@ -646,6 +804,8 @@ int main(void)
646 char *p; 804 char *p;
647 char *key_value; 805 char *key_value;
648 char *key_name; 806 char *key_name;
807 int op;
808 int pool;
649 809
650 daemon(1, 0); 810 daemon(1, 0);
651 openlog("KVP", 0, LOG_USER); 811 openlog("KVP", 0, LOG_USER);
@@ -687,7 +847,7 @@ int main(void)
687 message->id.val = CN_KVP_VAL; 847 message->id.val = CN_KVP_VAL;
688 848
689 hv_msg = (struct hv_kvp_msg *)message->data; 849 hv_msg = (struct hv_kvp_msg *)message->data;
690 hv_msg->kvp_hdr.operation = KVP_OP_REGISTER; 850 hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1;
691 message->ack = 0; 851 message->ack = 0;
692 message->len = sizeof(struct hv_kvp_msg); 852 message->len = sizeof(struct hv_kvp_msg);
693 853
@@ -721,12 +881,21 @@ int main(void)
721 incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg); 881 incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
722 hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data; 882 hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
723 883
724 switch (hv_msg->kvp_hdr.operation) { 884 /*
725 case KVP_OP_REGISTER: 885 * We will use the KVP header information to pass back
886 * the error from this daemon. So, first copy the state
887 * and set the error code to success.
888 */
889 op = hv_msg->kvp_hdr.operation;
890 pool = hv_msg->kvp_hdr.pool;
891 hv_msg->error = HV_S_OK;
892
893 if ((in_hand_shake) && (op == KVP_OP_REGISTER1)) {
726 /* 894 /*
727 * Driver is registering with us; stash away the version 895 * Driver is registering with us; stash away the version
728 * information. 896 * information.
729 */ 897 */
898 in_hand_shake = 0;
730 p = (char *)hv_msg->body.kvp_register.version; 899 p = (char *)hv_msg->body.kvp_register.version;
731 lic_version = malloc(strlen(p) + 1); 900 lic_version = malloc(strlen(p) + 1);
732 if (lic_version) { 901 if (lic_version) {
@@ -737,44 +906,39 @@ int main(void)
737 syslog(LOG_ERR, "malloc failed"); 906 syslog(LOG_ERR, "malloc failed");
738 } 907 }
739 continue; 908 continue;
909 }
740 910
741 /* 911 switch (op) {
742 * The current protocol with the kernel component uses a
743 * NULL key name to pass an error condition.
744 * For the SET, GET and DELETE operations,
745 * use the existing protocol to pass back error.
746 */
747
748 case KVP_OP_SET: 912 case KVP_OP_SET:
749 if (kvp_key_add_or_modify(hv_msg->kvp_hdr.pool, 913 if (kvp_key_add_or_modify(pool,
750 hv_msg->body.kvp_set.data.key, 914 hv_msg->body.kvp_set.data.key,
751 hv_msg->body.kvp_set.data.key_size, 915 hv_msg->body.kvp_set.data.key_size,
752 hv_msg->body.kvp_set.data.value, 916 hv_msg->body.kvp_set.data.value,
753 hv_msg->body.kvp_set.data.value_size)) 917 hv_msg->body.kvp_set.data.value_size))
754 strcpy(hv_msg->body.kvp_set.data.key, ""); 918 hv_msg->error = HV_S_CONT;
755 break; 919 break;
756 920
757 case KVP_OP_GET: 921 case KVP_OP_GET:
758 if (kvp_get_value(hv_msg->kvp_hdr.pool, 922 if (kvp_get_value(pool,
759 hv_msg->body.kvp_set.data.key, 923 hv_msg->body.kvp_set.data.key,
760 hv_msg->body.kvp_set.data.key_size, 924 hv_msg->body.kvp_set.data.key_size,
761 hv_msg->body.kvp_set.data.value, 925 hv_msg->body.kvp_set.data.value,
762 hv_msg->body.kvp_set.data.value_size)) 926 hv_msg->body.kvp_set.data.value_size))
763 strcpy(hv_msg->body.kvp_set.data.key, ""); 927 hv_msg->error = HV_S_CONT;
764 break; 928 break;
765 929
766 case KVP_OP_DELETE: 930 case KVP_OP_DELETE:
767 if (kvp_key_delete(hv_msg->kvp_hdr.pool, 931 if (kvp_key_delete(pool,
768 hv_msg->body.kvp_delete.key, 932 hv_msg->body.kvp_delete.key,
769 hv_msg->body.kvp_delete.key_size)) 933 hv_msg->body.kvp_delete.key_size))
770 strcpy(hv_msg->body.kvp_delete.key, ""); 934 hv_msg->error = HV_S_CONT;
771 break; 935 break;
772 936
773 default: 937 default:
774 break; 938 break;
775 } 939 }
776 940
777 if (hv_msg->kvp_hdr.operation != KVP_OP_ENUMERATE) 941 if (op != KVP_OP_ENUMERATE)
778 goto kvp_done; 942 goto kvp_done;
779 943
780 /* 944 /*
@@ -782,13 +946,14 @@ int main(void)
782 * both the key and the value; if not read from the 946 * both the key and the value; if not read from the
783 * appropriate pool. 947 * appropriate pool.
784 */ 948 */
785 if (hv_msg->kvp_hdr.pool != KVP_POOL_AUTO) { 949 if (pool != KVP_POOL_AUTO) {
786 kvp_pool_enumerate(hv_msg->kvp_hdr.pool, 950 if (kvp_pool_enumerate(pool,
787 hv_msg->body.kvp_enum_data.index, 951 hv_msg->body.kvp_enum_data.index,
788 hv_msg->body.kvp_enum_data.data.key, 952 hv_msg->body.kvp_enum_data.data.key,
789 HV_KVP_EXCHANGE_MAX_KEY_SIZE, 953 HV_KVP_EXCHANGE_MAX_KEY_SIZE,
790 hv_msg->body.kvp_enum_data.data.value, 954 hv_msg->body.kvp_enum_data.data.value,
791 HV_KVP_EXCHANGE_MAX_VALUE_SIZE); 955 HV_KVP_EXCHANGE_MAX_VALUE_SIZE))
956 hv_msg->error = HV_S_CONT;
792 goto kvp_done; 957 goto kvp_done;
793 } 958 }
794 959
@@ -807,13 +972,13 @@ int main(void)
807 strcpy(key_value, lic_version); 972 strcpy(key_value, lic_version);
808 break; 973 break;
809 case NetworkAddressIPv4: 974 case NetworkAddressIPv4:
810 kvp_get_ip_address(AF_INET, key_value, 975 kvp_get_ip_address(AF_INET, NULL, KVP_OP_ENUMERATE,
811 HV_KVP_EXCHANGE_MAX_VALUE_SIZE); 976 key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
812 strcpy(key_name, "NetworkAddressIPv4"); 977 strcpy(key_name, "NetworkAddressIPv4");
813 break; 978 break;
814 case NetworkAddressIPv6: 979 case NetworkAddressIPv6:
815 kvp_get_ip_address(AF_INET6, key_value, 980 kvp_get_ip_address(AF_INET6, NULL, KVP_OP_ENUMERATE,
816 HV_KVP_EXCHANGE_MAX_VALUE_SIZE); 981 key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE);
817 strcpy(key_name, "NetworkAddressIPv6"); 982 strcpy(key_name, "NetworkAddressIPv6");
818 break; 983 break;
819 case OSBuildNumber: 984 case OSBuildNumber:
@@ -841,11 +1006,7 @@ int main(void)
841 strcpy(key_name, "ProcessorArchitecture"); 1006 strcpy(key_name, "ProcessorArchitecture");
842 break; 1007 break;
843 default: 1008 default:
844 strcpy(key_value, "Unknown Key"); 1009 hv_msg->error = HV_S_CONT;
845 /*
846 * We use a null key name to terminate enumeration.
847 */
848 strcpy(key_name, "");
849 break; 1010 break;
850 } 1011 }
851 /* 1012 /*