about summary refs log tree commit diff stats
path: root/net/lapb/lapb_iface.c
diff options
context:
space:
mode:
authorPrarit Bhargava <prarit@redhat.com>2009-11-12 09:18:46 -0500
committerDave Jones <davej@redhat.com>2009-11-17 23:15:04 -0500
commit90e41bac100e34f955f48e7686c2fc685ac9aa30 (patch)
tree50ae248a292e85d3e784d12e2e6a37823048d98b /net/lapb/lapb_iface.c
parent54c9a35d9faef06e00e2a941eb8fe674f1886901 (diff)
[CPUFREQ] Fix stale cpufreq_cpu_governor pointer
Dave, attached is an update of my patch against the cpufreq fixes branch. Before applying the patch I compiled and booted the tree to see if the panic was still there -- to my surprise it was not. This is because of the conversion of cpufreq_cpu_governor to a char[]. While the panic is kaput, the problem of stale data continues and my patch is still valid. It is possible to end up with the wrong governor after hotplug events because CPUFREQ_DEFAULT_GOVERNOR is statically linked to a default, while the cpu siblings may have had a different governor assigned by a user. I.e., the patch is still needed in order to keep the governors assigned properly when hotplugging devices. Signed-off-by: Prarit Bhargava <prarit@redhat.com> Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'net/lapb/lapb_iface.c')
0 files changed, 0 insertions, 0 deletions
an> 0; while ((recorded < bytes) && (i < desc->nr_addrs)) { unsigned long vaddr = (unsigned long)buffer + recorded; unsigned long paddr; int offset; int chunksz; offset = vaddr % PAGE_SIZE; /* handle partial pages */ chunksz = min(PAGE_SIZE - offset, bytes - recorded); paddr = xencomm_vtop(vaddr); if (paddr == ~0UL) { printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n", __func__, vaddr); return -EINVAL; } desc->address[i++] = paddr; recorded += chunksz; } if (recorded < bytes) { printk(KERN_DEBUG "%s: could only translate %ld of %ld bytes\n", __func__, recorded, bytes); return -ENOSPC; } /* mark remaining addresses invalid (just for safety) */ while (i < desc->nr_addrs) desc->address[i++] = XENCOMM_INVALID; desc->magic = XENCOMM_MAGIC; return 0; } static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, void *buffer, unsigned long bytes) { struct xencomm_desc *desc; unsigned long buffer_ulong = (unsigned long)buffer; unsigned long start = buffer_ulong & PAGE_MASK; unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK; unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT; unsigned long size = sizeof(*desc) + sizeof(desc->address[0]) * nr_addrs; /* * slab allocator returns at least sizeof(void*) aligned pointer. * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might * cross page boundary. 
*/ if (sizeof(*desc) > sizeof(void *)) { unsigned long order = get_order(size); desc = (struct xencomm_desc *)__get_free_pages(gfp_mask, order); if (desc == NULL) return NULL; desc->nr_addrs = ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) / sizeof(*desc->address); } else { desc = kmalloc(size, gfp_mask); if (desc == NULL) return NULL; desc->nr_addrs = nr_addrs; } return desc; } void xencomm_free(struct xencomm_handle *desc) { if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) { struct xencomm_desc *desc__ = (struct xencomm_desc *)desc; if (sizeof(*desc__) > sizeof(void *)) { unsigned long size = sizeof(*desc__) + sizeof(desc__->address[0]) * desc__->nr_addrs; unsigned long order = get_order(size); free_pages((unsigned long)__va(desc), order); } else kfree(__va(desc)); } } static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask) { struct xencomm_desc *desc; int rc; pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes); if (bytes == 0) { /* don't create a descriptor; Xen recognizes NULL. 
*/ BUG_ON(buffer != NULL); *ret = NULL; return 0; } BUG_ON(buffer == NULL); /* 'bytes' is non-zero */ desc = xencomm_alloc(gfp_mask, buffer, bytes); if (!desc) { printk(KERN_DEBUG "%s failure\n", "xencomm_alloc"); return -ENOMEM; } rc = xencomm_init(desc, buffer, bytes); if (rc) { printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc); xencomm_free((struct xencomm_handle *)__pa(desc)); return rc; } *ret = desc; return 0; } static struct xencomm_handle *xencomm_create_inline(void *ptr) { unsigned long paddr; BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr)); paddr = (unsigned long)xencomm_pa(ptr); BUG_ON(paddr & XENCOMM_INLINE_FLAG); return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG); } /* "mini" routine, for stack-based communications: */ static int xencomm_create_mini(void *buffer, unsigned long bytes, struct xencomm_mini *xc_desc, struct xencomm_desc **ret) { int rc = 0; struct xencomm_desc *desc; BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0); desc = (void *)xc_desc; desc->nr_addrs = XENCOMM_MINI_ADDRS; rc = xencomm_init(desc, buffer, bytes); if (!rc) *ret = desc; return rc; } struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) { int rc; struct xencomm_desc *desc; if (xencomm_is_phys_contiguous((unsigned long)ptr)) return xencomm_create_inline(ptr); rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); if (rc || desc == NULL) return NULL; return xencomm_pa(desc); } struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_desc) { int rc; struct xencomm_desc *desc = NULL; if (xencomm_is_phys_contiguous((unsigned long)ptr)) return xencomm_create_inline(ptr); rc = xencomm_create_mini(ptr, bytes, xc_desc, &desc); if (rc) return NULL; return xencomm_pa(desc); }