aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Hurley <peter@hurleysoftware.com>2013-02-27 15:28:28 -0500
committerGleb Natapov <gleb@redhat.com>2013-02-28 01:50:11 -0500
commit3d2a80a230250c2534ce5b17503670adaf1d7fff (patch)
tree2d52e8d1159051f0647afda8d889fd6645ff5cf2
parent2a7d2b96d5cba7568139d9ab157a0e97ab32440f (diff)
x86/kvm: Fix pvclock vsyscall fixmap
The physical memory fixmapped for the pvclock clock_gettime vsyscall was allocated, and thus is not a kernel symbol. __pa() is the proper method to use in this case. Fixes the crash below when booting a next-20130204+ smp guest on a 3.8-rc5+ KVM host. [ 0.666410] udevd[97]: starting version 175 [ 0.674043] udevd[97]: udevd:[97]: segfault at ffffffffff5fd020 ip 00007fff069e277f sp 00007fff068c9ef8 error d Acked-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Peter Hurley <peter@hurleysoftware.com> Signed-off-by: Gleb Natapov <gleb@redhat.com>
-rw-r--r-- arch/x86/kernel/pvclock.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 85c39590c1a4..2cb9470ea85b 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -185,7 +185,7 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
185 185
186 for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { 186 for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
187 __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, 187 __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
188 __pa_symbol(i) + (idx*PAGE_SIZE), 188 __pa(i) + (idx*PAGE_SIZE),
189 PAGE_KERNEL_VVAR); 189 PAGE_KERNEL_VVAR);
190 } 190 }
191 191
>->req->errors = result; if (err < 0) /* we're only returning the result field in the reply */ job->req->sense_len = sizeof(u32); else job->req->sense_len = job->reply_len; /* we assume all request payload was transferred, residual == 0 */ req->resid_len = 0; if (rsp) { WARN_ON(reply_payload_rcv_len > rsp->resid_len); /* set reply (bidi) residual */ rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len); } blk_complete_request(req); } EXPORT_SYMBOL_GPL(bsg_job_done); /** * bsg_softirq_done - softirq done routine for destroying the bsg requests * @rq: BSG request that holds the job to be destroyed */ static void bsg_softirq_done(struct request *rq) { struct bsg_job *job = rq->special; blk_end_request_all(rq, rq->errors); bsg_destroy_job(job); } static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) { size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); BUG_ON(!req->nr_phys_segments); buf->sg_list = kzalloc(sz, GFP_KERNEL); if (!buf->sg_list) return -ENOMEM; sg_init_table(buf->sg_list, req->nr_phys_segments); buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); buf->payload_len = blk_rq_bytes(req); return 0; } /** * bsg_create_job - create the bsg_job structure for the bsg request * @dev: device that is being sent the bsg request * @req: BSG request that needs a job structure */ static int bsg_create_job(struct device *dev, struct request *req) { struct request *rsp = req->next_rq; struct request_queue *q = req->q; struct bsg_job *job; int ret; BUG_ON(req->special); job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); if (!job) return -ENOMEM; req->special = job; job->req = req; if (q->bsg_job_size) job->dd_data = (void *)&job[1]; job->request = req->cmd; job->request_len = req->cmd_len; job->reply = req->sense; job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer * allocated */ if (req->bio) { ret = bsg_map_buffer(&job->request_payload, req); if (ret) goto failjob_rls_job; } if (rsp && rsp->bio) { ret 
= bsg_map_buffer(&job->reply_payload, rsp); if (ret) goto failjob_rls_rqst_payload; } job->dev = dev; /* take a reference for the request */ get_device(job->dev); return 0; failjob_rls_rqst_payload: kfree(job->request_payload.sg_list); failjob_rls_job: kfree(job); return -ENOMEM; } /* * bsg_goose_queue - restart queue in case it was stopped * @q: request q to be restarted */ void bsg_goose_queue(struct request_queue *q) { if (!q) return; blk_run_queue_async(q); } EXPORT_SYMBOL_GPL(bsg_goose_queue); /** * bsg_request_fn - generic handler for bsg requests * @q: request queue to manage * * On error the create_bsg_job function should return a -Exyz error value * that will be set to the req->errors. * * Drivers/subsys should pass this to the queue init function. */ void bsg_request_fn(struct request_queue *q) { struct device *dev = q->queuedata; struct request *req; struct bsg_job *job; int ret; if (!get_device(dev)) return; while (1) { req = blk_fetch_request(q); if (!req) break; spin_unlock_irq(q->queue_lock); ret = bsg_create_job(dev, req); if (ret) { req->errors = ret; blk_end_request_all(req, ret); spin_lock_irq(q->queue_lock); continue; } job = req->special; ret = q->bsg_job_fn(job); spin_lock_irq(q->queue_lock); if (ret) break; } spin_unlock_irq(q->queue_lock); put_device(dev); spin_lock_irq(q->queue_lock); } EXPORT_SYMBOL_GPL(bsg_request_fn); /** * bsg_setup_queue - Create and add the bsg hooks so we can receive requests * @dev: device to attach bsg device to * @q: request queue setup by caller * @name: device to give bsg device * @job_fn: bsg job handler * @dd_job_size: size of LLD data needed for each job * * The caller should have setup the reuqest queue with bsg_request_fn * as the request_fn. 
*/ int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, bsg_job_fn *job_fn, int dd_job_size) { int ret; q->queuedata = dev; q->bsg_job_size = dd_job_size; q->bsg_job_fn = job_fn; queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); blk_queue_softirq_done(q, bsg_softirq_done); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); ret = bsg_register_queue(q, dev, name, NULL); if (ret) { printk(KERN_ERR "%s: bsg interface failed to " "initialize - register queue\n", dev->kobj.name); return ret; } return 0; } EXPORT_SYMBOL_GPL(bsg_setup_queue);