author	Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-08-16 18:46:48 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 19:01:39 -0400
commit	4c6ea29d82e0d1b9b37e6b879e0a7fd6c409333d (patch)
tree	a11270e23933664056cfd3d53e3ae2a661515d23 /net
parent	6ed8a48582c08432e84e5610564c1d25fe00dd7f (diff)
[IP]: Introduce ip_options_get_from_user
This variant is needed to satisfy sparse __user annotations.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
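Background on why a single entry point cannot carry the annotation: sparse encodes a pointer's address space in its type, so one `data` parameter cannot be both a kernel pointer and a `__user` pointer selected by a runtime flag. A minimal sketch of the resulting split follows; the `blob_*` names, the 40-byte bound, and the length check are illustrative stand-ins, not the kernel's actual ip_options code:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Illustrative container; stands in for struct ip_options. */
struct blob {
	int len;
	unsigned char data[40];
};

/* Shared allocation step, in the spirit of ip_options_get_alloc(). */
static struct blob *blob_alloc(void)
{
	return kzalloc(sizeof(struct blob), GFP_KERNEL);
}

/* Shared commit step, in the spirit of ip_options_get_finish(). */
static int blob_finish(struct blob **dst, struct blob *b, int len)
{
	b->len = len;
	*dst = b;
	return 0;
}

/* Userspace variant: the __user annotation is now accurate. */
int blob_get_from_user(struct blob **dst, const unsigned char __user *data,
		       int len)
{
	struct blob *b;

	if (len < 0 || len > 40)
		return -EINVAL;
	b = blob_alloc();
	if (!b)
		return -ENOMEM;
	if (len && copy_from_user(b->data, data, len)) {
		kfree(b);
		return -EFAULT;
	}
	return blob_finish(dst, b, len);
}

/* Kernel variant: a plain memcpy(), no annotation needed. */
int blob_get(struct blob **dst, const unsigned char *data, int len)
{
	struct blob *b;

	if (len < 0 || len > 40)
		return -EINVAL;
	b = blob_alloc();
	if (!b)
		return -ENOMEM;
	if (len)
		memcpy(b->data, data, len);
	return blob_finish(dst, b, len);
}

The duplication between the two variants is confined to the copy step; everything else lives in the shared helpers, which is the same factoring the patch below applies to ip_options_get().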
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/ip_options.c	49
-rw-r--r--	net/ipv4/ip_sockglue.c	4
2 files changed, 36 insertions, 17 deletions
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 7e02ba584079..bce4e875193b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -489,23 +489,18 @@ void ip_options_undo(struct ip_options * opt)
 	}
 }
 
-int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, int user)
+static struct ip_options *ip_options_get_alloc(const int optlen)
 {
-	struct ip_options *opt;
+	struct ip_options *opt = kmalloc(sizeof(*opt) + ((optlen + 3) & ~3),
+					 GFP_KERNEL);
+	if (opt)
+		memset(opt, 0, sizeof(*opt));
+	return opt;
+}
 
-	opt = kmalloc(sizeof(struct ip_options)+((optlen+3)&~3), GFP_KERNEL);
-	if (!opt)
-		return -ENOMEM;
-	memset(opt, 0, sizeof(struct ip_options));
-	if (optlen) {
-		if (user) {
-			if (copy_from_user(opt->__data, data, optlen)) {
-				kfree(opt);
-				return -EFAULT;
-			}
-		} else
-			memcpy(opt->__data, data, optlen);
-	}
+static int ip_options_get_finish(struct ip_options **optp,
+				 struct ip_options *opt, int optlen)
+{
 	while (optlen & 3)
 		opt->__data[optlen++] = IPOPT_END;
 	opt->optlen = optlen;
@@ -521,6 +516,30 @@ int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, in
 	return 0;
 }
 
+int ip_options_get_from_user(struct ip_options **optp, unsigned char __user *data, int optlen)
+{
+	struct ip_options *opt = ip_options_get_alloc(optlen);
+
+	if (!opt)
+		return -ENOMEM;
+	if (optlen && copy_from_user(opt->__data, data, optlen)) {
+		kfree(opt);
+		return -EFAULT;
+	}
+	return ip_options_get_finish(optp, opt, optlen);
+}
+
+int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen)
+{
+	struct ip_options *opt = ip_options_get_alloc(optlen);
+
+	if (!opt)
+		return -ENOMEM;
+	if (optlen)
+		memcpy(opt->__data, data, optlen);
+	return ip_options_get_finish(optp, opt, optlen);
+}
+
 void ip_forward_options(struct sk_buff *skb)
 {
 	struct ip_options * opt = &(IPCB(skb)->opt);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index aca088b3707a..2f0b47da5b37 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -153,7 +153,7 @@ int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc)
 		switch (cmsg->cmsg_type) {
 		case IP_RETOPTS:
 			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
-			err = ip_options_get(&ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40, 0);
+			err = ip_options_get(&ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40);
 			if (err)
 				return err;
 			break;
@@ -425,7 +425,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 			struct ip_options * opt = NULL;
 			if (optlen > 40 || optlen < 0)
 				goto e_inval;
-			err = ip_options_get(&opt, optval, optlen, 1);
+			err = ip_options_get_from_user(&opt, optval, optlen);
 			if (err)
 				break;
 			if (sk->sk_type == SOCK_STREAM) {
pt">->queuelist)); list_del_init(&rq->flush.list); blk_flush_restore_request(rq); if (q->mq_ops) blk_mq_end_request(rq, error); else __blk_end_request_all(rq, error); break; default: BUG(); } kicked = blk_kick_flush(q, fq); return kicked | queued; } static void flush_end_io(struct request *flush_rq, int error) { struct request_queue *q = flush_rq->q; struct list_head *running; bool queued = false; struct request *rq, *n; unsigned long flags = 0; struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; /* release the tag's ownership to the req cloned from */ spin_lock_irqsave(&fq->mq_flush_lock, flags); hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu); blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); flush_rq->tag = -1; } running = &fq->flush_queue[fq->flush_running_idx]; BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); /* account completion of the flush request */ fq->flush_running_idx ^= 1; if (!q->mq_ops) elv_completed_request(q, flush_rq); /* and push the waiting requests to the next stage */ list_for_each_entry_safe(rq, n, running, flush.list) { unsigned int seq = blk_flush_cur_seq(rq); BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); queued |= blk_flush_complete_seq(rq, fq, seq, error); } /* * Kick the queue to avoid stall for two cases: * 1. Moving a request silently to empty queue_head may stall the * queue. * 2. When flush request is running in non-queueable queue, the * queue is hold. Restart the queue after flush request is finished * to avoid stall. * This function is called from request completion path and calling * directly into request_fn may confuse the driver. Always use * kblockd. */ if (queued || fq->flush_queue_delayed) { WARN_ON(q->mq_ops); blk_run_queue_async(q); } fq->flush_queue_delayed = 0; if (q->mq_ops) spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked * @fq: flush queue * * Flush related states of @q have changed, consider issuing flush request. * Please read the comment at the top of this file for more info. * * CONTEXT: * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) * * RETURNS: * %true if flush was issued, %false otherwise. */ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) { struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; struct request *first_rq = list_first_entry(pending, struct request, flush.list); struct request *flush_rq = fq->flush_rq; /* C1 described at the top of this file */ if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) return false; /* C2 and C3 */ if (!list_empty(&fq->flush_data_in_flight) && time_before(jiffies, fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) return false; /* * Issue flush and toggle pending_idx. This makes pending_idx * different from running_idx, which means flush is in flight. */ fq->flush_pending_idx ^= 1; blk_rq_init(q, flush_rq); /* * Borrow tag from the first request since they can't * be in flight at the same time. And acquire the tag's * ownership for flush req. 
*/ if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; flush_rq->mq_ctx = first_rq->mq_ctx; flush_rq->tag = first_rq->tag; fq->orig_rq = first_rq; hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu); blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq); } flush_rq->cmd_type = REQ_TYPE_FS; flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH; flush_rq->rq_flags |= RQF_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; return blk_flush_queue_rq(flush_rq, false); } static void flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). */ if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) blk_run_queue_async(q); } static void mq_flush_data_end_io(struct request *rq, int error) { struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx = rq->mq_ctx; unsigned long flags; struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); hctx = blk_mq_map_queue(q, ctx->cpu); /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). */ spin_lock_irqsave(&fq->mq_flush_lock, flags); if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) blk_mq_run_hw_queue(hctx, true); spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } /** * blk_insert_flush - insert a new FLUSH/FUA request * @rq: request to insert * * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. * or __blk_mq_run_hw_queue() to dispatch request. * @rq is being submitted. Analyze what needs to be done and put it on the * right queue. * * CONTEXT: * spin_lock_irq(q->queue_lock) in !mq case */ void blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned long fflags = q->queue_flags; /* may change, cache */ unsigned int policy = blk_flush_policy(fflags, rq); struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); /* * @policy now records what operations need to be done. Adjust * REQ_PREFLUSH and FUA for the driver. */ rq->cmd_flags &= ~REQ_PREFLUSH; if (!(fflags & (1UL << QUEUE_FLAG_FUA))) rq->cmd_flags &= ~REQ_FUA; /* * An empty flush handed down from a stacking driver may * translate into nothing if the underlying device does not * advertise a write-back cache. In this case, simply * complete the request. */ if (!policy) { if (q->mq_ops) blk_mq_end_request(rq, 0); else __blk_end_bidi_request(rq, 0, 0, 0); return; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) { blk_mq_insert_request(rq, false, false, true); } else list_add_tail(&rq->queuelist, &q->queue_head); return; } /* * @rq should go through flush machinery. Mark it part of flush * sequence and submit for further processing. 
*/ memset(&rq->flush, 0, sizeof(rq->flush)); INIT_LIST_HEAD(&rq->flush.list); rq->rq_flags |= RQF_FLUSH_SEQ; rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ if (q->mq_ops) { rq->end_io = mq_flush_data_end_io; spin_lock_irq(&fq->mq_flush_lock); blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); spin_unlock_irq(&fq->mq_flush_lock); return; } rq->end_io = flush_data_end_io; blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); } /** * blkdev_issue_flush - queue a flush * @bdev: blockdev to issue flush for * @gfp_mask: memory allocation flags (for bio_alloc) * @error_sector: error sector * * Description: * Issue a flush for the block device in question. Caller can supply * room for storing the error offset in case of a flush error, if they * wish to. If WAIT flag is not passed then caller may check only what * request was pushed in some internal queue for later handling. */ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, sector_t *error_sector) { struct request_queue *q; struct bio *bio; int ret = 0; if (bdev->bd_disk == NULL) return -ENXIO; q = bdev_get_queue(bdev); if (!q) return -ENXIO; /* * some block devices may not have their queue correctly set up here * (e.g. loop device without a backing file) and so issuing a flush * here will panic. Ensure there is a request function before issuing * the flush. */ if (!q->make_request_fn) return -ENXIO; bio = bio_alloc(gfp_mask, 0); bio->bi_bdev = bdev; bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; ret = submit_bio_wait(bio); /* * The driver must store the error location in ->bi_sector, if * it supports it. For non-stacked drivers, this should be * copied from blk_rq_pos(rq). */ if (error_sector) *error_sector = bio->bi_iter.bi_sector; bio_put(bio); return ret; } EXPORT_SYMBOL(blkdev_issue_flush); struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, int node, int cmd_size) { struct blk_flush_queue *fq; int rq_sz = sizeof(struct request); fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); if (!fq) goto fail; if (q->mq_ops) { spin_lock_init(&fq->mq_flush_lock); rq_sz = round_up(rq_sz + cmd_size, cache_line_size()); } fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); if (!fq->flush_rq) goto fail_rq; INIT_LIST_HEAD(&fq->flush_queue[0]); INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); return fq; fail_rq: kfree(fq); fail: return NULL; } void blk_free_flush_queue(struct blk_flush_queue *fq) { /* bio based request queue hasn't flush queue */ if (!fq) return; kfree(fq->flush_rq); kfree(fq); }