author	Christoph Hellwig <hch@lst.de>	2018-03-13 12:28:41 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-03-13 13:40:24 -0400
commit	17cb960f29c29ee07bf6848ada3265f4be55972e
tree	8571a25cac75a45261456dbe6711f324dc5b1044
parent	ef6fa64f9b8e1611854077ea9213f2eef2428cd2
bsg: split handling of SCSI CDBs vs transport requests
The current BSG design tries to shoe-horn the transport-specific
passthrough commands into the overall framework for SCSI passthrough
requests. This has a couple of problems:

 - each passthrough queue has to set the QUEUE_FLAG_SCSI_PASSTHROUGH
   flag despite not dealing with SCSI commands at all. Because of that
   these queues could also incorrectly accept SCSI commands from
   in-kernel users or through the legacy SCSI_IOCTL_SEND_COMMAND ioctl.
 - the real SCSI bsg queues also incorrectly accept bsg requests of the
   BSG_SUB_PROTOCOL_SCSI_TRANSPORT type.
 - the bsg transport code is almost unreadable because it tries to
   reuse different SCSI concepts for its own purposes.

This patch instead adds a new bsg_ops structure to handle the two cases
differently, and thus solves all of the above problems. Another side
effect is that the bsg-lib queues also don't need to embed a struct
scsi_request anymore.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
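To make the split concrete, here is a minimal sketch of how a provider now plugs into the shared bsg core through the bsg_ops table this patch introduces. The struct bsg_ops layout and the bsg_register_queue() signature are taken from the diff below; everything prefixed my_ is a hypothetical stand-in for a transport driver's own handlers, not part of the patch:

	/* Sketch only: wiring a hypothetical provider into the bsg core.
	 * struct bsg_ops and bsg_register_queue() come from this patch;
	 * the my_* callbacks are illustrative. */
	#include <linux/bsg.h>

	static int my_check_proto(struct sg_io_v4 *hdr)
	{
		/* accept only this provider's sub-protocol */
		if (hdr->protocol != BSG_PROTOCOL_SCSI ||
		    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
			return -EINVAL;
		return 0;
	}

	static int my_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
			fmode_t mode)
	{
		return 0;	/* copy the userspace request payload here */
	}

	static int my_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
	{
		return 0;	/* fill in the sg_io_v4 output members here */
	}

	static void my_free_rq(struct request *rq)
	{
		/* undo any allocation done in my_fill_hdr() */
	}

	static const struct bsg_ops my_bsg_ops = {
		.check_proto	= my_check_proto,
		.fill_hdr	= my_fill_hdr,
		.complete_rq	= my_complete_rq,
		.free_rq	= my_free_rq,
	};

	/* at setup time the registration call selects the dispatch table
	 * for the whole queue:
	 *
	 *	bsg_register_queue(q, dev, name, &my_bsg_ops, release);
	 */

Once registered, bsg_map_hdr() and blk_complete_sgv4_hdr_rq() dispatch through q->bsg_dev.ops instead of assuming SCSI semantics, which is what stops the SCSI-CDB and transport paths from accepting each other's requests.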
-rw-r--r--	block/bsg-lib.c	158
-rw-r--r--	block/bsg.c	262
-rw-r--r--	drivers/scsi/scsi_lib.c	4
-rw-r--r--	drivers/scsi/scsi_sysfs.c	3
-rw-r--r--	drivers/scsi/scsi_transport_sas.c	1
-rw-r--r--	include/linux/bsg-lib.h	4
-rw-r--r--	include/linux/bsg.h	35
7 files changed, 250 insertions, 217 deletions
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index f2c2d54a61b4..fc2e5ff2c4b9 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -27,6 +27,94 @@
 #include <linux/bsg-lib.h>
 #include <linux/export.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/sg.h>
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+{
+	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
+		return -EINVAL;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	return 0;
+}
+
+static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
+		fmode_t mode)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+	job->request_len = hdr->request_len;
+	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+	if (IS_ERR(job->request))
+		return PTR_ERR(job->request);
+	return 0;
+}
+
+static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+	int ret = 0;
+
+	/*
+	 * The assignments below don't make much sense, but are kept for
+	 * bug by bug backwards compatibility:
+	 */
+	hdr->device_status = job->result & 0xff;
+	hdr->transport_status = host_byte(job->result);
+	hdr->driver_status = driver_byte(job->result);
+	hdr->info = 0;
+	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->response_len = 0;
+
+	if (job->result < 0) {
+		/* we're only returning the result field in the reply */
+		job->reply_len = sizeof(u32);
+		ret = job->result;
+	}
+
+	if (job->reply_len && hdr->response) {
+		int len = min(hdr->max_response_len, job->reply_len);
+
+		if (copy_to_user(uptr64(hdr->response), job->reply, len))
+			ret = -EFAULT;
+		else
+			hdr->response_len = len;
+	}
+
+	/* we assume all request payload was transferred, residual == 0 */
+	hdr->dout_resid = 0;
+
+	if (rq->next_rq) {
+		unsigned int rsp_len = job->reply_payload.payload_len;
+
+		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
+			hdr->din_resid = 0;
+		else
+			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
+	} else {
+		hdr->din_resid = 0;
+	}
+
+	return ret;
+}
+
+static void bsg_transport_free_rq(struct request *rq)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+
+	kfree(job->request);
+}
+
+static const struct bsg_ops bsg_transport_ops = {
+	.check_proto		= bsg_transport_check_proto,
+	.fill_hdr		= bsg_transport_fill_hdr,
+	.complete_rq		= bsg_transport_complete_rq,
+	.free_rq		= bsg_transport_free_rq,
+};
 
 /**
  * bsg_teardown_job - routine to teardown a bsg job
@@ -68,27 +156,9 @@ EXPORT_SYMBOL_GPL(bsg_job_get);
 void bsg_job_done(struct bsg_job *job, int result,
 		  unsigned int reply_payload_rcv_len)
 {
-	struct request *req = blk_mq_rq_from_pdu(job);
-	struct request *rsp = req->next_rq;
-	int err;
-
-	err = job->sreq.result = result;
-	if (err < 0)
-		/* we're only returning the result field in the reply */
-		job->sreq.sense_len = sizeof(u32);
-	else
-		job->sreq.sense_len = job->reply_len;
-	/* we assume all request payload was transferred, residual == 0 */
-	job->sreq.resid_len = 0;
-
-	if (rsp) {
-		WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
-
-		/* set reply (bidi) residual */
-		scsi_req(rsp)->resid_len -=
-			min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
-	}
-	blk_complete_request(req);
+	job->result = result;
+	job->reply_payload_rcv_len = reply_payload_rcv_len;
+	blk_complete_request(blk_mq_rq_from_pdu(job));
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
 
@@ -113,7 +183,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 	if (!buf->sg_list)
 		return -ENOMEM;
 	sg_init_table(buf->sg_list, req->nr_phys_segments);
-	scsi_req(req)->resid_len = blk_rq_bytes(req);
 	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
 	buf->payload_len = blk_rq_bytes(req);
 	return 0;
@@ -124,16 +193,13 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_prepare_job(struct device *dev, struct request *req)
+static bool bsg_prepare_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
-	struct scsi_request *rq = scsi_req(req);
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	int ret;
 
 	job->timeout = req->timeout;
-	job->request = rq->cmd;
-	job->request_len = rq->cmd_len;
 
 	if (req->bio) {
 		ret = bsg_map_buffer(&job->request_payload, req);
@@ -149,12 +215,13 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
 	/* take a reference for the request */
 	get_device(job->dev);
 	kref_init(&job->kref);
-	return 0;
+	return true;
 
 failjob_rls_rqst_payload:
 	kfree(job->request_payload.sg_list);
failjob_rls_job:
-	return -ENOMEM;
+	job->result = -ENOMEM;
+	return false;
 }
 
 /**
@@ -183,9 +250,7 @@ static void bsg_request_fn(struct request_queue *q)
 			break;
 		spin_unlock_irq(q->queue_lock);
 
-		ret = bsg_prepare_job(dev, req);
-		if (ret) {
-			scsi_req(req)->result = ret;
+		if (!bsg_prepare_job(dev, req)) {
 			blk_end_request_all(req, BLK_STS_OK);
 			spin_lock_irq(q->queue_lock);
 			continue;
@@ -202,46 +267,34 @@ static void bsg_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 }
 
+/* called right after the request is allocated for the request_queue */
 static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
 {
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
-	struct scsi_request *sreq = &job->sreq;
-
-	/* called right after the request is allocated for the request_queue */
 
-	sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
-	if (!sreq->sense)
+	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+	if (!job->reply)
 		return -ENOMEM;
-
 	return 0;
 }
 
+/* called right before the request is given to the request_queue user */
 static void bsg_initialize_rq(struct request *req)
 {
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
-	struct scsi_request *sreq = &job->sreq;
-	void *sense = sreq->sense;
-
-	/* called right before the request is given to the request_queue user */
+	void *reply = job->reply;
 
 	memset(job, 0, sizeof(*job));
-
-	scsi_req_init(sreq);
-
-	sreq->sense = sense;
-	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-
-	job->reply = sense;
-	job->reply_len = sreq->sense_len;
+	job->reply = reply;
+	job->reply_len = SCSI_SENSE_BUFFERSIZE;
 	job->dd_data = job + 1;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
 {
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
-	struct scsi_request *sreq = &job->sreq;
 
-	kfree(sreq->sense);
+	kfree(job->reply);
 }
 
 /**
@@ -275,11 +328,10 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	q->bsg_job_fn = job_fn;
 	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	ret = bsg_register_queue(q, dev, name, release);
+	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops, release);
 	if (ret) {
 		printk(KERN_ERR "%s: bsg interface failed to "
		       "initialize - register queue\n", dev->kobj.name);
diff --git a/block/bsg.c b/block/bsg.c
index 06dc96e1f670..defa06c11858 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -130,114 +130,120 @@ static inline struct hlist_head *bsg_dev_idx_hash(int index)
 	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
 }
 
-static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
-				struct sg_io_v4 *hdr, struct bsg_device *bd,
-				fmode_t mode)
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
+{
+	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
+	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+		return -EINVAL;
+	return 0;
+}
+
+static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
+		fmode_t mode)
 {
-	struct scsi_request *req = scsi_req(rq);
+	struct scsi_request *sreq = scsi_req(rq);
 
-	if (hdr->request_len > BLK_MAX_CDB) {
-		req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
-		if (!req->cmd)
+	sreq->cmd_len = hdr->request_len;
+	if (sreq->cmd_len > BLK_MAX_CDB) {
+		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
+		if (!sreq->cmd)
 			return -ENOMEM;
 	}
 
-	if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
-			   hdr->request_len))
+	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
 		return -EFAULT;
-
-	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
-		if (blk_verify_command(req->cmd, mode))
-			return -EPERM;
-	} else if (!capable(CAP_SYS_RAWIO))
+	if (blk_verify_command(sreq->cmd, mode))
 		return -EPERM;
-
-	/*
-	 * fill in request structure
-	 */
-	req->cmd_len = hdr->request_len;
-
-	rq->timeout = msecs_to_jiffies(hdr->timeout);
-	if (!rq->timeout)
-		rq->timeout = q->sg_timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-		rq->timeout = BLK_MIN_SG_TIMEOUT;
-
 	return 0;
 }
 
-/*
- * Check if sg_io_v4 from user is allowed and valid
- */
-static int
-bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
+static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 {
+	struct scsi_request *sreq = scsi_req(rq);
 	int ret = 0;
 
-	if (hdr->guard != 'Q')
-		return -EINVAL;
+	/*
+	 * fill in all the output members
+	 */
+	hdr->device_status = sreq->result & 0xff;
+	hdr->transport_status = host_byte(sreq->result);
+	hdr->driver_status = driver_byte(sreq->result);
+	hdr->info = 0;
+	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->response_len = 0;
 
-	switch (hdr->protocol) {
-	case BSG_PROTOCOL_SCSI:
-		switch (hdr->subprotocol) {
-		case BSG_SUB_PROTOCOL_SCSI_CMD:
-		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
-			break;
-		default:
-			ret = -EINVAL;
-		}
-		break;
-	default:
-		ret = -EINVAL;
+	if (sreq->sense_len && hdr->response) {
+		int len = min_t(unsigned int, hdr->max_response_len,
+					sreq->sense_len);
+
+		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
+			ret = -EFAULT;
+		else
+			hdr->response_len = len;
+	}
+
+	if (rq->next_rq) {
+		hdr->dout_resid = sreq->resid_len;
+		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
+	} else if (rq_data_dir(rq) == READ) {
+		hdr->din_resid = sreq->resid_len;
+	} else {
+		hdr->dout_resid = sreq->resid_len;
 	}
 
-	*op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
 	return ret;
 }
 
-/*
- * map sg_io_v4 to a request.
- */
+static void bsg_scsi_free_rq(struct request *rq)
+{
+	scsi_req_free_cmd(scsi_req(rq));
+}
+
+static const struct bsg_ops bsg_scsi_ops = {
+	.check_proto		= bsg_scsi_check_proto,
+	.fill_hdr		= bsg_scsi_fill_hdr,
+	.complete_rq		= bsg_scsi_complete_rq,
+	.free_rq		= bsg_scsi_free_rq,
+};
+
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
+bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
 {
-	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
 	int ret;
-	unsigned int op, dxfer_len;
-	void __user *dxferp = NULL;
-	struct bsg_class_device *bcd = &q->bsg_dev;
 
-	/* if the LLD has been removed then the bsg_unregister_queue will
-	 * eventually be called and the class_dev was freed, so we can no
-	 * longer use this request_queue. Return no such address.
-	 */
-	if (!bcd->class_dev)
+	if (!q->bsg_dev.class_dev)
 		return ERR_PTR(-ENXIO);
 
-	bsg_dbg(bd, "map hdr %llx/%u %llx/%u\n",
-		(unsigned long long) hdr->dout_xferp,
-		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
-		hdr->din_xfer_len);
+	if (hdr->guard != 'Q')
+		return ERR_PTR(-EINVAL);
 
-	ret = bsg_validate_sgv4_hdr(hdr, &op);
+	ret = q->bsg_dev.ops->check_proto(hdr);
 	if (ret)
 		return ERR_PTR(ret);
 
-	/*
-	 * map scatter-gather elements separately and string them to request
-	 */
-	rq = blk_get_request(q, op, GFP_KERNEL);
+	rq = blk_get_request(q, hdr->dout_xfer_len ?
+			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+			     GFP_KERNEL);
 	if (IS_ERR(rq))
 		return rq;
 
-	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, mode);
+	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
 	if (ret)
 		goto out;
 
-	if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
+	rq->timeout = msecs_to_jiffies(hdr->timeout);
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+		rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
 		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
 			ret = -EOPNOTSUPP;
 			goto out;
@@ -246,42 +252,39 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
 		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
 		if (IS_ERR(next_rq)) {
 			ret = PTR_ERR(next_rq);
-			next_rq = NULL;
 			goto out;
 		}
-		rq->next_rq = next_rq;
 
-		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
-		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
+		rq->next_rq = next_rq;
+		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
 				hdr->din_xfer_len, GFP_KERNEL);
 		if (ret)
-			goto out;
+			goto out_free_nextrq;
 	}
 
 	if (hdr->dout_xfer_len) {
-		dxfer_len = hdr->dout_xfer_len;
-		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
+		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
+				hdr->dout_xfer_len, GFP_KERNEL);
 	} else if (hdr->din_xfer_len) {
-		dxfer_len = hdr->din_xfer_len;
-		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
-	} else
-		dxfer_len = 0;
-
-	if (dxfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
-				GFP_KERNEL);
-		if (ret)
-			goto out;
+		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
+				hdr->din_xfer_len, GFP_KERNEL);
+	} else {
+		ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
 	}
 
+	if (ret)
+		goto out_unmap_nextrq;
 	return rq;
+
+out_unmap_nextrq:
+	if (rq->next_rq)
+		blk_rq_unmap_user(rq->next_rq->bio);
+out_free_nextrq:
+	if (rq->next_rq)
+		blk_put_request(rq->next_rq);
 out:
-	scsi_req_free_cmd(scsi_req(rq));
+	q->bsg_dev.ops->free_rq(rq);
 	blk_put_request(rq);
-	if (next_rq) {
-		blk_rq_unmap_user(next_rq->bio);
-		blk_put_request(next_rq);
-	}
 	return ERR_PTR(ret);
 }
 
@@ -383,56 +386,18 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 				    struct bio *bio, struct bio *bidi_bio)
 {
-	struct scsi_request *req = scsi_req(rq);
-	int ret = 0;
-
-	pr_debug("rq %p bio %p 0x%x\n", rq, bio, req->result);
-	/*
-	 * fill in all the output members
-	 */
-	hdr->device_status = req->result & 0xff;
-	hdr->transport_status = host_byte(req->result);
-	hdr->driver_status = driver_byte(req->result);
-	hdr->info = 0;
-	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
-		hdr->info |= SG_INFO_CHECK;
-	hdr->response_len = 0;
-
-	if (req->sense_len && hdr->response) {
-		int len = min_t(unsigned int, hdr->max_response_len,
-					req->sense_len);
+	int ret;
 
-		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
-				   req->sense, len);
-		if (!ret)
-			hdr->response_len = len;
-		else
-			ret = -EFAULT;
-	}
+	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);
 
 	if (rq->next_rq) {
-		hdr->dout_resid = req->resid_len;
-		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
-	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = req->resid_len;
-	else
-		hdr->dout_resid = req->resid_len;
-
-	/*
-	 * If the request generated a negative error number, return it
-	 * (providing we aren't already returning an error); if it's
-	 * just a protocol response (i.e. non negative), that gets
-	 * processed above.
-	 */
-	if (!ret && req->result < 0)
-		ret = req->result;
+	}
 
 	blk_rq_unmap_user(bio);
-	scsi_req_free_cmd(req);
+	rq->q->bsg_dev.ops->free_rq(rq);
 	blk_put_request(rq);
-
 	return ret;
 }
 
@@ -614,7 +579,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 		/*
 		 * get a request, fill in the blanks, and add to request queue
 		 */
-		rq = bsg_map_hdr(bd, &bc->hdr, mode);
+		rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			rq = NULL;
@@ -742,11 +707,6 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	struct bsg_device *bd;
 	unsigned char buf[32];
 
-	if (!blk_queue_scsi_passthrough(rq)) {
-		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (!blk_get_queue(rq))
 		return ERR_PTR(-ENXIO);
 
@@ -907,7 +867,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 			return -EFAULT;
 
-		rq = bsg_map_hdr(bd, &hdr, file->f_mode);
+		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
@@ -959,7 +919,8 @@ void bsg_unregister_queue(struct request_queue *q)
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
 int bsg_register_queue(struct request_queue *q, struct device *parent,
-		const char *name, void (*release)(struct device *))
+		const char *name, const struct bsg_ops *ops,
+		void (*release)(struct device *))
 {
 	struct bsg_class_device *bcd;
 	dev_t dev;
@@ -996,6 +957,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 	bcd->queue = q;
 	bcd->parent = get_device(parent);
 	bcd->release = release;
+	bcd->ops = ops;
 	kref_init(&bcd->ref);
 	dev = MKDEV(bsg_major, bcd->minor);
 	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
@@ -1023,7 +985,17 @@ unlock:
 	mutex_unlock(&bsg_mutex);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(bsg_register_queue);
+
+int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
+{
+	if (!blk_queue_scsi_passthrough(q)) {
+		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+		return -EINVAL;
+	}
+
+	return bsg_register_queue(q, parent, NULL, &bsg_scsi_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);
 
 static struct cdev bsg_cdev;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 538152f3528e..37c1d63e847e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2140,8 +2140,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
 
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-
 	/*
 	 * this limit is imposed by hardware restrictions
 	 */
@@ -2239,6 +2237,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
 	}
 
 	__scsi_init_queue(shost, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_unprep_rq(q, scsi_unprep_fn);
 	blk_queue_softirq_done(q, scsi_softirq_done);
@@ -2270,6 +2269,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
 
 	sdev->request_queue->queuedata = sdev;
 	__scsi_init_queue(sdev->host, sdev->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
 	return sdev->request_queue;
 }
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 91b90f672d23..7142c8be1099 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1292,8 +1292,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 	transport_add_device(&sdev->sdev_gendev);
 	sdev->is_visible = 1;
 
-	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
-
+	error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
 	if (error)
 		/* we're treating error on bsg register as non-fatal,
 		 * so pretend nothing went wrong */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 7c0987616684..08acbabfae07 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -228,7 +228,6 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
 }
 
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 08762d297cbd..28a7ccc55c89 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -38,7 +38,6 @@ struct bsg_buffer {
 };
 
 struct bsg_job {
-	struct scsi_request sreq;
 	struct device *dev;
 
 	struct kref kref;
@@ -64,6 +63,9 @@ struct bsg_job {
 	struct bsg_buffer request_payload;
 	struct bsg_buffer reply_payload;
 
+	int result;
+	unsigned int reply_payload_rcv_len;
+
 	void *dd_data;		/* Used for driver-specific storage */
 };
 
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 2a202e41a3af..0c7dd9ceb139 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,34 +1,43 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BSG_H
-#define BSG_H
+#ifndef _LINUX_BSG_H
+#define _LINUX_BSG_H
 
 #include <uapi/linux/bsg.h>
 
+struct request;
+
+#ifdef CONFIG_BLK_DEV_BSG
+struct bsg_ops {
+	int	(*check_proto)(struct sg_io_v4 *hdr);
+	int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
+				fmode_t mode);
+	int	(*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
+	void	(*free_rq)(struct request *rq);
+};
 
-#if defined(CONFIG_BLK_DEV_BSG)
 struct bsg_class_device {
 	struct device *class_dev;
 	struct device *parent;
 	int minor;
 	struct request_queue *queue;
 	struct kref ref;
+	const struct bsg_ops *ops;
 	void (*release)(struct device *);
 };
 
-extern int bsg_register_queue(struct request_queue *q,
-			      struct device *parent, const char *name,
-			      void (*release)(struct device *));
-extern void bsg_unregister_queue(struct request_queue *);
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+		const char *name, const struct bsg_ops *ops,
+		void (*release)(struct device *));
+int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
+void bsg_unregister_queue(struct request_queue *q);
 #else
-static inline int bsg_register_queue(struct request_queue *q,
-				     struct device *parent, const char *name,
-				     void (*release)(struct device *))
+static inline int bsg_scsi_register_queue(struct request_queue *q,
+		struct device *parent)
 {
 	return 0;
 }
 static inline void bsg_unregister_queue(struct request_queue *q)
 {
 }
-#endif
-
-#endif
+#endif /* CONFIG_BLK_DEV_BSG */
+#endif /* _LINUX_BSG_H */