aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/isci/request.h
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2011-06-27 17:57:03 -0400
committerDan Williams <dan.j.williams@intel.com>2011-07-03 07:04:52 -0400
commit5076a1a97e2fa61c847a5fdd4b1991faf7716da6 (patch)
tree251d207e75439da25d4d3a0353e0b853c8e79f2b /drivers/scsi/isci/request.h
parentba7cb22342a66505a831bb7e4541fef90e0193c9 (diff)
isci: unify isci_request and scic_sds_request
They are one and the same object so remove the distinction. The near duplicate fields (owning_controller, and isci_host) will be cleaned up after the scic_sds_controller isci_host unification. Reported-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi/isci/request.h')
-rw-r--r--drivers/scsi/isci/request.h184
1 files changed, 61 insertions, 123 deletions
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 7fd98531d1f2..68d8a27357eb 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -93,7 +93,7 @@ enum sci_request_protocol {
93 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol 93 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
94 * @pio_len - number of bytes requested at PIO setup 94 * @pio_len - number of bytes requested at PIO setup
95 * @status - pio setup ending status value to tell us if we need 95 * @status - pio setup ending status value to tell us if we need
96 * to wait for another fis or if the transfer is complete. Upon 96 * to wait for another fis or if the transfer is complete. Upon
97 * receipt of a d2h fis this will be the status field of that fis. 97 * receipt of a d2h fis this will be the status field of that fis.
98 * @sgl - track pio transfer progress as we iterate through the sgl 98 * @sgl - track pio transfer progress as we iterate through the sgl
99 * @device_cdb_len - atapi device advertises its transfer constraints at setup 99 * @device_cdb_len - atapi device advertises its transfer constraints at setup
@@ -110,69 +110,55 @@ struct isci_stp_request {
110 u32 device_cdb_len; 110 u32 device_cdb_len;
111}; 111};
112 112
113struct scic_sds_request { 113struct isci_request {
114 /* 114 enum isci_request_status status;
115 * This field contains the information for the base request state 115 #define IREQ_COMPLETE_IN_TARGET 0
116 * machine. 116 #define IREQ_TERMINATED 1
117 #define IREQ_TMF 2
118 #define IREQ_ACTIVE 3
119 unsigned long flags;
120 /* XXX kill ttype and ttype_ptr, allocate full sas_task */
121 enum task_type ttype;
122 union ttype_ptr_union {
123 struct sas_task *io_task_ptr; /* When ttype==io_task */
124 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
125 } ttype_ptr;
126 struct isci_host *isci_host;
127 /* For use in the requests_to_{complete|abort} lists: */
128 struct list_head completed_node;
129 /* For use in the reqs_in_process list: */
130 struct list_head dev_node;
131 spinlock_t state_lock;
132 dma_addr_t request_daddr;
133 dma_addr_t zero_scatter_daddr;
134 unsigned int num_sg_entries;
135 /* Note: "io_request_completion" is completed in two different ways
136 * depending on whether this is a TMF or regular request.
137 * - TMF requests are completed in the thread that started them;
138 * - regular requests are completed in the request completion callback
139 * function.
140 * This difference in operation allows the aborter of a TMF request
141 * to be sure that once the TMF request completes, the I/O that the
142 * TMF was aborting is guaranteed to have completed.
143 *
144 * XXX kill io_request_completion
117 */ 145 */
146 struct completion *io_request_completion;
118 struct sci_base_state_machine sm; 147 struct sci_base_state_machine sm;
119
120 /*
121 * This field simply points to the controller to which this IO request
122 * is associated.
123 */
124 struct scic_sds_controller *owning_controller; 148 struct scic_sds_controller *owning_controller;
125
126 /*
127 * This field simply points to the remote device to which this IO
128 * request is associated.
129 */
130 struct scic_sds_remote_device *target_device; 149 struct scic_sds_remote_device *target_device;
131
132 /*
133 * This field indicates the IO tag for this request. The IO tag is
134 * comprised of the task_index and a sequence count. The sequence count
135 * is utilized to help identify tasks from one life to another.
136 */
137 u16 io_tag; 150 u16 io_tag;
138
139 /*
140 * This field specifies the protocol being utilized for this
141 * IO request.
142 */
143 enum sci_request_protocol protocol; 151 enum sci_request_protocol protocol;
144 152 u32 scu_status; /* hardware result */
145 /* 153 u32 sci_status; /* upper layer disposition */
146 * This field indicates the completion status taken from the SCUs
147 * completion code. It indicates the completion result for the SCU
148 * hardware.
149 */
150 u32 scu_status;
151
152 /*
153 * This field indicates the completion status returned to the SCI user.
154 * It indicates the users view of the io request completion.
155 */
156 u32 sci_status;
157
158 /*
159 * This field contains the value to be utilized when posting
160 * (e.g. Post_TC, * Post_TC_Abort) this request to the silicon.
161 */
162 u32 post_context; 154 u32 post_context;
163
164 struct scu_task_context *tc; 155 struct scu_task_context *tc;
165
166 /* could be larger with sg chaining */ 156 /* could be larger with sg chaining */
167 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) 157 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
168 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32))); 158 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
169 159 /* This field is a pointer to the stored rx frame data. It is used in
170 /*
171 * This field is a pointer to the stored rx frame data. It is used in
172 * STP internal requests and SMP response frames. If this field is 160 * STP internal requests and SMP response frames. If this field is
173 * non-NULL the saved frame must be released on IO request completion. 161 * non-NULL the saved frame must be released on IO request completion.
174 *
175 * @todo In the future do we want to keep a list of RX frame buffers?
176 */ 162 */
177 u32 saved_rx_frame_index; 163 u32 saved_rx_frame_index;
178 164
@@ -187,11 +173,9 @@ struct scic_sds_request {
187 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE]; 173 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
188 }; 174 };
189 } ssp; 175 } ssp;
190
191 struct { 176 struct {
192 struct smp_resp rsp; 177 struct smp_resp rsp;
193 } smp; 178 } smp;
194
195 struct { 179 struct {
196 struct isci_stp_request req; 180 struct isci_stp_request req;
197 struct host_to_dev_fis cmd; 181 struct host_to_dev_fis cmd;
@@ -200,56 +184,11 @@ struct scic_sds_request {
200 }; 184 };
201}; 185};
202 186
203static inline struct scic_sds_request *to_sci_req(struct isci_stp_request *stp_req) 187static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
204{
205 struct scic_sds_request *sci_req;
206
207 sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
208 return sci_req;
209}
210
211struct isci_request {
212 enum isci_request_status status;
213 enum task_type ttype;
214 unsigned short io_tag;
215 #define IREQ_COMPLETE_IN_TARGET 0
216 #define IREQ_TERMINATED 1
217 #define IREQ_TMF 2
218 #define IREQ_ACTIVE 3
219 unsigned long flags;
220
221 union ttype_ptr_union {
222 struct sas_task *io_task_ptr; /* When ttype==io_task */
223 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
224 } ttype_ptr;
225 struct isci_host *isci_host;
226 /* For use in the requests_to_{complete|abort} lists: */
227 struct list_head completed_node;
228 /* For use in the reqs_in_process list: */
229 struct list_head dev_node;
230 spinlock_t state_lock;
231 dma_addr_t request_daddr;
232 dma_addr_t zero_scatter_daddr;
233
234 unsigned int num_sg_entries; /* returned by pci_alloc_sg */
235
236 /** Note: "io_request_completion" is completed in two different ways
237 * depending on whether this is a TMF or regular request.
238 * - TMF requests are completed in the thread that started them;
239 * - regular requests are completed in the request completion callback
240 * function.
241 * This difference in operation allows the aborter of a TMF request
242 * to be sure that once the TMF request completes, the I/O that the
243 * TMF was aborting is guaranteed to have completed.
244 */
245 struct completion *io_request_completion;
246 struct scic_sds_request sci;
247};
248
249static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
250{ 188{
251 struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci); 189 struct isci_request *ireq;
252 190
191 ireq = container_of(stp_req, typeof(*ireq), stp.req);
253 return ireq; 192 return ireq;
254} 193}
255 194
@@ -366,32 +305,32 @@ enum sci_base_request_states {
366 * 305 *
367 * This macro will return the controller for this io request object 306 * This macro will return the controller for this io request object
368 */ 307 */
369#define scic_sds_request_get_controller(sci_req) \ 308#define scic_sds_request_get_controller(ireq) \
370 ((sci_req)->owning_controller) 309 ((ireq)->owning_controller)
371 310
372/** 311/**
373 * scic_sds_request_get_device() - 312 * scic_sds_request_get_device() -
374 * 313 *
375 * This macro will return the device for this io request object 314 * This macro will return the device for this io request object
376 */ 315 */
377#define scic_sds_request_get_device(sci_req) \ 316#define scic_sds_request_get_device(ireq) \
378 ((sci_req)->target_device) 317 ((ireq)->target_device)
379 318
380/** 319/**
381 * scic_sds_request_get_port() - 320 * scic_sds_request_get_port() -
382 * 321 *
383 * This macro will return the port for this io request object 322 * This macro will return the port for this io request object
384 */ 323 */
385#define scic_sds_request_get_port(sci_req) \ 324#define scic_sds_request_get_port(ireq) \
386 scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req)) 325 scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq))
387 326
388/** 327/**
389 * scic_sds_request_get_post_context() - 328 * scic_sds_request_get_post_context() -
390 * 329 *
391 * This macro returns the constructed post context result for the io request. 330 * This macro returns the constructed post context result for the io request.
392 */ 331 */
393#define scic_sds_request_get_post_context(sci_req) \ 332#define scic_sds_request_get_post_context(ireq) \
394 ((sci_req)->post_context) 333 ((ireq)->post_context)
395 334
396/** 335/**
397 * scic_sds_request_get_task_context() - 336 * scic_sds_request_get_task_context() -
@@ -413,26 +352,25 @@ enum sci_base_request_states {
413 (request)->sci_status = (sci_status_code); \ 352 (request)->sci_status = (sci_status_code); \
414 } 353 }
415 354
416enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); 355enum sci_status scic_sds_request_start(struct isci_request *ireq);
417enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); 356enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq);
418enum sci_status 357enum sci_status
419scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, 358scic_sds_io_request_event_handler(struct isci_request *ireq,
420 u32 event_code); 359 u32 event_code);
421enum sci_status 360enum sci_status
422scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req, 361scic_sds_io_request_frame_handler(struct isci_request *ireq,
423 u32 frame_index); 362 u32 frame_index);
424enum sci_status 363enum sci_status
425scic_sds_task_request_terminate(struct scic_sds_request *sci_req); 364scic_sds_task_request_terminate(struct isci_request *ireq);
426extern enum sci_status 365extern enum sci_status
427scic_sds_request_complete(struct scic_sds_request *sci_req); 366scic_sds_request_complete(struct isci_request *ireq);
428extern enum sci_status 367extern enum sci_status
429scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); 368scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code);
430 369
431/* XXX open code in caller */ 370/* XXX open code in caller */
432static inline dma_addr_t 371static inline dma_addr_t
433scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) 372scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
434{ 373{
435 struct isci_request *ireq = sci_req_to_ireq(sci_req);
436 374
437 char *requested_addr = (char *)virt_addr; 375 char *requested_addr = (char *)virt_addr;
438 char *base_addr = (char *)ireq; 376 char *base_addr = (char *)ireq;
@@ -565,14 +503,14 @@ enum sci_status
565scic_task_request_construct(struct scic_sds_controller *scic, 503scic_task_request_construct(struct scic_sds_controller *scic,
566 struct scic_sds_remote_device *sci_dev, 504 struct scic_sds_remote_device *sci_dev,
567 u16 io_tag, 505 u16 io_tag,
568 struct scic_sds_request *sci_req); 506 struct isci_request *ireq);
569enum sci_status 507enum sci_status
570scic_task_request_construct_ssp(struct scic_sds_request *sci_req); 508scic_task_request_construct_ssp(struct isci_request *ireq);
571enum sci_status 509enum sci_status
572scic_task_request_construct_sata(struct scic_sds_request *sci_req); 510scic_task_request_construct_sata(struct isci_request *ireq);
573void 511void
574scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag); 512scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
575void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req); 513void scic_sds_smp_request_copy_response(struct isci_request *ireq);
576 514
577static inline int isci_task_is_ncq_recovery(struct sas_task *task) 515static inline int isci_task_is_ncq_recovery(struct sas_task *task)
578{ 516{