author     Linus Torvalds <torvalds@linux-foundation.org>   2018-06-18 18:44:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-06-18 18:44:51 -0400
commit     c0d1a7eb1e601f54d301ff8a11a994e164ee65ae
tree       7dcb076290ee259831e9a476f3927ac7dec56b82
parent     6cc22dc08a247b7b4a173e4561e39705a557d300
parent     ec530174c43798099d305fbd6511e5d7fc7616d4
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"common I/O layer
- Fix bit-fields crossing storage-unit boundaries in css_general_char
dasd driver
- Avoid a sparse warning in regard to the queue lock
- Allocate the struct dasd_ccw_req as per request data. Only for
internal I/O is the structure allocated separately
- Remove the unused function dasd_kmalloc_set_cda
- Save a few bytes in struct dasd_ccw_req by reordering fields
- Convert remaining users of dasd_kmalloc_request to
dasd_smalloc_request and remove the now unused function
vfio/ccw
- Refactor and improve pfn_array_alloc_pin/pfn_array_pin
- Add a new tracepoint for failed vfio/ccw requests
- Add a CCW translation improvement to accept more requests as valid
- Bug fixes"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/dasd: only use preallocated requests
s390/dasd: reshuffle struct dasd_ccw_req
s390/dasd: remove dasd_kmalloc_set_cda
s390/dasd: move dasd_ccw_req to per request data
s390/dasd: simplify locking in process_final_queue
s390/cio: sanitize css_general_characteristics definition
vfio: ccw: add tracepoints for interesting error paths
vfio: ccw: set ccw->cda to NULL defensively
vfio: ccw: refactor and improve pfn_array_alloc_pin()
vfio: ccw: shorten kernel doc description for pfn_array_pin()
vfio: ccw: push down unsupported IDA check
vfio: ccw: fix error return in vfio_ccw_sch_event
s390/archrandom: Rework arch random implementation.
s390/net: add pnetid support
-rw-r--r--  arch/s390/include/asm/css_chars.h |  62
-rw-r--r--  drivers/s390/block/dasd.c         | 171
-rw-r--r--  drivers/s390/block/dasd_alias.c   |   6
-rw-r--r--  drivers/s390/block/dasd_diag.c    |   3
-rw-r--r--  drivers/s390/block/dasd_eckd.c    |  46
-rw-r--r--  drivers/s390/block/dasd_eer.c     |  10
-rw-r--r--  drivers/s390/block/dasd_fba.c     |   6
-rw-r--r--  drivers/s390/block/dasd_int.h     |  26
-rw-r--r--  drivers/s390/cio/Makefile         |   1
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c    | 140
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c   |   5
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c   |  17
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.h |  54
13 files changed, 282 insertions(+), 265 deletions(-)
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 0563fd3e8458..480bb02ccacd 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -6,36 +6,38 @@ | |||
6 | 6 | ||
7 | struct css_general_char { | 7 | struct css_general_char { |
8 | u64 : 12; | 8 | u64 : 12; |
9 | u32 dynio : 1; /* bit 12 */ | 9 | u64 dynio : 1; /* bit 12 */ |
10 | u32 : 4; | 10 | u64 : 4; |
11 | u32 eadm : 1; /* bit 17 */ | 11 | u64 eadm : 1; /* bit 17 */ |
12 | u32 : 23; | 12 | u64 : 23; |
13 | u32 aif : 1; /* bit 41 */ | 13 | u64 aif : 1; /* bit 41 */ |
14 | u32 : 3; | 14 | u64 : 3; |
15 | u32 mcss : 1; /* bit 45 */ | 15 | u64 mcss : 1; /* bit 45 */ |
16 | u32 fcs : 1; /* bit 46 */ | 16 | u64 fcs : 1; /* bit 46 */ |
17 | u32 : 1; | 17 | u64 : 1; |
18 | u32 ext_mb : 1; /* bit 48 */ | 18 | u64 ext_mb : 1; /* bit 48 */ |
19 | u32 : 7; | 19 | u64 : 7; |
20 | u32 aif_tdd : 1; /* bit 56 */ | 20 | u64 aif_tdd : 1; /* bit 56 */ |
21 | u32 : 1; | 21 | u64 : 1; |
22 | u32 qebsm : 1; /* bit 58 */ | 22 | u64 qebsm : 1; /* bit 58 */ |
23 | u32 : 2; | 23 | u64 : 2; |
24 | u32 aiv : 1; /* bit 61 */ | 24 | u64 aiv : 1; /* bit 61 */ |
25 | u32 : 5; | 25 | u64 : 2; |
26 | u32 aif_osa : 1; /* bit 67 */ | 26 | |
27 | u32 : 12; | 27 | u64 : 3; |
28 | u32 eadm_rf : 1; /* bit 80 */ | 28 | u64 aif_osa : 1; /* bit 67 */ |
29 | u32 : 1; | 29 | u64 : 12; |
30 | u32 cib : 1; /* bit 82 */ | 30 | u64 eadm_rf : 1; /* bit 80 */ |
31 | u32 : 5; | 31 | u64 : 1; |
32 | u32 fcx : 1; /* bit 88 */ | 32 | u64 cib : 1; /* bit 82 */ |
33 | u32 : 19; | 33 | u64 : 5; |
34 | u32 alt_ssi : 1; /* bit 108 */ | 34 | u64 fcx : 1; /* bit 88 */ |
35 | u32 : 1; | 35 | u64 : 19; |
36 | u32 narf : 1; /* bit 110 */ | 36 | u64 alt_ssi : 1; /* bit 108 */ |
37 | u32 : 12; | 37 | u64 : 1; |
38 | u32 util_str : 1;/* bit 123 */ | 38 | u64 narf : 1; /* bit 110 */ |
39 | u64 : 12; | ||
40 | u64 util_str : 1;/* bit 123 */ | ||
39 | } __packed; | 41 | } __packed; |
40 | 42 | ||
41 | extern struct css_general_char css_general_characteristics; | 43 | extern struct css_general_char css_general_characteristics; |
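Why the switch from u32 to u64 matters: several of these flags live past bit 31 of the characteristics block, and with 32-bit declared types C leaves it implementation-defined whether a bit-field that no longer fits into the current 32-bit allocation unit may straddle the boundary (C11 6.7.2.1). Declaring every member as u64 makes each 64-bit word a single storage unit, so no field crosses a boundary and the layout matches the architected bit numbers. A minimal, self-contained illustration of the difference (not code from the patch; it reuses a few field names from the struct above and assumes GCC-style layout as used by the kernel):

    #include <stdint.h>

    /* Old style: 32-bit storage units. The 23-bit anonymous field does not
     * fit into the 14 bits left in the first unit, so whether it straddles
     * the 32-bit boundary or starts a new unit is implementation-defined,
     * and the position of aif becomes ambiguous. */
    struct flags_32 {
            uint32_t       : 12;
            uint32_t dynio : 1;     /* bit 12 */
            uint32_t       : 4;
            uint32_t eadm  : 1;     /* bit 17 */
            uint32_t       : 23;    /* bits 18..40 - crosses a 32-bit boundary */
            uint32_t aif   : 1;     /* intended to be bit 41 */
    } __attribute__((packed));

    /* Sanitized style: 64-bit storage units, every field stays inside one
     * 64-bit word, so the architected bit numbers are unambiguous. */
    struct flags_64 {
            uint64_t       : 12;
            uint64_t dynio : 1;     /* bit 12 */
            uint64_t       : 4;
            uint64_t eadm  : 1;     /* bit 17 */
            uint64_t       : 23;
            uint64_t aif   : 1;     /* bit 41 */
    } __attribute__((packed));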
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 73cce3ecb97f..d3a38c421503 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1222,80 +1222,37 @@ static void dasd_hosts_init(struct dentry *base_dentry, | |||
1222 | device->hosts_dentry = pde; | 1222 | device->hosts_dentry = pde; |
1223 | } | 1223 | } |
1224 | 1224 | ||
1225 | /* | 1225 | struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, |
1226 | * Allocate memory for a channel program with 'cplength' channel | 1226 | struct dasd_device *device, |
1227 | * command words and 'datasize' additional space. There are two | 1227 | struct dasd_ccw_req *cqr) |
1228 | * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed | ||
1229 | * memory and 2) dasd_smalloc_request uses the static ccw memory | ||
1230 | * that gets allocated for each device. | ||
1231 | */ | ||
1232 | struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, | ||
1233 | int datasize, | ||
1234 | struct dasd_device *device) | ||
1235 | { | ||
1236 | struct dasd_ccw_req *cqr; | ||
1237 | |||
1238 | /* Sanity checks */ | ||
1239 | BUG_ON(datasize > PAGE_SIZE || | ||
1240 | (cplength*sizeof(struct ccw1)) > PAGE_SIZE); | ||
1241 | |||
1242 | cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); | ||
1243 | if (cqr == NULL) | ||
1244 | return ERR_PTR(-ENOMEM); | ||
1245 | cqr->cpaddr = NULL; | ||
1246 | if (cplength > 0) { | ||
1247 | cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), | ||
1248 | GFP_ATOMIC | GFP_DMA); | ||
1249 | if (cqr->cpaddr == NULL) { | ||
1250 | kfree(cqr); | ||
1251 | return ERR_PTR(-ENOMEM); | ||
1252 | } | ||
1253 | } | ||
1254 | cqr->data = NULL; | ||
1255 | if (datasize > 0) { | ||
1256 | cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); | ||
1257 | if (cqr->data == NULL) { | ||
1258 | kfree(cqr->cpaddr); | ||
1259 | kfree(cqr); | ||
1260 | return ERR_PTR(-ENOMEM); | ||
1261 | } | ||
1262 | } | ||
1263 | cqr->magic = magic; | ||
1264 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
1265 | dasd_get_device(device); | ||
1266 | return cqr; | ||
1267 | } | ||
1268 | EXPORT_SYMBOL(dasd_kmalloc_request); | ||
1269 | |||
1270 | struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, | ||
1271 | int datasize, | ||
1272 | struct dasd_device *device) | ||
1273 | { | 1228 | { |
1274 | unsigned long flags; | 1229 | unsigned long flags; |
1275 | struct dasd_ccw_req *cqr; | 1230 | char *data, *chunk; |
1276 | char *data; | 1231 | int size = 0; |
1277 | int size; | ||
1278 | 1232 | ||
1279 | size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; | ||
1280 | if (cplength > 0) | 1233 | if (cplength > 0) |
1281 | size += cplength * sizeof(struct ccw1); | 1234 | size += cplength * sizeof(struct ccw1); |
1282 | if (datasize > 0) | 1235 | if (datasize > 0) |
1283 | size += datasize; | 1236 | size += datasize; |
1237 | if (!cqr) | ||
1238 | size += (sizeof(*cqr) + 7L) & -8L; | ||
1239 | |||
1284 | spin_lock_irqsave(&device->mem_lock, flags); | 1240 | spin_lock_irqsave(&device->mem_lock, flags); |
1285 | cqr = (struct dasd_ccw_req *) | 1241 | data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); |
1286 | dasd_alloc_chunk(&device->ccw_chunks, size); | ||
1287 | spin_unlock_irqrestore(&device->mem_lock, flags); | 1242 | spin_unlock_irqrestore(&device->mem_lock, flags); |
1288 | if (cqr == NULL) | 1243 | if (!chunk) |
1289 | return ERR_PTR(-ENOMEM); | 1244 | return ERR_PTR(-ENOMEM); |
1290 | memset(cqr, 0, sizeof(struct dasd_ccw_req)); | 1245 | if (!cqr) { |
1291 | data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); | 1246 | cqr = (void *) data; |
1292 | cqr->cpaddr = NULL; | 1247 | data += (sizeof(*cqr) + 7L) & -8L; |
1248 | } | ||
1249 | memset(cqr, 0, sizeof(*cqr)); | ||
1250 | cqr->mem_chunk = chunk; | ||
1293 | if (cplength > 0) { | 1251 | if (cplength > 0) { |
1294 | cqr->cpaddr = (struct ccw1 *) data; | 1252 | cqr->cpaddr = data; |
1295 | data += cplength*sizeof(struct ccw1); | 1253 | data += cplength * sizeof(struct ccw1); |
1296 | memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); | 1254 | memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); |
1297 | } | 1255 | } |
1298 | cqr->data = NULL; | ||
1299 | if (datasize > 0) { | 1256 | if (datasize > 0) { |
1300 | cqr->data = data; | 1257 | cqr->data = data; |
1301 | memset(cqr->data, 0, datasize); | 1258 | memset(cqr->data, 0, datasize); |
@@ -1307,33 +1264,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, | |||
1307 | } | 1264 | } |
1308 | EXPORT_SYMBOL(dasd_smalloc_request); | 1265 | EXPORT_SYMBOL(dasd_smalloc_request); |
1309 | 1266 | ||
1310 | /* | ||
1311 | * Free memory of a channel program. This function needs to free all the | ||
1312 | * idal lists that might have been created by dasd_set_cda and the | ||
1313 | * struct dasd_ccw_req itself. | ||
1314 | */ | ||
1315 | void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) | ||
1316 | { | ||
1317 | struct ccw1 *ccw; | ||
1318 | |||
1319 | /* Clear any idals used for the request. */ | ||
1320 | ccw = cqr->cpaddr; | ||
1321 | do { | ||
1322 | clear_normalized_cda(ccw); | ||
1323 | } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); | ||
1324 | kfree(cqr->cpaddr); | ||
1325 | kfree(cqr->data); | ||
1326 | kfree(cqr); | ||
1327 | dasd_put_device(device); | ||
1328 | } | ||
1329 | EXPORT_SYMBOL(dasd_kfree_request); | ||
1330 | |||
1331 | void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) | 1267 | void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
1332 | { | 1268 | { |
1333 | unsigned long flags; | 1269 | unsigned long flags; |
1334 | 1270 | ||
1335 | spin_lock_irqsave(&device->mem_lock, flags); | 1271 | spin_lock_irqsave(&device->mem_lock, flags); |
1336 | dasd_free_chunk(&device->ccw_chunks, cqr); | 1272 | dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk); |
1337 | spin_unlock_irqrestore(&device->mem_lock, flags); | 1273 | spin_unlock_irqrestore(&device->mem_lock, flags); |
1338 | dasd_put_device(device); | 1274 | dasd_put_device(device); |
1339 | } | 1275 | } |
@@ -1885,6 +1821,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device, | |||
1885 | } | 1821 | } |
1886 | } | 1822 | } |
1887 | 1823 | ||
1824 | static void __dasd_process_cqr(struct dasd_device *device, | ||
1825 | struct dasd_ccw_req *cqr) | ||
1826 | { | ||
1827 | char errorstring[ERRORLENGTH]; | ||
1828 | |||
1829 | switch (cqr->status) { | ||
1830 | case DASD_CQR_SUCCESS: | ||
1831 | cqr->status = DASD_CQR_DONE; | ||
1832 | break; | ||
1833 | case DASD_CQR_ERROR: | ||
1834 | cqr->status = DASD_CQR_NEED_ERP; | ||
1835 | break; | ||
1836 | case DASD_CQR_CLEARED: | ||
1837 | cqr->status = DASD_CQR_TERMINATED; | ||
1838 | break; | ||
1839 | default: | ||
1840 | /* internal error 12 - wrong cqr status*/ | ||
1841 | snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); | ||
1842 | dev_err(&device->cdev->dev, | ||
1843 | "An error occurred in the DASD device driver, " | ||
1844 | "reason=%s\n", errorstring); | ||
1845 | BUG(); | ||
1846 | } | ||
1847 | if (cqr->callback) | ||
1848 | cqr->callback(cqr, cqr->callback_data); | ||
1849 | } | ||
1850 | |||
1888 | /* | 1851 | /* |
1889 | * the cqrs from the final queue are returned to the upper layer | 1852 | * the cqrs from the final queue are returned to the upper layer |
1890 | * by setting a dasd_block state and calling the callback function | 1853 | * by setting a dasd_block state and calling the callback function |
@@ -1895,40 +1858,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device, | |||
1895 | struct list_head *l, *n; | 1858 | struct list_head *l, *n; |
1896 | struct dasd_ccw_req *cqr; | 1859 | struct dasd_ccw_req *cqr; |
1897 | struct dasd_block *block; | 1860 | struct dasd_block *block; |
1898 | void (*callback)(struct dasd_ccw_req *, void *data); | ||
1899 | void *callback_data; | ||
1900 | char errorstring[ERRORLENGTH]; | ||
1901 | 1861 | ||
1902 | list_for_each_safe(l, n, final_queue) { | 1862 | list_for_each_safe(l, n, final_queue) { |
1903 | cqr = list_entry(l, struct dasd_ccw_req, devlist); | 1863 | cqr = list_entry(l, struct dasd_ccw_req, devlist); |
1904 | list_del_init(&cqr->devlist); | 1864 | list_del_init(&cqr->devlist); |
1905 | block = cqr->block; | 1865 | block = cqr->block; |
1906 | callback = cqr->callback; | 1866 | if (!block) { |
1907 | callback_data = cqr->callback_data; | 1867 | __dasd_process_cqr(device, cqr); |
1908 | if (block) | 1868 | } else { |
1909 | spin_lock_bh(&block->queue_lock); | 1869 | spin_lock_bh(&block->queue_lock); |
1910 | switch (cqr->status) { | 1870 | __dasd_process_cqr(device, cqr); |
1911 | case DASD_CQR_SUCCESS: | ||
1912 | cqr->status = DASD_CQR_DONE; | ||
1913 | break; | ||
1914 | case DASD_CQR_ERROR: | ||
1915 | cqr->status = DASD_CQR_NEED_ERP; | ||
1916 | break; | ||
1917 | case DASD_CQR_CLEARED: | ||
1918 | cqr->status = DASD_CQR_TERMINATED; | ||
1919 | break; | ||
1920 | default: | ||
1921 | /* internal error 12 - wrong cqr status*/ | ||
1922 | snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); | ||
1923 | dev_err(&device->cdev->dev, | ||
1924 | "An error occurred in the DASD device driver, " | ||
1925 | "reason=%s\n", errorstring); | ||
1926 | BUG(); | ||
1927 | } | ||
1928 | if (cqr->callback != NULL) | ||
1929 | (callback)(cqr, callback_data); | ||
1930 | if (block) | ||
1931 | spin_unlock_bh(&block->queue_lock); | 1871 | spin_unlock_bh(&block->queue_lock); |
1872 | } | ||
1932 | } | 1873 | } |
1933 | } | 1874 | } |
1934 | 1875 | ||
@@ -3041,7 +2982,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, | |||
3041 | cqr->callback_data = req; | 2982 | cqr->callback_data = req; |
3042 | cqr->status = DASD_CQR_FILLED; | 2983 | cqr->status = DASD_CQR_FILLED; |
3043 | cqr->dq = dq; | 2984 | cqr->dq = dq; |
3044 | *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr; | ||
3045 | 2985 | ||
3046 | blk_mq_start_request(req); | 2986 | blk_mq_start_request(req); |
3047 | spin_lock(&block->queue_lock); | 2987 | spin_lock(&block->queue_lock); |
@@ -3072,7 +3012,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) | |||
3072 | unsigned long flags; | 3012 | unsigned long flags; |
3073 | int rc = 0; | 3013 | int rc = 0; |
3074 | 3014 | ||
3075 | cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); | 3015 | cqr = blk_mq_rq_to_pdu(req); |
3076 | if (!cqr) | 3016 | if (!cqr) |
3077 | return BLK_EH_DONE; | 3017 | return BLK_EH_DONE; |
3078 | 3018 | ||
@@ -3174,7 +3114,7 @@ static int dasd_alloc_queue(struct dasd_block *block) | |||
3174 | int rc; | 3114 | int rc; |
3175 | 3115 | ||
3176 | block->tag_set.ops = &dasd_mq_ops; | 3116 | block->tag_set.ops = &dasd_mq_ops; |
3177 | block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); | 3117 | block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); |
3178 | block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; | 3118 | block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; |
3179 | block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; | 3119 | block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; |
3180 | block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 3120 | block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
@@ -4038,7 +3978,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, | |||
4038 | struct ccw1 *ccw; | 3978 | struct ccw1 *ccw; |
4039 | unsigned long *idaw; | 3979 | unsigned long *idaw; |
4040 | 3980 | ||
4041 | cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); | 3981 | cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, |
3982 | NULL); | ||
4042 | 3983 | ||
4043 | if (IS_ERR(cqr)) { | 3984 | if (IS_ERR(cqr)) { |
4044 | /* internal error 13 - Allocating the RDC request failed*/ | 3985 | /* internal error 13 - Allocating the RDC request failed*/ |
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5e963fe0e38d..e36a114354fc 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device, | |||
407 | int rc; | 407 | int rc; |
408 | unsigned long flags; | 408 | unsigned long flags; |
409 | 409 | ||
410 | cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, | 410 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
411 | (sizeof(struct dasd_psf_prssd_data)), | 411 | (sizeof(struct dasd_psf_prssd_data)), |
412 | device); | 412 | device, NULL); |
413 | if (IS_ERR(cqr)) | 413 | if (IS_ERR(cqr)) |
414 | return PTR_ERR(cqr); | 414 | return PTR_ERR(cqr); |
415 | cqr->startdev = device; | 415 | cqr->startdev = device; |
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device, | |||
457 | lcu->flags |= NEED_UAC_UPDATE; | 457 | lcu->flags |= NEED_UAC_UPDATE; |
458 | spin_unlock_irqrestore(&lcu->lock, flags); | 458 | spin_unlock_irqrestore(&lcu->lock, flags); |
459 | } | 459 | } |
460 | dasd_kfree_request(cqr, cqr->memdev); | 460 | dasd_sfree_request(cqr, cqr->memdev); |
461 | return rc; | 461 | return rc; |
462 | } | 462 | } |
463 | 463 | ||
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 131f1989f6f3..e1fe02477ea8 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
536 | /* Build the request */ | 536 | /* Build the request */ |
537 | datasize = sizeof(struct dasd_diag_req) + | 537 | datasize = sizeof(struct dasd_diag_req) + |
538 | count*sizeof(struct dasd_diag_bio); | 538 | count*sizeof(struct dasd_diag_bio); |
539 | cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); | 539 | cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev, |
540 | blk_mq_rq_to_pdu(req)); | ||
540 | if (IS_ERR(cqr)) | 541 | if (IS_ERR(cqr)) |
541 | return cqr; | 542 | return cqr; |
542 | 543 | ||
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index be208e7adcb4..bbf95b78ef5d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, | |||
886 | } | 886 | } |
887 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, | 887 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, |
888 | 0, /* use rcd_buf as data ara */ | 888 | 0, /* use rcd_buf as data ara */ |
889 | device); | 889 | device, NULL); |
890 | if (IS_ERR(cqr)) { | 890 | if (IS_ERR(cqr)) { |
891 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 891 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
892 | "Could not allocate RCD request"); | 892 | "Could not allocate RCD request"); |
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) | |||
1442 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, | 1442 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
1443 | (sizeof(struct dasd_psf_prssd_data) + | 1443 | (sizeof(struct dasd_psf_prssd_data) + |
1444 | sizeof(struct dasd_rssd_features)), | 1444 | sizeof(struct dasd_rssd_features)), |
1445 | device); | 1445 | device, NULL); |
1446 | if (IS_ERR(cqr)) { | 1446 | if (IS_ERR(cqr)) { |
1447 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " | 1447 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " |
1448 | "allocate initialization request"); | 1448 | "allocate initialization request"); |
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, | |||
1504 | 1504 | ||
1505 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , | 1505 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , |
1506 | sizeof(struct dasd_psf_ssc_data), | 1506 | sizeof(struct dasd_psf_ssc_data), |
1507 | device); | 1507 | device, NULL); |
1508 | 1508 | ||
1509 | if (IS_ERR(cqr)) { | 1509 | if (IS_ERR(cqr)) { |
1510 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 1510 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) | |||
1815 | 1815 | ||
1816 | cplength = 8; | 1816 | cplength = 8; |
1817 | datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); | 1817 | datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); |
1818 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); | 1818 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, |
1819 | NULL); | ||
1819 | if (IS_ERR(cqr)) | 1820 | if (IS_ERR(cqr)) |
1820 | return cqr; | 1821 | return cqr; |
1821 | ccw = cqr->cpaddr; | 1822 | ccw = cqr->cpaddr; |
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, | |||
2092 | */ | 2093 | */ |
2093 | itcw_size = itcw_calc_size(0, count, 0); | 2094 | itcw_size = itcw_calc_size(0, count, 0); |
2094 | 2095 | ||
2095 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); | 2096 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, |
2097 | NULL); | ||
2096 | if (IS_ERR(cqr)) | 2098 | if (IS_ERR(cqr)) |
2097 | return cqr; | 2099 | return cqr; |
2098 | 2100 | ||
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, | |||
2186 | cplength += count; | 2188 | cplength += count; |
2187 | 2189 | ||
2188 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, | 2190 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, |
2189 | startdev); | 2191 | startdev, NULL); |
2190 | if (IS_ERR(cqr)) | 2192 | if (IS_ERR(cqr)) |
2191 | return cqr; | 2193 | return cqr; |
2192 | 2194 | ||
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base, | |||
2332 | } | 2334 | } |
2333 | /* Allocate the format ccw request. */ | 2335 | /* Allocate the format ccw request. */ |
2334 | fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, | 2336 | fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, |
2335 | datasize, startdev); | 2337 | datasize, startdev, NULL); |
2336 | if (IS_ERR(fcp)) | 2338 | if (IS_ERR(fcp)) |
2337 | return fcp; | 2339 | return fcp; |
2338 | 2340 | ||
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
3103 | } | 3105 | } |
3104 | /* Allocate the ccw request. */ | 3106 | /* Allocate the ccw request. */ |
3105 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, | 3107 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, |
3106 | startdev); | 3108 | startdev, blk_mq_rq_to_pdu(req)); |
3107 | if (IS_ERR(cqr)) | 3109 | if (IS_ERR(cqr)) |
3108 | return cqr; | 3110 | return cqr; |
3109 | ccw = cqr->cpaddr; | 3111 | ccw = cqr->cpaddr; |
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
3262 | 3264 | ||
3263 | /* Allocate the ccw request. */ | 3265 | /* Allocate the ccw request. */ |
3264 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, | 3266 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, |
3265 | startdev); | 3267 | startdev, blk_mq_rq_to_pdu(req)); |
3266 | if (IS_ERR(cqr)) | 3268 | if (IS_ERR(cqr)) |
3267 | return cqr; | 3269 | return cqr; |
3268 | ccw = cqr->cpaddr; | 3270 | ccw = cqr->cpaddr; |
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
3595 | 3597 | ||
3596 | /* Allocate the ccw request. */ | 3598 | /* Allocate the ccw request. */ |
3597 | itcw_size = itcw_calc_size(0, ctidaw, 0); | 3599 | itcw_size = itcw_calc_size(0, ctidaw, 0); |
3598 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); | 3600 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, |
3601 | blk_mq_rq_to_pdu(req)); | ||
3599 | if (IS_ERR(cqr)) | 3602 | if (IS_ERR(cqr)) |
3600 | return cqr; | 3603 | return cqr; |
3601 | 3604 | ||
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, | |||
3862 | 3865 | ||
3863 | /* Allocate the ccw request. */ | 3866 | /* Allocate the ccw request. */ |
3864 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, | 3867 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, |
3865 | datasize, startdev); | 3868 | datasize, startdev, blk_mq_rq_to_pdu(req)); |
3866 | if (IS_ERR(cqr)) | 3869 | if (IS_ERR(cqr)) |
3867 | return cqr; | 3870 | return cqr; |
3868 | 3871 | ||
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device) | |||
4102 | return -EACCES; | 4105 | return -EACCES; |
4103 | 4106 | ||
4104 | useglobal = 0; | 4107 | useglobal = 0; |
4105 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); | 4108 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); |
4106 | if (IS_ERR(cqr)) { | 4109 | if (IS_ERR(cqr)) { |
4107 | mutex_lock(&dasd_reserve_mutex); | 4110 | mutex_lock(&dasd_reserve_mutex); |
4108 | useglobal = 1; | 4111 | useglobal = 1; |
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
4157 | return -EACCES; | 4160 | return -EACCES; |
4158 | 4161 | ||
4159 | useglobal = 0; | 4162 | useglobal = 0; |
4160 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); | 4163 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); |
4161 | if (IS_ERR(cqr)) { | 4164 | if (IS_ERR(cqr)) { |
4162 | mutex_lock(&dasd_reserve_mutex); | 4165 | mutex_lock(&dasd_reserve_mutex); |
4163 | useglobal = 1; | 4166 | useglobal = 1; |
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
4211 | return -EACCES; | 4214 | return -EACCES; |
4212 | 4215 | ||
4213 | useglobal = 0; | 4216 | useglobal = 0; |
4214 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); | 4217 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); |
4215 | if (IS_ERR(cqr)) { | 4218 | if (IS_ERR(cqr)) { |
4216 | mutex_lock(&dasd_reserve_mutex); | 4219 | mutex_lock(&dasd_reserve_mutex); |
4217 | useglobal = 1; | 4220 | useglobal = 1; |
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device, | |||
4271 | 4274 | ||
4272 | useglobal = 0; | 4275 | useglobal = 0; |
4273 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, | 4276 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, |
4274 | sizeof(struct dasd_snid_data), device); | 4277 | sizeof(struct dasd_snid_data), device, |
4278 | NULL); | ||
4275 | if (IS_ERR(cqr)) { | 4279 | if (IS_ERR(cqr)) { |
4276 | mutex_lock(&dasd_reserve_mutex); | 4280 | mutex_lock(&dasd_reserve_mutex); |
4277 | useglobal = 1; | 4281 | useglobal = 1; |
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) | |||
4331 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, | 4335 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
4332 | (sizeof(struct dasd_psf_prssd_data) + | 4336 | (sizeof(struct dasd_psf_prssd_data) + |
4333 | sizeof(struct dasd_rssd_perf_stats_t)), | 4337 | sizeof(struct dasd_rssd_perf_stats_t)), |
4334 | device); | 4338 | device, NULL); |
4335 | if (IS_ERR(cqr)) { | 4339 | if (IS_ERR(cqr)) { |
4336 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 4340 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
4337 | "Could not allocate initialization request"); | 4341 | "Could not allocate initialization request"); |
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) | |||
4477 | psf1 = psf_data[1]; | 4481 | psf1 = psf_data[1]; |
4478 | 4482 | ||
4479 | /* setup CCWs for PSF + RSSD */ | 4483 | /* setup CCWs for PSF + RSSD */ |
4480 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); | 4484 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); |
4481 | if (IS_ERR(cqr)) { | 4485 | if (IS_ERR(cqr)) { |
4482 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 4486 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
4483 | "Could not allocate initialization request"); | 4487 | "Could not allocate initialization request"); |
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5037 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, | 5041 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
5038 | (sizeof(struct dasd_psf_prssd_data) + | 5042 | (sizeof(struct dasd_psf_prssd_data) + |
5039 | sizeof(struct dasd_rssd_messages)), | 5043 | sizeof(struct dasd_rssd_messages)), |
5040 | device); | 5044 | device, NULL); |
5041 | if (IS_ERR(cqr)) { | 5045 | if (IS_ERR(cqr)) { |
5042 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | 5046 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", |
5043 | "Could not allocate read message buffer request"); | 5047 | "Could not allocate read message buffer request"); |
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, | |||
5126 | 5130 | ||
5127 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, | 5131 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, |
5128 | sizeof(struct dasd_psf_prssd_data) + 1, | 5132 | sizeof(struct dasd_psf_prssd_data) + 1, |
5129 | device); | 5133 | device, NULL); |
5130 | if (IS_ERR(cqr)) { | 5134 | if (IS_ERR(cqr)) { |
5131 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | 5135 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", |
5132 | "Could not allocate read message buffer request"); | 5136 | "Could not allocate read message buffer request"); |
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, | |||
5284 | int rc; | 5288 | int rc; |
5285 | 5289 | ||
5286 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , | 5290 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , |
5287 | sizeof(struct dasd_psf_cuir_response), | 5291 | sizeof(struct dasd_psf_cuir_response), |
5288 | device); | 5292 | device, NULL); |
5289 | 5293 | ||
5290 | if (IS_ERR(cqr)) { | 5294 | if (IS_ERR(cqr)) { |
5291 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 5295 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 0af8c5295b65..6ef8714dc693 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) | |||
447 | * is a new ccw in device->eer_cqr. Free the "old" | 447 | * is a new ccw in device->eer_cqr. Free the "old" |
448 | * snss request now. | 448 | * snss request now. |
449 | */ | 449 | */ |
450 | dasd_kfree_request(cqr, device); | 450 | dasd_sfree_request(cqr, device); |
451 | } | 451 | } |
452 | 452 | ||
453 | /* | 453 | /* |
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device) | |||
472 | if (rc) | 472 | if (rc) |
473 | goto out; | 473 | goto out; |
474 | 474 | ||
475 | cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, | 475 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, |
476 | SNSS_DATA_SIZE, device); | 476 | SNSS_DATA_SIZE, device, NULL); |
477 | if (IS_ERR(cqr)) { | 477 | if (IS_ERR(cqr)) { |
478 | rc = -ENOMEM; | 478 | rc = -ENOMEM; |
479 | cqr = NULL; | 479 | cqr = NULL; |
@@ -505,7 +505,7 @@ out: | |||
505 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 505 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
506 | 506 | ||
507 | if (cqr) | 507 | if (cqr) |
508 | dasd_kfree_request(cqr, device); | 508 | dasd_sfree_request(cqr, device); |
509 | 509 | ||
510 | return rc; | 510 | return rc; |
511 | } | 511 | } |
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device) | |||
528 | in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); | 528 | in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); |
529 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 529 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
530 | if (cqr && !in_use) | 530 | if (cqr && !in_use) |
531 | dasd_kfree_request(cqr, device); | 531 | dasd_sfree_request(cqr, device); |
532 | } | 532 | } |
533 | 533 | ||
534 | /* | 534 | /* |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a6b132f7e869..56007a3e7f11 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard( | |||
356 | datasize = sizeof(struct DE_fba_data) + | 356 | datasize = sizeof(struct DE_fba_data) + |
357 | nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1)); | 357 | nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1)); |
358 | 358 | ||
359 | cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); | 359 | cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev, |
360 | blk_mq_rq_to_pdu(req)); | ||
360 | if (IS_ERR(cqr)) | 361 | if (IS_ERR(cqr)) |
361 | return cqr; | 362 | return cqr; |
362 | 363 | ||
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular( | |||
490 | datasize += (count - 1)*sizeof(struct LO_fba_data); | 491 | datasize += (count - 1)*sizeof(struct LO_fba_data); |
491 | } | 492 | } |
492 | /* Allocate the ccw request. */ | 493 | /* Allocate the ccw request. */ |
493 | cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); | 494 | cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev, |
495 | blk_mq_rq_to_pdu(req)); | ||
494 | if (IS_ERR(cqr)) | 496 | if (IS_ERR(cqr)) |
495 | return cqr; | 497 | return cqr; |
496 | ccw = cqr->cpaddr; | 498 | ccw = cqr->cpaddr; |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 96709b1a7bf8..976b6bd4fb05 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -158,40 +158,33 @@ do { \ | |||
158 | 158 | ||
159 | struct dasd_ccw_req { | 159 | struct dasd_ccw_req { |
160 | unsigned int magic; /* Eye catcher */ | 160 | unsigned int magic; /* Eye catcher */ |
161 | int intrc; /* internal error, e.g. from start_IO */ | ||
161 | struct list_head devlist; /* for dasd_device request queue */ | 162 | struct list_head devlist; /* for dasd_device request queue */ |
162 | struct list_head blocklist; /* for dasd_block request queue */ | 163 | struct list_head blocklist; /* for dasd_block request queue */ |
163 | |||
164 | /* Where to execute what... */ | ||
165 | struct dasd_block *block; /* the originating block device */ | 164 | struct dasd_block *block; /* the originating block device */ |
166 | struct dasd_device *memdev; /* the device used to allocate this */ | 165 | struct dasd_device *memdev; /* the device used to allocate this */ |
167 | struct dasd_device *startdev; /* device the request is started on */ | 166 | struct dasd_device *startdev; /* device the request is started on */ |
168 | struct dasd_device *basedev; /* base device if no block->base */ | 167 | struct dasd_device *basedev; /* base device if no block->base */ |
169 | void *cpaddr; /* address of ccw or tcw */ | 168 | void *cpaddr; /* address of ccw or tcw */ |
169 | short retries; /* A retry counter */ | ||
170 | unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ | 170 | unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ |
171 | char status; /* status of this request */ | 171 | char status; /* status of this request */ |
172 | short retries; /* A retry counter */ | 172 | char lpm; /* logical path mask */ |
173 | unsigned long flags; /* flags of this request */ | 173 | unsigned long flags; /* flags of this request */ |
174 | struct dasd_queue *dq; | 174 | struct dasd_queue *dq; |
175 | |||
176 | /* ... and how */ | ||
177 | unsigned long starttime; /* jiffies time of request start */ | 175 | unsigned long starttime; /* jiffies time of request start */ |
178 | unsigned long expires; /* expiration period in jiffies */ | 176 | unsigned long expires; /* expiration period in jiffies */ |
179 | char lpm; /* logical path mask */ | ||
180 | void *data; /* pointer to data area */ | 177 | void *data; /* pointer to data area */ |
181 | |||
182 | /* these are important for recovering erroneous requests */ | ||
183 | int intrc; /* internal error, e.g. from start_IO */ | ||
184 | struct irb irb; /* device status in case of an error */ | 178 | struct irb irb; /* device status in case of an error */ |
185 | struct dasd_ccw_req *refers; /* ERP-chain queueing. */ | 179 | struct dasd_ccw_req *refers; /* ERP-chain queueing. */ |
186 | void *function; /* originating ERP action */ | 180 | void *function; /* originating ERP action */ |
181 | void *mem_chunk; | ||
187 | 182 | ||
188 | /* these are for statistics only */ | ||
189 | unsigned long buildclk; /* TOD-clock of request generation */ | 183 | unsigned long buildclk; /* TOD-clock of request generation */ |
190 | unsigned long startclk; /* TOD-clock of request start */ | 184 | unsigned long startclk; /* TOD-clock of request start */ |
191 | unsigned long stopclk; /* TOD-clock of request interrupt */ | 185 | unsigned long stopclk; /* TOD-clock of request interrupt */ |
192 | unsigned long endclk; /* TOD-clock of request termination */ | 186 | unsigned long endclk; /* TOD-clock of request termination */ |
193 | 187 | ||
194 | /* Callback that is called after reaching final status. */ | ||
195 | void (*callback)(struct dasd_ccw_req *, void *data); | 188 | void (*callback)(struct dasd_ccw_req *, void *data); |
196 | void *callback_data; | 189 | void *callback_data; |
197 | }; | 190 | }; |
@@ -714,19 +707,10 @@ extern const struct block_device_operations dasd_device_operations; | |||
714 | extern struct kmem_cache *dasd_page_cache; | 707 | extern struct kmem_cache *dasd_page_cache; |
715 | 708 | ||
716 | struct dasd_ccw_req * | 709 | struct dasd_ccw_req * |
717 | dasd_kmalloc_request(int , int, int, struct dasd_device *); | 710 | dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *); |
718 | struct dasd_ccw_req * | ||
719 | dasd_smalloc_request(int , int, int, struct dasd_device *); | ||
720 | void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); | ||
721 | void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); | 711 | void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); |
722 | void dasd_wakeup_cb(struct dasd_ccw_req *, void *); | 712 | void dasd_wakeup_cb(struct dasd_ccw_req *, void *); |
723 | 713 | ||
724 | static inline int | ||
725 | dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) | ||
726 | { | ||
727 | return set_normalized_cda(ccw, cda); | ||
728 | } | ||
729 | |||
730 | struct dasd_device *dasd_alloc_device(void); | 714 | struct dasd_device *dasd_alloc_device(void); |
731 | void dasd_free_device(struct dasd_device *); | 715 | void dasd_free_device(struct dasd_device *); |
732 | 716 | ||
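With dasd_kmalloc_request() gone, the single remaining allocator distinguishes the two allocation modes through its new last argument, as the converted call sites above show: block-layer I/O passes blk_mq_rq_to_pdu(req) so the cqr embedded in the request is reused, while internal I/O (RDC, PSF/RSSD, SNSS, reserve/release and friends) passes NULL and the cqr itself is carved out of the per-device ccw chunk together with the channel program and data area. A condensed sketch of the two call patterns, taken from the hunks above:

    /* Internal I/O: no block-layer request exists, so the cqr is allocated
     * from the device's ccw chunk (last argument NULL). */
    cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                               sizeof(struct dasd_psf_prssd_data) +
                               sizeof(struct dasd_rssd_features),
                               device, NULL);

    /* Block-layer I/O: the cqr storage is the request's PDU; only the
     * channel program and the data area come from the chunk. */
    cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
                               startdev, blk_mq_rq_to_pdu(req));
    if (IS_ERR(cqr))
            return cqr;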
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index a070ef0efe65..f230516abb96 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | # The following is required for define_trace.h to find ./trace.h | 6 | # The following is required for define_trace.h to find ./trace.h |
7 | CFLAGS_trace.o := -I$(src) | 7 | CFLAGS_trace.o := -I$(src) |
8 | CFLAGS_vfio_ccw_fsm.o := -I$(src) | ||
8 | 9 | ||
9 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ | 10 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ |
10 | fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o | 11 | fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o |
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index dce92b2a895d..dbe7c7ac9ac8 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -23,9 +23,13 @@ | |||
23 | #define CCWCHAIN_LEN_MAX 256 | 23 | #define CCWCHAIN_LEN_MAX 256 |
24 | 24 | ||
25 | struct pfn_array { | 25 | struct pfn_array { |
26 | /* Starting guest physical I/O address. */ | ||
26 | unsigned long pa_iova; | 27 | unsigned long pa_iova; |
28 | /* Array that stores PFNs of the pages need to pin. */ | ||
27 | unsigned long *pa_iova_pfn; | 29 | unsigned long *pa_iova_pfn; |
30 | /* Array that receives PFNs of the pages pinned. */ | ||
28 | unsigned long *pa_pfn; | 31 | unsigned long *pa_pfn; |
32 | /* Number of pages pinned from @pa_iova. */ | ||
29 | int pa_nr; | 33 | int pa_nr; |
30 | }; | 34 | }; |
31 | 35 | ||
@@ -46,70 +50,33 @@ struct ccwchain { | |||
46 | }; | 50 | }; |
47 | 51 | ||
48 | /* | 52 | /* |
49 | * pfn_array_pin() - pin user pages in memory | 53 | * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory |
50 | * @pa: pfn_array on which to perform the operation | 54 | * @pa: pfn_array on which to perform the operation |
51 | * @mdev: the mediated device to perform pin/unpin operations | 55 | * @mdev: the mediated device to perform pin/unpin operations |
56 | * @iova: target guest physical address | ||
57 | * @len: number of bytes that should be pinned from @iova | ||
52 | * | 58 | * |
53 | * Attempt to pin user pages in memory. | 59 | * Attempt to allocate memory for PFNs, and pin user pages in memory. |
54 | * | 60 | * |
55 | * Usage of pfn_array: | 61 | * Usage of pfn_array: |
56 | * @pa->pa_iova starting guest physical I/O address. Assigned by caller. | 62 | * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in |
57 | * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated | 63 | * this structure will be filled in by this function. |
58 | * by caller. | ||
59 | * @pa->pa_pfn array that receives PFNs of the pages pinned. Allocated by | ||
60 | * caller. | ||
61 | * @pa->pa_nr number of pages from @pa->pa_iova to pin. Assigned by | ||
62 | * caller. | ||
63 | * number of pages pinned. Assigned by callee. | ||
64 | * | 64 | * |
65 | * Returns: | 65 | * Returns: |
66 | * Number of pages pinned on success. | 66 | * Number of pages pinned on success. |
67 | * If @pa->pa_nr is 0 or negative, returns 0. | 67 | * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially, |
68 | * returns -EINVAL. | ||
68 | * If no pages were pinned, returns -errno. | 69 | * If no pages were pinned, returns -errno. |
69 | */ | 70 | */ |
70 | static int pfn_array_pin(struct pfn_array *pa, struct device *mdev) | ||
71 | { | ||
72 | int i, ret; | ||
73 | |||
74 | if (pa->pa_nr <= 0) { | ||
75 | pa->pa_nr = 0; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; | ||
80 | for (i = 1; i < pa->pa_nr; i++) | ||
81 | pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; | ||
82 | |||
83 | ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, | ||
84 | IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); | ||
85 | |||
86 | if (ret > 0 && ret != pa->pa_nr) { | ||
87 | vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); | ||
88 | pa->pa_nr = 0; | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | /* Unpin the pages before releasing the memory. */ | ||
96 | static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) | ||
97 | { | ||
98 | vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); | ||
99 | pa->pa_nr = 0; | ||
100 | kfree(pa->pa_iova_pfn); | ||
101 | } | ||
102 | |||
103 | /* Alloc memory for PFNs, then pin pages with them. */ | ||
104 | static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, | 71 | static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, |
105 | u64 iova, unsigned int len) | 72 | u64 iova, unsigned int len) |
106 | { | 73 | { |
107 | int ret = 0; | 74 | int i, ret = 0; |
108 | 75 | ||
109 | if (!len) | 76 | if (!len) |
110 | return 0; | 77 | return 0; |
111 | 78 | ||
112 | if (pa->pa_nr) | 79 | if (pa->pa_nr || pa->pa_iova_pfn) |
113 | return -EINVAL; | 80 | return -EINVAL; |
114 | 81 | ||
115 | pa->pa_iova = iova; | 82 | pa->pa_iova = iova; |
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, | |||
126 | return -ENOMEM; | 93 | return -ENOMEM; |
127 | pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; | 94 | pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; |
128 | 95 | ||
129 | ret = pfn_array_pin(pa, mdev); | 96 | pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; |
97 | for (i = 1; i < pa->pa_nr; i++) | ||
98 | pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; | ||
130 | 99 | ||
131 | if (ret > 0) | 100 | ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, |
132 | return ret; | 101 | IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); |
133 | else if (!ret) | 102 | |
103 | if (ret < 0) { | ||
104 | goto err_out; | ||
105 | } else if (ret > 0 && ret != pa->pa_nr) { | ||
106 | vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); | ||
134 | ret = -EINVAL; | 107 | ret = -EINVAL; |
108 | goto err_out; | ||
109 | } | ||
135 | 110 | ||
111 | return ret; | ||
112 | |||
113 | err_out: | ||
114 | pa->pa_nr = 0; | ||
136 | kfree(pa->pa_iova_pfn); | 115 | kfree(pa->pa_iova_pfn); |
116 | pa->pa_iova_pfn = NULL; | ||
137 | 117 | ||
138 | return ret; | 118 | return ret; |
139 | } | 119 | } |
140 | 120 | ||
121 | /* Unpin the pages before releasing the memory. */ | ||
122 | static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) | ||
123 | { | ||
124 | vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); | ||
125 | pa->pa_nr = 0; | ||
126 | kfree(pa->pa_iova_pfn); | ||
127 | } | ||
128 | |||
141 | static int pfn_array_table_init(struct pfn_array_table *pat, int nr) | 129 | static int pfn_array_table_init(struct pfn_array_table *pat, int nr) |
142 | { | 130 | { |
143 | pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL); | 131 | pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL); |
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp) | |||
365 | * This is the chain length not considering any TICs. | 353 | * This is the chain length not considering any TICs. |
366 | * You need to do a new round for each TIC target. | 354 | * You need to do a new round for each TIC target. |
367 | * | 355 | * |
356 | * The program is also validated for absence of not yet supported | ||
357 | * indirect data addressing scenarios. | ||
358 | * | ||
368 | * Returns: the length of the ccw chain or -errno. | 359 | * Returns: the length of the ccw chain or -errno. |
369 | */ | 360 | */ |
370 | static int ccwchain_calc_length(u64 iova, struct channel_program *cp) | 361 | static int ccwchain_calc_length(u64 iova, struct channel_program *cp) |
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) | |||
391 | do { | 382 | do { |
392 | cnt++; | 383 | cnt++; |
393 | 384 | ||
385 | /* | ||
386 | * As we don't want to fail direct addressing even if the | ||
387 | * orb specified one of the unsupported formats, we defer | ||
388 | * checking for IDAWs in unsupported formats to here. | ||
389 | */ | ||
390 | if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) | ||
391 | return -EOPNOTSUPP; | ||
392 | |||
394 | if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) | 393 | if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) |
395 | break; | 394 | break; |
396 | 395 | ||
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, | |||
503 | struct ccw1 *ccw; | 502 | struct ccw1 *ccw; |
504 | struct pfn_array_table *pat; | 503 | struct pfn_array_table *pat; |
505 | unsigned long *idaws; | 504 | unsigned long *idaws; |
506 | int idaw_nr; | 505 | int ret; |
507 | 506 | ||
508 | ccw = chain->ch_ccw + idx; | 507 | ccw = chain->ch_ccw + idx; |
509 | 508 | ||
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, | |||
523 | * needed when translating a direct ccw to a idal ccw. | 522 | * needed when translating a direct ccw to a idal ccw. |
524 | */ | 523 | */ |
525 | pat = chain->ch_pat + idx; | 524 | pat = chain->ch_pat + idx; |
526 | if (pfn_array_table_init(pat, 1)) | 525 | ret = pfn_array_table_init(pat, 1); |
527 | return -ENOMEM; | 526 | if (ret) |
528 | idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, | 527 | goto out_init; |
529 | ccw->cda, ccw->count); | 528 | |
530 | if (idaw_nr < 0) | 529 | ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); |
531 | return idaw_nr; | 530 | if (ret < 0) |
531 | goto out_init; | ||
532 | 532 | ||
533 | /* Translate this direct ccw to a idal ccw. */ | 533 | /* Translate this direct ccw to a idal ccw. */ |
534 | idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL); | 534 | idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); |
535 | if (!idaws) { | 535 | if (!idaws) { |
536 | pfn_array_table_unpin_free(pat, cp->mdev); | 536 | ret = -ENOMEM; |
537 | return -ENOMEM; | 537 | goto out_unpin; |
538 | } | 538 | } |
539 | ccw->cda = (__u32) virt_to_phys(idaws); | 539 | ccw->cda = (__u32) virt_to_phys(idaws); |
540 | ccw->flags |= CCW_FLAG_IDA; | 540 | ccw->flags |= CCW_FLAG_IDA; |
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, | |||
542 | pfn_array_table_idal_create_words(pat, idaws); | 542 | pfn_array_table_idal_create_words(pat, idaws); |
543 | 543 | ||
544 | return 0; | 544 | return 0; |
545 | |||
546 | out_unpin: | ||
547 | pfn_array_table_unpin_free(pat, cp->mdev); | ||
548 | out_init: | ||
549 | ccw->cda = 0; | ||
550 | return ret; | ||
545 | } | 551 | } |
546 | 552 | ||
547 | static int ccwchain_fetch_idal(struct ccwchain *chain, | 553 | static int ccwchain_fetch_idal(struct ccwchain *chain, |
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, | |||
571 | pat = chain->ch_pat + idx; | 577 | pat = chain->ch_pat + idx; |
572 | ret = pfn_array_table_init(pat, idaw_nr); | 578 | ret = pfn_array_table_init(pat, idaw_nr); |
573 | if (ret) | 579 | if (ret) |
574 | return ret; | 580 | goto out_init; |
575 | 581 | ||
576 | /* Translate idal ccw to use new allocated idaws. */ | 582 | /* Translate idal ccw to use new allocated idaws. */ |
577 | idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); | 583 | idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); |
@@ -603,6 +609,8 @@ out_free_idaws: | |||
603 | kfree(idaws); | 609 | kfree(idaws); |
604 | out_unpin: | 610 | out_unpin: |
605 | pfn_array_table_unpin_free(pat, cp->mdev); | 611 | pfn_array_table_unpin_free(pat, cp->mdev); |
612 | out_init: | ||
613 | ccw->cda = 0; | ||
606 | return ret; | 614 | return ret; |
607 | } | 615 | } |
608 | 616 | ||
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) | |||
656 | /* | 664 | /* |
657 | * XXX: | 665 | * XXX: |
658 | * Only support prefetch enable mode now. | 666 | * Only support prefetch enable mode now. |
659 | * Only support 64bit addressing idal. | ||
660 | * Only support 4k IDAW. | ||
661 | */ | 667 | */ |
662 | if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k) | 668 | if (!orb->cmd.pfch) |
663 | return -EOPNOTSUPP; | 669 | return -EOPNOTSUPP; |
664 | 670 | ||
665 | INIT_LIST_HEAD(&cp->ccwchain_list); | 671 | INIT_LIST_HEAD(&cp->ccwchain_list); |
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) | |||
688 | ret = ccwchain_loop_tic(chain, cp); | 694 | ret = ccwchain_loop_tic(chain, cp); |
689 | if (ret) | 695 | if (ret) |
690 | cp_unpin_free(cp); | 696 | cp_unpin_free(cp); |
697 | /* It is safe to force: if not set but idals used | ||
698 | * ccwchain_calc_length returns an error. | ||
699 | */ | ||
700 | cp->orb.cmd.c64 = 1; | ||
691 | 701 | ||
692 | return ret; | 702 | return ret; |
693 | } | 703 | } |
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ea6a2d0b2894..770fa9cfc310 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) | |||
177 | { | 177 | { |
178 | struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); | 178 | struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); |
179 | unsigned long flags; | 179 | unsigned long flags; |
180 | int rc = -EAGAIN; | ||
180 | 181 | ||
181 | spin_lock_irqsave(sch->lock, flags); | 182 | spin_lock_irqsave(sch->lock, flags); |
182 | if (!device_is_registered(&sch->dev)) | 183 | if (!device_is_registered(&sch->dev)) |
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) | |||
187 | 188 | ||
188 | if (cio_update_schib(sch)) { | 189 | if (cio_update_schib(sch)) { |
189 | vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); | 190 | vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); |
191 | rc = 0; | ||
190 | goto out_unlock; | 192 | goto out_unlock; |
191 | } | 193 | } |
192 | 194 | ||
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) | |||
195 | private->state = private->mdev ? VFIO_CCW_STATE_IDLE : | 197 | private->state = private->mdev ? VFIO_CCW_STATE_IDLE : |
196 | VFIO_CCW_STATE_STANDBY; | 198 | VFIO_CCW_STATE_STANDBY; |
197 | } | 199 | } |
200 | rc = 0; | ||
198 | 201 | ||
199 | out_unlock: | 202 | out_unlock: |
200 | spin_unlock_irqrestore(sch->lock, flags); | 203 | spin_unlock_irqrestore(sch->lock, flags); |
201 | 204 | ||
202 | return 0; | 205 | return rc; |
203 | } | 206 | } |
204 | 207 | ||
205 | static struct css_device_id vfio_ccw_sch_ids[] = { | 208 | static struct css_device_id vfio_ccw_sch_ids[] = { |
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 3c800642134e..797a82731159 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -13,6 +13,9 @@ | |||
13 | #include "ioasm.h" | 13 | #include "ioasm.h" |
14 | #include "vfio_ccw_private.h" | 14 | #include "vfio_ccw_private.h" |
15 | 15 | ||
16 | #define CREATE_TRACE_POINTS | ||
17 | #include "vfio_ccw_trace.h" | ||
18 | |||
16 | static int fsm_io_helper(struct vfio_ccw_private *private) | 19 | static int fsm_io_helper(struct vfio_ccw_private *private) |
17 | { | 20 | { |
18 | struct subchannel *sch; | 21 | struct subchannel *sch; |
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private, | |||
110 | */ | 113 | */ |
111 | cio_disable_subchannel(sch); | 114 | cio_disable_subchannel(sch); |
112 | } | 115 | } |
116 | inline struct subchannel_id get_schid(struct vfio_ccw_private *p) | ||
117 | { | ||
118 | return p->sch->schid; | ||
119 | } | ||
113 | 120 | ||
114 | /* | 121 | /* |
115 | * Deal with the ccw command request from the userspace. | 122 | * Deal with the ccw command request from the userspace. |
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, | |||
121 | union scsw *scsw = &private->scsw; | 128 | union scsw *scsw = &private->scsw; |
122 | struct ccw_io_region *io_region = &private->io_region; | 129 | struct ccw_io_region *io_region = &private->io_region; |
123 | struct mdev_device *mdev = private->mdev; | 130 | struct mdev_device *mdev = private->mdev; |
131 | char *errstr = "request"; | ||
124 | 132 | ||
125 | private->state = VFIO_CCW_STATE_BOXED; | 133 | private->state = VFIO_CCW_STATE_BOXED; |
126 | 134 | ||
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private, | |||
132 | /* Don't try to build a cp if transport mode is specified. */ | 140 | /* Don't try to build a cp if transport mode is specified. */ |
133 | if (orb->tm.b) { | 141 | if (orb->tm.b) { |
134 | io_region->ret_code = -EOPNOTSUPP; | 142 | io_region->ret_code = -EOPNOTSUPP; |
143 | errstr = "transport mode"; | ||
135 | goto err_out; | 144 | goto err_out; |
136 | } | 145 | } |
137 | io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), | 146 | io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), |
138 | orb); | 147 | orb); |
139 | if (io_region->ret_code) | 148 | if (io_region->ret_code) { |
149 | errstr = "cp init"; | ||
140 | goto err_out; | 150 | goto err_out; |
151 | } | ||
141 | 152 | ||
142 | io_region->ret_code = cp_prefetch(&private->cp); | 153 | io_region->ret_code = cp_prefetch(&private->cp); |
143 | if (io_region->ret_code) { | 154 | if (io_region->ret_code) { |
155 | errstr = "cp prefetch"; | ||
144 | cp_free(&private->cp); | 156 | cp_free(&private->cp); |
145 | goto err_out; | 157 | goto err_out; |
146 | } | 158 | } |
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, | |||
148 | /* Start channel program and wait for I/O interrupt. */ | 160 | /* Start channel program and wait for I/O interrupt. */ |
149 | io_region->ret_code = fsm_io_helper(private); | 161 | io_region->ret_code = fsm_io_helper(private); |
150 | if (io_region->ret_code) { | 162 | if (io_region->ret_code) { |
163 | errstr = "cp fsm_io_helper"; | ||
151 | cp_free(&private->cp); | 164 | cp_free(&private->cp); |
152 | goto err_out; | 165 | goto err_out; |
153 | } | 166 | } |
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private, | |||
164 | 177 | ||
165 | err_out: | 178 | err_out: |
166 | private->state = VFIO_CCW_STATE_IDLE; | 179 | private->state = VFIO_CCW_STATE_IDLE; |
180 | trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), | ||
181 | io_region->ret_code, errstr); | ||
167 | } | 182 | } |
168 | 183 | ||
169 | /* | 184 | /* |
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644
index 000000000000..b1da53ddec1f
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -0,0 +1,54 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 | ||
2 | * Tracepoints for vfio_ccw driver | ||
3 | * | ||
4 | * Copyright IBM Corp. 2018 | ||
5 | * | ||
6 | * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> | ||
7 | * Halil Pasic <pasic@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #undef TRACE_SYSTEM | ||
11 | #define TRACE_SYSTEM vfio_ccw | ||
12 | |||
13 | #if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ) | ||
14 | #define _VFIO_CCW_TRACE_ | ||
15 | |||
16 | #include <linux/tracepoint.h> | ||
17 | |||
18 | TRACE_EVENT(vfio_ccw_io_fctl, | ||
19 | TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr), | ||
20 | TP_ARGS(fctl, schid, errno, errstr), | ||
21 | |||
22 | TP_STRUCT__entry( | ||
23 | __field(int, fctl) | ||
24 | __field_struct(struct subchannel_id, schid) | ||
25 | __field(int, errno) | ||
26 | __field(char*, errstr) | ||
27 | ), | ||
28 | |||
29 | TP_fast_assign( | ||
30 | __entry->fctl = fctl; | ||
31 | __entry->schid = schid; | ||
32 | __entry->errno = errno; | ||
33 | __entry->errstr = errstr; | ||
34 | ), | ||
35 | |||
36 | TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s", | ||
37 | __entry->schid.cssid, | ||
38 | __entry->schid.ssid, | ||
39 | __entry->schid.sch_no, | ||
40 | __entry->fctl, | ||
41 | __entry->errno, | ||
42 | __entry->errstr) | ||
43 | ); | ||
44 | |||
45 | #endif /* _VFIO_CCW_TRACE_ */ | ||
46 | |||
47 | /* This part must be outside protection */ | ||
48 | |||
49 | #undef TRACE_INCLUDE_PATH | ||
50 | #define TRACE_INCLUDE_PATH . | ||
51 | #undef TRACE_INCLUDE_FILE | ||
52 | #define TRACE_INCLUDE_FILE vfio_ccw_trace | ||
53 | |||
54 | #include <trace/define_trace.h> | ||