diff options
author | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2007-02-10 14:45:43 -0500 |
---|---|---|
committer | James Bottomley <jejb@mulgrave.il.steeleye.com> | 2007-02-10 14:45:43 -0500 |
commit | 81b7bbd1932a04869d4c8635a75222dfc6089f96 (patch) | |
tree | 285ae868a1e3a41fb0dbfe346c28e380949bcb55 /drivers/s390 | |
parent | 98051995ab44b993f992946055edc6115351f725 (diff) | |
parent | 66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff) |
Merge branch 'linus'
Conflicts:
drivers/scsi/ipr.c
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/s390')
80 files changed, 2293 insertions, 5149 deletions
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig index ae89b9b88743..165af398fdea 100644 --- a/drivers/s390/Kconfig +++ b/drivers/s390/Kconfig | |||
@@ -103,14 +103,8 @@ config CCW_CONSOLE | |||
103 | depends on TN3215_CONSOLE || TN3270_CONSOLE | 103 | depends on TN3215_CONSOLE || TN3270_CONSOLE |
104 | default y | 104 | default y |
105 | 105 | ||
106 | config SCLP | ||
107 | bool "Support for SCLP" | ||
108 | help | ||
109 | Include support for the SCLP interface to the service element. | ||
110 | |||
111 | config SCLP_TTY | 106 | config SCLP_TTY |
112 | bool "Support for SCLP line mode terminal" | 107 | bool "Support for SCLP line mode terminal" |
113 | depends on SCLP | ||
114 | help | 108 | help |
115 | Include support for IBM SCLP line-mode terminals. | 109 | Include support for IBM SCLP line-mode terminals. |
116 | 110 | ||
@@ -123,7 +117,6 @@ config SCLP_CONSOLE | |||
123 | 117 | ||
124 | config SCLP_VT220_TTY | 118 | config SCLP_VT220_TTY |
125 | bool "Support for SCLP VT220-compatible terminal" | 119 | bool "Support for SCLP VT220-compatible terminal" |
126 | depends on SCLP | ||
127 | help | 120 | help |
128 | Include support for an IBM SCLP VT220-compatible terminal. | 121 | Include support for an IBM SCLP VT220-compatible terminal. |
129 | 122 | ||
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE | |||
136 | 129 | ||
137 | config SCLP_CPI | 130 | config SCLP_CPI |
138 | tristate "Control-Program Identification" | 131 | tristate "Control-Program Identification" |
139 | depends on SCLP | ||
140 | help | 132 | help |
141 | This option enables the hardware console interface for system | 133 | This option enables the hardware console interface for system |
142 | identification. This is commonly used for workload management and | 134 | identification. This is commonly used for workload management and |
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 9803c9352d78..5a888704a8d0 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile | |||
@@ -2,6 +2,8 @@ | |||
2 | # Makefile for the S/390 specific device drivers | 2 | # Makefile for the S/390 specific device drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | ||
6 | |||
5 | obj-y += s390mach.o sysinfo.o s390_rdev.o | 7 | obj-y += s390mach.o sysinfo.o s390_rdev.o |
6 | obj-y += cio/ block/ char/ crypto/ net/ scsi/ | 8 | obj-y += cio/ block/ char/ crypto/ net/ scsi/ |
7 | 9 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 492b68bcd7cc..eb5dc62f0d9c 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -37,6 +37,7 @@ | |||
37 | */ | 37 | */ |
38 | debug_info_t *dasd_debug_area; | 38 | debug_info_t *dasd_debug_area; |
39 | struct dasd_discipline *dasd_diag_discipline_pointer; | 39 | struct dasd_discipline *dasd_diag_discipline_pointer; |
40 | void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | ||
40 | 41 | ||
41 | MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); | 42 | MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); |
42 | MODULE_DESCRIPTION("Linux on S/390 DASD device driver," | 43 | MODULE_DESCRIPTION("Linux on S/390 DASD device driver," |
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device); | |||
51 | static void dasd_setup_queue(struct dasd_device * device); | 52 | static void dasd_setup_queue(struct dasd_device * device); |
52 | static void dasd_free_queue(struct dasd_device * device); | 53 | static void dasd_free_queue(struct dasd_device * device); |
53 | static void dasd_flush_request_queue(struct dasd_device *); | 54 | static void dasd_flush_request_queue(struct dasd_device *); |
54 | static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | ||
55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); | 55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); |
56 | static void dasd_tasklet(struct dasd_device *); | 56 | static void dasd_tasklet(struct dasd_device *); |
57 | static void do_kick_device(struct work_struct *); | 57 | static void do_kick_device(struct work_struct *); |
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF; | |||
483 | /* | 483 | /* |
484 | * Add profiling information for cqr before execution. | 484 | * Add profiling information for cqr before execution. |
485 | */ | 485 | */ |
486 | static inline void | 486 | static void |
487 | dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, | 487 | dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, |
488 | struct request *req) | 488 | struct request *req) |
489 | { | 489 | { |
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, | |||
505 | /* | 505 | /* |
506 | * Add profiling information for cqr after execution. | 506 | * Add profiling information for cqr after execution. |
507 | */ | 507 | */ |
508 | static inline void | 508 | static void |
509 | dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, | 509 | dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, |
510 | struct request *req) | 510 | struct request *req) |
511 | { | 511 | { |
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1022 | irb->scsw.cstat == 0 && | 1022 | irb->scsw.cstat == 0 && |
1023 | !irb->esw.esw0.erw.cons) | 1023 | !irb->esw.esw0.erw.cons) |
1024 | era = dasd_era_none; | 1024 | era = dasd_era_none; |
1025 | else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) | ||
1026 | era = dasd_era_fatal; /* don't recover this request */ | ||
1027 | else if (irb->esw.esw0.erw.cons) | 1025 | else if (irb->esw.esw0.erw.cons) |
1028 | era = device->discipline->examine_error(cqr, irb); | 1026 | era = device->discipline->examine_error(cqr, irb); |
1029 | else | 1027 | else |
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) | |||
1104 | /* | 1102 | /* |
1105 | * Process ccw request queue. | 1103 | * Process ccw request queue. |
1106 | */ | 1104 | */ |
1107 | static inline void | 1105 | static void |
1108 | __dasd_process_ccw_queue(struct dasd_device * device, | 1106 | __dasd_process_ccw_queue(struct dasd_device * device, |
1109 | struct list_head *final_queue) | 1107 | struct list_head *final_queue) |
1110 | { | 1108 | { |
@@ -1127,7 +1125,9 @@ restart: | |||
1127 | cqr->status = DASD_CQR_FAILED; | 1125 | cqr->status = DASD_CQR_FAILED; |
1128 | cqr->stopclk = get_clock(); | 1126 | cqr->stopclk = get_clock(); |
1129 | } else { | 1127 | } else { |
1130 | if (cqr->irb.esw.esw0.erw.cons) { | 1128 | if (cqr->irb.esw.esw0.erw.cons && |
1129 | test_bit(DASD_CQR_FLAGS_USE_ERP, | ||
1130 | &cqr->flags)) { | ||
1131 | erp_fn = device->discipline-> | 1131 | erp_fn = device->discipline-> |
1132 | erp_action(cqr); | 1132 | erp_action(cqr); |
1133 | erp_fn(cqr); | 1133 | erp_fn(cqr); |
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) | |||
1181 | /* | 1181 | /* |
1182 | * Fetch requests from the block device queue. | 1182 | * Fetch requests from the block device queue. |
1183 | */ | 1183 | */ |
1184 | static inline void | 1184 | static void |
1185 | __dasd_process_blk_queue(struct dasd_device * device) | 1185 | __dasd_process_blk_queue(struct dasd_device * device) |
1186 | { | 1186 | { |
1187 | request_queue_t *queue; | 1187 | request_queue_t *queue; |
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device) | |||
1232 | if (IS_ERR(cqr)) { | 1232 | if (IS_ERR(cqr)) { |
1233 | if (PTR_ERR(cqr) == -ENOMEM) | 1233 | if (PTR_ERR(cqr) == -ENOMEM) |
1234 | break; /* terminate request queue loop */ | 1234 | break; /* terminate request queue loop */ |
1235 | if (PTR_ERR(cqr) == -EAGAIN) { | ||
1236 | /* | ||
1237 | * The current request cannot be build right | ||
1238 | * now, we have to try later. If this request | ||
1239 | * is the head-of-queue we stop the device | ||
1240 | * for 1/2 second. | ||
1241 | */ | ||
1242 | if (!list_empty(&device->ccw_queue)) | ||
1243 | break; | ||
1244 | device->stopped |= DASD_STOPPED_PENDING; | ||
1245 | dasd_set_timer(device, HZ/2); | ||
1246 | break; | ||
1247 | } | ||
1235 | DBF_DEV_EVENT(DBF_ERR, device, | 1248 | DBF_DEV_EVENT(DBF_ERR, device, |
1236 | "CCW creation failed (rc=%ld) " | 1249 | "CCW creation failed (rc=%ld) " |
1237 | "on request %p", | 1250 | "on request %p", |
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device) | |||
1254 | * Take a look at the first request on the ccw queue and check | 1267 | * Take a look at the first request on the ccw queue and check |
1255 | * if it reached its expire time. If so, terminate the IO. | 1268 | * if it reached its expire time. If so, terminate the IO. |
1256 | */ | 1269 | */ |
1257 | static inline void | 1270 | static void |
1258 | __dasd_check_expire(struct dasd_device * device) | 1271 | __dasd_check_expire(struct dasd_device * device) |
1259 | { | 1272 | { |
1260 | struct dasd_ccw_req *cqr; | 1273 | struct dasd_ccw_req *cqr; |
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device) | |||
1285 | * Take a look at the first request on the ccw queue and check | 1298 | * Take a look at the first request on the ccw queue and check |
1286 | * if it needs to be started. | 1299 | * if it needs to be started. |
1287 | */ | 1300 | */ |
1288 | static inline void | 1301 | static void |
1289 | __dasd_start_head(struct dasd_device * device) | 1302 | __dasd_start_head(struct dasd_device * device) |
1290 | { | 1303 | { |
1291 | struct dasd_ccw_req *cqr; | 1304 | struct dasd_ccw_req *cqr; |
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 4d01040c2c63..8b9d68f6e016 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) | |||
170 | /* log the erp chain if fatal error occurred */ | 170 | /* log the erp chain if fatal error occurred */ |
171 | if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { | 171 | if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { |
172 | dasd_log_sense(cqr, irb); | 172 | dasd_log_sense(cqr, irb); |
173 | dasd_log_ccw(cqr, 0, irb->scsw.cpa); | ||
174 | } | 173 | } |
175 | 174 | ||
176 | return era; | 175 | return era; |
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2640 | 2639 | ||
2641 | struct dasd_ccw_req *erp = NULL; | 2640 | struct dasd_ccw_req *erp = NULL; |
2642 | struct dasd_device *device = cqr->device; | 2641 | struct dasd_device *device = cqr->device; |
2643 | __u32 cpa = cqr->irb.scsw.cpa; | ||
2644 | struct dasd_ccw_req *temp_erp = NULL; | 2642 | struct dasd_ccw_req *temp_erp = NULL; |
2645 | 2643 | ||
2646 | if (device->features & DASD_FEATURE_ERPLOG) { | 2644 | if (device->features & DASD_FEATURE_ERPLOG) { |
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2706 | } | 2704 | } |
2707 | } | 2705 | } |
2708 | 2706 | ||
2709 | if (erp->status == DASD_CQR_FAILED) | ||
2710 | dasd_log_ccw(erp, 1, cpa); | ||
2711 | |||
2712 | /* enqueue added ERP request */ | 2707 | /* enqueue added ERP request */ |
2713 | if (erp->status == DASD_CQR_FILLED) { | 2708 | if (erp->status == DASD_CQR_FILLED) { |
2714 | erp->status = DASD_CQR_QUEUED; | 2709 | erp->status = DASD_CQR_QUEUED; |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 5943266152f5..ed70852cc915 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup); | |||
136 | /* | 136 | /* |
137 | * Read a device busid/devno from a string. | 137 | * Read a device busid/devno from a string. |
138 | */ | 138 | */ |
139 | static inline int | 139 | static int |
140 | dasd_busid(char **str, int *id0, int *id1, int *devno) | 140 | dasd_busid(char **str, int *id0, int *id1, int *devno) |
141 | { | 141 | { |
142 | int val, old_style; | 142 | int val, old_style; |
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) | |||
182 | * only one: "ro" for read-only devices. The default feature set | 182 | * only one: "ro" for read-only devices. The default feature set |
183 | * is empty (value 0). | 183 | * is empty (value 0). |
184 | */ | 184 | */ |
185 | static inline int | 185 | static int |
186 | dasd_feature_list(char *str, char **endp) | 186 | dasd_feature_list(char *str, char **endp) |
187 | { | 187 | { |
188 | int features, len, rc; | 188 | int features, len, rc; |
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) { | |||
341 | return ERR_PTR(-EINVAL); | 341 | return ERR_PTR(-EINVAL); |
342 | } | 342 | } |
343 | 343 | ||
344 | static inline char * | 344 | static char * |
345 | dasd_parse_next_element( char *parsestring ) { | 345 | dasd_parse_next_element( char *parsestring ) { |
346 | char * residual_str; | 346 | char * residual_str; |
347 | residual_str = dasd_parse_keyword(parsestring); | 347 | residual_str = dasd_parse_keyword(parsestring); |
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 53db58a68617..ab782bb46ac1 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); | |||
43 | #define DIAG_MAX_RETRIES 32 | 43 | #define DIAG_MAX_RETRIES 32 |
44 | #define DIAG_TIMEOUT 50 * HZ | 44 | #define DIAG_TIMEOUT 50 * HZ |
45 | 45 | ||
46 | struct dasd_discipline dasd_diag_discipline; | 46 | static struct dasd_discipline dasd_diag_discipline; |
47 | 47 | ||
48 | struct dasd_diag_private { | 48 | struct dasd_diag_private { |
49 | struct dasd_diag_characteristics rdc_data; | 49 | struct dasd_diag_characteristics rdc_data; |
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd) | |||
90 | * block offset. On success, return zero and set end_block to contain the | 90 | * block offset. On success, return zero and set end_block to contain the |
91 | * number of blocks on the device minus the specified offset. Return non-zero | 91 | * number of blocks on the device minus the specified offset. Return non-zero |
92 | * otherwise. */ | 92 | * otherwise. */ |
93 | static __inline__ int | 93 | static inline int |
94 | mdsk_init_io(struct dasd_device *device, unsigned int blocksize, | 94 | mdsk_init_io(struct dasd_device *device, unsigned int blocksize, |
95 | blocknum_t offset, blocknum_t *end_block) | 95 | blocknum_t offset, blocknum_t *end_block) |
96 | { | 96 | { |
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize, | |||
117 | 117 | ||
118 | /* Remove block I/O environment for device. Return zero on success, non-zero | 118 | /* Remove block I/O environment for device. Return zero on success, non-zero |
119 | * otherwise. */ | 119 | * otherwise. */ |
120 | static __inline__ int | 120 | static inline int |
121 | mdsk_term_io(struct dasd_device * device) | 121 | mdsk_term_io(struct dasd_device * device) |
122 | { | 122 | { |
123 | struct dasd_diag_private *private; | 123 | struct dasd_diag_private *private; |
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | |||
576 | "dump sense not available for DIAG data"); | 576 | "dump sense not available for DIAG data"); |
577 | } | 577 | } |
578 | 578 | ||
579 | struct dasd_discipline dasd_diag_discipline = { | 579 | static struct dasd_discipline dasd_diag_discipline = { |
580 | .owner = THIS_MODULE, | 580 | .owner = THIS_MODULE, |
581 | .name = "DIAG", | 581 | .name = "DIAG", |
582 | .ebcname = "DIAG", | 582 | .ebcname = "DIAG", |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fdaa471e845f..cecab2274a6e 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2) | |||
134 | return (d1 + (d2 - 1)) / d2; | 134 | return (d1 + (d2 - 1)) / d2; |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline int | 137 | static unsigned int |
138 | bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl) | ||
139 | { | ||
140 | unsigned int fl1, fl2, int1, int2; | ||
141 | int bpr; | ||
142 | |||
143 | switch (rdc->formula) { | ||
144 | case 0x01: | ||
145 | fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc)); | ||
146 | fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0, | ||
147 | ECKD_F1(rdc)); | ||
148 | bpr = fl1 + fl2; | ||
149 | break; | ||
150 | case 0x02: | ||
151 | int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); | ||
152 | int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); | ||
153 | fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl + | ||
154 | ECKD_F6(rdc) + ECKD_F4(rdc) * int1, | ||
155 | ECKD_F1(rdc)); | ||
156 | fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl + | ||
157 | ECKD_F6(rdc) + ECKD_F4(rdc) * int2, | ||
158 | ECKD_F1(rdc)); | ||
159 | bpr = fl1 + fl2; | ||
160 | break; | ||
161 | default: | ||
162 | bpr = 0; | ||
163 | break; | ||
164 | } | ||
165 | return bpr; | ||
166 | } | ||
167 | |||
168 | static inline unsigned int | ||
169 | bytes_per_track(struct dasd_eckd_characteristics *rdc) | ||
170 | { | ||
171 | return *(unsigned int *) (rdc->byte_per_track) >> 8; | ||
172 | } | ||
173 | |||
174 | static inline unsigned int | ||
175 | recs_per_track(struct dasd_eckd_characteristics * rdc, | 138 | recs_per_track(struct dasd_eckd_characteristics * rdc, |
176 | unsigned int kl, unsigned int dl) | 139 | unsigned int kl, unsigned int dl) |
177 | { | 140 | { |
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc, | |||
204 | return 0; | 167 | return 0; |
205 | } | 168 | } |
206 | 169 | ||
207 | static inline void | 170 | static int |
208 | check_XRC (struct ccw1 *de_ccw, | 171 | check_XRC (struct ccw1 *de_ccw, |
209 | struct DE_eckd_data *data, | 172 | struct DE_eckd_data *data, |
210 | struct dasd_device *device) | 173 | struct dasd_device *device) |
211 | { | 174 | { |
212 | struct dasd_eckd_private *private; | 175 | struct dasd_eckd_private *private; |
176 | int rc; | ||
213 | 177 | ||
214 | private = (struct dasd_eckd_private *) device->private; | 178 | private = (struct dasd_eckd_private *) device->private; |
179 | if (!private->rdc_data.facilities.XRC_supported) | ||
180 | return 0; | ||
215 | 181 | ||
216 | /* switch on System Time Stamp - needed for XRC Support */ | 182 | /* switch on System Time Stamp - needed for XRC Support */ |
217 | if (private->rdc_data.facilities.XRC_supported) { | 183 | data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ |
218 | 184 | data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ | |
219 | data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ | ||
220 | data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ | ||
221 | |||
222 | data->ep_sys_time = get_clock (); | ||
223 | |||
224 | de_ccw->count = sizeof (struct DE_eckd_data); | ||
225 | de_ccw->flags |= CCW_FLAG_SLI; | ||
226 | } | ||
227 | 185 | ||
228 | return; | 186 | rc = get_sync_clock(&data->ep_sys_time); |
187 | /* Ignore return code if sync clock is switched off. */ | ||
188 | if (rc == -ENOSYS || rc == -EACCES) | ||
189 | rc = 0; | ||
229 | 190 | ||
230 | } /* end check_XRC */ | 191 | de_ccw->count = sizeof (struct DE_eckd_data); |
192 | de_ccw->flags |= CCW_FLAG_SLI; | ||
193 | return rc; | ||
194 | } | ||
231 | 195 | ||
232 | static inline void | 196 | static int |
233 | define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | 197 | define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, |
234 | int totrk, int cmd, struct dasd_device * device) | 198 | int totrk, int cmd, struct dasd_device * device) |
235 | { | 199 | { |
236 | struct dasd_eckd_private *private; | 200 | struct dasd_eckd_private *private; |
237 | struct ch_t geo, beg, end; | 201 | struct ch_t geo, beg, end; |
202 | int rc = 0; | ||
238 | 203 | ||
239 | private = (struct dasd_eckd_private *) device->private; | 204 | private = (struct dasd_eckd_private *) device->private; |
240 | 205 | ||
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
263 | case DASD_ECKD_CCW_WRITE_KD_MT: | 228 | case DASD_ECKD_CCW_WRITE_KD_MT: |
264 | data->mask.perm = 0x02; | 229 | data->mask.perm = 0x02; |
265 | data->attributes.operation = private->attrib.operation; | 230 | data->attributes.operation = private->attrib.operation; |
266 | check_XRC (ccw, data, device); | 231 | rc = check_XRC (ccw, data, device); |
267 | break; | 232 | break; |
268 | case DASD_ECKD_CCW_WRITE_CKD: | 233 | case DASD_ECKD_CCW_WRITE_CKD: |
269 | case DASD_ECKD_CCW_WRITE_CKD_MT: | 234 | case DASD_ECKD_CCW_WRITE_CKD_MT: |
270 | data->attributes.operation = DASD_BYPASS_CACHE; | 235 | data->attributes.operation = DASD_BYPASS_CACHE; |
271 | check_XRC (ccw, data, device); | 236 | rc = check_XRC (ccw, data, device); |
272 | break; | 237 | break; |
273 | case DASD_ECKD_CCW_ERASE: | 238 | case DASD_ECKD_CCW_ERASE: |
274 | case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: | 239 | case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: |
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
276 | data->mask.perm = 0x3; | 241 | data->mask.perm = 0x3; |
277 | data->mask.auth = 0x1; | 242 | data->mask.auth = 0x1; |
278 | data->attributes.operation = DASD_BYPASS_CACHE; | 243 | data->attributes.operation = DASD_BYPASS_CACHE; |
279 | check_XRC (ccw, data, device); | 244 | rc = check_XRC (ccw, data, device); |
280 | break; | 245 | break; |
281 | default: | 246 | default: |
282 | DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); | 247 | DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); |
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
312 | data->beg_ext.head = beg.head; | 277 | data->beg_ext.head = beg.head; |
313 | data->end_ext.cyl = end.cyl; | 278 | data->end_ext.cyl = end.cyl; |
314 | data->end_ext.head = end.head; | 279 | data->end_ext.head = end.head; |
280 | return rc; | ||
315 | } | 281 | } |
316 | 282 | ||
317 | static inline void | 283 | static void |
318 | locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, | 284 | locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, |
319 | int rec_on_trk, int no_rec, int cmd, | 285 | int rec_on_trk, int no_rec, int cmd, |
320 | struct dasd_device * device, int reclen) | 286 | struct dasd_device * device, int reclen) |
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device) | |||
548 | /* | 514 | /* |
549 | * Build CP for Perform Subsystem Function - SSC. | 515 | * Build CP for Perform Subsystem Function - SSC. |
550 | */ | 516 | */ |
551 | struct dasd_ccw_req * | 517 | static struct dasd_ccw_req * |
552 | dasd_eckd_build_psf_ssc(struct dasd_device *device) | 518 | dasd_eckd_build_psf_ssc(struct dasd_device *device) |
553 | { | 519 | { |
554 | struct dasd_ccw_req *cqr; | 520 | struct dasd_ccw_req *cqr; |
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1200 | return cqr; | 1166 | return cqr; |
1201 | ccw = cqr->cpaddr; | 1167 | ccw = cqr->cpaddr; |
1202 | /* First ccw is define extent. */ | 1168 | /* First ccw is define extent. */ |
1203 | define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); | 1169 | if (define_extent(ccw++, cqr->data, first_trk, |
1170 | last_trk, cmd, device) == -EAGAIN) { | ||
1171 | /* Clock not in sync and XRC is enabled. Try again later. */ | ||
1172 | dasd_sfree_request(cqr, device); | ||
1173 | return ERR_PTR(-EAGAIN); | ||
1174 | } | ||
1204 | /* Build locate_record+read/write/ccws. */ | 1175 | /* Build locate_record+read/write/ccws. */ |
1205 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); | 1176 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); |
1206 | LO_data = (struct LO_eckd_data *) (idaws + cidaw); | 1177 | LO_data = (struct LO_eckd_data *) (idaws + cidaw); |
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device) | |||
1380 | cqr->device = device; | 1351 | cqr->device = device; |
1381 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1352 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1382 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1353 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1383 | cqr->retries = 0; | 1354 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
1384 | cqr->expires = 2 * HZ; | 1355 | cqr->expires = 2 * HZ; |
1385 | cqr->buildclk = get_clock(); | 1356 | cqr->buildclk = get_clock(); |
1386 | cqr->status = DASD_CQR_FILLED; | 1357 | cqr->status = DASD_CQR_FILLED; |
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
1420 | cqr->device = device; | 1391 | cqr->device = device; |
1421 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1392 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1422 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1393 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1423 | cqr->retries = 0; | 1394 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
1424 | cqr->expires = 2 * HZ; | 1395 | cqr->expires = 2 * HZ; |
1425 | cqr->buildclk = get_clock(); | 1396 | cqr->buildclk = get_clock(); |
1426 | cqr->status = DASD_CQR_FILLED; | 1397 | cqr->status = DASD_CQR_FILLED; |
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
1459 | cqr->device = device; | 1430 | cqr->device = device; |
1460 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1431 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1461 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1432 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1462 | cqr->retries = 0; | 1433 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
1463 | cqr->expires = 2 * HZ; | 1434 | cqr->expires = 2 * HZ; |
1464 | cqr->buildclk = get_clock(); | 1435 | cqr->buildclk = get_clock(); |
1465 | cqr->status = DASD_CQR_FILLED; | 1436 | cqr->status = DASD_CQR_FILLED; |
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) | |||
1609 | * Dump the range of CCWs into 'page' buffer | 1580 | * Dump the range of CCWs into 'page' buffer |
1610 | * and return number of printed chars. | 1581 | * and return number of printed chars. |
1611 | */ | 1582 | */ |
1612 | static inline int | 1583 | static int |
1613 | dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) | 1584 | dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) |
1614 | { | 1585 | { |
1615 | int len, count; | 1586 | int len, count; |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index e0bf30ebb215..6cedc914077e 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = { | |||
658 | .owner = THIS_MODULE, | 658 | .owner = THIS_MODULE, |
659 | }; | 659 | }; |
660 | 660 | ||
661 | static struct miscdevice dasd_eer_dev = { | 661 | static struct miscdevice *dasd_eer_dev = NULL; |
662 | .minor = MISC_DYNAMIC_MINOR, | ||
663 | .name = "dasd_eer", | ||
664 | .fops = &dasd_eer_fops, | ||
665 | }; | ||
666 | 662 | ||
667 | int __init dasd_eer_init(void) | 663 | int __init dasd_eer_init(void) |
668 | { | 664 | { |
669 | int rc; | 665 | int rc; |
670 | 666 | ||
671 | rc = misc_register(&dasd_eer_dev); | 667 | dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL); |
668 | if (!dasd_eer_dev) | ||
669 | return -ENOMEM; | ||
670 | |||
671 | dasd_eer_dev->minor = MISC_DYNAMIC_MINOR; | ||
672 | dasd_eer_dev->name = "dasd_eer"; | ||
673 | dasd_eer_dev->fops = &dasd_eer_fops; | ||
674 | |||
675 | rc = misc_register(dasd_eer_dev); | ||
672 | if (rc) { | 676 | if (rc) { |
677 | kfree(dasd_eer_dev); | ||
678 | dasd_eer_dev = NULL; | ||
673 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | 679 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " |
674 | "register misc device"); | 680 | "register misc device"); |
675 | return rc; | 681 | return rc; |
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void) | |||
680 | 686 | ||
681 | void dasd_eer_exit(void) | 687 | void dasd_eer_exit(void) |
682 | { | 688 | { |
683 | WARN_ON(misc_deregister(&dasd_eer_dev) != 0); | 689 | if (dasd_eer_dev) { |
690 | WARN_ON(misc_deregister(dasd_eer_dev) != 0); | ||
691 | kfree(dasd_eer_dev); | ||
692 | dasd_eer_dev = NULL; | ||
693 | } | ||
684 | } | 694 | } |
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index 58a65097922b..caa5d91420f8 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c | |||
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr) | |||
152 | 152 | ||
153 | } /* end default_erp_postaction */ | 153 | } /* end default_erp_postaction */ |
154 | 154 | ||
155 | /* | ||
156 | * Print the hex dump of the memory used by a request. This includes | ||
157 | * all error recovery ccws that have been chained in from of the | ||
158 | * real request. | ||
159 | */ | ||
160 | static inline void | ||
161 | hex_dump_memory(struct dasd_device *device, void *data, int len) | ||
162 | { | ||
163 | int *pint; | ||
164 | |||
165 | pint = (int *) data; | ||
166 | while (len > 0) { | ||
167 | DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x", | ||
168 | pint, pint[0], pint[1], pint[2], pint[3]); | ||
169 | pint += 4; | ||
170 | len -= 16; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | void | 155 | void |
175 | dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) | 156 | dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) |
176 | { | 157 | { |
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) | |||
182 | device->discipline->dump_sense(device, cqr, irb); | 163 | device->discipline->dump_sense(device, cqr, irb); |
183 | } | 164 | } |
184 | 165 | ||
185 | void | ||
186 | dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa) | ||
187 | { | ||
188 | struct dasd_device *device; | ||
189 | struct dasd_ccw_req *lcqr; | ||
190 | struct ccw1 *ccw; | ||
191 | int cplength; | ||
192 | |||
193 | device = cqr->device; | ||
194 | /* log the channel program */ | ||
195 | for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) { | ||
196 | DEV_MESSAGE(KERN_ERR, device, | ||
197 | "(%s) ERP chain report for req: %p", | ||
198 | caller == 0 ? "EXAMINE" : "ACTION", lcqr); | ||
199 | hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req)); | ||
200 | |||
201 | cplength = 1; | ||
202 | ccw = lcqr->cpaddr; | ||
203 | while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC)) | ||
204 | cplength++; | ||
205 | |||
206 | if (cplength > 40) { /* log only parts of the CP */ | ||
207 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
208 | "Start of channel program:"); | ||
209 | hex_dump_memory(device, lcqr->cpaddr, | ||
210 | 40*sizeof(struct ccw1)); | ||
211 | |||
212 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
213 | "End of channel program:"); | ||
214 | hex_dump_memory(device, lcqr->cpaddr + cplength - 10, | ||
215 | 10*sizeof(struct ccw1)); | ||
216 | } else { /* log the whole CP */ | ||
217 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
218 | "Channel program (complete):"); | ||
219 | hex_dump_memory(device, lcqr->cpaddr, | ||
220 | cplength*sizeof(struct ccw1)); | ||
221 | } | ||
222 | |||
223 | if (lcqr != cqr) | ||
224 | continue; | ||
225 | |||
226 | /* | ||
227 | * Log bytes arround failed CCW but only if we did | ||
228 | * not log the whole CP of the CCW is outside the | ||
229 | * logged CP. | ||
230 | */ | ||
231 | if (cplength > 40 || | ||
232 | ((addr_t) cpa < (addr_t) lcqr->cpaddr && | ||
233 | (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) { | ||
234 | |||
235 | DEV_MESSAGE(KERN_ERR, device, | ||
236 | "Failed CCW (%p) (area):", | ||
237 | (void *) (long) cpa); | ||
238 | hex_dump_memory(device, cqr->cpaddr - 10, | ||
239 | 20*sizeof(struct ccw1)); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | } /* end log_erp_chain */ | ||
244 | |||
245 | EXPORT_SYMBOL(dasd_default_erp_action); | 166 | EXPORT_SYMBOL(dasd_default_erp_action); |
246 | EXPORT_SYMBOL(dasd_default_erp_postaction); | 167 | EXPORT_SYMBOL(dasd_default_erp_postaction); |
247 | EXPORT_SYMBOL(dasd_alloc_erp_request); | 168 | EXPORT_SYMBOL(dasd_alloc_erp_request); |
248 | EXPORT_SYMBOL(dasd_free_erp_request); | 169 | EXPORT_SYMBOL(dasd_free_erp_request); |
249 | EXPORT_SYMBOL(dasd_log_sense); | 170 | EXPORT_SYMBOL(dasd_log_sense); |
250 | EXPORT_SYMBOL(dasd_log_ccw); | ||
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index b857fd5893fd..be0909e39226 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = { | |||
75 | .notify = dasd_generic_notify, | 75 | .notify = dasd_generic_notify, |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static inline void | 78 | static void |
79 | define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, | 79 | define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, |
80 | int blksize, int beg, int nr) | 80 | int blksize, int beg, int nr) |
81 | { | 81 | { |
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, | |||
95 | data->ext_end = nr - 1; | 95 | data->ext_end = nr - 1; |
96 | } | 96 | } |
97 | 97 | ||
98 | static inline void | 98 | static void |
99 | locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, | 99 | locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, |
100 | int block_nr, int block_ct) | 100 | int block_nr, int block_ct) |
101 | { | 101 | { |
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index d163632101d2..47ba4462708d 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c | |||
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device) | |||
147 | */ | 147 | */ |
148 | memset(&bpart, 0, sizeof(struct blkpg_partition)); | 148 | memset(&bpart, 0, sizeof(struct blkpg_partition)); |
149 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); | 149 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); |
150 | barg.data = (void __user *) &bpart; | 150 | barg.data = (void __force __user *) &bpart; |
151 | barg.op = BLKPG_DEL_PARTITION; | 151 | barg.op = BLKPG_DEL_PARTITION; |
152 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) | 152 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) |
153 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); | 153 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index fb725e3b08fe..a2cc69e11410 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int, | |||
559 | struct dasd_device *); | 559 | struct dasd_device *); |
560 | void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); | 560 | void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); |
561 | void dasd_log_sense(struct dasd_ccw_req *, struct irb *); | 561 | void dasd_log_sense(struct dasd_ccw_req *, struct irb *); |
562 | void dasd_log_ccw(struct dasd_ccw_req *, int, __u32); | ||
563 | 562 | ||
564 | /* externals in dasd_3370_erp.c */ | 563 | /* externals in dasd_3370_erp.c */ |
565 | dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); | 564 | dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index bfa010f6dab2..8b7e11815d70 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL; | |||
28 | static struct proc_dir_entry *dasd_devices_entry = NULL; | 28 | static struct proc_dir_entry *dasd_devices_entry = NULL; |
29 | static struct proc_dir_entry *dasd_statistics_entry = NULL; | 29 | static struct proc_dir_entry *dasd_statistics_entry = NULL; |
30 | 30 | ||
31 | static inline char * | 31 | static char * |
32 | dasd_get_user_string(const char __user *user_buf, size_t user_len) | 32 | dasd_get_user_string(const char __user *user_buf, size_t user_len) |
33 | { | 33 | { |
34 | char *buffer; | 34 | char *buffer; |
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = { | |||
154 | .release = seq_release, | 154 | .release = seq_release, |
155 | }; | 155 | }; |
156 | 156 | ||
157 | static inline int | 157 | static int |
158 | dasd_calc_metrics(char *page, char **start, off_t off, | 158 | dasd_calc_metrics(char *page, char **start, off_t off, |
159 | int count, int *eof, int len) | 159 | int count, int *eof, int len) |
160 | { | 160 | { |
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off, | |||
167 | return len; | 167 | return len; |
168 | } | 168 | } |
169 | 169 | ||
170 | static inline char * | 170 | static char * |
171 | dasd_statistics_array(char *str, int *array, int shift) | 171 | dasd_statistics_array(char *str, unsigned int *array, int shift) |
172 | { | 172 | { |
173 | int i; | 173 | int i; |
174 | 174 | ||
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index be9b05347b4f..1340451ea408 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev) | |||
102 | * device needs to be enqueued before the semaphore is | 102 | * device needs to be enqueued before the semaphore is |
103 | * freed. | 103 | * freed. |
104 | */ | 104 | */ |
105 | static inline int | 105 | static int |
106 | dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) | 106 | dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) |
107 | { | 107 | { |
108 | int minor, found; | 108 | int minor, found; |
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
230 | SEGMENT_SHARED); | 230 | SEGMENT_SHARED); |
231 | if (rc < 0) { | 231 | if (rc < 0) { |
232 | BUG_ON(rc == -EINVAL); | 232 | BUG_ON(rc == -EINVAL); |
233 | if (rc == -EIO || rc == -ENOENT) | 233 | if (rc != -EAGAIN) |
234 | goto removeseg; | 234 | goto removeseg; |
235 | } else { | 235 | } else { |
236 | dev_info->is_shared = 1; | 236 | dev_info->is_shared = 1; |
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
253 | SEGMENT_EXCLUSIVE); | 253 | SEGMENT_EXCLUSIVE); |
254 | if (rc < 0) { | 254 | if (rc < 0) { |
255 | BUG_ON(rc == -EINVAL); | 255 | BUG_ON(rc == -EINVAL); |
256 | if (rc == -EIO || rc == -ENOENT) | 256 | if (rc != -EAGAIN) |
257 | goto removeseg; | 257 | goto removeseg; |
258 | } else { | 258 | } else { |
259 | dev_info->is_shared = 0; | 259 | dev_info->is_shared = 0; |
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index c3e97b4fc186..293e667b50f2 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # S/390 character devices | 2 | # S/390 character devices |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += ctrlchar.o keyboard.o defkeymap.o | 5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ |
6 | sclp_info.o | ||
6 | 7 | ||
7 | obj-$(CONFIG_TN3270) += raw3270.o | 8 | obj-$(CONFIG_TN3270) += raw3270.o |
8 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o | 9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o |
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o | |||
11 | 12 | ||
12 | obj-$(CONFIG_TN3215) += con3215.o | 13 | obj-$(CONFIG_TN3215) += con3215.o |
13 | 14 | ||
14 | obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o | ||
15 | obj-$(CONFIG_SCLP_TTY) += sclp_tty.o | 15 | obj-$(CONFIG_SCLP_TTY) += sclp_tty.o |
16 | obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o | 16 | obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o |
17 | obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o | 17 | obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 25b5d7a66417..9a328f14a641 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = { | |||
1121 | * 3215 tty registration code called from tty_init(). | 1121 | * 3215 tty registration code called from tty_init(). |
1122 | * Most kernel services (incl. kmalloc) are available at this poimt. | 1122 | * Most kernel services (incl. kmalloc) are available at this poimt. |
1123 | */ | 1123 | */ |
1124 | int __init | 1124 | static int __init |
1125 | tty3215_init(void) | 1125 | tty3215_init(void) |
1126 | { | 1126 | { |
1127 | struct tty_driver *driver; | 1127 | struct tty_driver *driver; |
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 7566be890688..8e7f2d7633d6 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *); | |||
69 | /* | 69 | /* |
70 | * Setup timeout for a device. On timeout trigger an update. | 70 | * Setup timeout for a device. On timeout trigger an update. |
71 | */ | 71 | */ |
72 | void | 72 | static void con3270_set_timer(struct con3270 *cp, int expires) |
73 | con3270_set_timer(struct con3270 *cp, int expires) | ||
74 | { | 73 | { |
75 | if (expires == 0) { | 74 | if (expires == 0) { |
76 | if (timer_pending(&cp->timer)) | 75 | if (timer_pending(&cp->timer)) |
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 17027d918cf7..564baca01b7c 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c | |||
@@ -5,6 +5,8 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/keyboard.h> | 6 | #include <linux/keyboard.h> |
7 | #include <linux/kd.h> | 7 | #include <linux/kd.h> |
8 | #include <linux/kbd_kern.h> | ||
9 | #include <linux/kbd_diacr.h> | ||
8 | 10 | ||
9 | u_short plain_map[NR_KEYS] = { | 11 | u_short plain_map[NR_KEYS] = { |
10 | 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, | 12 | 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 0893d306ae80..e1a746269c4c 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include "raw3270.h" | 23 | #include "raw3270.h" |
24 | #include "ctrlchar.h" | 24 | #include "ctrlchar.h" |
25 | 25 | ||
26 | struct raw3270_fn fs3270_fn; | 26 | static struct raw3270_fn fs3270_fn; |
27 | 27 | ||
28 | struct fs3270 { | 28 | struct fs3270 { |
29 | struct raw3270_view view; | 29 | struct raw3270_view view; |
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view) | |||
401 | } | 401 | } |
402 | 402 | ||
403 | /* View to a 3270 device. Can be console, tty or fullscreen. */ | 403 | /* View to a 3270 device. Can be console, tty or fullscreen. */ |
404 | struct raw3270_fn fs3270_fn = { | 404 | static struct raw3270_fn fs3270_fn = { |
405 | .activate = fs3270_activate, | 405 | .activate = fs3270_activate, |
406 | .deactivate = fs3270_deactivate, | 406 | .deactivate = fs3270_deactivate, |
407 | .intv = (void *) fs3270_irq, | 407 | .intv = (void *) fs3270_irq, |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 3e86fd1756e5..f62f9a4e8950 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c | |||
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) | |||
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
151 | #if 0 | ||
151 | /* | 152 | /* |
152 | * Generate ebcdic -> ascii translation table from kbd_data. | 153 | * Generate ebcdic -> ascii translation table from kbd_data. |
153 | */ | 154 | */ |
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) | |||
173 | } | 174 | } |
174 | } | 175 | } |
175 | } | 176 | } |
177 | #endif | ||
176 | 178 | ||
177 | /* | 179 | /* |
178 | * We have a combining character DIACR here, followed by the character CH. | 180 | * We have a combining character DIACR here, followed by the character CH. |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index a138b1510093..3a1a958fb5f2 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Character device driver for reading z/VM *MONITOR service records. | 4 | * Character device driver for reading z/VM *MONITOR service records. |
5 | * | 5 | * |
6 | * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. | 6 | * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. |
7 | * | 7 | * |
8 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | 8 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> |
9 | */ | 9 | */ |
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/ebcdic.h> | 22 | #include <asm/ebcdic.h> |
23 | #include <asm/extmem.h> | 23 | #include <asm/extmem.h> |
24 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
25 | #include "../net/iucv.h" | 25 | #include <net/iucv/iucv.h> |
26 | 26 | ||
27 | 27 | ||
28 | //#define MON_DEBUG /* Debug messages on/off */ | 28 | //#define MON_DEBUG /* Debug messages on/off */ |
@@ -50,14 +50,13 @@ static char mon_dcss_name[9] = "MONDCSS\0"; | |||
50 | struct mon_msg { | 50 | struct mon_msg { |
51 | u32 pos; | 51 | u32 pos; |
52 | u32 mca_offset; | 52 | u32 mca_offset; |
53 | iucv_MessagePending local_eib; | 53 | struct iucv_message msg; |
54 | char msglim_reached; | 54 | char msglim_reached; |
55 | char replied_msglim; | 55 | char replied_msglim; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | struct mon_private { | 58 | struct mon_private { |
59 | u16 pathid; | 59 | struct iucv_path *path; |
60 | iucv_handle_t iucv_handle; | ||
61 | struct mon_msg *msg_array[MON_MSGLIM]; | 60 | struct mon_msg *msg_array[MON_MSGLIM]; |
62 | unsigned int write_index; | 61 | unsigned int write_index; |
63 | unsigned int read_index; | 62 | unsigned int read_index; |
@@ -75,8 +74,6 @@ static unsigned long mon_dcss_end; | |||
75 | static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); | 74 | static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); |
76 | static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); | 75 | static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); |
77 | 76 | ||
78 | static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
79 | |||
80 | static u8 user_data_connect[16] = { | 77 | static u8 user_data_connect[16] = { |
81 | /* Version code, must be 0x01 for shared mode */ | 78 | /* Version code, must be 0x01 for shared mode */ |
82 | 0x01, | 79 | 0x01, |
@@ -100,8 +97,7 @@ static u8 user_data_sever[16] = { | |||
100 | * Create the 8 bytes EBCDIC DCSS segment name from | 97 | * Create the 8 bytes EBCDIC DCSS segment name from |
101 | * an ASCII name, incl. padding | 98 | * an ASCII name, incl. padding |
102 | */ | 99 | */ |
103 | static inline void | 100 | static inline void dcss_mkname(char *ascii_name, char *ebcdic_name) |
104 | dcss_mkname(char *ascii_name, char *ebcdic_name) | ||
105 | { | 101 | { |
106 | int i; | 102 | int i; |
107 | 103 | ||
@@ -119,8 +115,7 @@ dcss_mkname(char *ascii_name, char *ebcdic_name) | |||
119 | * print appropriate error message for segment_load()/segment_type() | 115 | * print appropriate error message for segment_load()/segment_type() |
120 | * return code | 116 | * return code |
121 | */ | 117 | */ |
122 | static void | 118 | static void mon_segment_warn(int rc, char* seg_name) |
123 | mon_segment_warn(int rc, char* seg_name) | ||
124 | { | 119 | { |
125 | switch (rc) { | 120 | switch (rc) { |
126 | case -ENOENT: | 121 | case -ENOENT: |
@@ -166,44 +161,37 @@ mon_segment_warn(int rc, char* seg_name) | |||
166 | } | 161 | } |
167 | } | 162 | } |
168 | 163 | ||
169 | static inline unsigned long | 164 | static inline unsigned long mon_mca_start(struct mon_msg *monmsg) |
170 | mon_mca_start(struct mon_msg *monmsg) | ||
171 | { | 165 | { |
172 | return monmsg->local_eib.ln1msg1.iprmmsg1_u32; | 166 | return *(u32 *) &monmsg->msg.rmmsg; |
173 | } | 167 | } |
174 | 168 | ||
175 | static inline unsigned long | 169 | static inline unsigned long mon_mca_end(struct mon_msg *monmsg) |
176 | mon_mca_end(struct mon_msg *monmsg) | ||
177 | { | 170 | { |
178 | return monmsg->local_eib.ln1msg2.ipbfln1f; | 171 | return *(u32 *) &monmsg->msg.rmmsg[4]; |
179 | } | 172 | } |
180 | 173 | ||
181 | static inline u8 | 174 | static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index) |
182 | mon_mca_type(struct mon_msg *monmsg, u8 index) | ||
183 | { | 175 | { |
184 | return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); | 176 | return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); |
185 | } | 177 | } |
186 | 178 | ||
187 | static inline u32 | 179 | static inline u32 mon_mca_size(struct mon_msg *monmsg) |
188 | mon_mca_size(struct mon_msg *monmsg) | ||
189 | { | 180 | { |
190 | return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; | 181 | return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; |
191 | } | 182 | } |
192 | 183 | ||
193 | static inline u32 | 184 | static inline u32 mon_rec_start(struct mon_msg *monmsg) |
194 | mon_rec_start(struct mon_msg *monmsg) | ||
195 | { | 185 | { |
196 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); | 186 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); |
197 | } | 187 | } |
198 | 188 | ||
199 | static inline u32 | 189 | static inline u32 mon_rec_end(struct mon_msg *monmsg) |
200 | mon_rec_end(struct mon_msg *monmsg) | ||
201 | { | 190 | { |
202 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); | 191 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); |
203 | } | 192 | } |
204 | 193 | ||
205 | static inline int | 194 | static inline int mon_check_mca(struct mon_msg *monmsg) |
206 | mon_check_mca(struct mon_msg *monmsg) | ||
207 | { | 195 | { |
208 | if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || | 196 | if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || |
209 | (mon_rec_start(monmsg) < mon_dcss_start) || | 197 | (mon_rec_start(monmsg) < mon_dcss_start) || |
@@ -221,20 +209,17 @@ mon_check_mca(struct mon_msg *monmsg) | |||
221 | return 0; | 209 | return 0; |
222 | } | 210 | } |
223 | 211 | ||
224 | static inline int | 212 | static inline int mon_send_reply(struct mon_msg *monmsg, |
225 | mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) | 213 | struct mon_private *monpriv) |
226 | { | 214 | { |
227 | u8 prmmsg[8]; | ||
228 | int rc; | 215 | int rc; |
229 | 216 | ||
230 | P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " | 217 | P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " |
231 | "0x%08X\n\n", | 218 | "0x%08X\n\n", |
232 | monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, | 219 | monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); |
233 | monmsg->local_eib.iptrgcls); | 220 | |
234 | rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid, | 221 | rc = iucv_message_reply(monpriv->path, &monmsg->msg, |
235 | monmsg->local_eib.ipmsgid, | 222 | IUCV_IPRMDATA, NULL, 0); |
236 | monmsg->local_eib.iptrgcls, | ||
237 | 0, prmmsg); | ||
238 | atomic_dec(&monpriv->msglim_count); | 223 | atomic_dec(&monpriv->msglim_count); |
239 | if (likely(!monmsg->msglim_reached)) { | 224 | if (likely(!monmsg->msglim_reached)) { |
240 | monmsg->pos = 0; | 225 | monmsg->pos = 0; |
@@ -251,10 +236,19 @@ mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
251 | return 0; | 236 | return 0; |
252 | } | 237 | } |
253 | 238 | ||
254 | static inline struct mon_private * | 239 | static inline void mon_free_mem(struct mon_private *monpriv) |
255 | mon_alloc_mem(void) | 240 | { |
241 | int i; | ||
242 | |||
243 | for (i = 0; i < MON_MSGLIM; i++) | ||
244 | if (monpriv->msg_array[i]) | ||
245 | kfree(monpriv->msg_array[i]); | ||
246 | kfree(monpriv); | ||
247 | } | ||
248 | |||
249 | static inline struct mon_private *mon_alloc_mem(void) | ||
256 | { | 250 | { |
257 | int i,j; | 251 | int i; |
258 | struct mon_private *monpriv; | 252 | struct mon_private *monpriv; |
259 | 253 | ||
260 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); | 254 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); |
@@ -267,16 +261,15 @@ mon_alloc_mem(void) | |||
267 | GFP_KERNEL); | 261 | GFP_KERNEL); |
268 | if (!monpriv->msg_array[i]) { | 262 | if (!monpriv->msg_array[i]) { |
269 | P_ERROR("open, no memory for msg_array\n"); | 263 | P_ERROR("open, no memory for msg_array\n"); |
270 | for (j = 0; j < i; j++) | 264 | mon_free_mem(monpriv); |
271 | kfree(monpriv->msg_array[j]); | ||
272 | return NULL; | 265 | return NULL; |
273 | } | 266 | } |
274 | } | 267 | } |
275 | return monpriv; | 268 | return monpriv; |
276 | } | 269 | } |
277 | 270 | ||
278 | static inline void | 271 | static inline void mon_read_debug(struct mon_msg *monmsg, |
279 | mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | 272 | struct mon_private *monpriv) |
280 | { | 273 | { |
281 | #ifdef MON_DEBUG | 274 | #ifdef MON_DEBUG |
282 | u8 msg_type[2], mca_type; | 275 | u8 msg_type[2], mca_type; |
@@ -284,7 +277,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
284 | 277 | ||
285 | records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; | 278 | records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; |
286 | 279 | ||
287 | memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2); | 280 | memcpy(msg_type, &monmsg->msg.class, 2); |
288 | EBCASC(msg_type, 2); | 281 | EBCASC(msg_type, 2); |
289 | mca_type = mon_mca_type(monmsg, 0); | 282 | mca_type = mon_mca_type(monmsg, 0); |
290 | EBCASC(&mca_type, 1); | 283 | EBCASC(&mca_type, 1); |
@@ -292,8 +285,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
292 | P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", | 285 | P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", |
293 | monpriv->read_index, monpriv->write_index); | 286 | monpriv->read_index, monpriv->write_index); |
294 | P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", | 287 | P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", |
295 | monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, | 288 | monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); |
296 | monmsg->local_eib.iptrgcls); | ||
297 | P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", | 289 | P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", |
298 | msg_type[0], msg_type[1], mca_type ? mca_type : 'X', | 290 | msg_type[0], msg_type[1], mca_type ? mca_type : 'X', |
299 | mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); | 291 | mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); |
@@ -306,8 +298,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
306 | #endif | 298 | #endif |
307 | } | 299 | } |
308 | 300 | ||
309 | static inline void | 301 | static inline void mon_next_mca(struct mon_msg *monmsg) |
310 | mon_next_mca(struct mon_msg *monmsg) | ||
311 | { | 302 | { |
312 | if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) | 303 | if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) |
313 | return; | 304 | return; |
@@ -316,8 +307,7 @@ mon_next_mca(struct mon_msg *monmsg) | |||
316 | monmsg->pos = 0; | 307 | monmsg->pos = 0; |
317 | } | 308 | } |
318 | 309 | ||
319 | static inline struct mon_msg * | 310 | static inline struct mon_msg *mon_next_message(struct mon_private *monpriv) |
320 | mon_next_message(struct mon_private *monpriv) | ||
321 | { | 311 | { |
322 | struct mon_msg *monmsg; | 312 | struct mon_msg *monmsg; |
323 | 313 | ||
@@ -342,39 +332,37 @@ mon_next_message(struct mon_private *monpriv) | |||
342 | /****************************************************************************** | 332 | /****************************************************************************** |
343 | * IUCV handler * | 333 | * IUCV handler * |
344 | *****************************************************************************/ | 334 | *****************************************************************************/ |
345 | static void | 335 | static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) |
346 | mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data) | ||
347 | { | 336 | { |
348 | struct mon_private *monpriv = (struct mon_private *) pgm_data; | 337 | struct mon_private *monpriv = path->private; |
349 | 338 | ||
350 | P_DEBUG("IUCV connection completed\n"); | 339 | P_DEBUG("IUCV connection completed\n"); |
351 | P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " | 340 | P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " |
352 | "0x%02X, Sample = 0x%02X\n", | 341 | "0x%02X, Sample = 0x%02X\n", |
353 | eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]); | 342 | ipuser[0], ipuser[1], ipuser[2]); |
354 | atomic_set(&monpriv->iucv_connected, 1); | 343 | atomic_set(&monpriv->iucv_connected, 1); |
355 | wake_up(&mon_conn_wait_queue); | 344 | wake_up(&mon_conn_wait_queue); |
356 | } | 345 | } |
357 | 346 | ||
358 | static void | 347 | static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) |
359 | mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data) | ||
360 | { | 348 | { |
361 | struct mon_private *monpriv = (struct mon_private *) pgm_data; | 349 | struct mon_private *monpriv = path->private; |
362 | 350 | ||
363 | P_ERROR("IUCV connection severed with rc = 0x%X\n", | 351 | P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); |
364 | (u8) eib->ipuser[0]); | 352 | iucv_path_sever(path, NULL); |
365 | atomic_set(&monpriv->iucv_severed, 1); | 353 | atomic_set(&monpriv->iucv_severed, 1); |
366 | wake_up(&mon_conn_wait_queue); | 354 | wake_up(&mon_conn_wait_queue); |
367 | wake_up_interruptible(&mon_read_wait_queue); | 355 | wake_up_interruptible(&mon_read_wait_queue); |
368 | } | 356 | } |
369 | 357 | ||
370 | static void | 358 | static void mon_iucv_message_pending(struct iucv_path *path, |
371 | mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) | 359 | struct iucv_message *msg) |
372 | { | 360 | { |
373 | struct mon_private *monpriv = (struct mon_private *) pgm_data; | 361 | struct mon_private *monpriv = path->private; |
374 | 362 | ||
375 | P_DEBUG("IUCV message pending\n"); | 363 | P_DEBUG("IUCV message pending\n"); |
376 | memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib, | 364 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, |
377 | sizeof(iucv_MessagePending)); | 365 | msg, sizeof(*msg)); |
378 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { | 366 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { |
379 | P_WARNING("IUCV message pending, message limit (%i) reached\n", | 367 | P_WARNING("IUCV message pending, message limit (%i) reached\n", |
380 | MON_MSGLIM); | 368 | MON_MSGLIM); |
@@ -385,54 +373,45 @@ mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) | |||
385 | wake_up_interruptible(&mon_read_wait_queue); | 373 | wake_up_interruptible(&mon_read_wait_queue); |
386 | } | 374 | } |
387 | 375 | ||
388 | static iucv_interrupt_ops_t mon_iucvops = { | 376 | static struct iucv_handler monreader_iucv_handler = { |
389 | .ConnectionComplete = mon_iucv_ConnectionComplete, | 377 | .path_complete = mon_iucv_path_complete, |
390 | .ConnectionSevered = mon_iucv_ConnectionSevered, | 378 | .path_severed = mon_iucv_path_severed, |
391 | .MessagePending = mon_iucv_MessagePending, | 379 | .message_pending = mon_iucv_message_pending, |
392 | }; | 380 | }; |
393 | 381 | ||
394 | /****************************************************************************** | 382 | /****************************************************************************** |
395 | * file operations * | 383 | * file operations * |
396 | *****************************************************************************/ | 384 | *****************************************************************************/ |
397 | static int | 385 | static int mon_open(struct inode *inode, struct file *filp) |
398 | mon_open(struct inode *inode, struct file *filp) | ||
399 | { | 386 | { |
400 | int rc, i; | ||
401 | struct mon_private *monpriv; | 387 | struct mon_private *monpriv; |
388 | int rc; | ||
402 | 389 | ||
403 | /* | 390 | /* |
404 | * only one user allowed | 391 | * only one user allowed |
405 | */ | 392 | */ |
393 | rc = -EBUSY; | ||
406 | if (test_and_set_bit(MON_IN_USE, &mon_in_use)) | 394 | if (test_and_set_bit(MON_IN_USE, &mon_in_use)) |
407 | return -EBUSY; | 395 | goto out; |
408 | 396 | ||
397 | rc = -ENOMEM; | ||
409 | monpriv = mon_alloc_mem(); | 398 | monpriv = mon_alloc_mem(); |
410 | if (!monpriv) | 399 | if (!monpriv) |
411 | return -ENOMEM; | 400 | goto out_use; |
412 | 401 | ||
413 | /* | 402 | /* |
414 | * Register with IUCV and connect to *MONITOR service | 403 | * Connect to *MONITOR service |
415 | */ | 404 | */ |
416 | monpriv->iucv_handle = iucv_register_program("my_monreader ", | 405 | monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); |
417 | MON_SERVICE, | 406 | if (!monpriv->path) |
418 | NULL, | 407 | goto out_priv; |
419 | &mon_iucvops, | 408 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, |
420 | monpriv); | 409 | MON_SERVICE, NULL, user_data_connect, monpriv); |
421 | if (!monpriv->iucv_handle) { | ||
422 | P_ERROR("failed to register with iucv driver\n"); | ||
423 | rc = -EIO; | ||
424 | goto out_error; | ||
425 | } | ||
426 | P_INFO("open, registered with IUCV\n"); | ||
427 | |||
428 | rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect, | ||
429 | MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL, | ||
430 | monpriv->iucv_handle, NULL); | ||
431 | if (rc) { | 410 | if (rc) { |
432 | P_ERROR("iucv connection to *MONITOR failed with " | 411 | P_ERROR("iucv connection to *MONITOR failed with " |
433 | "IPUSER SEVER code = %i\n", rc); | 412 | "IPUSER SEVER code = %i\n", rc); |
434 | rc = -EIO; | 413 | rc = -EIO; |
435 | goto out_unregister; | 414 | goto out_path; |
436 | } | 415 | } |
437 | /* | 416 | /* |
438 | * Wait for connection confirmation | 417 | * Wait for connection confirmation |
@@ -444,24 +423,23 @@ mon_open(struct inode *inode, struct file *filp) | |||
444 | atomic_set(&monpriv->iucv_severed, 0); | 423 | atomic_set(&monpriv->iucv_severed, 0); |
445 | atomic_set(&monpriv->iucv_connected, 0); | 424 | atomic_set(&monpriv->iucv_connected, 0); |
446 | rc = -EIO; | 425 | rc = -EIO; |
447 | goto out_unregister; | 426 | goto out_path; |
448 | } | 427 | } |
449 | P_INFO("open, established connection to *MONITOR service\n\n"); | 428 | P_INFO("open, established connection to *MONITOR service\n\n"); |
450 | filp->private_data = monpriv; | 429 | filp->private_data = monpriv; |
451 | return nonseekable_open(inode, filp); | 430 | return nonseekable_open(inode, filp); |
452 | 431 | ||
453 | out_unregister: | 432 | out_path: |
454 | iucv_unregister_program(monpriv->iucv_handle); | 433 | kfree(monpriv->path); |
455 | out_error: | 434 | out_priv: |
456 | for (i = 0; i < MON_MSGLIM; i++) | 435 | mon_free_mem(monpriv); |
457 | kfree(monpriv->msg_array[i]); | 436 | out_use: |
458 | kfree(monpriv); | ||
459 | clear_bit(MON_IN_USE, &mon_in_use); | 437 | clear_bit(MON_IN_USE, &mon_in_use); |
438 | out: | ||
460 | return rc; | 439 | return rc; |
461 | } | 440 | } |
462 | 441 | ||
463 | static int | 442 | static int mon_close(struct inode *inode, struct file *filp) |
464 | mon_close(struct inode *inode, struct file *filp) | ||
465 | { | 443 | { |
466 | int rc, i; | 444 | int rc, i; |
467 | struct mon_private *monpriv = filp->private_data; | 445 | struct mon_private *monpriv = filp->private_data; |
@@ -469,18 +447,12 @@ mon_close(struct inode *inode, struct file *filp) | |||
469 | /* | 447 | /* |
470 | * Close IUCV connection and unregister | 448 | * Close IUCV connection and unregister |
471 | */ | 449 | */ |
472 | rc = iucv_sever(monpriv->pathid, user_data_sever); | 450 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
473 | if (rc) | 451 | if (rc) |
474 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); | 452 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); |
475 | else | 453 | else |
476 | P_INFO("close, terminated connection to *MONITOR service\n"); | 454 | P_INFO("close, terminated connection to *MONITOR service\n"); |
477 | 455 | ||
478 | rc = iucv_unregister_program(monpriv->iucv_handle); | ||
479 | if (rc) | ||
480 | P_ERROR("close, iucv_unregister failed with rc = %i\n", rc); | ||
481 | else | ||
482 | P_INFO("close, unregistered with IUCV\n"); | ||
483 | |||
484 | atomic_set(&monpriv->iucv_severed, 0); | 456 | atomic_set(&monpriv->iucv_severed, 0); |
485 | atomic_set(&monpriv->iucv_connected, 0); | 457 | atomic_set(&monpriv->iucv_connected, 0); |
486 | atomic_set(&monpriv->read_ready, 0); | 458 | atomic_set(&monpriv->read_ready, 0); |
@@ -495,8 +467,8 @@ mon_close(struct inode *inode, struct file *filp) | |||
495 | return 0; | 467 | return 0; |
496 | } | 468 | } |
497 | 469 | ||
498 | static ssize_t | 470 | static ssize_t mon_read(struct file *filp, char __user *data, |
499 | mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) | 471 | size_t count, loff_t *ppos) |
500 | { | 472 | { |
501 | struct mon_private *monpriv = filp->private_data; | 473 | struct mon_private *monpriv = filp->private_data; |
502 | struct mon_msg *monmsg; | 474 | struct mon_msg *monmsg; |
@@ -563,8 +535,7 @@ out_copy: | |||
563 | return count; | 535 | return count; |
564 | } | 536 | } |
565 | 537 | ||
566 | static unsigned int | 538 | static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p) |
567 | mon_poll(struct file *filp, struct poll_table_struct *p) | ||
568 | { | 539 | { |
569 | struct mon_private *monpriv = filp->private_data; | 540 | struct mon_private *monpriv = filp->private_data; |
570 | 541 | ||
@@ -593,8 +564,7 @@ static struct miscdevice mon_dev = { | |||
593 | /****************************************************************************** | 564 | /****************************************************************************** |
594 | * module init/exit * | 565 | * module init/exit * |
595 | *****************************************************************************/ | 566 | *****************************************************************************/ |
596 | static int __init | 567 | static int __init mon_init(void) |
597 | mon_init(void) | ||
598 | { | 568 | { |
599 | int rc; | 569 | int rc; |
600 | 570 | ||
@@ -603,22 +573,34 @@ mon_init(void) | |||
603 | return -ENODEV; | 573 | return -ENODEV; |
604 | } | 574 | } |
605 | 575 | ||
576 | /* | ||
577 | * Register with IUCV and connect to *MONITOR service | ||
578 | */ | ||
579 | rc = iucv_register(&monreader_iucv_handler, 1); | ||
580 | if (rc) { | ||
581 | P_ERROR("failed to register with iucv driver\n"); | ||
582 | return rc; | ||
583 | } | ||
584 | P_INFO("open, registered with IUCV\n"); | ||
585 | |||
606 | rc = segment_type(mon_dcss_name); | 586 | rc = segment_type(mon_dcss_name); |
607 | if (rc < 0) { | 587 | if (rc < 0) { |
608 | mon_segment_warn(rc, mon_dcss_name); | 588 | mon_segment_warn(rc, mon_dcss_name); |
609 | return rc; | 589 | goto out_iucv; |
610 | } | 590 | } |
611 | if (rc != SEG_TYPE_SC) { | 591 | if (rc != SEG_TYPE_SC) { |
612 | P_ERROR("segment %s has unsupported type, should be SC\n", | 592 | P_ERROR("segment %s has unsupported type, should be SC\n", |
613 | mon_dcss_name); | 593 | mon_dcss_name); |
614 | return -EINVAL; | 594 | rc = -EINVAL; |
595 | goto out_iucv; | ||
615 | } | 596 | } |
616 | 597 | ||
617 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, | 598 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, |
618 | &mon_dcss_start, &mon_dcss_end); | 599 | &mon_dcss_start, &mon_dcss_end); |
619 | if (rc < 0) { | 600 | if (rc < 0) { |
620 | mon_segment_warn(rc, mon_dcss_name); | 601 | mon_segment_warn(rc, mon_dcss_name); |
621 | return -EINVAL; | 602 | rc = -EINVAL; |
603 | goto out_iucv; | ||
622 | } | 604 | } |
623 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); | 605 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); |
624 | 606 | ||
@@ -634,14 +616,16 @@ mon_init(void) | |||
634 | 616 | ||
635 | out: | 617 | out: |
636 | segment_unload(mon_dcss_name); | 618 | segment_unload(mon_dcss_name); |
619 | out_iucv: | ||
620 | iucv_unregister(&monreader_iucv_handler, 1); | ||
637 | return rc; | 621 | return rc; |
638 | } | 622 | } |
639 | 623 | ||
640 | static void __exit | 624 | static void __exit mon_exit(void) |
641 | mon_exit(void) | ||
642 | { | 625 | { |
643 | segment_unload(mon_dcss_name); | 626 | segment_unload(mon_dcss_name); |
644 | WARN_ON(misc_deregister(&mon_dev) != 0); | 627 | WARN_ON(misc_deregister(&mon_dev) != 0); |
628 | iucv_unregister(&monreader_iucv_handler, 1); | ||
645 | return; | 629 | return; |
646 | } | 630 | } |
647 | 631 | ||
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index cdb24f528112..9e451acc6491 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) | |||
67 | return -EINVAL; | 67 | return -EINVAL; |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, | 70 | static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, |
71 | struct monwrite_hdr *monhdr) | 71 | struct monwrite_hdr *monhdr) |
72 | { | 72 | { |
73 | struct mon_buf *entry, *next; | 73 | struct mon_buf *entry, *next; |
74 | 74 | ||
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 7a84014f2037..8facd14adb7c 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
31 | 31 | ||
32 | struct class *class3270; | 32 | static struct class *class3270; |
33 | 33 | ||
34 | /* The main 3270 data structure. */ | 34 | /* The main 3270 data structure. */ |
35 | struct raw3270 { | 35 | struct raw3270 { |
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue); | |||
86 | /* | 86 | /* |
87 | * Encode array for 12 bit 3270 addresses. | 87 | * Encode array for 12 bit 3270 addresses. |
88 | */ | 88 | */ |
89 | unsigned char raw3270_ebcgraf[64] = { | 89 | static unsigned char raw3270_ebcgraf[64] = { |
90 | 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, | 90 | 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, |
91 | 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, | 91 | 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, |
92 | 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, | 92 | 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 8a056df09d6b..f171de3b0b11 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t { | |||
59 | /* Internal state: is a request active at the sclp? */ | 59 | /* Internal state: is a request active at the sclp? */ |
60 | static volatile enum sclp_running_state_t { | 60 | static volatile enum sclp_running_state_t { |
61 | sclp_running_state_idle, | 61 | sclp_running_state_idle, |
62 | sclp_running_state_running | 62 | sclp_running_state_running, |
63 | sclp_running_state_reset_pending | ||
63 | } sclp_running_state = sclp_running_state_idle; | 64 | } sclp_running_state = sclp_running_state_idle; |
64 | 65 | ||
65 | /* Internal state: is a read request pending? */ | 66 | /* Internal state: is a read request pending? */ |
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t { | |||
88 | 89 | ||
89 | /* Timeout intervals in seconds.*/ | 90 | /* Timeout intervals in seconds.*/ |
90 | #define SCLP_BUSY_INTERVAL 10 | 91 | #define SCLP_BUSY_INTERVAL 10 |
91 | #define SCLP_RETRY_INTERVAL 15 | 92 | #define SCLP_RETRY_INTERVAL 30 |
92 | 93 | ||
93 | static void sclp_process_queue(void); | 94 | static void sclp_process_queue(void); |
94 | static int sclp_init_mask(int calculate); | 95 | static int sclp_init_mask(int calculate); |
95 | static int sclp_init(void); | 96 | static int sclp_init(void); |
96 | 97 | ||
97 | /* Perform service call. Return 0 on success, non-zero otherwise. */ | 98 | /* Perform service call. Return 0 on success, non-zero otherwise. */ |
98 | static int | 99 | int |
99 | service_call(sclp_cmdw_t command, void *sccb) | 100 | sclp_service_call(sclp_cmdw_t command, void *sccb) |
100 | { | 101 | { |
101 | int cc; | 102 | int cc; |
102 | 103 | ||
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb) | |||
113 | return 0; | 114 | return 0; |
114 | } | 115 | } |
115 | 116 | ||
116 | /* Request timeout handler. Restart the request queue. If DATA is non-zero, | 117 | static inline void __sclp_make_read_req(void); |
117 | * force restart of running request. */ | 118 | |
118 | static void | 119 | static void |
119 | sclp_request_timeout(unsigned long data) | 120 | __sclp_queue_read_req(void) |
120 | { | 121 | { |
121 | unsigned long flags; | 122 | if (sclp_reading_state == sclp_reading_state_idle) { |
122 | 123 | sclp_reading_state = sclp_reading_state_reading; | |
123 | if (data) { | 124 | __sclp_make_read_req(); |
124 | spin_lock_irqsave(&sclp_lock, flags); | 125 | /* Add request to head of queue */ |
125 | sclp_running_state = sclp_running_state_idle; | 126 | list_add(&sclp_read_req.list, &sclp_req_queue); |
126 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
127 | } | 127 | } |
128 | sclp_process_queue(); | ||
129 | } | 128 | } |
130 | 129 | ||
131 | /* Set up request retry timer. Called while sclp_lock is locked. */ | 130 | /* Set up request retry timer. Called while sclp_lock is locked. */ |
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), | |||
140 | add_timer(&sclp_request_timer); | 139 | add_timer(&sclp_request_timer); |
141 | } | 140 | } |
142 | 141 | ||
142 | /* Request timeout handler. Restart the request queue. If DATA is non-zero, | ||
143 | * force restart of running request. */ | ||
144 | static void | ||
145 | sclp_request_timeout(unsigned long data) | ||
146 | { | ||
147 | unsigned long flags; | ||
148 | |||
149 | spin_lock_irqsave(&sclp_lock, flags); | ||
150 | if (data) { | ||
151 | if (sclp_running_state == sclp_running_state_running) { | ||
152 | /* Break running state and queue NOP read event request | ||
153 | * to get a defined interface state. */ | ||
154 | __sclp_queue_read_req(); | ||
155 | sclp_running_state = sclp_running_state_idle; | ||
156 | } | ||
157 | } else { | ||
158 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | ||
159 | sclp_request_timeout, 0); | ||
160 | } | ||
161 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
162 | sclp_process_queue(); | ||
163 | } | ||
164 | |||
143 | /* Try to start a request. Return zero if the request was successfully | 165 | /* Try to start a request. Return zero if the request was successfully |
144 | * started or if it will be started at a later time. Return non-zero otherwise. | 166 | * started or if it will be started at a later time. Return non-zero otherwise. |
145 | * Called while sclp_lock is locked. */ | 167 | * Called while sclp_lock is locked. */ |
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req) | |||
151 | if (sclp_running_state != sclp_running_state_idle) | 173 | if (sclp_running_state != sclp_running_state_idle) |
152 | return 0; | 174 | return 0; |
153 | del_timer(&sclp_request_timer); | 175 | del_timer(&sclp_request_timer); |
154 | rc = service_call(req->command, req->sccb); | 176 | rc = sclp_service_call(req->command, req->sccb); |
155 | req->start_count++; | 177 | req->start_count++; |
156 | 178 | ||
157 | if (rc == 0) { | 179 | if (rc == 0) { |
@@ -191,7 +213,15 @@ sclp_process_queue(void) | |||
191 | rc = __sclp_start_request(req); | 213 | rc = __sclp_start_request(req); |
192 | if (rc == 0) | 214 | if (rc == 0) |
193 | break; | 215 | break; |
194 | /* Request failed. */ | 216 | /* Request failed */ |
217 | if (req->start_count > 1) { | ||
218 | /* Cannot abort already submitted request - could still | ||
219 | * be active at the SCLP */ | ||
220 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | ||
221 | sclp_request_timeout, 0); | ||
222 | break; | ||
223 | } | ||
224 | /* Post-processing for aborted request */ | ||
195 | list_del(&req->list); | 225 | list_del(&req->list); |
196 | if (req->callback) { | 226 | if (req->callback) { |
197 | spin_unlock_irqrestore(&sclp_lock, flags); | 227 | spin_unlock_irqrestore(&sclp_lock, flags); |
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req) | |||
221 | list_add_tail(&req->list, &sclp_req_queue); | 251 | list_add_tail(&req->list, &sclp_req_queue); |
222 | rc = 0; | 252 | rc = 0; |
223 | /* Start if request is first in list */ | 253 | /* Start if request is first in list */ |
224 | if (req->list.prev == &sclp_req_queue) { | 254 | if (sclp_running_state == sclp_running_state_idle && |
255 | req->list.prev == &sclp_req_queue) { | ||
225 | rc = __sclp_start_request(req); | 256 | rc = __sclp_start_request(req); |
226 | if (rc) | 257 | if (rc) |
227 | list_del(&req->list); | 258 | list_del(&req->list); |
@@ -294,7 +325,7 @@ __sclp_make_read_req(void) | |||
294 | sccb = (struct sccb_header *) sclp_read_sccb; | 325 | sccb = (struct sccb_header *) sclp_read_sccb; |
295 | clear_page(sccb); | 326 | clear_page(sccb); |
296 | memset(&sclp_read_req, 0, sizeof(struct sclp_req)); | 327 | memset(&sclp_read_req, 0, sizeof(struct sclp_req)); |
297 | sclp_read_req.command = SCLP_CMDW_READDATA; | 328 | sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA; |
298 | sclp_read_req.status = SCLP_REQ_QUEUED; | 329 | sclp_read_req.status = SCLP_REQ_QUEUED; |
299 | sclp_read_req.start_count = 0; | 330 | sclp_read_req.start_count = 0; |
300 | sclp_read_req.callback = sclp_read_cb; | 331 | sclp_read_req.callback = sclp_read_cb; |
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code) | |||
334 | finished_sccb = S390_lowcore.ext_params & 0xfffffff8; | 365 | finished_sccb = S390_lowcore.ext_params & 0xfffffff8; |
335 | evbuf_pending = S390_lowcore.ext_params & 0x3; | 366 | evbuf_pending = S390_lowcore.ext_params & 0x3; |
336 | if (finished_sccb) { | 367 | if (finished_sccb) { |
368 | del_timer(&sclp_request_timer); | ||
369 | sclp_running_state = sclp_running_state_reset_pending; | ||
337 | req = __sclp_find_req(finished_sccb); | 370 | req = __sclp_find_req(finished_sccb); |
338 | if (req) { | 371 | if (req) { |
339 | /* Request post-processing */ | 372 | /* Request post-processing */ |
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code) | |||
348 | sclp_running_state = sclp_running_state_idle; | 381 | sclp_running_state = sclp_running_state_idle; |
349 | } | 382 | } |
350 | if (evbuf_pending && sclp_receive_mask != 0 && | 383 | if (evbuf_pending && sclp_receive_mask != 0 && |
351 | sclp_reading_state == sclp_reading_state_idle && | 384 | sclp_activation_state == sclp_activation_state_active) |
352 | sclp_activation_state == sclp_activation_state_active ) { | 385 | __sclp_queue_read_req(); |
353 | sclp_reading_state = sclp_reading_state_reading; | ||
354 | __sclp_make_read_req(); | ||
355 | /* Add request to head of queue */ | ||
356 | list_add(&sclp_read_req.list, &sclp_req_queue); | ||
357 | } | ||
358 | spin_unlock(&sclp_lock); | 386 | spin_unlock(&sclp_lock); |
359 | sclp_process_queue(); | 387 | sclp_process_queue(); |
360 | } | 388 | } |
@@ -374,6 +402,7 @@ sclp_sync_wait(void) | |||
374 | unsigned long flags; | 402 | unsigned long flags; |
375 | unsigned long cr0, cr0_sync; | 403 | unsigned long cr0, cr0_sync; |
376 | u64 timeout; | 404 | u64 timeout; |
405 | int irq_context; | ||
377 | 406 | ||
378 | /* We'll be disabling timer interrupts, so we need a custom timeout | 407 | /* We'll be disabling timer interrupts, so we need a custom timeout |
379 | * mechanism */ | 408 | * mechanism */ |
@@ -386,7 +415,9 @@ sclp_sync_wait(void) | |||
386 | } | 415 | } |
387 | local_irq_save(flags); | 416 | local_irq_save(flags); |
388 | /* Prevent bottom half from executing once we force interrupts open */ | 417 | /* Prevent bottom half from executing once we force interrupts open */ |
389 | local_bh_disable(); | 418 | irq_context = in_interrupt(); |
419 | if (!irq_context) | ||
420 | local_bh_disable(); | ||
390 | /* Enable service-signal interruption, disable timer interrupts */ | 421 | /* Enable service-signal interruption, disable timer interrupts */ |
391 | trace_hardirqs_on(); | 422 | trace_hardirqs_on(); |
392 | __ctl_store(cr0, 0, 0); | 423 | __ctl_store(cr0, 0, 0); |
@@ -402,19 +433,19 @@ sclp_sync_wait(void) | |||
402 | get_clock() > timeout && | 433 | get_clock() > timeout && |
403 | del_timer(&sclp_request_timer)) | 434 | del_timer(&sclp_request_timer)) |
404 | sclp_request_timer.function(sclp_request_timer.data); | 435 | sclp_request_timer.function(sclp_request_timer.data); |
405 | barrier(); | ||
406 | cpu_relax(); | 436 | cpu_relax(); |
407 | } | 437 | } |
408 | local_irq_disable(); | 438 | local_irq_disable(); |
409 | __ctl_load(cr0, 0, 0); | 439 | __ctl_load(cr0, 0, 0); |
410 | _local_bh_enable(); | 440 | if (!irq_context) |
441 | _local_bh_enable(); | ||
411 | local_irq_restore(flags); | 442 | local_irq_restore(flags); |
412 | } | 443 | } |
413 | 444 | ||
414 | EXPORT_SYMBOL(sclp_sync_wait); | 445 | EXPORT_SYMBOL(sclp_sync_wait); |
415 | 446 | ||
416 | /* Dispatch changes in send and receive mask to registered listeners. */ | 447 | /* Dispatch changes in send and receive mask to registered listeners. */ |
417 | static inline void | 448 | static void |
418 | sclp_dispatch_state_change(void) | 449 | sclp_dispatch_state_change(void) |
419 | { | 450 | { |
420 | struct list_head *l; | 451 | struct list_head *l; |
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask) | |||
597 | sccb = (struct init_sccb *) sclp_init_sccb; | 628 | sccb = (struct init_sccb *) sclp_init_sccb; |
598 | clear_page(sccb); | 629 | clear_page(sccb); |
599 | memset(&sclp_init_req, 0, sizeof(struct sclp_req)); | 630 | memset(&sclp_init_req, 0, sizeof(struct sclp_req)); |
600 | sclp_init_req.command = SCLP_CMDW_WRITEMASK; | 631 | sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; |
601 | sclp_init_req.status = SCLP_REQ_FILLED; | 632 | sclp_init_req.status = SCLP_REQ_FILLED; |
602 | sclp_init_req.start_count = 0; | 633 | sclp_init_req.start_count = 0; |
603 | sclp_init_req.callback = NULL; | 634 | sclp_init_req.callback = NULL; |
@@ -800,7 +831,7 @@ sclp_check_interface(void) | |||
800 | for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { | 831 | for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { |
801 | __sclp_make_init_req(0, 0); | 832 | __sclp_make_init_req(0, 0); |
802 | sccb = (struct init_sccb *) sclp_init_req.sccb; | 833 | sccb = (struct init_sccb *) sclp_init_req.sccb; |
803 | rc = service_call(sclp_init_req.command, sccb); | 834 | rc = sclp_service_call(sclp_init_req.command, sccb); |
804 | if (rc == -EIO) | 835 | if (rc == -EIO) |
805 | break; | 836 | break; |
806 | sclp_init_req.status = SCLP_REQ_RUNNING; | 837 | sclp_init_req.status = SCLP_REQ_RUNNING; |
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 2c71d6ee7b5b..7d29ab45a6ed 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | 15 | #include <asm/sclp.h> | |
16 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
17 | 17 | ||
18 | /* maximum number of pages concerning our own memory management */ | 18 | /* maximum number of pages concerning our own memory management */ |
@@ -49,9 +49,11 @@ | |||
49 | 49 | ||
50 | typedef unsigned int sclp_cmdw_t; | 50 | typedef unsigned int sclp_cmdw_t; |
51 | 51 | ||
52 | #define SCLP_CMDW_READDATA 0x00770005 | 52 | #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 |
53 | #define SCLP_CMDW_WRITEDATA 0x00760005 | 53 | #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 |
54 | #define SCLP_CMDW_WRITEMASK 0x00780005 | 54 | #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 |
55 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | ||
56 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | ||
55 | 57 | ||
56 | #define GDS_ID_MDSMU 0x1310 | 58 | #define GDS_ID_MDSMU 0x1310 |
57 | #define GDS_ID_MDSRouteInfo 0x1311 | 59 | #define GDS_ID_MDSRouteInfo 0x1311 |
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t; | |||
66 | 68 | ||
67 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ | 69 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ |
68 | 70 | ||
69 | struct sccb_header { | ||
70 | u16 length; | ||
71 | u8 function_code; | ||
72 | u8 control_mask[3]; | ||
73 | u16 response_code; | ||
74 | } __attribute__((packed)); | ||
75 | |||
76 | struct gds_subvector { | 71 | struct gds_subvector { |
77 | u8 length; | 72 | u8 length; |
78 | u8 key; | 73 | u8 key; |
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg); | |||
131 | int sclp_remove_processed(struct sccb_header *sccb); | 126 | int sclp_remove_processed(struct sccb_header *sccb); |
132 | int sclp_deactivate(void); | 127 | int sclp_deactivate(void); |
133 | int sclp_reactivate(void); | 128 | int sclp_reactivate(void); |
129 | int sclp_service_call(sclp_cmdw_t command, void *sccb); | ||
134 | 130 | ||
135 | /* useful inlines */ | 131 | /* useful inlines */ |
136 | 132 | ||
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 86864f641716..ead1043d788e 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c | |||
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) | |||
66 | } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); | 66 | } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline void | 69 | static void |
70 | sclp_conbuf_emit(void) | 70 | sclp_conbuf_emit(void) |
71 | { | 71 | { |
72 | struct sclp_buffer* buffer; | 72 | struct sclp_buffer* buffer; |
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 4f873ae148b7..65aa2c85737f 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c | |||
@@ -169,7 +169,7 @@ cpi_prepare_req(void) | |||
169 | } | 169 | } |
170 | 170 | ||
171 | /* prepare request data structure presented to SCLP driver */ | 171 | /* prepare request data structure presented to SCLP driver */ |
172 | req->command = SCLP_CMDW_WRITEDATA; | 172 | req->command = SCLP_CMDW_WRITE_EVENT_DATA; |
173 | req->sccb = sccb; | 173 | req->sccb = sccb; |
174 | req->status = SCLP_REQ_FILLED; | 174 | req->status = SCLP_REQ_FILLED; |
175 | req->callback = cpi_callback; | 175 | req->callback = cpi_callback; |
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c new file mode 100644 index 000000000000..7bcbe643b087 --- /dev/null +++ b/drivers/s390/char/sclp_info.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_info.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <asm/sclp.h> | ||
12 | #include "sclp.h" | ||
13 | |||
14 | struct sclp_readinfo_sccb s390_readinfo_sccb; | ||
15 | |||
16 | void __init sclp_readinfo_early(void) | ||
17 | { | ||
18 | sclp_cmdw_t command; | ||
19 | struct sccb_header *sccb; | ||
20 | int ret; | ||
21 | |||
22 | __ctl_set_bit(0, 9); /* enable service signal subclass mask */ | ||
23 | |||
24 | sccb = &s390_readinfo_sccb.header; | ||
25 | command = SCLP_CMDW_READ_SCP_INFO_FORCED; | ||
26 | while (1) { | ||
27 | u16 response; | ||
28 | |||
29 | memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb)); | ||
30 | sccb->length = sizeof(s390_readinfo_sccb); | ||
31 | sccb->control_mask[2] = 0x80; | ||
32 | |||
33 | ret = sclp_service_call(command, &s390_readinfo_sccb); | ||
34 | |||
35 | if (ret == -EIO) | ||
36 | goto out; | ||
37 | if (ret == -EBUSY) | ||
38 | continue; | ||
39 | |||
40 | __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | | ||
41 | PSW_MASK_WAIT | PSW_DEFAULT_KEY); | ||
42 | local_irq_disable(); | ||
43 | barrier(); | ||
44 | |||
45 | response = sccb->response_code; | ||
46 | |||
47 | if (response == 0x10) | ||
48 | break; | ||
49 | |||
50 | if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO) | ||
51 | break; | ||
52 | |||
53 | command = SCLP_CMDW_READ_SCP_INFO; | ||
54 | } | ||
55 | out: | ||
56 | __ctl_clear_bit(0, 9); /* disable service signal subclass mask */ | ||
57 | } | ||
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 0c92d3909cca..2486783ea58e 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer, | |||
460 | sccb->msg_buf.header.type = EvTyp_PMsgCmd; | 460 | sccb->msg_buf.header.type = EvTyp_PMsgCmd; |
461 | else | 461 | else |
462 | return -ENOSYS; | 462 | return -ENOSYS; |
463 | buffer->request.command = SCLP_CMDW_WRITEDATA; | 463 | buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; |
464 | buffer->request.status = SCLP_REQ_FILLED; | 464 | buffer->request.status = SCLP_REQ_FILLED; |
465 | buffer->request.callback = sclp_writedata_callback; | 465 | buffer->request.callback = sclp_writedata_callback; |
466 | buffer->request.callback_data = buffer; | 466 | buffer->request.callback_data = buffer; |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 2d173e5c8a09..90536f60bf50 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = { | |||
721 | .ioctl = sclp_tty_ioctl, | 721 | .ioctl = sclp_tty_ioctl, |
722 | }; | 722 | }; |
723 | 723 | ||
724 | int __init | 724 | static int __init |
725 | sclp_tty_init(void) | 725 | sclp_tty_init(void) |
726 | { | 726 | { |
727 | struct tty_driver *driver; | 727 | struct tty_driver *driver; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 723bf4191bfe..544f137d70d7 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request) | |||
207 | request->sclp_req.status = SCLP_REQ_FAILED; | 207 | request->sclp_req.status = SCLP_REQ_FAILED; |
208 | return -EIO; | 208 | return -EIO; |
209 | } | 209 | } |
210 | request->sclp_req.command = SCLP_CMDW_WRITEDATA; | 210 | request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA; |
211 | request->sclp_req.status = SCLP_REQ_FILLED; | 211 | request->sclp_req.status = SCLP_REQ_FILLED; |
212 | request->sclp_req.callback = sclp_vt220_callback; | 212 | request->sclp_req.callback = sclp_vt220_callback; |
213 | request->sclp_req.callback_data = (void *) request; | 213 | request->sclp_req.callback_data = (void *) request; |
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = { | |||
669 | /* | 669 | /* |
670 | * Register driver with SCLP and Linux and initialize internal tty structures. | 670 | * Register driver with SCLP and Linux and initialize internal tty structures. |
671 | */ | 671 | */ |
672 | int __init | 672 | static int __init |
673 | sclp_vt220_tty_init(void) | 673 | sclp_vt220_tty_init(void) |
674 | { | 674 | { |
675 | struct tty_driver *driver; | 675 | struct tty_driver *driver; |
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index c9f1c4c8bb13..bb4ff537729d 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * tape device driver for 3480/3490E/3590 tapes. | 3 | * tape device driver for 3480/3490E/3590 tapes. |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2001,2006 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -99,7 +99,11 @@ enum tape_op { | |||
99 | TO_DIS, /* Tape display */ | 99 | TO_DIS, /* Tape display */ |
100 | TO_ASSIGN, /* Assign tape to channel path */ | 100 | TO_ASSIGN, /* Assign tape to channel path */ |
101 | TO_UNASSIGN, /* Unassign tape from channel path */ | 101 | TO_UNASSIGN, /* Unassign tape from channel path */ |
102 | TO_SIZE /* #entries in tape_op_t */ | 102 | TO_CRYPT_ON, /* Enable encrpytion */ |
103 | TO_CRYPT_OFF, /* Disable encrpytion */ | ||
104 | TO_KEKL_SET, /* Set KEK label */ | ||
105 | TO_KEKL_QUERY, /* Query KEK label */ | ||
106 | TO_SIZE, /* #entries in tape_op_t */ | ||
103 | }; | 107 | }; |
104 | 108 | ||
105 | /* Forward declaration */ | 109 | /* Forward declaration */ |
@@ -112,6 +116,7 @@ enum tape_request_status { | |||
112 | TAPE_REQUEST_IN_IO, /* request is currently in IO */ | 116 | TAPE_REQUEST_IN_IO, /* request is currently in IO */ |
113 | TAPE_REQUEST_DONE, /* request is completed. */ | 117 | TAPE_REQUEST_DONE, /* request is completed. */ |
114 | TAPE_REQUEST_CANCEL, /* request should be canceled. */ | 118 | TAPE_REQUEST_CANCEL, /* request should be canceled. */ |
119 | TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */ | ||
115 | }; | 120 | }; |
116 | 121 | ||
117 | /* Tape CCW request */ | 122 | /* Tape CCW request */ |
@@ -164,10 +169,11 @@ struct tape_discipline { | |||
164 | * The discipline irq function either returns an error code (<0) which | 169 | * The discipline irq function either returns an error code (<0) which |
165 | * means that the request has failed with an error or one of the following: | 170 | * means that the request has failed with an error or one of the following: |
166 | */ | 171 | */ |
167 | #define TAPE_IO_SUCCESS 0 /* request successful */ | 172 | #define TAPE_IO_SUCCESS 0 /* request successful */ |
168 | #define TAPE_IO_PENDING 1 /* request still running */ | 173 | #define TAPE_IO_PENDING 1 /* request still running */ |
169 | #define TAPE_IO_RETRY 2 /* retry to current request */ | 174 | #define TAPE_IO_RETRY 2 /* retry to current request */ |
170 | #define TAPE_IO_STOP 3 /* stop the running request */ | 175 | #define TAPE_IO_STOP 3 /* stop the running request */ |
176 | #define TAPE_IO_LONG_BUSY 4 /* delay the running request */ | ||
171 | 177 | ||
172 | /* Char Frontend Data */ | 178 | /* Char Frontend Data */ |
173 | struct tape_char_data { | 179 | struct tape_char_data { |
@@ -242,6 +248,10 @@ struct tape_device { | |||
242 | 248 | ||
243 | /* Function to start or stop the next request later. */ | 249 | /* Function to start or stop the next request later. */ |
244 | struct delayed_work tape_dnr; | 250 | struct delayed_work tape_dnr; |
251 | |||
252 | /* Timer for long busy */ | ||
253 | struct timer_list lb_timeout; | ||
254 | |||
245 | }; | 255 | }; |
246 | 256 | ||
247 | /* Externals from tape_core.c */ | 257 | /* Externals from tape_core.c */ |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 9df912f63188..50f5edab83d7 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/char/tape_3590.c | 2 | * drivers/s390/char/tape_3590.c |
3 | * tape device discipline for 3590 tapes. | 3 | * tape device discipline for 3590 tapes. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2001,2006 | 5 | * Copyright IBM Corp. 2001,2006 |
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | 6 | * Author(s): Stefan Bader <shbader@de.ibm.com> |
7 | * Michael Holzheu <holzheu@de.ibm.com> | 7 | * Michael Holzheu <holzheu@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
14 | #include <asm/ebcdic.h> | ||
14 | 15 | ||
15 | #define TAPE_DBF_AREA tape_3590_dbf | 16 | #define TAPE_DBF_AREA tape_3590_dbf |
16 | 17 | ||
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA); | |||
30 | * - Read Device (buffered) log: BRA | 31 | * - Read Device (buffered) log: BRA |
31 | * - Read Library log: BRA | 32 | * - Read Library log: BRA |
32 | * - Swap Devices: BRA | 33 | * - Swap Devices: BRA |
33 | * - Long Busy: BRA | 34 | * - Long Busy: implemented |
34 | * - Special Intercept: BRA | 35 | * - Special Intercept: BRA |
35 | * - Read Alternate: implemented | 36 | * - Read Alternate: implemented |
36 | *******************************************************************/ | 37 | *******************************************************************/ |
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { | |||
94 | [0xae] = "Subsystem environmental alert", | 95 | [0xae] = "Subsystem environmental alert", |
95 | }; | 96 | }; |
96 | 97 | ||
98 | static int crypt_supported(struct tape_device *device) | ||
99 | { | ||
100 | return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); | ||
101 | } | ||
102 | |||
103 | static int crypt_enabled(struct tape_device *device) | ||
104 | { | ||
105 | return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); | ||
106 | } | ||
107 | |||
108 | static void ext_to_int_kekl(struct tape390_kekl *in, | ||
109 | struct tape3592_kekl *out) | ||
110 | { | ||
111 | int i; | ||
112 | |||
113 | memset(out, 0, sizeof(*out)); | ||
114 | if (in->type == TAPE390_KEKL_TYPE_HASH) | ||
115 | out->flags |= 0x40; | ||
116 | if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) | ||
117 | out->flags |= 0x80; | ||
118 | strncpy(out->label, in->label, 64); | ||
119 | for (i = strlen(in->label); i < sizeof(out->label); i++) | ||
120 | out->label[i] = ' '; | ||
121 | ASCEBC(out->label, sizeof(out->label)); | ||
122 | } | ||
123 | |||
124 | static void int_to_ext_kekl(struct tape3592_kekl *in, | ||
125 | struct tape390_kekl *out) | ||
126 | { | ||
127 | memset(out, 0, sizeof(*out)); | ||
128 | if(in->flags & 0x40) | ||
129 | out->type = TAPE390_KEKL_TYPE_HASH; | ||
130 | else | ||
131 | out->type = TAPE390_KEKL_TYPE_LABEL; | ||
132 | if(in->flags & 0x80) | ||
133 | out->type_on_tape = TAPE390_KEKL_TYPE_HASH; | ||
134 | else | ||
135 | out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; | ||
136 | memcpy(out->label, in->label, sizeof(in->label)); | ||
137 | EBCASC(out->label, sizeof(in->label)); | ||
138 | strstrip(out->label); | ||
139 | } | ||
140 | |||
141 | static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, | ||
142 | struct tape390_kekl_pair *out) | ||
143 | { | ||
144 | if (in->count == 0) { | ||
145 | out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; | ||
146 | out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; | ||
147 | out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; | ||
148 | out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; | ||
149 | } else if (in->count == 1) { | ||
150 | int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); | ||
151 | out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; | ||
152 | out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; | ||
153 | } else if (in->count == 2) { | ||
154 | int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); | ||
155 | int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); | ||
156 | } else { | ||
157 | printk("Invalid KEKL number: %d\n", in->count); | ||
158 | BUG(); | ||
159 | } | ||
160 | } | ||
161 | |||
162 | static int check_ext_kekl(struct tape390_kekl *kekl) | ||
163 | { | ||
164 | if (kekl->type == TAPE390_KEKL_TYPE_NONE) | ||
165 | goto invalid; | ||
166 | if (kekl->type > TAPE390_KEKL_TYPE_HASH) | ||
167 | goto invalid; | ||
168 | if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) | ||
169 | goto invalid; | ||
170 | if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) | ||
171 | goto invalid; | ||
172 | if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && | ||
173 | (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) | ||
174 | goto invalid; | ||
175 | |||
176 | return 0; | ||
177 | invalid: | ||
178 | return -EINVAL; | ||
179 | } | ||
180 | |||
181 | static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) | ||
182 | { | ||
183 | if (check_ext_kekl(&kekls->kekl[0])) | ||
184 | goto invalid; | ||
185 | if (check_ext_kekl(&kekls->kekl[1])) | ||
186 | goto invalid; | ||
187 | |||
188 | return 0; | ||
189 | invalid: | ||
190 | return -EINVAL; | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Query KEKLs | ||
195 | */ | ||
196 | static int tape_3592_kekl_query(struct tape_device *device, | ||
197 | struct tape390_kekl_pair *ext_kekls) | ||
198 | { | ||
199 | struct tape_request *request; | ||
200 | struct tape3592_kekl_query_order *order; | ||
201 | struct tape3592_kekl_query_data *int_kekls; | ||
202 | int rc; | ||
203 | |||
204 | DBF_EVENT(6, "tape3592_kekl_query\n"); | ||
205 | int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA); | ||
206 | if (!int_kekls) | ||
207 | return -ENOMEM; | ||
208 | request = tape_alloc_request(2, sizeof(*order)); | ||
209 | if (IS_ERR(request)) { | ||
210 | rc = PTR_ERR(request); | ||
211 | goto fail_malloc; | ||
212 | } | ||
213 | order = request->cpdata; | ||
214 | memset(order,0,sizeof(*order)); | ||
215 | order->code = 0xe2; | ||
216 | order->max_count = 2; | ||
217 | request->op = TO_KEKL_QUERY; | ||
218 | tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); | ||
219 | tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls), | ||
220 | int_kekls); | ||
221 | rc = tape_do_io(device, request); | ||
222 | if (rc) | ||
223 | goto fail_request; | ||
224 | int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls); | ||
225 | |||
226 | rc = 0; | ||
227 | fail_request: | ||
228 | tape_free_request(request); | ||
229 | fail_malloc: | ||
230 | kfree(int_kekls); | ||
231 | return rc; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * IOCTL: Query KEKLs | ||
236 | */ | ||
237 | static int tape_3592_ioctl_kekl_query(struct tape_device *device, | ||
238 | unsigned long arg) | ||
239 | { | ||
240 | int rc; | ||
241 | struct tape390_kekl_pair *ext_kekls; | ||
242 | |||
243 | DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); | ||
244 | if (!crypt_supported(device)) | ||
245 | return -ENOSYS; | ||
246 | if (!crypt_enabled(device)) | ||
247 | return -EUNATCH; | ||
248 | ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); | ||
249 | if (!ext_kekls) | ||
250 | return -ENOMEM; | ||
251 | rc = tape_3592_kekl_query(device, ext_kekls); | ||
252 | if (rc != 0) | ||
253 | goto fail; | ||
254 | if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { | ||
255 | rc = -EFAULT; | ||
256 | goto fail; | ||
257 | } | ||
258 | rc = 0; | ||
259 | fail: | ||
260 | kfree(ext_kekls); | ||
261 | return rc; | ||
262 | } | ||
263 | |||
264 | static int tape_3590_mttell(struct tape_device *device, int mt_count); | ||
265 | |||
266 | /* | ||
267 | * Set KEKLs | ||
268 | */ | ||
269 | static int tape_3592_kekl_set(struct tape_device *device, | ||
270 | struct tape390_kekl_pair *ext_kekls) | ||
271 | { | ||
272 | struct tape_request *request; | ||
273 | struct tape3592_kekl_set_order *order; | ||
274 | |||
275 | DBF_EVENT(6, "tape3592_kekl_set\n"); | ||
276 | if (check_ext_kekl_pair(ext_kekls)) { | ||
277 | DBF_EVENT(6, "invalid kekls\n"); | ||
278 | return -EINVAL; | ||
279 | } | ||
280 | if (tape_3590_mttell(device, 0) != 0) | ||
281 | return -EBADSLT; | ||
282 | request = tape_alloc_request(1, sizeof(*order)); | ||
283 | if (IS_ERR(request)) | ||
284 | return PTR_ERR(request); | ||
285 | order = request->cpdata; | ||
286 | memset(order, 0, sizeof(*order)); | ||
287 | order->code = 0xe3; | ||
288 | order->kekls.count = 2; | ||
289 | ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]); | ||
290 | ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]); | ||
291 | request->op = TO_KEKL_SET; | ||
292 | tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); | ||
293 | |||
294 | return tape_do_io_free(device, request); | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * IOCTL: Set KEKLs | ||
299 | */ | ||
300 | static int tape_3592_ioctl_kekl_set(struct tape_device *device, | ||
301 | unsigned long arg) | ||
302 | { | ||
303 | int rc; | ||
304 | struct tape390_kekl_pair *ext_kekls; | ||
305 | |||
306 | DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); | ||
307 | if (!crypt_supported(device)) | ||
308 | return -ENOSYS; | ||
309 | if (!crypt_enabled(device)) | ||
310 | return -EUNATCH; | ||
311 | ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); | ||
312 | if (!ext_kekls) | ||
313 | return -ENOMEM; | ||
314 | if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { | ||
315 | rc = -EFAULT; | ||
316 | goto out; | ||
317 | } | ||
318 | rc = tape_3592_kekl_set(device, ext_kekls); | ||
319 | out: | ||
320 | kfree(ext_kekls); | ||
321 | return rc; | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * Enable encryption | ||
326 | */ | ||
327 | static int tape_3592_enable_crypt(struct tape_device *device) | ||
328 | { | ||
329 | struct tape_request *request; | ||
330 | char *data; | ||
331 | |||
332 | DBF_EVENT(6, "tape_3592_enable_crypt\n"); | ||
333 | if (!crypt_supported(device)) | ||
334 | return -ENOSYS; | ||
335 | request = tape_alloc_request(2, 72); | ||
336 | if (IS_ERR(request)) | ||
337 | return PTR_ERR(request); | ||
338 | data = request->cpdata; | ||
339 | memset(data,0,72); | ||
340 | |||
341 | data[0] = 0x05; | ||
342 | data[36 + 0] = 0x03; | ||
343 | data[36 + 1] = 0x03; | ||
344 | data[36 + 4] = 0x40; | ||
345 | data[36 + 6] = 0x01; | ||
346 | data[36 + 14] = 0x2f; | ||
347 | data[36 + 18] = 0xc3; | ||
348 | data[36 + 35] = 0x72; | ||
349 | request->op = TO_CRYPT_ON; | ||
350 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | ||
351 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | ||
352 | return tape_do_io_free(device, request); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Disable encryption | ||
357 | */ | ||
358 | static int tape_3592_disable_crypt(struct tape_device *device) | ||
359 | { | ||
360 | struct tape_request *request; | ||
361 | char *data; | ||
362 | |||
363 | DBF_EVENT(6, "tape_3592_disable_crypt\n"); | ||
364 | if (!crypt_supported(device)) | ||
365 | return -ENOSYS; | ||
366 | request = tape_alloc_request(2, 72); | ||
367 | if (IS_ERR(request)) | ||
368 | return PTR_ERR(request); | ||
369 | data = request->cpdata; | ||
370 | memset(data,0,72); | ||
371 | |||
372 | data[0] = 0x05; | ||
373 | data[36 + 0] = 0x03; | ||
374 | data[36 + 1] = 0x03; | ||
375 | data[36 + 35] = 0x32; | ||
376 | |||
377 | request->op = TO_CRYPT_OFF; | ||
378 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | ||
379 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | ||
380 | |||
381 | return tape_do_io_free(device, request); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * IOCTL: Set encryption status | ||
386 | */ | ||
387 | static int tape_3592_ioctl_crypt_set(struct tape_device *device, | ||
388 | unsigned long arg) | ||
389 | { | ||
390 | struct tape390_crypt_info info; | ||
391 | |||
392 | DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); | ||
393 | if (!crypt_supported(device)) | ||
394 | return -ENOSYS; | ||
395 | if (copy_from_user(&info, (char __user *)arg, sizeof(info))) | ||
396 | return -EFAULT; | ||
397 | if (info.status & ~TAPE390_CRYPT_ON_MASK) | ||
398 | return -EINVAL; | ||
399 | if (info.status & TAPE390_CRYPT_ON_MASK) | ||
400 | return tape_3592_enable_crypt(device); | ||
401 | else | ||
402 | return tape_3592_disable_crypt(device); | ||
403 | } | ||
404 | |||
405 | static int tape_3590_sense_medium(struct tape_device *device); | ||
406 | |||
407 | /* | ||
408 | * IOCTL: Query enryption status | ||
409 | */ | ||
410 | static int tape_3592_ioctl_crypt_query(struct tape_device *device, | ||
411 | unsigned long arg) | ||
412 | { | ||
413 | DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); | ||
414 | if (!crypt_supported(device)) | ||
415 | return -ENOSYS; | ||
416 | tape_3590_sense_medium(device); | ||
417 | if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), | ||
418 | sizeof(TAPE_3590_CRYPT_INFO(device)))) | ||
419 | return -EFAULT; | ||
420 | else | ||
421 | return 0; | ||
422 | } | ||
423 | |||
97 | /* | 424 | /* |
98 | * 3590 IOCTL Overload | 425 | * 3590 IOCTL Overload |
99 | */ | 426 | */ |
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) | |||
109 | 436 | ||
110 | return tape_std_display(device, &disp); | 437 | return tape_std_display(device, &disp); |
111 | } | 438 | } |
439 | case TAPE390_KEKL_SET: | ||
440 | return tape_3592_ioctl_kekl_set(device, arg); | ||
441 | case TAPE390_KEKL_QUERY: | ||
442 | return tape_3592_ioctl_kekl_query(device, arg); | ||
443 | case TAPE390_CRYPT_SET: | ||
444 | return tape_3592_ioctl_crypt_set(device, arg); | ||
445 | case TAPE390_CRYPT_QUERY: | ||
446 | return tape_3592_ioctl_crypt_query(device, arg); | ||
112 | default: | 447 | default: |
113 | return -EINVAL; /* no additional ioctls */ | 448 | return -EINVAL; /* no additional ioctls */ |
114 | } | 449 | } |
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work) | |||
248 | case TO_READ_ATTMSG: | 583 | case TO_READ_ATTMSG: |
249 | tape_3590_read_attmsg(p->device); | 584 | tape_3590_read_attmsg(p->device); |
250 | break; | 585 | break; |
586 | case TO_CRYPT_ON: | ||
587 | tape_3592_enable_crypt(p->device); | ||
588 | break; | ||
589 | case TO_CRYPT_OFF: | ||
590 | tape_3592_disable_crypt(p->device); | ||
591 | break; | ||
251 | default: | 592 | default: |
252 | DBF_EVENT(3, "T3590: work handler undefined for " | 593 | DBF_EVENT(3, "T3590: work handler undefined for " |
253 | "operation 0x%02x\n", p->op); | 594 | "operation 0x%02x\n", p->op); |
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) | |||
365 | } | 706 | } |
366 | #endif | 707 | #endif |
367 | 708 | ||
709 | static void tape_3590_med_state_set(struct tape_device *device, | ||
710 | struct tape_3590_med_sense *sense) | ||
711 | { | ||
712 | struct tape390_crypt_info *c_info; | ||
713 | |||
714 | c_info = &TAPE_3590_CRYPT_INFO(device); | ||
715 | |||
716 | if (sense->masst == MSENSE_UNASSOCIATED) { | ||
717 | tape_med_state_set(device, MS_UNLOADED); | ||
718 | TAPE_3590_CRYPT_INFO(device).medium_status = 0; | ||
719 | return; | ||
720 | } | ||
721 | if (sense->masst != MSENSE_ASSOCIATED_MOUNT) { | ||
722 | PRINT_ERR("Unknown medium state: %x\n", sense->masst); | ||
723 | return; | ||
724 | } | ||
725 | tape_med_state_set(device, MS_LOADED); | ||
726 | c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; | ||
727 | if (sense->flags & MSENSE_CRYPT_MASK) { | ||
728 | PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags); | ||
729 | c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; | ||
730 | } else { | ||
731 | DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); | ||
732 | c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; | ||
733 | } | ||
734 | } | ||
735 | |||
368 | /* | 736 | /* |
369 | * The done handler is called at device/channel end and wakes up the sleeping | 737 | * The done handler is called at device/channel end and wakes up the sleeping |
370 | * process | 738 | * process |
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) | |||
372 | static int | 740 | static int |
373 | tape_3590_done(struct tape_device *device, struct tape_request *request) | 741 | tape_3590_done(struct tape_device *device, struct tape_request *request) |
374 | { | 742 | { |
375 | struct tape_3590_med_sense *sense; | 743 | struct tape_3590_disc_data *disc_data; |
376 | 744 | ||
377 | DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); | 745 | DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); |
746 | disc_data = device->discdata; | ||
378 | 747 | ||
379 | switch (request->op) { | 748 | switch (request->op) { |
380 | case TO_BSB: | 749 | case TO_BSB: |
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) | |||
394 | break; | 763 | break; |
395 | case TO_RUN: | 764 | case TO_RUN: |
396 | tape_med_state_set(device, MS_UNLOADED); | 765 | tape_med_state_set(device, MS_UNLOADED); |
766 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
397 | break; | 767 | break; |
398 | case TO_MSEN: | 768 | case TO_MSEN: |
399 | sense = (struct tape_3590_med_sense *) request->cpdata; | 769 | tape_3590_med_state_set(device, request->cpdata); |
400 | if (sense->masst == MSENSE_UNASSOCIATED) | 770 | break; |
401 | tape_med_state_set(device, MS_UNLOADED); | 771 | case TO_CRYPT_ON: |
402 | if (sense->masst == MSENSE_ASSOCIATED_MOUNT) | 772 | TAPE_3590_CRYPT_INFO(device).status |
403 | tape_med_state_set(device, MS_LOADED); | 773 | |= TAPE390_CRYPT_ON_MASK; |
774 | *(device->modeset_byte) |= 0x03; | ||
775 | break; | ||
776 | case TO_CRYPT_OFF: | ||
777 | TAPE_3590_CRYPT_INFO(device).status | ||
778 | &= ~TAPE390_CRYPT_ON_MASK; | ||
779 | *(device->modeset_byte) &= ~0x03; | ||
404 | break; | 780 | break; |
405 | case TO_RBI: /* RBI seems to succeed even without medium loaded. */ | 781 | case TO_RBI: /* RBI seems to succeed even without medium loaded. */ |
406 | case TO_NOP: /* Same to NOP. */ | 782 | case TO_NOP: /* Same to NOP. */ |
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) | |||
409 | case TO_DIS: | 785 | case TO_DIS: |
410 | case TO_ASSIGN: | 786 | case TO_ASSIGN: |
411 | case TO_UNASSIGN: | 787 | case TO_UNASSIGN: |
412 | break; | ||
413 | case TO_SIZE: | 788 | case TO_SIZE: |
789 | case TO_KEKL_SET: | ||
790 | case TO_KEKL_QUERY: | ||
414 | break; | 791 | break; |
415 | } | 792 | } |
416 | return TAPE_IO_SUCCESS; | 793 | return TAPE_IO_SUCCESS; |
@@ -540,10 +917,8 @@ static int | |||
540 | tape_3590_erp_long_busy(struct tape_device *device, | 917 | tape_3590_erp_long_busy(struct tape_device *device, |
541 | struct tape_request *request, struct irb *irb) | 918 | struct tape_request *request, struct irb *irb) |
542 | { | 919 | { |
543 | /* FIXME: how about WAITING for a minute ? */ | 920 | DBF_EVENT(6, "Device is busy\n"); |
544 | PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", | 921 | return TAPE_IO_LONG_BUSY; |
545 | device->cdev->dev.bus_id); | ||
546 | return tape_3590_erp_basic(device, request, irb, -EBUSY); | ||
547 | } | 922 | } |
548 | 923 | ||
549 | /* | 924 | /* |
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) | |||
951 | device->cdev->dev.bus_id, sense->mc); | 1326 | device->cdev->dev.bus_id, sense->mc); |
952 | } | 1327 | } |
953 | 1328 | ||
1329 | static int tape_3590_crypt_error(struct tape_device *device, | ||
1330 | struct tape_request *request, struct irb *irb) | ||
1331 | { | ||
1332 | u8 cu_rc, ekm_rc1; | ||
1333 | u16 ekm_rc2; | ||
1334 | u32 drv_rc; | ||
1335 | char *bus_id, *sense; | ||
1336 | |||
1337 | sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; | ||
1338 | bus_id = device->cdev->dev.bus_id; | ||
1339 | cu_rc = sense[0]; | ||
1340 | drv_rc = *((u32*) &sense[5]) & 0xffffff; | ||
1341 | ekm_rc1 = sense[9]; | ||
1342 | ekm_rc2 = *((u16*) &sense[10]); | ||
1343 | if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) | ||
1344 | /* key not defined on EKM */ | ||
1345 | return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); | ||
1346 | if ((cu_rc == 1) || (cu_rc == 2)) | ||
1347 | /* No connection to EKM */ | ||
1348 | return tape_3590_erp_basic(device, request, irb, -ENOTCONN); | ||
1349 | |||
1350 | PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id); | ||
1351 | PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc, | ||
1352 | drv_rc, ekm_rc1, ekm_rc2); | ||
1353 | |||
1354 | return tape_3590_erp_basic(device, request, irb, -ENOKEY); | ||
1355 | } | ||
1356 | |||
954 | /* | 1357 | /* |
955 | * 3590 error Recovery routine: | 1358 | * 3590 error Recovery routine: |
956 | * If possible, it tries to recover from the error. If this is not possible, | 1359 | * If possible, it tries to recover from the error. If this is not possible, |
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
979 | 1382 | ||
980 | sense = (struct tape_3590_sense *) irb->ecw; | 1383 | sense = (struct tape_3590_sense *) irb->ecw; |
981 | 1384 | ||
1385 | DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); | ||
1386 | |||
982 | /* | 1387 | /* |
983 | * First check all RC-QRCs where we want to do something special | 1388 | * First check all RC-QRCs where we want to do something special |
984 | * - "break": basic error recovery is done | 1389 | * - "break": basic error recovery is done |
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
999 | case 0x2231: | 1404 | case 0x2231: |
1000 | tape_3590_print_era_msg(device, irb); | 1405 | tape_3590_print_era_msg(device, irb); |
1001 | return tape_3590_erp_special_interrupt(device, request, irb); | 1406 | return tape_3590_erp_special_interrupt(device, request, irb); |
1407 | case 0x2240: | ||
1408 | return tape_3590_crypt_error(device, request, irb); | ||
1002 | 1409 | ||
1003 | case 0x3010: | 1410 | case 0x3010: |
1004 | DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", | 1411 | DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", |
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1020 | DBF_EVENT(2, "(%08x): Rewind Unload complete\n", | 1427 | DBF_EVENT(2, "(%08x): Rewind Unload complete\n", |
1021 | device->cdev_id); | 1428 | device->cdev_id); |
1022 | tape_med_state_set(device, MS_UNLOADED); | 1429 | tape_med_state_set(device, MS_UNLOADED); |
1430 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
1023 | return tape_3590_erp_basic(device, request, irb, 0); | 1431 | return tape_3590_erp_basic(device, request, irb, 0); |
1024 | 1432 | ||
1025 | case 0x4010: | 1433 | case 0x4010: |
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1030 | PRINT_WARN("(%s): Tape operation when medium not loaded\n", | 1438 | PRINT_WARN("(%s): Tape operation when medium not loaded\n", |
1031 | device->cdev->dev.bus_id); | 1439 | device->cdev->dev.bus_id); |
1032 | tape_med_state_set(device, MS_UNLOADED); | 1440 | tape_med_state_set(device, MS_UNLOADED); |
1441 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
1033 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); | 1442 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); |
1034 | case 0x4012: /* Device Long Busy */ | 1443 | case 0x4012: /* Device Long Busy */ |
1444 | /* XXX: Also use long busy handling here? */ | ||
1445 | DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); | ||
1035 | tape_3590_print_era_msg(device, irb); | 1446 | tape_3590_print_era_msg(device, irb); |
1447 | return tape_3590_erp_basic(device, request, irb, -EBUSY); | ||
1448 | case 0x4014: | ||
1449 | DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); | ||
1036 | return tape_3590_erp_long_busy(device, request, irb); | 1450 | return tape_3590_erp_long_busy(device, request, irb); |
1037 | 1451 | ||
1038 | case 0x5010: | 1452 | case 0x5010: |
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1064 | case 0x5120: | 1478 | case 0x5120: |
1065 | case 0x1120: | 1479 | case 0x1120: |
1066 | tape_med_state_set(device, MS_UNLOADED); | 1480 | tape_med_state_set(device, MS_UNLOADED); |
1481 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
1067 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); | 1482 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); |
1068 | 1483 | ||
1069 | case 0x6020: | 1484 | case 0x6020: |
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device) | |||
1142 | { | 1557 | { |
1143 | int rc; | 1558 | int rc; |
1144 | struct tape_3590_disc_data *data; | 1559 | struct tape_3590_disc_data *data; |
1560 | char *rdc_data; | ||
1145 | 1561 | ||
1146 | DBF_EVENT(6, "3590 device setup\n"); | 1562 | DBF_EVENT(6, "3590 device setup\n"); |
1147 | data = kmalloc(sizeof(struct tape_3590_disc_data), | 1563 | data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA); |
1148 | GFP_KERNEL | GFP_DMA); | ||
1149 | if (data == NULL) | 1564 | if (data == NULL) |
1150 | return -ENOMEM; | 1565 | return -ENOMEM; |
1151 | data->read_back_op = READ_PREVIOUS; | 1566 | data->read_back_op = READ_PREVIOUS; |
1152 | device->discdata = data; | 1567 | device->discdata = data; |
1153 | 1568 | ||
1154 | if ((rc = tape_std_assign(device)) == 0) { | 1569 | rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA); |
1155 | /* Try to find out if medium is loaded */ | 1570 | if (!rdc_data) { |
1156 | if ((rc = tape_3590_sense_medium(device)) != 0) | 1571 | rc = -ENOMEM; |
1157 | DBF_LH(3, "3590 medium sense returned %d\n", rc); | 1572 | goto fail_kmalloc; |
1573 | } | ||
1574 | rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64); | ||
1575 | if (rc) { | ||
1576 | DBF_LH(3, "Read device characteristics failed!\n"); | ||
1577 | goto fail_kmalloc; | ||
1578 | } | ||
1579 | rc = tape_std_assign(device); | ||
1580 | if (rc) | ||
1581 | goto fail_rdc_data; | ||
1582 | if (rdc_data[31] == 0x13) { | ||
1583 | PRINT_INFO("Device has crypto support\n"); | ||
1584 | data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; | ||
1585 | tape_3592_disable_crypt(device); | ||
1586 | } else { | ||
1587 | DBF_EVENT(6, "Device has NO crypto support\n"); | ||
1158 | } | 1588 | } |
1589 | /* Try to find out if medium is loaded */ | ||
1590 | rc = tape_3590_sense_medium(device); | ||
1591 | if (rc) { | ||
1592 | DBF_LH(3, "3590 medium sense returned %d\n", rc); | ||
1593 | goto fail_rdc_data; | ||
1594 | } | ||
1595 | return 0; | ||
1159 | 1596 | ||
1597 | fail_rdc_data: | ||
1598 | kfree(rdc_data); | ||
1599 | fail_kmalloc: | ||
1600 | kfree(data); | ||
1160 | return rc; | 1601 | return rc; |
1161 | } | 1602 | } |
1162 | 1603 | ||
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h index cf274b9445a6..aa5138807af1 100644 --- a/drivers/s390/char/tape_3590.h +++ b/drivers/s390/char/tape_3590.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/char/tape_3590.h | 2 | * drivers/s390/char/tape_3590.h |
3 | * tape device discipline for 3590 tapes. | 3 | * tape device discipline for 3590 tapes. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2001,2006 | 5 | * Copyright IBM Corp. 2001,2006 |
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | 6 | * Author(s): Stefan Bader <shbader@de.ibm.com> |
7 | * Michael Holzheu <holzheu@de.ibm.com> | 7 | * Michael Holzheu <holzheu@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -38,16 +38,22 @@ | |||
38 | #define MSENSE_UNASSOCIATED 0x00 | 38 | #define MSENSE_UNASSOCIATED 0x00 |
39 | #define MSENSE_ASSOCIATED_MOUNT 0x01 | 39 | #define MSENSE_ASSOCIATED_MOUNT 0x01 |
40 | #define MSENSE_ASSOCIATED_UMOUNT 0x02 | 40 | #define MSENSE_ASSOCIATED_UMOUNT 0x02 |
41 | #define MSENSE_CRYPT_MASK 0x00000010 | ||
41 | 42 | ||
42 | #define TAPE_3590_MAX_MSG 0xb0 | 43 | #define TAPE_3590_MAX_MSG 0xb0 |
43 | 44 | ||
44 | /* Datatypes */ | 45 | /* Datatypes */ |
45 | 46 | ||
46 | struct tape_3590_disc_data { | 47 | struct tape_3590_disc_data { |
47 | unsigned char modeset_byte; | 48 | struct tape390_crypt_info crypt_info; |
48 | int read_back_op; | 49 | int read_back_op; |
49 | }; | 50 | }; |
50 | 51 | ||
52 | #define TAPE_3590_CRYPT_INFO(device) \ | ||
53 | ((struct tape_3590_disc_data*)(device->discdata))->crypt_info | ||
54 | #define TAPE_3590_READ_BACK_OP(device) \ | ||
55 | ((struct tape_3590_disc_data*)(device->discdata))->read_back_op | ||
56 | |||
51 | struct tape_3590_sense { | 57 | struct tape_3590_sense { |
52 | 58 | ||
53 | unsigned int command_rej:1; | 59 | unsigned int command_rej:1; |
@@ -118,7 +124,48 @@ struct tape_3590_sense { | |||
118 | struct tape_3590_med_sense { | 124 | struct tape_3590_med_sense { |
119 | unsigned int macst:4; | 125 | unsigned int macst:4; |
120 | unsigned int masst:4; | 126 | unsigned int masst:4; |
121 | char pad[127]; | 127 | char pad1[7]; |
128 | unsigned int flags; | ||
129 | char pad2[116]; | ||
130 | } __attribute__ ((packed)); | ||
131 | |||
132 | /* Datastructures for 3592 encryption support */ | ||
133 | |||
134 | struct tape3592_kekl { | ||
135 | __u8 flags; | ||
136 | char label[64]; | ||
137 | } __attribute__ ((packed)); | ||
138 | |||
139 | struct tape3592_kekl_pair { | ||
140 | __u8 count; | ||
141 | struct tape3592_kekl kekl[2]; | ||
142 | } __attribute__ ((packed)); | ||
143 | |||
144 | struct tape3592_kekl_query_data { | ||
145 | __u16 len; | ||
146 | __u8 fmt; | ||
147 | __u8 mc; | ||
148 | __u32 id; | ||
149 | __u8 flags; | ||
150 | struct tape3592_kekl_pair kekls; | ||
151 | char reserved[116]; | ||
152 | } __attribute__ ((packed)); | ||
153 | |||
154 | struct tape3592_kekl_query_order { | ||
155 | __u8 code; | ||
156 | __u8 flags; | ||
157 | char reserved1[2]; | ||
158 | __u8 max_count; | ||
159 | char reserved2[35]; | ||
160 | } __attribute__ ((packed)); | ||
161 | |||
162 | struct tape3592_kekl_set_order { | ||
163 | __u8 code; | ||
164 | __u8 flags; | ||
165 | char reserved1[2]; | ||
166 | __u8 op; | ||
167 | struct tape3592_kekl_pair kekls; | ||
168 | char reserved2[120]; | ||
122 | } __attribute__ ((packed)); | 169 | } __attribute__ ((packed)); |
123 | 170 | ||
124 | #endif /* _TAPE_3590_H */ | 171 | #endif /* _TAPE_3590_H */ |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index c8a89b3b87d4..dd0ecaed592e 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device) | |||
73 | /* | 73 | /* |
74 | * Post finished request. | 74 | * Post finished request. |
75 | */ | 75 | */ |
76 | static inline void | 76 | static void |
77 | tapeblock_end_request(struct request *req, int uptodate) | 77 | tapeblock_end_request(struct request *req, int uptodate) |
78 | { | 78 | { |
79 | if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) | 79 | if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) |
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data) | |||
108 | /* | 108 | /* |
109 | * Feed the tape device CCW queue with requests supplied in a list. | 109 | * Feed the tape device CCW queue with requests supplied in a list. |
110 | */ | 110 | */ |
111 | static inline int | 111 | static int |
112 | tapeblock_start_request(struct tape_device *device, struct request *req) | 112 | tapeblock_start_request(struct tape_device *device, struct request *req) |
113 | { | 113 | { |
114 | struct tape_request * ccw_req; | 114 | struct tape_request * ccw_req; |
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 31198c8f2718..9faea04e11e9 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * character device frontend for tape device driver | 3 | * character device frontend for tape device driver |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2001,2006 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Michael Holzheu <holzheu@de.ibm.com> | 8 | * Michael Holzheu <holzheu@de.ibm.com> |
9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device) | |||
89 | device->nt = NULL; | 89 | device->nt = NULL; |
90 | } | 90 | } |
91 | 91 | ||
92 | /* | 92 | static int |
93 | * Terminate write command (we write two TMs and skip backward over last) | ||
94 | * This ensures that the tape is always correctly terminated. | ||
95 | * When the user writes afterwards a new file, he will overwrite the | ||
96 | * second TM and therefore one TM will remain to separate the | ||
97 | * two files on the tape... | ||
98 | */ | ||
99 | static inline void | ||
100 | tapechar_terminate_write(struct tape_device *device) | ||
101 | { | ||
102 | if (tape_mtop(device, MTWEOF, 1) == 0 && | ||
103 | tape_mtop(device, MTWEOF, 1) == 0) | ||
104 | tape_mtop(device, MTBSR, 1); | ||
105 | } | ||
106 | |||
107 | static inline int | ||
108 | tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) | 93 | tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) |
109 | { | 94 | { |
110 | struct idal_buffer *new; | 95 | struct idal_buffer *new; |
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) | |||
137 | /* | 122 | /* |
138 | * Tape device read function | 123 | * Tape device read function |
139 | */ | 124 | */ |
140 | ssize_t | 125 | static ssize_t |
141 | tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) | 126 | tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) |
142 | { | 127 | { |
143 | struct tape_device *device; | 128 | struct tape_device *device; |
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) | |||
201 | /* | 186 | /* |
202 | * Tape device write function | 187 | * Tape device write function |
203 | */ | 188 | */ |
204 | ssize_t | 189 | static ssize_t |
205 | tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) | 190 | tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) |
206 | { | 191 | { |
207 | struct tape_device *device; | 192 | struct tape_device *device; |
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t | |||
291 | /* | 276 | /* |
292 | * Character frontend tape device open function. | 277 | * Character frontend tape device open function. |
293 | */ | 278 | */ |
294 | int | 279 | static int |
295 | tapechar_open (struct inode *inode, struct file *filp) | 280 | tapechar_open (struct inode *inode, struct file *filp) |
296 | { | 281 | { |
297 | struct tape_device *device; | 282 | struct tape_device *device; |
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp) | |||
326 | * Character frontend tape device release function. | 311 | * Character frontend tape device release function. |
327 | */ | 312 | */ |
328 | 313 | ||
329 | int | 314 | static int |
330 | tapechar_release(struct inode *inode, struct file *filp) | 315 | tapechar_release(struct inode *inode, struct file *filp) |
331 | { | 316 | { |
332 | struct tape_device *device; | 317 | struct tape_device *device; |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index c6c2e918b990..e2a8a1a04bab 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * basic function of the tape device driver | 3 | * basic function of the tape device driver |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2001,2006 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Michael Holzheu <holzheu@de.ibm.com> | 8 | * Michael Holzheu <holzheu@de.ibm.com> |
9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
@@ -26,9 +26,11 @@ | |||
26 | #include "tape_std.h" | 26 | #include "tape_std.h" |
27 | 27 | ||
28 | #define PRINTK_HEADER "TAPE_CORE: " | 28 | #define PRINTK_HEADER "TAPE_CORE: " |
29 | #define LONG_BUSY_TIMEOUT 180 /* seconds */ | ||
29 | 30 | ||
30 | static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); | 31 | static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); |
31 | static void tape_delayed_next_request(struct work_struct *); | 32 | static void tape_delayed_next_request(struct work_struct *); |
33 | static void tape_long_busy_timeout(unsigned long data); | ||
32 | 34 | ||
33 | /* | 35 | /* |
34 | * One list to contain all tape devices of all disciplines, so | 36 | * One list to contain all tape devices of all disciplines, so |
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] = | |||
69 | [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", | 71 | [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", |
70 | [TO_READ_ATTMSG] = "RAT", | 72 | [TO_READ_ATTMSG] = "RAT", |
71 | [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", | 73 | [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", |
72 | [TO_UNASSIGN] = "UAS" | 74 | [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON", |
75 | [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS", | ||
76 | [TO_KEKL_QUERY] = "KLQ", | ||
73 | }; | 77 | }; |
74 | 78 | ||
75 | static inline int | 79 | static int |
76 | busid_to_int(char *bus_id) | 80 | busid_to_int(char *bus_id) |
77 | { | 81 | { |
78 | int dec; | 82 | int dec; |
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) | |||
252 | /* | 256 | /* |
253 | * Stop running ccw. Has to be called with the device lock held. | 257 | * Stop running ccw. Has to be called with the device lock held. |
254 | */ | 258 | */ |
255 | static inline int | 259 | static int |
256 | __tape_cancel_io(struct tape_device *device, struct tape_request *request) | 260 | __tape_cancel_io(struct tape_device *device, struct tape_request *request) |
257 | { | 261 | { |
258 | int retries; | 262 | int retries; |
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device, | |||
346 | return -EINVAL; | 350 | return -EINVAL; |
347 | } | 351 | } |
348 | 352 | ||
353 | init_timer(&device->lb_timeout); | ||
354 | device->lb_timeout.function = tape_long_busy_timeout; | ||
355 | |||
349 | /* Let the discipline have a go at the device. */ | 356 | /* Let the discipline have a go at the device. */ |
350 | device->discipline = discipline; | 357 | device->discipline = discipline; |
351 | if (!try_module_get(discipline->owner)) { | 358 | if (!try_module_get(discipline->owner)) { |
@@ -385,7 +392,7 @@ out: | |||
385 | return rc; | 392 | return rc; |
386 | } | 393 | } |
387 | 394 | ||
388 | static inline void | 395 | static void |
389 | tape_cleanup_device(struct tape_device *device) | 396 | tape_cleanup_device(struct tape_device *device) |
390 | { | 397 | { |
391 | tapeblock_cleanup_device(device); | 398 | tapeblock_cleanup_device(device); |
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev) | |||
563 | return ret; | 570 | return ret; |
564 | } | 571 | } |
565 | 572 | ||
566 | static inline void | 573 | static void |
567 | __tape_discard_requests(struct tape_device *device) | 574 | __tape_discard_requests(struct tape_device *device) |
568 | { | 575 | { |
569 | struct tape_request * request; | 576 | struct tape_request * request; |
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request) | |||
703 | kfree(request); | 710 | kfree(request); |
704 | } | 711 | } |
705 | 712 | ||
706 | static inline int | 713 | static int |
707 | __tape_start_io(struct tape_device *device, struct tape_request *request) | 714 | __tape_start_io(struct tape_device *device, struct tape_request *request) |
708 | { | 715 | { |
709 | int rc; | 716 | int rc; |
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) | |||
733 | return rc; | 740 | return rc; |
734 | } | 741 | } |
735 | 742 | ||
736 | static inline void | 743 | static void |
737 | __tape_start_next_request(struct tape_device *device) | 744 | __tape_start_next_request(struct tape_device *device) |
738 | { | 745 | { |
739 | struct list_head *l, *n; | 746 | struct list_head *l, *n; |
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work) | |||
801 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 808 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
802 | } | 809 | } |
803 | 810 | ||
804 | static inline void | 811 | static void tape_long_busy_timeout(unsigned long data) |
812 | { | ||
813 | struct tape_request *request; | ||
814 | struct tape_device *device; | ||
815 | |||
816 | device = (struct tape_device *) data; | ||
817 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
818 | request = list_entry(device->req_queue.next, struct tape_request, list); | ||
819 | if (request->status != TAPE_REQUEST_LONG_BUSY) | ||
820 | BUG(); | ||
821 | DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); | ||
822 | __tape_start_next_request(device); | ||
823 | device->lb_timeout.data = (unsigned long) tape_put_device(device); | ||
824 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
825 | } | ||
826 | |||
827 | static void | ||
805 | __tape_end_request( | 828 | __tape_end_request( |
806 | struct tape_device * device, | 829 | struct tape_device * device, |
807 | struct tape_request * request, | 830 | struct tape_request * request, |
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, | |||
878 | * and starts it if the tape is idle. Has to be called with | 901 | * and starts it if the tape is idle. Has to be called with |
879 | * the device lock held. | 902 | * the device lock held. |
880 | */ | 903 | */ |
881 | static inline int | 904 | static int |
882 | __tape_start_request(struct tape_device *device, struct tape_request *request) | 905 | __tape_start_request(struct tape_device *device, struct tape_request *request) |
883 | { | 906 | { |
884 | int rc; | 907 | int rc; |
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1094 | /* May be an unsolicited irq */ | 1117 | /* May be an unsolicited irq */ |
1095 | if(request != NULL) | 1118 | if(request != NULL) |
1096 | request->rescnt = irb->scsw.count; | 1119 | request->rescnt = irb->scsw.count; |
1097 | 1120 | else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && | |
1121 | !list_empty(&device->req_queue)) { | ||
1122 | /* Not Ready to Ready after long busy ? */ | ||
1123 | struct tape_request *req; | ||
1124 | req = list_entry(device->req_queue.next, | ||
1125 | struct tape_request, list); | ||
1126 | if (req->status == TAPE_REQUEST_LONG_BUSY) { | ||
1127 | DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); | ||
1128 | if (del_timer(&device->lb_timeout)) { | ||
1129 | device->lb_timeout.data = (unsigned long) | ||
1130 | tape_put_device(device); | ||
1131 | __tape_start_next_request(device); | ||
1132 | } | ||
1133 | return; | ||
1134 | } | ||
1135 | } | ||
1098 | if (irb->scsw.dstat != 0x0c) { | 1136 | if (irb->scsw.dstat != 0x0c) { |
1099 | /* Set the 'ONLINE' flag depending on sense byte 1 */ | 1137 | /* Set the 'ONLINE' flag depending on sense byte 1 */ |
1100 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) | 1138 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) |
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1142 | break; | 1180 | break; |
1143 | case TAPE_IO_PENDING: | 1181 | case TAPE_IO_PENDING: |
1144 | break; | 1182 | break; |
1183 | case TAPE_IO_LONG_BUSY: | ||
1184 | device->lb_timeout.data = | ||
1185 | (unsigned long)tape_get_device_reference(device); | ||
1186 | device->lb_timeout.expires = jiffies + | ||
1187 | LONG_BUSY_TIMEOUT * HZ; | ||
1188 | DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); | ||
1189 | add_timer(&device->lb_timeout); | ||
1190 | request->status = TAPE_REQUEST_LONG_BUSY; | ||
1191 | break; | ||
1145 | case TAPE_IO_RETRY: | 1192 | case TAPE_IO_RETRY: |
1146 | rc = __tape_start_io(device, request); | 1193 | rc = __tape_start_io(device, request); |
1147 | if (rc) | 1194 | if (rc) |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 09844621edc0..bc33068b9ce2 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -36,7 +36,7 @@ | |||
36 | struct tty_driver *tty3270_driver; | 36 | struct tty_driver *tty3270_driver; |
37 | static int tty3270_max_index; | 37 | static int tty3270_max_index; |
38 | 38 | ||
39 | struct raw3270_fn tty3270_fn; | 39 | static struct raw3270_fn tty3270_fn; |
40 | 40 | ||
41 | struct tty3270_cell { | 41 | struct tty3270_cell { |
42 | unsigned char character; | 42 | unsigned char character; |
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *); | |||
119 | /* | 119 | /* |
120 | * Setup timeout for a device. On timeout trigger an update. | 120 | * Setup timeout for a device. On timeout trigger an update. |
121 | */ | 121 | */ |
122 | void | 122 | static void tty3270_set_timer(struct tty3270 *tp, int expires) |
123 | tty3270_set_timer(struct tty3270 *tp, int expires) | ||
124 | { | 123 | { |
125 | if (expires == 0) { | 124 | if (expires == 0) { |
126 | if (timer_pending(&tp->timer) && del_timer(&tp->timer)) | 125 | if (timer_pending(&tp->timer) && del_timer(&tp->timer)) |
@@ -841,7 +840,7 @@ tty3270_del_views(void) | |||
841 | } | 840 | } |
842 | } | 841 | } |
843 | 842 | ||
844 | struct raw3270_fn tty3270_fn = { | 843 | static struct raw3270_fn tty3270_fn = { |
845 | .activate = tty3270_activate, | 844 | .activate = tty3270_activate, |
846 | .deactivate = tty3270_deactivate, | 845 | .deactivate = tty3270_deactivate, |
847 | .intv = (void *) tty3270_irq, | 846 | .intv = (void *) tty3270_irq, |
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = { | |||
1754 | .set_termios = tty3270_set_termios | 1753 | .set_termios = tty3270_set_termios |
1755 | }; | 1754 | }; |
1756 | 1755 | ||
1757 | void | 1756 | static void tty3270_notifier(int index, int active) |
1758 | tty3270_notifier(int index, int active) | ||
1759 | { | 1757 | { |
1760 | if (active) | 1758 | if (active) |
1761 | tty_register_device(tty3270_driver, index, NULL); | 1759 | tty_register_device(tty3270_driver, index, NULL); |
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active) | |||
1767 | * 3270 tty registration code called from tty_init(). | 1765 | * 3270 tty registration code called from tty_init(). |
1768 | * Most kernel services (incl. kmalloc) are available at this poimt. | 1766 | * Most kernel services (incl. kmalloc) are available at this poimt. |
1769 | */ | 1767 | */ |
1770 | int __init | 1768 | static int __init tty3270_init(void) |
1771 | tty3270_init(void) | ||
1772 | { | 1769 | { |
1773 | struct tty_driver *driver; | 1770 | struct tty_driver *driver; |
1774 | int ret; | 1771 | int ret; |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 6cb23040954b..8432a76b961e 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * character device driver for reading z/VM system service records | 3 | * character device driver for reading z/VM system service records |
4 | * | 4 | * |
5 | * | 5 | * |
6 | * Copyright (C) 2004 IBM Corporation | 6 | * Copyright 2004 IBM Corporation |
7 | * character device driver for reading z/VM system service records, | 7 | * character device driver for reading z/VM system service records, |
8 | * Version 1.0 | 8 | * Version 1.0 |
9 | * Author(s): Xenia Tkatschow <xenia@us.ibm.com> | 9 | * Author(s): Xenia Tkatschow <xenia@us.ibm.com> |
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/cpcmd.h> | 21 | #include <asm/cpcmd.h> |
22 | #include <asm/debug.h> | 22 | #include <asm/debug.h> |
23 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
24 | #include "../net/iucv.h" | 24 | #include <net/iucv/iucv.h> |
25 | #include <linux/kmod.h> | 25 | #include <linux/kmod.h> |
26 | #include <linux/cdev.h> | 26 | #include <linux/cdev.h> |
27 | #include <linux/device.h> | 27 | #include <linux/device.h> |
@@ -60,12 +60,11 @@ struct vmlogrdr_priv_t { | |||
60 | char system_service[8]; | 60 | char system_service[8]; |
61 | char internal_name[8]; | 61 | char internal_name[8]; |
62 | char recording_name[8]; | 62 | char recording_name[8]; |
63 | u16 pathid; | 63 | struct iucv_path *path; |
64 | int connection_established; | 64 | int connection_established; |
65 | int iucv_path_severed; | 65 | int iucv_path_severed; |
66 | iucv_MessagePending local_interrupt_buffer; | 66 | struct iucv_message local_interrupt_buffer; |
67 | atomic_t receive_ready; | 67 | atomic_t receive_ready; |
68 | iucv_handle_t iucv_handle; | ||
69 | int minor_num; | 68 | int minor_num; |
70 | char * buffer; | 69 | char * buffer; |
71 | char * current_position; | 70 | char * current_position; |
@@ -97,40 +96,21 @@ static struct file_operations vmlogrdr_fops = { | |||
97 | }; | 96 | }; |
98 | 97 | ||
99 | 98 | ||
100 | static u8 iucvMagic[16] = { | 99 | static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]); |
101 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, | 100 | static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]); |
102 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 | 101 | static void vmlogrdr_iucv_message_pending(struct iucv_path *, |
103 | }; | 102 | struct iucv_message *); |
104 | 103 | ||
105 | 104 | ||
106 | static u8 mask[] = { | 105 | static struct iucv_handler vmlogrdr_iucv_handler = { |
107 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 106 | .path_complete = vmlogrdr_iucv_path_complete, |
108 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 107 | .path_severed = vmlogrdr_iucv_path_severed, |
109 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 108 | .message_pending = vmlogrdr_iucv_message_pending, |
110 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
111 | }; | 109 | }; |
112 | 110 | ||
113 | 111 | ||
114 | static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | 112 | static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); |
115 | 113 | static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); | |
116 | |||
117 | static void | ||
118 | vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data); | ||
119 | static void | ||
120 | vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data); | ||
121 | static void | ||
122 | vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data); | ||
123 | |||
124 | |||
125 | static iucv_interrupt_ops_t vmlogrdr_iucvops = { | ||
126 | .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete, | ||
127 | .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered, | ||
128 | .MessagePending = vmlogrdr_iucv_MessagePending, | ||
129 | }; | ||
130 | |||
131 | |||
132 | DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); | ||
133 | DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); | ||
134 | 114 | ||
135 | /* | 115 | /* |
136 | * pointer to system service private structure | 116 | * pointer to system service private structure |
@@ -177,28 +157,29 @@ static struct cdev *vmlogrdr_cdev = NULL; | |||
177 | static int recording_class_AB; | 157 | static int recording_class_AB; |
178 | 158 | ||
179 | 159 | ||
180 | static void | 160 | static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) |
181 | vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib, | ||
182 | void * pgm_data) | ||
183 | { | 161 | { |
184 | struct vmlogrdr_priv_t * logptr = pgm_data; | 162 | struct vmlogrdr_priv_t * logptr = path->private; |
163 | |||
185 | spin_lock(&logptr->priv_lock); | 164 | spin_lock(&logptr->priv_lock); |
186 | logptr->connection_established = 1; | 165 | logptr->connection_established = 1; |
187 | spin_unlock(&logptr->priv_lock); | 166 | spin_unlock(&logptr->priv_lock); |
188 | wake_up(&conn_wait_queue); | 167 | wake_up(&conn_wait_queue); |
189 | return; | ||
190 | } | 168 | } |
191 | 169 | ||
192 | 170 | ||
193 | static void | 171 | static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) |
194 | vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data) | ||
195 | { | 172 | { |
196 | u8 reason = (u8) eib->ipuser[8]; | 173 | struct vmlogrdr_priv_t * logptr = path->private; |
197 | struct vmlogrdr_priv_t * logptr = pgm_data; | 174 | u8 reason = (u8) ipuser[8]; |
198 | 175 | ||
199 | printk (KERN_ERR "vmlogrdr: connection severed with" | 176 | printk (KERN_ERR "vmlogrdr: connection severed with" |
200 | " reason %i\n", reason); | 177 | " reason %i\n", reason); |
201 | 178 | ||
179 | iucv_path_sever(path, NULL); | ||
180 | kfree(path); | ||
181 | logptr->path = NULL; | ||
182 | |||
202 | spin_lock(&logptr->priv_lock); | 183 | spin_lock(&logptr->priv_lock); |
203 | logptr->connection_established = 0; | 184 | logptr->connection_established = 0; |
204 | logptr->iucv_path_severed = 1; | 185 | logptr->iucv_path_severed = 1; |
@@ -210,10 +191,10 @@ vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data) | |||
210 | } | 191 | } |
211 | 192 | ||
212 | 193 | ||
213 | static void | 194 | static void vmlogrdr_iucv_message_pending(struct iucv_path *path, |
214 | vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) | 195 | struct iucv_message *msg) |
215 | { | 196 | { |
216 | struct vmlogrdr_priv_t * logptr = pgm_data; | 197 | struct vmlogrdr_priv_t * logptr = path->private; |
217 | 198 | ||
218 | /* | 199 | /* |
219 | * This function is the bottom half so it should be quick. | 200 | * This function is the bottom half so it should be quick. |
@@ -221,15 +202,15 @@ vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) | |||
221 | * the usage count | 202 | * the usage count |
222 | */ | 203 | */ |
223 | spin_lock(&logptr->priv_lock); | 204 | spin_lock(&logptr->priv_lock); |
224 | memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib)); | 205 | memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg)); |
225 | atomic_inc(&logptr->receive_ready); | 206 | atomic_inc(&logptr->receive_ready); |
226 | spin_unlock(&logptr->priv_lock); | 207 | spin_unlock(&logptr->priv_lock); |
227 | wake_up_interruptible(&read_wait_queue); | 208 | wake_up_interruptible(&read_wait_queue); |
228 | } | 209 | } |
229 | 210 | ||
230 | 211 | ||
231 | static int | 212 | static int vmlogrdr_get_recording_class_AB(void) |
232 | vmlogrdr_get_recording_class_AB(void) { | 213 | { |
233 | char cp_command[]="QUERY COMMAND RECORDING "; | 214 | char cp_command[]="QUERY COMMAND RECORDING "; |
234 | char cp_response[80]; | 215 | char cp_response[80]; |
235 | char *tail; | 216 | char *tail; |
@@ -259,8 +240,9 @@ vmlogrdr_get_recording_class_AB(void) { | |||
259 | } | 240 | } |
260 | 241 | ||
261 | 242 | ||
262 | static int | 243 | static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, |
263 | vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { | 244 | int action, int purge) |
245 | { | ||
264 | 246 | ||
265 | char cp_command[80]; | 247 | char cp_command[80]; |
266 | char cp_response[160]; | 248 | char cp_response[160]; |
@@ -318,8 +300,7 @@ vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { | |||
318 | } | 300 | } |
319 | 301 | ||
320 | 302 | ||
321 | static int | 303 | static int vmlogrdr_open (struct inode *inode, struct file *filp) |
322 | vmlogrdr_open (struct inode *inode, struct file *filp) | ||
323 | { | 304 | { |
324 | int dev_num = 0; | 305 | int dev_num = 0; |
325 | struct vmlogrdr_priv_t * logptr = NULL; | 306 | struct vmlogrdr_priv_t * logptr = NULL; |
@@ -329,10 +310,7 @@ vmlogrdr_open (struct inode *inode, struct file *filp) | |||
329 | dev_num = iminor(inode); | 310 | dev_num = iminor(inode); |
330 | if (dev_num > MAXMINOR) | 311 | if (dev_num > MAXMINOR) |
331 | return -ENODEV; | 312 | return -ENODEV; |
332 | |||
333 | logptr = &sys_ser[dev_num]; | 313 | logptr = &sys_ser[dev_num]; |
334 | if (logptr == NULL) | ||
335 | return -ENODEV; | ||
336 | 314 | ||
337 | /* | 315 | /* |
338 | * only allow for blocking reads to be open | 316 | * only allow for blocking reads to be open |
@@ -345,52 +323,38 @@ vmlogrdr_open (struct inode *inode, struct file *filp) | |||
345 | if (logptr->dev_in_use) { | 323 | if (logptr->dev_in_use) { |
346 | spin_unlock_bh(&logptr->priv_lock); | 324 | spin_unlock_bh(&logptr->priv_lock); |
347 | return -EBUSY; | 325 | return -EBUSY; |
348 | } else { | ||
349 | logptr->dev_in_use = 1; | ||
350 | spin_unlock_bh(&logptr->priv_lock); | ||
351 | } | 326 | } |
352 | 327 | logptr->dev_in_use = 1; | |
328 | logptr->connection_established = 0; | ||
329 | logptr->iucv_path_severed = 0; | ||
353 | atomic_set(&logptr->receive_ready, 0); | 330 | atomic_set(&logptr->receive_ready, 0); |
354 | logptr->buffer_free = 1; | 331 | logptr->buffer_free = 1; |
332 | spin_unlock_bh(&logptr->priv_lock); | ||
355 | 333 | ||
356 | /* set the file options */ | 334 | /* set the file options */ |
357 | filp->private_data = logptr; | 335 | filp->private_data = logptr; |
358 | filp->f_op = &vmlogrdr_fops; | 336 | filp->f_op = &vmlogrdr_fops; |
359 | 337 | ||
360 | /* start recording for this service*/ | 338 | /* start recording for this service*/ |
361 | ret=0; | 339 | if (logptr->autorecording) { |
362 | if (logptr->autorecording) | ||
363 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); | 340 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); |
364 | if (ret) | 341 | if (ret) |
365 | printk (KERN_WARNING "vmlogrdr: failed to start " | 342 | printk (KERN_WARNING "vmlogrdr: failed to start " |
366 | "recording automatically\n"); | 343 | "recording automatically\n"); |
367 | |||
368 | /* Register with iucv driver */ | ||
369 | logptr->iucv_handle = iucv_register_program(iucvMagic, | ||
370 | logptr->system_service, mask, &vmlogrdr_iucvops, | ||
371 | logptr); | ||
372 | |||
373 | if (logptr->iucv_handle == NULL) { | ||
374 | printk (KERN_ERR "vmlogrdr: failed to register with" | ||
375 | "iucv driver\n"); | ||
376 | goto not_registered; | ||
377 | } | 344 | } |
378 | 345 | ||
379 | /* create connection to the system service */ | 346 | /* create connection to the system service */ |
380 | spin_lock_bh(&logptr->priv_lock); | 347 | logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL); |
381 | logptr->connection_established = 0; | 348 | if (!logptr->path) |
382 | logptr->iucv_path_severed = 0; | 349 | goto out_dev; |
383 | spin_unlock_bh(&logptr->priv_lock); | 350 | connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler, |
384 | 351 | logptr->system_service, NULL, NULL, | |
385 | connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic, | 352 | logptr); |
386 | logptr->system_service, iucv_host, 0, | ||
387 | NULL, NULL, | ||
388 | logptr->iucv_handle, NULL); | ||
389 | if (connect_rc) { | 353 | if (connect_rc) { |
390 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " | 354 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " |
391 | "failed with rc %i \n", logptr->system_service, | 355 | "failed with rc %i \n", logptr->system_service, |
392 | connect_rc); | 356 | connect_rc); |
393 | goto not_connected; | 357 | goto out_path; |
394 | } | 358 | } |
395 | 359 | ||
396 | /* We've issued the connect and now we must wait for a | 360 | /* We've issued the connect and now we must wait for a |
@@ -399,35 +363,28 @@ vmlogrdr_open (struct inode *inode, struct file *filp) | |||
399 | */ | 363 | */ |
400 | wait_event(conn_wait_queue, (logptr->connection_established) | 364 | wait_event(conn_wait_queue, (logptr->connection_established) |
401 | || (logptr->iucv_path_severed)); | 365 | || (logptr->iucv_path_severed)); |
402 | if (logptr->iucv_path_severed) { | 366 | if (logptr->iucv_path_severed) |
403 | goto not_connected; | 367 | goto out_record; |
404 | } | ||
405 | |||
406 | return nonseekable_open(inode, filp); | 368 | return nonseekable_open(inode, filp); |
407 | 369 | ||
408 | not_connected: | 370 | out_record: |
409 | iucv_unregister_program(logptr->iucv_handle); | ||
410 | logptr->iucv_handle = NULL; | ||
411 | not_registered: | ||
412 | if (logptr->autorecording) | 371 | if (logptr->autorecording) |
413 | vmlogrdr_recording(logptr,0,logptr->autopurge); | 372 | vmlogrdr_recording(logptr,0,logptr->autopurge); |
373 | out_path: | ||
374 | kfree(logptr->path); /* kfree(NULL) is ok. */ | ||
375 | logptr->path = NULL; | ||
376 | out_dev: | ||
414 | logptr->dev_in_use = 0; | 377 | logptr->dev_in_use = 0; |
415 | return -EIO; | 378 | return -EIO; |
416 | |||
417 | |||
418 | } | 379 | } |
419 | 380 | ||
420 | 381 | ||
421 | static int | 382 | static int vmlogrdr_release (struct inode *inode, struct file *filp) |
422 | vmlogrdr_release (struct inode *inode, struct file *filp) | ||
423 | { | 383 | { |
424 | int ret; | 384 | int ret; |
425 | 385 | ||
426 | struct vmlogrdr_priv_t * logptr = filp->private_data; | 386 | struct vmlogrdr_priv_t * logptr = filp->private_data; |
427 | 387 | ||
428 | iucv_unregister_program(logptr->iucv_handle); | ||
429 | logptr->iucv_handle = NULL; | ||
430 | |||
431 | if (logptr->autorecording) { | 388 | if (logptr->autorecording) { |
432 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
433 | if (ret) | 390 | if (ret) |
@@ -440,8 +397,8 @@ vmlogrdr_release (struct inode *inode, struct file *filp) | |||
440 | } | 397 | } |
441 | 398 | ||
442 | 399 | ||
443 | static int | 400 | static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) |
444 | vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | 401 | { |
445 | int rc, *temp; | 402 | int rc, *temp; |
446 | /* we need to keep track of two data sizes here: | 403 | /* we need to keep track of two data sizes here: |
447 | * The number of bytes we need to receive from iucv and | 404 | * The number of bytes we need to receive from iucv and |
@@ -462,8 +419,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
462 | * We need to return the total length of the record | 419 | * We need to return the total length of the record |
463 | * + size of FENCE in the first 4 bytes of the buffer. | 420 | * + size of FENCE in the first 4 bytes of the buffer. |
464 | */ | 421 | */ |
465 | iucv_data_count = | 422 | iucv_data_count = priv->local_interrupt_buffer.length; |
466 | priv->local_interrupt_buffer.ln1msg2.ipbfln1f; | ||
467 | user_data_count = sizeof(int); | 423 | user_data_count = sizeof(int); |
468 | temp = (int*)priv->buffer; | 424 | temp = (int*)priv->buffer; |
469 | *temp= iucv_data_count + sizeof(FENCE); | 425 | *temp= iucv_data_count + sizeof(FENCE); |
@@ -475,14 +431,10 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
475 | */ | 431 | */ |
476 | if (iucv_data_count > NET_BUFFER_SIZE) | 432 | if (iucv_data_count > NET_BUFFER_SIZE) |
477 | iucv_data_count = NET_BUFFER_SIZE; | 433 | iucv_data_count = NET_BUFFER_SIZE; |
478 | rc = iucv_receive(priv->pathid, | 434 | rc = iucv_message_receive(priv->path, |
479 | priv->local_interrupt_buffer.ipmsgid, | 435 | &priv->local_interrupt_buffer, |
480 | priv->local_interrupt_buffer.iptrgcls, | 436 | 0, buffer, iucv_data_count, |
481 | buffer, | 437 | &priv->residual_length); |
482 | iucv_data_count, | ||
483 | NULL, | ||
484 | NULL, | ||
485 | &priv->residual_length); | ||
486 | spin_unlock_bh(&priv->priv_lock); | 438 | spin_unlock_bh(&priv->priv_lock); |
487 | /* An rc of 5 indicates that the record was bigger then | 439 | /* An rc of 5 indicates that the record was bigger then |
488 | * the buffer, which is OK for us. A 9 indicates that the | 440 | * the buffer, which is OK for us. A 9 indicates that the |
@@ -514,8 +466,8 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
514 | } | 466 | } |
515 | 467 | ||
516 | 468 | ||
517 | static ssize_t | 469 | static ssize_t vmlogrdr_read(struct file *filp, char __user *data, |
518 | vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) | 470 | size_t count, loff_t * ppos) |
519 | { | 471 | { |
520 | int rc; | 472 | int rc; |
521 | struct vmlogrdr_priv_t * priv = filp->private_data; | 473 | struct vmlogrdr_priv_t * priv = filp->private_data; |
@@ -547,8 +499,10 @@ vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) | |||
547 | return count; | 499 | return count; |
548 | } | 500 | } |
549 | 501 | ||
550 | static ssize_t | 502 | static ssize_t vmlogrdr_autopurge_store(struct device * dev, |
551 | vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { | 503 | struct device_attribute *attr, |
504 | const char * buf, size_t count) | ||
505 | { | ||
552 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 506 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
553 | ssize_t ret = count; | 507 | ssize_t ret = count; |
554 | 508 | ||
@@ -566,8 +520,10 @@ vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, con | |||
566 | } | 520 | } |
567 | 521 | ||
568 | 522 | ||
569 | static ssize_t | 523 | static ssize_t vmlogrdr_autopurge_show(struct device *dev, |
570 | vmlogrdr_autopurge_show(struct device *dev, struct device_attribute *attr, char *buf) { | 524 | struct device_attribute *attr, |
525 | char *buf) | ||
526 | { | ||
571 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 527 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
572 | return sprintf(buf, "%u\n", priv->autopurge); | 528 | return sprintf(buf, "%u\n", priv->autopurge); |
573 | } | 529 | } |
@@ -577,8 +533,10 @@ static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show, | |||
577 | vmlogrdr_autopurge_store); | 533 | vmlogrdr_autopurge_store); |
578 | 534 | ||
579 | 535 | ||
580 | static ssize_t | 536 | static ssize_t vmlogrdr_purge_store(struct device * dev, |
581 | vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { | 537 | struct device_attribute *attr, |
538 | const char * buf, size_t count) | ||
539 | { | ||
582 | 540 | ||
583 | char cp_command[80]; | 541 | char cp_command[80]; |
584 | char cp_response[80]; | 542 | char cp_response[80]; |
@@ -618,9 +576,10 @@ vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const c | |||
618 | static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); | 576 | static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); |
619 | 577 | ||
620 | 578 | ||
621 | static ssize_t | 579 | static ssize_t vmlogrdr_autorecording_store(struct device *dev, |
622 | vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf, | 580 | struct device_attribute *attr, |
623 | size_t count) { | 581 | const char *buf, size_t count) |
582 | { | ||
624 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 583 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
625 | ssize_t ret = count; | 584 | ssize_t ret = count; |
626 | 585 | ||
@@ -638,8 +597,10 @@ vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, | |||
638 | } | 597 | } |
639 | 598 | ||
640 | 599 | ||
641 | static ssize_t | 600 | static ssize_t vmlogrdr_autorecording_show(struct device *dev, |
642 | vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) { | 601 | struct device_attribute *attr, |
602 | char *buf) | ||
603 | { | ||
643 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 604 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
644 | return sprintf(buf, "%u\n", priv->autorecording); | 605 | return sprintf(buf, "%u\n", priv->autorecording); |
645 | } | 606 | } |
@@ -649,9 +610,10 @@ static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show, | |||
649 | vmlogrdr_autorecording_store); | 610 | vmlogrdr_autorecording_store); |
650 | 611 | ||
651 | 612 | ||
652 | static ssize_t | 613 | static ssize_t vmlogrdr_recording_store(struct device * dev, |
653 | vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { | 614 | struct device_attribute *attr, |
654 | 615 | const char * buf, size_t count) | |
616 | { | ||
655 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 617 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
656 | ssize_t ret; | 618 | ssize_t ret; |
657 | 619 | ||
@@ -676,8 +638,9 @@ vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, con | |||
676 | static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); | 638 | static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); |
677 | 639 | ||
678 | 640 | ||
679 | static ssize_t | 641 | static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver, |
680 | vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { | 642 | char *buf) |
643 | { | ||
681 | 644 | ||
682 | char cp_command[] = "QUERY RECORDING "; | 645 | char cp_command[] = "QUERY RECORDING "; |
683 | int len; | 646 | int len; |
@@ -710,52 +673,63 @@ static struct device_driver vmlogrdr_driver = { | |||
710 | }; | 673 | }; |
711 | 674 | ||
712 | 675 | ||
713 | static int | 676 | static int vmlogrdr_register_driver(void) |
714 | vmlogrdr_register_driver(void) { | 677 | { |
715 | int ret; | 678 | int ret; |
716 | 679 | ||
680 | /* Register with iucv driver */ | ||
681 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); | ||
682 | if (ret) { | ||
683 | printk (KERN_ERR "vmlogrdr: failed to register with" | ||
684 | "iucv driver\n"); | ||
685 | goto out; | ||
686 | } | ||
687 | |||
717 | ret = driver_register(&vmlogrdr_driver); | 688 | ret = driver_register(&vmlogrdr_driver); |
718 | if (ret) { | 689 | if (ret) { |
719 | printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); | 690 | printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); |
720 | return ret; | 691 | goto out_iucv; |
721 | } | 692 | } |
722 | 693 | ||
723 | ret = driver_create_file(&vmlogrdr_driver, | 694 | ret = driver_create_file(&vmlogrdr_driver, |
724 | &driver_attr_recording_status); | 695 | &driver_attr_recording_status); |
725 | if (ret) { | 696 | if (ret) { |
726 | printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); | 697 | printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); |
727 | goto unregdriver; | 698 | goto out_driver; |
728 | } | 699 | } |
729 | 700 | ||
730 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); | 701 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); |
731 | if (IS_ERR(vmlogrdr_class)) { | 702 | if (IS_ERR(vmlogrdr_class)) { |
732 | printk(KERN_ERR "vmlogrdr: failed to create class.\n"); | 703 | printk(KERN_ERR "vmlogrdr: failed to create class.\n"); |
733 | ret=PTR_ERR(vmlogrdr_class); | 704 | ret = PTR_ERR(vmlogrdr_class); |
734 | vmlogrdr_class=NULL; | 705 | vmlogrdr_class = NULL; |
735 | goto unregattr; | 706 | goto out_attr; |
736 | } | 707 | } |
737 | return 0; | 708 | return 0; |
738 | 709 | ||
739 | unregattr: | 710 | out_attr: |
740 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); | 711 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); |
741 | unregdriver: | 712 | out_driver: |
742 | driver_unregister(&vmlogrdr_driver); | 713 | driver_unregister(&vmlogrdr_driver); |
714 | out_iucv: | ||
715 | iucv_unregister(&vmlogrdr_iucv_handler, 1); | ||
716 | out: | ||
743 | return ret; | 717 | return ret; |
744 | } | 718 | } |
745 | 719 | ||
746 | 720 | ||
747 | static void | 721 | static void vmlogrdr_unregister_driver(void) |
748 | vmlogrdr_unregister_driver(void) { | 722 | { |
749 | class_destroy(vmlogrdr_class); | 723 | class_destroy(vmlogrdr_class); |
750 | vmlogrdr_class = NULL; | 724 | vmlogrdr_class = NULL; |
751 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); | 725 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); |
752 | driver_unregister(&vmlogrdr_driver); | 726 | driver_unregister(&vmlogrdr_driver); |
753 | return; | 727 | iucv_unregister(&vmlogrdr_iucv_handler, 1); |
754 | } | 728 | } |
755 | 729 | ||
756 | 730 | ||
757 | static int | 731 | static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) |
758 | vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { | 732 | { |
759 | struct device *dev; | 733 | struct device *dev; |
760 | int ret; | 734 | int ret; |
761 | 735 | ||
@@ -804,9 +778,10 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { | |||
804 | } | 778 | } |
805 | 779 | ||
806 | 780 | ||
807 | static int | 781 | static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) |
808 | vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { | 782 | { |
809 | class_device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); | 783 | class_device_destroy(vmlogrdr_class, |
784 | MKDEV(vmlogrdr_major, priv->minor_num)); | ||
810 | if (priv->device != NULL) { | 785 | if (priv->device != NULL) { |
811 | sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); | 786 | sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); |
812 | device_unregister(priv->device); | 787 | device_unregister(priv->device); |
@@ -816,8 +791,8 @@ vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { | |||
816 | } | 791 | } |
817 | 792 | ||
818 | 793 | ||
819 | static int | 794 | static int vmlogrdr_register_cdev(dev_t dev) |
820 | vmlogrdr_register_cdev(dev_t dev) { | 795 | { |
821 | int rc = 0; | 796 | int rc = 0; |
822 | vmlogrdr_cdev = cdev_alloc(); | 797 | vmlogrdr_cdev = cdev_alloc(); |
823 | if (!vmlogrdr_cdev) { | 798 | if (!vmlogrdr_cdev) { |
@@ -837,9 +812,10 @@ vmlogrdr_register_cdev(dev_t dev) { | |||
837 | } | 812 | } |
838 | 813 | ||
839 | 814 | ||
840 | static void | 815 | static void vmlogrdr_cleanup(void) |
841 | vmlogrdr_cleanup(void) { | 816 | { |
842 | int i; | 817 | int i; |
818 | |||
843 | if (vmlogrdr_cdev) { | 819 | if (vmlogrdr_cdev) { |
844 | cdev_del(vmlogrdr_cdev); | 820 | cdev_del(vmlogrdr_cdev); |
845 | vmlogrdr_cdev=NULL; | 821 | vmlogrdr_cdev=NULL; |
@@ -856,8 +832,7 @@ vmlogrdr_cleanup(void) { | |||
856 | } | 832 | } |
857 | 833 | ||
858 | 834 | ||
859 | static int | 835 | static int vmlogrdr_init(void) |
860 | vmlogrdr_init(void) | ||
861 | { | 836 | { |
862 | int rc; | 837 | int rc; |
863 | int i; | 838 | int i; |
@@ -907,8 +882,7 @@ cleanup: | |||
907 | } | 882 | } |
908 | 883 | ||
909 | 884 | ||
910 | static void | 885 | static void vmlogrdr_exit(void) |
911 | vmlogrdr_exit(void) | ||
912 | { | 886 | { |
913 | vmlogrdr_cleanup(); | 887 | vmlogrdr_cleanup(); |
914 | printk (KERN_INFO "vmlogrdr: driver unloaded\n"); | 888 | printk (KERN_INFO "vmlogrdr: driver unloaded\n"); |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 12c2d6b746e6..aa65df4dfced 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action; | |||
43 | * Function: blacklist_range | 43 | * Function: blacklist_range |
44 | * (Un-)blacklist the devices from-to | 44 | * (Un-)blacklist the devices from-to |
45 | */ | 45 | */ |
46 | static inline void | 46 | static void |
47 | blacklist_range (range_action action, unsigned int from, unsigned int to, | 47 | blacklist_range (range_action action, unsigned int from, unsigned int to, |
48 | unsigned int ssid) | 48 | unsigned int ssid) |
49 | { | 49 | { |
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to, | |||
69 | * Get devno/busid from given string. | 69 | * Get devno/busid from given string. |
70 | * Shamelessly grabbed from dasd_devmap.c. | 70 | * Shamelessly grabbed from dasd_devmap.c. |
71 | */ | 71 | */ |
72 | static inline int | 72 | static int |
73 | blacklist_busid(char **str, int *id0, int *ssid, int *devno) | 73 | blacklist_busid(char **str, int *id0, int *ssid, int *devno) |
74 | { | 74 | { |
75 | int val, old_style; | 75 | int val, old_style; |
@@ -123,10 +123,10 @@ confused: | |||
123 | return 1; | 123 | return 1; |
124 | } | 124 | } |
125 | 125 | ||
126 | static inline int | 126 | static int |
127 | blacklist_parse_parameters (char *str, range_action action) | 127 | blacklist_parse_parameters (char *str, range_action action) |
128 | { | 128 | { |
129 | unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; | 129 | int from, to, from_id0, to_id0, from_ssid, to_ssid; |
130 | 130 | ||
131 | while (*str != 0 && *str != '\n') { | 131 | while (*str != 0 && *str != '\n') { |
132 | range_action ra = action; | 132 | range_action ra = action; |
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno) | |||
227 | * Function: blacklist_parse_proc_parameters | 227 | * Function: blacklist_parse_proc_parameters |
228 | * parse the stuff which is piped to /proc/cio_ignore | 228 | * parse the stuff which is piped to /proc/cio_ignore |
229 | */ | 229 | */ |
230 | static inline void | 230 | static void |
231 | blacklist_parse_proc_parameters (char *buf) | 231 | blacklist_parse_proc_parameters (char *buf) |
232 | { | 232 | { |
233 | if (strncmp (buf, "free ", 5) == 0) { | 233 | if (strncmp (buf, "free ", 5) == 0) { |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 38954f5cd14c..d48e3ca4752c 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer, | |||
53 | 53 | ||
54 | static struct bus_type ccwgroup_bus_type; | 54 | static struct bus_type ccwgroup_bus_type; |
55 | 55 | ||
56 | static inline void | 56 | static void |
57 | __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) | 57 | __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) |
58 | { | 58 | { |
59 | int i; | 59 | int i; |
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev) | |||
104 | kfree(gdev); | 104 | kfree(gdev); |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline int | 107 | static int |
108 | __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) | 108 | __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) |
109 | { | 109 | { |
110 | char str[8]; | 110 | char str[8]; |
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev) | |||
424 | return 0; | 424 | return 0; |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline struct ccwgroup_device * | 427 | static struct ccwgroup_device * |
428 | __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) | 428 | __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) |
429 | { | 429 | { |
430 | struct ccwgroup_device *gdev; | 430 | struct ccwgroup_device *gdev; |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index cbab8d2ce5cf..6f05a44e3817 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | |||
93 | u16 sch; /* subchannel */ | 93 | u16 sch; /* subchannel */ |
94 | u8 chpid[8]; /* chpids 0-7 */ | 94 | u8 chpid[8]; /* chpids 0-7 */ |
95 | u16 fla[8]; /* full link addresses 0-7 */ | 95 | u16 fla[8]; /* full link addresses 0-7 */ |
96 | } *ssd_area; | 96 | } __attribute__ ((packed)) *ssd_area; |
97 | 97 | ||
98 | ssd_area = page; | 98 | ssd_area = page; |
99 | 99 | ||
@@ -277,7 +277,7 @@ out_unreg: | |||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | static inline void | 280 | static void |
281 | s390_set_chpid_offline( __u8 chpid) | 281 | s390_set_chpid_offline( __u8 chpid) |
282 | { | 282 | { |
283 | char dbf_txt[15]; | 283 | char dbf_txt[15]; |
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) | |||
338 | return 0x80 >> chp; | 338 | return 0x80 >> chp; |
339 | } | 339 | } |
340 | 340 | ||
341 | static inline int | 341 | static int |
342 | s390_process_res_acc_new_sch(struct subchannel_id schid) | 342 | s390_process_res_acc_new_sch(struct subchannel_id schid) |
343 | { | 343 | { |
344 | struct schib schib; | 344 | struct schib schib; |
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data) | |||
444 | u32 andesc[28]; | 444 | u32 andesc[28]; |
445 | /* incident-specific information */ | 445 | /* incident-specific information */ |
446 | u32 isinfo[28]; | 446 | u32 isinfo[28]; |
447 | } *lir; | 447 | } __attribute__ ((packed)) *lir; |
448 | 448 | ||
449 | lir = data; | 449 | lir = data; |
450 | if (!(lir->iq&0x80)) | 450 | if (!(lir->iq&0x80)) |
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data) | |||
461 | return (u16) (lir->indesc[0]&0x000000ff); | 461 | return (u16) (lir->indesc[0]&0x000000ff); |
462 | } | 462 | } |
463 | 463 | ||
464 | int | 464 | struct chsc_sei_area { |
465 | chsc_process_crw(void) | 465 | struct chsc_header request; |
466 | u32 reserved1; | ||
467 | u32 reserved2; | ||
468 | u32 reserved3; | ||
469 | struct chsc_header response; | ||
470 | u32 reserved4; | ||
471 | u8 flags; | ||
472 | u8 vf; /* validity flags */ | ||
473 | u8 rs; /* reporting source */ | ||
474 | u8 cc; /* content code */ | ||
475 | u16 fla; /* full link address */ | ||
476 | u16 rsid; /* reporting source id */ | ||
477 | u32 reserved5; | ||
478 | u32 reserved6; | ||
479 | u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ | ||
480 | /* ccdf has to be big enough for a link-incident record */ | ||
481 | } __attribute__ ((packed)); | ||
482 | |||
483 | static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | ||
484 | { | ||
485 | int chpid; | ||
486 | |||
487 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", | ||
488 | sei_area->rs, sei_area->rsid); | ||
489 | if (sei_area->rs != 4) | ||
490 | return 0; | ||
491 | chpid = __get_chpid_from_lir(sei_area->ccdf); | ||
492 | if (chpid < 0) | ||
493 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); | ||
494 | else | ||
495 | s390_set_chpid_offline(chpid); | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | ||
466 | { | 501 | { |
467 | int chpid, ret; | ||
468 | struct res_acc_data res_data; | 502 | struct res_acc_data res_data; |
469 | struct { | 503 | struct device *dev; |
470 | struct chsc_header request; | 504 | int status; |
471 | u32 reserved1; | 505 | int rc; |
472 | u32 reserved2; | 506 | |
473 | u32 reserved3; | 507 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " |
474 | struct chsc_header response; | 508 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); |
475 | u32 reserved4; | 509 | if (sei_area->rs != 4) |
476 | u8 flags; | 510 | return 0; |
477 | u8 vf; /* validity flags */ | 511 | /* allocate a new channel path structure, if needed */ |
478 | u8 rs; /* reporting source */ | 512 | status = get_chp_status(sei_area->rsid); |
479 | u8 cc; /* content code */ | 513 | if (status < 0) |
480 | u16 fla; /* full link address */ | 514 | new_channel_path(sei_area->rsid); |
481 | u16 rsid; /* reporting source id */ | 515 | else if (!status) |
482 | u32 reserved5; | 516 | return 0; |
483 | u32 reserved6; | 517 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); |
484 | u32 ccdf[96]; /* content-code dependent field */ | 518 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
485 | /* ccdf has to be big enough for a link-incident record */ | 519 | res_data.chp = to_channelpath(dev); |
486 | } *sei_area; | 520 | if ((sei_area->vf & 0xc0) != 0) { |
521 | res_data.fla = sei_area->fla; | ||
522 | if ((sei_area->vf & 0xc0) == 0xc0) | ||
523 | /* full link address */ | ||
524 | res_data.fla_mask = 0xffff; | ||
525 | else | ||
526 | /* link address */ | ||
527 | res_data.fla_mask = 0xff00; | ||
528 | } | ||
529 | rc = s390_process_res_acc(&res_data); | ||
530 | put_device(dev); | ||
531 | |||
532 | return rc; | ||
533 | } | ||
534 | |||
535 | static int chsc_process_sei(struct chsc_sei_area *sei_area) | ||
536 | { | ||
537 | int rc; | ||
538 | |||
539 | /* Check if we might have lost some information. */ | ||
540 | if (sei_area->flags & 0x40) | ||
541 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); | ||
542 | /* which kind of information was stored? */ | ||
543 | rc = 0; | ||
544 | switch (sei_area->cc) { | ||
545 | case 1: /* link incident*/ | ||
546 | rc = chsc_process_sei_link_incident(sei_area); | ||
547 | break; | ||
548 | case 2: /* i/o resource accessibiliy */ | ||
549 | rc = chsc_process_sei_res_acc(sei_area); | ||
550 | break; | ||
551 | default: /* other stuff */ | ||
552 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", | ||
553 | sei_area->cc); | ||
554 | break; | ||
555 | } | ||
556 | |||
557 | return rc; | ||
558 | } | ||
559 | |||
560 | int chsc_process_crw(void) | ||
561 | { | ||
562 | struct chsc_sei_area *sei_area; | ||
563 | int ret; | ||
564 | int rc; | ||
487 | 565 | ||
488 | if (!sei_page) | 566 | if (!sei_page) |
489 | return 0; | 567 | return 0; |
490 | /* | 568 | /* Access to sei_page is serialized through machine check handler |
491 | * build the chsc request block for store event information | 569 | * thread, so no need for locking. */ |
492 | * and do the call | ||
493 | * This function is only called by the machine check handler thread, | ||
494 | * so we don't need locking for the sei_page. | ||
495 | */ | ||
496 | sei_area = sei_page; | 570 | sei_area = sei_page; |
497 | 571 | ||
498 | CIO_TRACE_EVENT( 2, "prcss"); | 572 | CIO_TRACE_EVENT( 2, "prcss"); |
499 | ret = 0; | 573 | ret = 0; |
500 | do { | 574 | do { |
501 | int ccode, status; | ||
502 | struct device *dev; | ||
503 | memset(sei_area, 0, sizeof(*sei_area)); | 575 | memset(sei_area, 0, sizeof(*sei_area)); |
504 | memset(&res_data, 0, sizeof(struct res_acc_data)); | ||
505 | sei_area->request.length = 0x0010; | 576 | sei_area->request.length = 0x0010; |
506 | sei_area->request.code = 0x000e; | 577 | sei_area->request.code = 0x000e; |
578 | if (chsc(sei_area)) | ||
579 | break; | ||
507 | 580 | ||
508 | ccode = chsc(sei_area); | 581 | if (sei_area->response.code == 0x0001) { |
509 | if (ccode > 0) | 582 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); |
510 | return 0; | 583 | rc = chsc_process_sei(sei_area); |
511 | 584 | if (rc) | |
512 | switch (sei_area->response.code) { | 585 | ret = rc; |
513 | /* for debug purposes, check for problems */ | 586 | } else { |
514 | case 0x0001: | 587 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
515 | CIO_CRW_EVENT(4, "chsc_process_crw: event information " | ||
516 | "successfully stored\n"); | ||
517 | break; /* everything ok */ | ||
518 | case 0x0002: | ||
519 | CIO_CRW_EVENT(2, | ||
520 | "chsc_process_crw: invalid command!\n"); | ||
521 | return 0; | ||
522 | case 0x0003: | ||
523 | CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc " | ||
524 | "request block!\n"); | ||
525 | return 0; | ||
526 | case 0x0005: | ||
527 | CIO_CRW_EVENT(2, "chsc_process_crw: no event " | ||
528 | "information stored\n"); | ||
529 | return 0; | ||
530 | default: | ||
531 | CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n", | ||
532 | sei_area->response.code); | 588 | sei_area->response.code); |
533 | return 0; | 589 | ret = 0; |
534 | } | ||
535 | |||
536 | /* Check if we might have lost some information. */ | ||
537 | if (sei_area->flags & 0x40) | ||
538 | CIO_CRW_EVENT(2, "chsc_process_crw: Event information " | ||
539 | "has been lost due to overflow!\n"); | ||
540 | |||
541 | if (sei_area->rs != 4) { | ||
542 | CIO_CRW_EVENT(2, "chsc_process_crw: reporting source " | ||
543 | "(%04X) isn't a chpid!\n", | ||
544 | sei_area->rsid); | ||
545 | continue; | ||
546 | } | ||
547 | |||
548 | /* which kind of information was stored? */ | ||
549 | switch (sei_area->cc) { | ||
550 | case 1: /* link incident*/ | ||
551 | CIO_CRW_EVENT(4, "chsc_process_crw: " | ||
552 | "channel subsystem reports link incident," | ||
553 | " reporting source is chpid %x\n", | ||
554 | sei_area->rsid); | ||
555 | chpid = __get_chpid_from_lir(sei_area->ccdf); | ||
556 | if (chpid < 0) | ||
557 | CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n", | ||
558 | __FUNCTION__); | ||
559 | else | ||
560 | s390_set_chpid_offline(chpid); | ||
561 | break; | ||
562 | |||
563 | case 2: /* i/o resource accessibiliy */ | ||
564 | CIO_CRW_EVENT(4, "chsc_process_crw: " | ||
565 | "channel subsystem reports some I/O " | ||
566 | "devices may have become accessible\n"); | ||
567 | pr_debug("Data received after sei: \n"); | ||
568 | pr_debug("Validity flags: %x\n", sei_area->vf); | ||
569 | |||
570 | /* allocate a new channel path structure, if needed */ | ||
571 | status = get_chp_status(sei_area->rsid); | ||
572 | if (status < 0) | ||
573 | new_channel_path(sei_area->rsid); | ||
574 | else if (!status) | ||
575 | break; | ||
576 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); | ||
577 | res_data.chp = to_channelpath(dev); | ||
578 | pr_debug("chpid: %x", sei_area->rsid); | ||
579 | if ((sei_area->vf & 0xc0) != 0) { | ||
580 | res_data.fla = sei_area->fla; | ||
581 | if ((sei_area->vf & 0xc0) == 0xc0) { | ||
582 | pr_debug(" full link addr: %x", | ||
583 | sei_area->fla); | ||
584 | res_data.fla_mask = 0xffff; | ||
585 | } else { | ||
586 | pr_debug(" link addr: %x", | ||
587 | sei_area->fla); | ||
588 | res_data.fla_mask = 0xff00; | ||
589 | } | ||
590 | } | ||
591 | ret = s390_process_res_acc(&res_data); | ||
592 | pr_debug("\n\n"); | ||
593 | put_device(dev); | ||
594 | break; | ||
595 | |||
596 | default: /* other stuff */ | ||
597 | CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", | ||
598 | sei_area->cc); | ||
599 | break; | 590 | break; |
600 | } | 591 | } |
601 | } while (sei_area->flags & 0x80); | 592 | } while (sei_area->flags & 0x80); |
593 | |||
602 | return ret; | 594 | return ret; |
603 | } | 595 | } |
604 | 596 | ||
605 | static inline int | 597 | static int |
606 | __chp_add_new_sch(struct subchannel_id schid) | 598 | __chp_add_new_sch(struct subchannel_id schid) |
607 | { | 599 | { |
608 | struct schib schib; | 600 | struct schib schib; |
609 | int ret; | 601 | int ret; |
610 | 602 | ||
611 | if (stsch(schid, &schib)) | 603 | if (stsch_err(schid, &schib)) |
612 | /* We're through */ | 604 | /* We're through */ |
613 | return need_rescan ? -EAGAIN : -ENXIO; | 605 | return need_rescan ? -EAGAIN : -ENXIO; |
614 | 606 | ||
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on) | |||
709 | return chp_add(chpid); | 701 | return chp_add(chpid); |
710 | } | 702 | } |
711 | 703 | ||
712 | static inline int check_for_io_on_path(struct subchannel *sch, int index) | 704 | static int check_for_io_on_path(struct subchannel *sch, int index) |
713 | { | 705 | { |
714 | int cc; | 706 | int cc; |
715 | 707 | ||
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch) | |||
741 | sch->driver->termination(&sch->dev); | 733 | sch->driver->termination(&sch->dev); |
742 | } | 734 | } |
743 | 735 | ||
744 | static inline void | 736 | static void |
745 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | 737 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) |
746 | { | 738 | { |
747 | int chp, old_lpm; | 739 | int chp, old_lpm; |
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = { | |||
967 | static void | 959 | static void |
968 | chsc_remove_chp_cmg_attr(struct channel_path *chp) | 960 | chsc_remove_chp_cmg_attr(struct channel_path *chp) |
969 | { | 961 | { |
970 | sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); | 962 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); |
971 | sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); | 963 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); |
972 | } | 964 | } |
973 | 965 | ||
974 | static int | 966 | static int |
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp) | |||
976 | { | 968 | { |
977 | int ret; | 969 | int ret; |
978 | 970 | ||
979 | ret = sysfs_create_bin_file(&chp->dev.kobj, | 971 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); |
980 | &chp_measurement_chars_attr); | ||
981 | if (ret) | 972 | if (ret) |
982 | return ret; | 973 | return ret; |
983 | ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); | 974 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); |
984 | if (ret) | 975 | if (ret) |
985 | sysfs_remove_bin_file(&chp->dev.kobj, | 976 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); |
986 | &chp_measurement_chars_attr); | ||
987 | return ret; | 977 | return ret; |
988 | } | 978 | } |
989 | 979 | ||
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
1042 | u32 : 4; | 1032 | u32 : 4; |
1043 | u32 fmt : 4; | 1033 | u32 fmt : 4; |
1044 | u32 : 16; | 1034 | u32 : 16; |
1045 | } *secm_area; | 1035 | } __attribute__ ((packed)) *secm_area; |
1046 | int ret, ccode; | 1036 | int ret, ccode; |
1047 | 1037 | ||
1048 | secm_area = page; | 1038 | secm_area = page; |
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid, | |||
1253 | struct chsc_header response; | 1243 | struct chsc_header response; |
1254 | u32 zeroes2; | 1244 | u32 zeroes2; |
1255 | struct channel_path_desc desc; | 1245 | struct channel_path_desc desc; |
1256 | } *scpd_area; | 1246 | } __attribute__ ((packed)) *scpd_area; |
1257 | 1247 | ||
1258 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1248 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1259 | if (!scpd_area) | 1249 | if (!scpd_area) |
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
1350 | u32 cmg : 8; | 1340 | u32 cmg : 8; |
1351 | u32 zeroes3; | 1341 | u32 zeroes3; |
1352 | u32 data[NR_MEASUREMENT_CHARS]; | 1342 | u32 data[NR_MEASUREMENT_CHARS]; |
1353 | } *scmc_area; | 1343 | } __attribute__ ((packed)) *scmc_area; |
1354 | 1344 | ||
1355 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1345 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1356 | if (!scmc_area) | 1346 | if (!scmc_area) |
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code) | |||
1517 | u32 reserved5:4; | 1507 | u32 reserved5:4; |
1518 | u32 format2:4; | 1508 | u32 format2:4; |
1519 | u32 reserved6:24; | 1509 | u32 reserved6:24; |
1520 | } *sda_area; | 1510 | } __attribute__ ((packed)) *sda_area; |
1521 | 1511 | ||
1522 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); | 1512 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); |
1523 | if (!sda_area) | 1513 | if (!sda_area) |
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void) | |||
1569 | u32 reserved4; | 1559 | u32 reserved4; |
1570 | u32 general_char[510]; | 1560 | u32 general_char[510]; |
1571 | u32 chsc_char[518]; | 1561 | u32 chsc_char[518]; |
1572 | } *scsc_area; | 1562 | } __attribute__ ((packed)) *scsc_area; |
1573 | 1563 | ||
1574 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1564 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1575 | if (!scsc_area) { | 1565 | if (!scsc_area) { |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index a259245780ae..0fb2b024208f 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -10,17 +10,17 @@ | |||
10 | struct chsc_header { | 10 | struct chsc_header { |
11 | u16 length; | 11 | u16 length; |
12 | u16 code; | 12 | u16 code; |
13 | }; | 13 | } __attribute__ ((packed)); |
14 | 14 | ||
15 | #define NR_MEASUREMENT_CHARS 5 | 15 | #define NR_MEASUREMENT_CHARS 5 |
16 | struct cmg_chars { | 16 | struct cmg_chars { |
17 | u32 values[NR_MEASUREMENT_CHARS]; | 17 | u32 values[NR_MEASUREMENT_CHARS]; |
18 | }; | 18 | } __attribute__ ((packed)); |
19 | 19 | ||
20 | #define NR_MEASUREMENT_ENTRIES 8 | 20 | #define NR_MEASUREMENT_ENTRIES 8 |
21 | struct cmg_entry { | 21 | struct cmg_entry { |
22 | u32 values[NR_MEASUREMENT_ENTRIES]; | 22 | u32 values[NR_MEASUREMENT_ENTRIES]; |
23 | }; | 23 | } __attribute__ ((packed)); |
24 | 24 | ||
25 | struct channel_path_desc { | 25 | struct channel_path_desc { |
26 | u8 flags; | 26 | u8 flags; |
@@ -31,7 +31,7 @@ struct channel_path_desc { | |||
31 | u8 zeroes; | 31 | u8 zeroes; |
32 | u8 chla; | 32 | u8 chla; |
33 | u8 chpp; | 33 | u8 chpp; |
34 | }; | 34 | } __attribute__ ((packed)); |
35 | 35 | ||
36 | struct channel_path { | 36 | struct channel_path { |
37 | int id; | 37 | int id; |
@@ -47,6 +47,9 @@ struct channel_path { | |||
47 | extern void s390_process_css( void ); | 47 | extern void s390_process_css( void ); |
48 | extern void chsc_validate_chpids(struct subchannel *); | 48 | extern void chsc_validate_chpids(struct subchannel *); |
49 | extern void chpid_is_actually_online(int); | 49 | extern void chpid_is_actually_online(int); |
50 | extern int css_get_ssd_info(struct subchannel *); | ||
51 | extern int chsc_process_crw(void); | ||
52 | extern int chp_process_crw(int, int); | ||
50 | 53 | ||
51 | struct css_general_char { | 54 | struct css_general_char { |
52 | u64 : 41; | 55 | u64 : 41; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index ae1bf231d089..b3a56dc5f68a 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch) | |||
122 | * Use tpi to get a pending interrupt, call the interrupt handler and | 122 | * Use tpi to get a pending interrupt, call the interrupt handler and |
123 | * return a pointer to the subchannel structure. | 123 | * return a pointer to the subchannel structure. |
124 | */ | 124 | */ |
125 | static inline int | 125 | static int |
126 | cio_tpi(void) | 126 | cio_tpi(void) |
127 | { | 127 | { |
128 | struct tpi_info *tpi_info; | 128 | struct tpi_info *tpi_info; |
@@ -152,7 +152,7 @@ cio_tpi(void) | |||
152 | return 1; | 152 | return 1; |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline int | 155 | static int |
156 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | 156 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) |
157 | { | 157 | { |
158 | char dbf_text[15]; | 158 | char dbf_text[15]; |
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
585 | * This device must not be known to Linux. So we simply | 585 | * This device must not be known to Linux. So we simply |
586 | * say that there is no device and return ENODEV. | 586 | * say that there is no device and return ENODEV. |
587 | */ | 587 | */ |
588 | CIO_MSG_EVENT(0, "Blacklisted device detected " | 588 | CIO_MSG_EVENT(4, "Blacklisted device detected " |
589 | "at devno %04X, subchannel set %x\n", | 589 | "at devno %04X, subchannel set %x\n", |
590 | sch->schib.pmcw.dev, sch->schid.ssid); | 590 | sch->schib.pmcw.dev, sch->schid.ssid); |
591 | err = -ENODEV; | 591 | err = -ENODEV; |
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs) | |||
646 | * Make sure that the i/o interrupt did not "overtake" | 646 | * Make sure that the i/o interrupt did not "overtake" |
647 | * the last HZ timer interrupt. | 647 | * the last HZ timer interrupt. |
648 | */ | 648 | */ |
649 | account_ticks(); | 649 | account_ticks(S390_lowcore.int_clock); |
650 | /* | 650 | /* |
651 | * Get interrupt information from lowcore | 651 | * Get interrupt information from lowcore |
652 | */ | 652 | */ |
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void) | |||
832 | } | 832 | } |
833 | 833 | ||
834 | #endif | 834 | #endif |
835 | static inline int | 835 | static int |
836 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | 836 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) |
837 | { | 837 | { |
838 | int retry, cc; | 838 | int retry, cc; |
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
850 | return -EBUSY; /* uhm... */ | 850 | return -EBUSY; /* uhm... */ |
851 | } | 851 | } |
852 | 852 | ||
853 | static inline int | 853 | /* we can't use the normal udelay here, since it enables external interrupts */ |
854 | |||
855 | static void udelay_reset(unsigned long usecs) | ||
856 | { | ||
857 | uint64_t start_cc, end_cc; | ||
858 | |||
859 | asm volatile ("STCK %0" : "=m" (start_cc)); | ||
860 | do { | ||
861 | cpu_relax(); | ||
862 | asm volatile ("STCK %0" : "=m" (end_cc)); | ||
863 | } while (((end_cc - start_cc)/4096) < usecs); | ||
864 | } | ||
865 | |||
866 | static int | ||
854 | __clear_subchannel_easy(struct subchannel_id schid) | 867 | __clear_subchannel_easy(struct subchannel_id schid) |
855 | { | 868 | { |
856 | int retry; | 869 | int retry; |
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid) | |||
865 | if (schid_equal(&ti.schid, &schid)) | 878 | if (schid_equal(&ti.schid, &schid)) |
866 | return 0; | 879 | return 0; |
867 | } | 880 | } |
868 | udelay(100); | 881 | udelay_reset(100); |
869 | } | 882 | } |
870 | return -EBUSY; | 883 | return -EBUSY; |
871 | } | 884 | } |
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) | |||
882 | int rc; | 895 | int rc; |
883 | 896 | ||
884 | pgm_check_occured = 0; | 897 | pgm_check_occured = 0; |
885 | s390_reset_pgm_handler = cio_reset_pgm_check_handler; | 898 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; |
886 | rc = stsch(schid, addr); | 899 | rc = stsch(schid, addr); |
887 | s390_reset_pgm_handler = NULL; | 900 | s390_base_pgm_handler_fn = NULL; |
888 | 901 | ||
889 | /* The program check handler could have changed pgm_check_occured */ | 902 | /* The program check handler could have changed pgm_check_occured. */ |
890 | barrier(); | 903 | barrier(); |
891 | 904 | ||
892 | if (pgm_check_occured) | 905 | if (pgm_check_occured) |
@@ -944,7 +957,7 @@ static void css_reset(void) | |||
944 | /* Reset subchannels. */ | 957 | /* Reset subchannels. */ |
945 | for_each_subchannel(__shutdown_subchannel_easy, NULL); | 958 | for_each_subchannel(__shutdown_subchannel_easy, NULL); |
946 | /* Reset channel paths. */ | 959 | /* Reset channel paths. */ |
947 | s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; | 960 | s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; |
948 | /* Enable channel report machine checks. */ | 961 | /* Enable channel report machine checks. */ |
949 | __ctl_set_bit(14, 28); | 962 | __ctl_set_bit(14, 28); |
950 | /* Temporarily reenable machine checks. */ | 963 | /* Temporarily reenable machine checks. */ |
@@ -969,7 +982,7 @@ static void css_reset(void) | |||
969 | local_mcck_disable(); | 982 | local_mcck_disable(); |
970 | /* Disable channel report machine checks. */ | 983 | /* Disable channel report machine checks. */ |
971 | __ctl_clear_bit(14, 28); | 984 | __ctl_clear_bit(14, 28); |
972 | s390_reset_mcck_handler = NULL; | 985 | s390_base_mcck_handler_fn = NULL; |
973 | } | 986 | } |
974 | 987 | ||
975 | static struct reset_call css_reset_call = { | 988 | static struct reset_call css_reset_call = { |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 828b2d334f0a..90b22faabbf7 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -519,8 +519,8 @@ struct cmb { | |||
519 | /* insert a single device into the cmb_area list | 519 | /* insert a single device into the cmb_area list |
520 | * called with cmb_area.lock held from alloc_cmb | 520 | * called with cmb_area.lock held from alloc_cmb |
521 | */ | 521 | */ |
522 | static inline int alloc_cmb_single (struct ccw_device *cdev, | 522 | static int alloc_cmb_single(struct ccw_device *cdev, |
523 | struct cmb_data *cmb_data) | 523 | struct cmb_data *cmb_data) |
524 | { | 524 | { |
525 | struct cmb *cmb; | 525 | struct cmb *cmb; |
526 | struct ccw_device_private *node; | 526 | struct ccw_device_private *node; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 9d6c02446863..fe0ace7aece8 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1]; | |||
30 | 30 | ||
31 | int css_characteristics_avail = 0; | 31 | int css_characteristics_avail = 0; |
32 | 32 | ||
33 | inline int | 33 | int |
34 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) | 34 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) |
35 | { | 35 | { |
36 | struct subchannel_id schid; | 36 | struct subchannel_id schid; |
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev) | |||
108 | } | 108 | } |
109 | } | 109 | } |
110 | 110 | ||
111 | extern int css_get_ssd_info(struct subchannel *sch); | ||
112 | |||
113 | |||
114 | int css_sch_device_register(struct subchannel *sch) | 111 | int css_sch_device_register(struct subchannel *sch) |
115 | { | 112 | { |
116 | int ret; | 113 | int ret; |
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid) | |||
187 | return dev ? to_subchannel(dev) : NULL; | 184 | return dev ? to_subchannel(dev) : NULL; |
188 | } | 185 | } |
189 | 186 | ||
190 | static inline int css_get_subchannel_status(struct subchannel *sch) | 187 | static int css_get_subchannel_status(struct subchannel *sch) |
191 | { | 188 | { |
192 | struct schib schib; | 189 | struct schib schib; |
193 | 190 | ||
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
299 | /* Will be done on the slow path. */ | 296 | /* Will be done on the slow path. */ |
300 | return -EAGAIN; | 297 | return -EAGAIN; |
301 | } | 298 | } |
302 | if (stsch(schid, &schib) || !schib.pmcw.dnv) { | 299 | if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { |
303 | /* Unusable - ignore. */ | 300 | /* Unusable - ignore. */ |
304 | return 0; | 301 | return 0; |
305 | } | 302 | } |
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused) | |||
417 | need_reprobe); | 414 | need_reprobe); |
418 | } | 415 | } |
419 | 416 | ||
420 | DECLARE_WORK(css_reprobe_work, reprobe_all); | 417 | static DECLARE_WORK(css_reprobe_work, reprobe_all); |
421 | 418 | ||
422 | /* Schedule reprobing of all unregistered subchannels. */ | 419 | /* Schedule reprobing of all unregistered subchannels. */ |
423 | void css_schedule_reprobe(void) | 420 | void css_schedule_reprobe(void) |
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, | |||
578 | 575 | ||
579 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); | 576 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); |
580 | 577 | ||
581 | static inline int __init setup_css(int nr) | 578 | static int __init setup_css(int nr) |
582 | { | 579 | { |
583 | u32 tod_high; | 580 | u32 tod_high; |
584 | int ret; | 581 | int ret; |
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 3464c5b875c4..ca2bab932a8a 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *); | |||
143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
144 | extern int css_init_done; | 144 | extern int css_init_done; |
145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
146 | extern int css_process_crw(int, int); | ||
147 | extern void css_reiterate_subchannels(void); | ||
146 | 148 | ||
147 | #define __MAX_SUBCHANNEL 65535 | 149 | #define __MAX_SUBCHANNEL 65535 |
148 | #define __MAX_SSID 3 | 150 | #define __MAX_SSID 3 |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 803579053c2f..e322111fb369 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type; | |||
138 | 138 | ||
139 | static int io_subchannel_probe (struct subchannel *); | 139 | static int io_subchannel_probe (struct subchannel *); |
140 | static int io_subchannel_remove (struct subchannel *); | 140 | static int io_subchannel_remove (struct subchannel *); |
141 | void io_subchannel_irq (struct device *); | ||
142 | static int io_subchannel_notify(struct device *, int); | 141 | static int io_subchannel_notify(struct device *, int); |
143 | static void io_subchannel_verify(struct device *); | 142 | static void io_subchannel_verify(struct device *); |
144 | static void io_subchannel_ioterm(struct device *); | 143 | static void io_subchannel_ioterm(struct device *); |
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf) | |||
235 | ssize_t ret = 0; | 234 | ssize_t ret = 0; |
236 | int chp; | 235 | int chp; |
237 | 236 | ||
238 | if (ssd) | 237 | for (chp = 0; chp < 8; chp++) |
239 | for (chp = 0; chp < 8; chp++) | 238 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); |
240 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); | ||
241 | else | ||
242 | ret += sprintf (buf, "n/a"); | ||
243 | ret += sprintf (buf+ret, "\n"); | 239 | ret += sprintf (buf+ret, "\n"); |
244 | return min((ssize_t)PAGE_SIZE, ret); | 240 | return min((ssize_t)PAGE_SIZE, ret); |
245 | } | 241 | } |
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = { | |||
552 | .attrs = ccwdev_attrs, | 548 | .attrs = ccwdev_attrs, |
553 | }; | 549 | }; |
554 | 550 | ||
555 | static inline int | 551 | static int |
556 | device_add_files (struct device *dev) | 552 | device_add_files (struct device *dev) |
557 | { | 553 | { |
558 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); | 554 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); |
559 | } | 555 | } |
560 | 556 | ||
561 | static inline void | 557 | static void |
562 | device_remove_files(struct device *dev) | 558 | device_remove_files(struct device *dev) |
563 | { | 559 | { |
564 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); | 560 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 29db6341d632..b66338b76579 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work; | |||
74 | extern wait_queue_head_t ccw_device_init_wq; | 74 | extern wait_queue_head_t ccw_device_init_wq; |
75 | extern atomic_t ccw_device_init_count; | 75 | extern atomic_t ccw_device_init_count; |
76 | 76 | ||
77 | void io_subchannel_irq (struct device *pdev); | ||
77 | void io_subchannel_recog_done(struct ccw_device *cdev); | 78 | void io_subchannel_recog_done(struct ccw_device *cdev); |
78 | 79 | ||
79 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 80 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *); | |||
118 | /* qdio needs this. */ | 119 | /* qdio needs this. */ |
119 | void ccw_device_set_timeout(struct ccw_device *, int); | 120 | void ccw_device_set_timeout(struct ccw_device *, int); |
120 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); | 121 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); |
122 | extern struct bus_type ccw_bus_type; | ||
121 | 123 | ||
122 | /* Channel measurement facility related */ | 124 | /* Channel measurement facility related */ |
123 | void retry_set_schib(struct ccw_device *cdev); | 125 | void retry_set_schib(struct ccw_device *cdev); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index eed14572fc3b..51238e7555bb 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev) | |||
206 | * been varied online on the SE so we have to find out by magic (i. e. driving | 206 | * been varied online on the SE so we have to find out by magic (i. e. driving |
207 | * the channel subsystem to device selection and updating our path masks). | 207 | * the channel subsystem to device selection and updating our path masks). |
208 | */ | 208 | */ |
209 | static inline void | 209 | static void |
210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) | 210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) |
211 | { | 211 | { |
212 | int mask, i; | 212 | int mask, i; |
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
387 | put_device (&cdev->dev); | 387 | put_device (&cdev->dev); |
388 | } | 388 | } |
389 | 389 | ||
390 | static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) | 390 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) |
391 | { | 391 | { |
392 | char *c1; | 392 | char *c1; |
393 | char *c2; | 393 | char *c2; |
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
842 | call_handler_unsol: | 842 | call_handler_unsol: |
843 | if (cdev->handler) | 843 | if (cdev->handler) |
844 | cdev->handler (cdev, 0, irb); | 844 | cdev->handler (cdev, 0, irb); |
845 | if (cdev->private->flags.doverify) | ||
846 | ccw_device_online_verify(cdev, 0); | ||
845 | return; | 847 | return; |
846 | } | 848 | } |
847 | /* Accumulate status and find out if a basic sense is needed. */ | 849 | /* Accumulate status and find out if a basic sense is needed. */ |
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
892 | /* | 894 | /* |
893 | * Got an interrupt for a basic sense. | 895 | * Got an interrupt for a basic sense. |
894 | */ | 896 | */ |
895 | void | 897 | static void |
896 | ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | 898 | ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) |
897 | { | 899 | { |
898 | struct irb *irb; | 900 | struct irb *irb; |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index d269607336ec..d7b25b8f71d2 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) | |||
302 | wake_up(&cdev->private->wait_q); | 302 | wake_up(&cdev->private->wait_q); |
303 | } | 303 | } |
304 | 304 | ||
305 | static inline int | 305 | static int |
306 | __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) | 306 | __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) |
307 | { | 307 | { |
308 | int ret; | 308 | int ret; |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index bdcf930f7beb..6b1caea622ea 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * Check for any kind of channel or interface control check but don't | 25 | * Check for any kind of channel or interface control check but don't |
26 | * issue the message for the console device | 26 | * issue the message for the console device |
27 | */ | 27 | */ |
28 | static inline void | 28 | static void |
29 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 29 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
30 | { | 30 | { |
31 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | 31 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
72 | /* | 72 | /* |
73 | * Copy valid bits from the extended control word to device irb. | 73 | * Copy valid bits from the extended control word to device irb. |
74 | */ | 74 | */ |
75 | static inline void | 75 | static void |
76 | ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | 76 | ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) |
77 | { | 77 | { |
78 | /* | 78 | /* |
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | |||
94 | /* | 94 | /* |
95 | * Check if extended status word is valid. | 95 | * Check if extended status word is valid. |
96 | */ | 96 | */ |
97 | static inline int | 97 | static int |
98 | ccw_device_accumulate_esw_valid(struct irb *irb) | 98 | ccw_device_accumulate_esw_valid(struct irb *irb) |
99 | { | 99 | { |
100 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) | 100 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) |
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb) | |||
109 | /* | 109 | /* |
110 | * Copy valid bits from the extended status word to device irb. | 110 | * Copy valid bits from the extended status word to device irb. |
111 | */ | 111 | */ |
112 | static inline void | 112 | static void |
113 | ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | 113 | ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) |
114 | { | 114 | { |
115 | struct irb *cdev_irb; | 115 | struct irb *cdev_irb; |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 6fd1940842eb..d726cd5777de 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL"); | |||
66 | /******************** HERE WE GO ***********************************/ | 66 | /******************** HERE WE GO ***********************************/ |
67 | 67 | ||
68 | static const char version[] = "QDIO base support version 2"; | 68 | static const char version[] = "QDIO base support version 2"; |
69 | extern struct bus_type ccw_bus_type; | ||
70 | 69 | ||
71 | static int qdio_performance_stats = 0; | 70 | static int qdio_performance_stats = 0; |
72 | static int proc_perf_file_registration; | 71 | static int proc_perf_file_registration; |
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q) | |||
138 | } | 137 | } |
139 | 138 | ||
140 | /*check ccq */ | 139 | /*check ccq */ |
141 | static inline int | 140 | static int |
142 | qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | 141 | qdio_check_ccq(struct qdio_q *q, unsigned int ccq) |
143 | { | 142 | { |
144 | char dbf_text[15]; | 143 | char dbf_text[15]; |
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
153 | return -EIO; | 152 | return -EIO; |
154 | } | 153 | } |
155 | /* EQBS: extract buffer states */ | 154 | /* EQBS: extract buffer states */ |
156 | static inline int | 155 | static int |
157 | qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | 156 | qdio_do_eqbs(struct qdio_q *q, unsigned char *state, |
158 | unsigned int *start, unsigned int *cnt) | 157 | unsigned int *start, unsigned int *cnt) |
159 | { | 158 | { |
@@ -188,7 +187,7 @@ again: | |||
188 | } | 187 | } |
189 | 188 | ||
190 | /* SQBS: set buffer states */ | 189 | /* SQBS: set buffer states */ |
191 | static inline int | 190 | static int |
192 | qdio_do_sqbs(struct qdio_q *q, unsigned char state, | 191 | qdio_do_sqbs(struct qdio_q *q, unsigned char state, |
193 | unsigned int *start, unsigned int *cnt) | 192 | unsigned int *start, unsigned int *cnt) |
194 | { | 193 | { |
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit) | |||
315 | * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns | 314 | * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns |
316 | * an access exception | 315 | * an access exception |
317 | */ | 316 | */ |
318 | static inline int | 317 | static int |
319 | qdio_siga_output(struct qdio_q *q) | 318 | qdio_siga_output(struct qdio_q *q) |
320 | { | 319 | { |
321 | int cc; | 320 | int cc; |
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q) | |||
349 | return cc; | 348 | return cc; |
350 | } | 349 | } |
351 | 350 | ||
352 | static inline int | 351 | static int |
353 | qdio_siga_input(struct qdio_q *q) | 352 | qdio_siga_input(struct qdio_q *q) |
354 | { | 353 | { |
355 | int cc; | 354 | int cc; |
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void) | |||
421 | tasklet_hi_schedule(&tiqdio_tasklet); | 420 | tasklet_hi_schedule(&tiqdio_tasklet); |
422 | } | 421 | } |
423 | 422 | ||
424 | static inline void | 423 | static void |
425 | qdio_mark_tiq(struct qdio_q *q) | 424 | qdio_mark_tiq(struct qdio_q *q) |
426 | { | 425 | { |
427 | unsigned long flags; | 426 | unsigned long flags; |
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q) | |||
471 | tasklet_schedule(&q->tasklet); | 470 | tasklet_schedule(&q->tasklet); |
472 | } | 471 | } |
473 | 472 | ||
474 | static inline int | 473 | static int |
475 | qdio_stop_polling(struct qdio_q *q) | 474 | qdio_stop_polling(struct qdio_q *q) |
476 | { | 475 | { |
477 | #ifdef QDIO_USE_PROCESSING_STATE | 476 | #ifdef QDIO_USE_PROCESSING_STATE |
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q) | |||
525 | * sophisticated locking outside of unmark_q, so that we don't need to | 524 | * sophisticated locking outside of unmark_q, so that we don't need to |
526 | * disable the interrupts :-) | 525 | * disable the interrupts :-) |
527 | */ | 526 | */ |
528 | static inline void | 527 | static void |
529 | qdio_unmark_q(struct qdio_q *q) | 528 | qdio_unmark_q(struct qdio_q *q) |
530 | { | 529 | { |
531 | unsigned long flags; | 530 | unsigned long flags; |
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) | |||
691 | return q->first_to_check; | 690 | return q->first_to_check; |
692 | } | 691 | } |
693 | 692 | ||
694 | static inline int | 693 | static int |
695 | qdio_get_outbound_buffer_frontier(struct qdio_q *q) | 694 | qdio_get_outbound_buffer_frontier(struct qdio_q *q) |
696 | { | 695 | { |
697 | struct qdio_irq *irq; | 696 | struct qdio_irq *irq; |
@@ -774,7 +773,7 @@ out: | |||
774 | } | 773 | } |
775 | 774 | ||
776 | /* all buffers are processed */ | 775 | /* all buffers are processed */ |
777 | static inline int | 776 | static int |
778 | qdio_is_outbound_q_done(struct qdio_q *q) | 777 | qdio_is_outbound_q_done(struct qdio_q *q) |
779 | { | 778 | { |
780 | int no_used; | 779 | int no_used; |
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q) | |||
796 | return (no_used==0); | 795 | return (no_used==0); |
797 | } | 796 | } |
798 | 797 | ||
799 | static inline int | 798 | static int |
800 | qdio_has_outbound_q_moved(struct qdio_q *q) | 799 | qdio_has_outbound_q_moved(struct qdio_q *q) |
801 | { | 800 | { |
802 | int i; | 801 | int i; |
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q) | |||
816 | } | 815 | } |
817 | } | 816 | } |
818 | 817 | ||
819 | static inline void | 818 | static void |
820 | qdio_kick_outbound_q(struct qdio_q *q) | 819 | qdio_kick_outbound_q(struct qdio_q *q) |
821 | { | 820 | { |
822 | int result; | 821 | int result; |
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q) | |||
905 | } | 904 | } |
906 | } | 905 | } |
907 | 906 | ||
908 | static inline void | 907 | static void |
909 | qdio_kick_outbound_handler(struct qdio_q *q) | 908 | qdio_kick_outbound_handler(struct qdio_q *q) |
910 | { | 909 | { |
911 | int start, end, real_end, count; | 910 | int start, end, real_end, count; |
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q) | |||
942 | q->error_status_flags=0; | 941 | q->error_status_flags=0; |
943 | } | 942 | } |
944 | 943 | ||
945 | static inline void | 944 | static void |
946 | __qdio_outbound_processing(struct qdio_q *q) | 945 | __qdio_outbound_processing(struct qdio_q *q) |
947 | { | 946 | { |
948 | int siga_attempts; | 947 | int siga_attempts; |
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q) | |||
1002 | /************************* INBOUND ROUTINES *******************************/ | 1001 | /************************* INBOUND ROUTINES *******************************/ |
1003 | 1002 | ||
1004 | 1003 | ||
1005 | static inline int | 1004 | static int |
1006 | qdio_get_inbound_buffer_frontier(struct qdio_q *q) | 1005 | qdio_get_inbound_buffer_frontier(struct qdio_q *q) |
1007 | { | 1006 | { |
1008 | struct qdio_irq *irq; | 1007 | struct qdio_irq *irq; |
@@ -1133,7 +1132,7 @@ out: | |||
1133 | return q->first_to_check; | 1132 | return q->first_to_check; |
1134 | } | 1133 | } |
1135 | 1134 | ||
1136 | static inline int | 1135 | static int |
1137 | qdio_has_inbound_q_moved(struct qdio_q *q) | 1136 | qdio_has_inbound_q_moved(struct qdio_q *q) |
1138 | { | 1137 | { |
1139 | int i; | 1138 | int i; |
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q) | |||
1167 | } | 1166 | } |
1168 | 1167 | ||
1169 | /* means, no more buffers to be filled */ | 1168 | /* means, no more buffers to be filled */ |
1170 | static inline int | 1169 | static int |
1171 | tiqdio_is_inbound_q_done(struct qdio_q *q) | 1170 | tiqdio_is_inbound_q_done(struct qdio_q *q) |
1172 | { | 1171 | { |
1173 | int no_used; | 1172 | int no_used; |
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) | |||
1228 | return 0; | 1227 | return 0; |
1229 | } | 1228 | } |
1230 | 1229 | ||
1231 | static inline int | 1230 | static int |
1232 | qdio_is_inbound_q_done(struct qdio_q *q) | 1231 | qdio_is_inbound_q_done(struct qdio_q *q) |
1233 | { | 1232 | { |
1234 | int no_used; | 1233 | int no_used; |
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q) | |||
1296 | } | 1295 | } |
1297 | } | 1296 | } |
1298 | 1297 | ||
1299 | static inline void | 1298 | static void |
1300 | qdio_kick_inbound_handler(struct qdio_q *q) | 1299 | qdio_kick_inbound_handler(struct qdio_q *q) |
1301 | { | 1300 | { |
1302 | int count, start, end, real_end, i; | 1301 | int count, start, end, real_end, i; |
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q) | |||
1343 | } | 1342 | } |
1344 | } | 1343 | } |
1345 | 1344 | ||
1346 | static inline void | 1345 | static void |
1347 | __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) | 1346 | __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) |
1348 | { | 1347 | { |
1349 | struct qdio_irq *irq_ptr; | 1348 | struct qdio_irq *irq_ptr; |
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q) | |||
1442 | __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); | 1441 | __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); |
1443 | } | 1442 | } |
1444 | 1443 | ||
1445 | static inline void | 1444 | static void |
1446 | __qdio_inbound_processing(struct qdio_q *q) | 1445 | __qdio_inbound_processing(struct qdio_q *q) |
1447 | { | 1446 | { |
1448 | int q_laps=0; | 1447 | int q_laps=0; |
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q) | |||
1493 | /************************* MAIN ROUTINES *******************************/ | 1492 | /************************* MAIN ROUTINES *******************************/ |
1494 | 1493 | ||
1495 | #ifdef QDIO_USE_PROCESSING_STATE | 1494 | #ifdef QDIO_USE_PROCESSING_STATE |
1496 | static inline int | 1495 | static int |
1497 | tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) | 1496 | tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) |
1498 | { | 1497 | { |
1499 | if (!q) { | 1498 | if (!q) { |
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) | |||
1545 | } | 1544 | } |
1546 | #endif /* QDIO_USE_PROCESSING_STATE */ | 1545 | #endif /* QDIO_USE_PROCESSING_STATE */ |
1547 | 1546 | ||
1548 | static inline void | 1547 | static void |
1549 | tiqdio_inbound_checks(void) | 1548 | tiqdio_inbound_checks(void) |
1550 | { | 1549 | { |
1551 | struct qdio_q *q; | 1550 | struct qdio_q *q; |
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) | |||
1949 | mb(); | 1948 | mb(); |
1950 | } | 1949 | } |
1951 | 1950 | ||
1952 | static inline void | 1951 | static void |
1953 | qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | 1952 | qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) |
1954 | { | 1953 | { |
1955 | char dbf_text[15]; | 1954 | char dbf_text[15]; |
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | |||
1966 | 1965 | ||
1967 | } | 1966 | } |
1968 | 1967 | ||
1969 | static inline void | 1968 | static void |
1970 | qdio_handle_pci(struct qdio_irq *irq_ptr) | 1969 | qdio_handle_pci(struct qdio_irq *irq_ptr) |
1971 | { | 1970 | { |
1972 | int i; | 1971 | int i; |
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr) | |||
2002 | 2001 | ||
2003 | static void qdio_establish_handle_irq(struct ccw_device*, int, int); | 2002 | static void qdio_establish_handle_irq(struct ccw_device*, int, int); |
2004 | 2003 | ||
2005 | static inline void | 2004 | static void |
2006 | qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, | 2005 | qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, |
2007 | int cstat, int dstat) | 2006 | int cstat, int dstat) |
2008 | { | 2007 | { |
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, | |||
2229 | return cc; | 2228 | return cc; |
2230 | } | 2229 | } |
2231 | 2230 | ||
2232 | static inline void | 2231 | static void |
2233 | qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, | 2232 | qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, |
2234 | unsigned long token) | 2233 | unsigned long token) |
2235 | { | 2234 | { |
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev) | |||
2740 | return 0; | 2739 | return 0; |
2741 | } | 2740 | } |
2742 | 2741 | ||
2743 | static inline void | 2742 | static void |
2744 | qdio_allocate_do_dbf(struct qdio_initialize *init_data) | 2743 | qdio_allocate_do_dbf(struct qdio_initialize *init_data) |
2745 | { | 2744 | { |
2746 | char dbf_text[20]; /* if a printf printed out more than 8 chars */ | 2745 | char dbf_text[20]; /* if a printf printed out more than 8 chars */ |
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data) | |||
2773 | QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); | 2772 | QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); |
2774 | } | 2773 | } |
2775 | 2774 | ||
2776 | static inline void | 2775 | static void |
2777 | qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) | 2776 | qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) |
2778 | { | 2777 | { |
2779 | irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; | 2778 | irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; |
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) | |||
2792 | irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; | 2791 | irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; |
2793 | } | 2792 | } |
2794 | 2793 | ||
2795 | static inline void | 2794 | static void |
2796 | qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, | 2795 | qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, |
2797 | int j, int iqfmt) | 2796 | int j, int iqfmt) |
2798 | { | 2797 | { |
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, | |||
2813 | } | 2812 | } |
2814 | 2813 | ||
2815 | 2814 | ||
2816 | static inline void | 2815 | static void |
2817 | qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) | 2816 | qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) |
2818 | { | 2817 | { |
2819 | int i; | 2818 | int i; |
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) | |||
2839 | } | 2838 | } |
2840 | } | 2839 | } |
2841 | 2840 | ||
2842 | static inline void | 2841 | static void |
2843 | qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) | 2842 | qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) |
2844 | { | 2843 | { |
2845 | int i; | 2844 | int i; |
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) | |||
2865 | } | 2864 | } |
2866 | } | 2865 | } |
2867 | 2866 | ||
2868 | static inline int | 2867 | static int |
2869 | qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, | 2868 | qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, |
2870 | int dstat) | 2869 | int dstat) |
2871 | { | 2870 | { |
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data) | |||
3014 | return 0; | 3013 | return 0; |
3015 | } | 3014 | } |
3016 | 3015 | ||
3017 | int qdio_fill_irq(struct qdio_initialize *init_data) | 3016 | static int qdio_fill_irq(struct qdio_initialize *init_data) |
3018 | { | 3017 | { |
3019 | int i; | 3018 | int i; |
3020 | char dbf_text[15]; | 3019 | char dbf_text[15]; |
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags) | |||
3367 | } | 3366 | } |
3368 | 3367 | ||
3369 | /* buffers filled forwards again to make Rick happy */ | 3368 | /* buffers filled forwards again to make Rick happy */ |
3370 | static inline void | 3369 | static void |
3371 | qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, | 3370 | qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, |
3372 | unsigned int count, struct qdio_buffer *buffers) | 3371 | unsigned int count, struct qdio_buffer *buffers) |
3373 | { | 3372 | { |
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, | |||
3386 | } | 3385 | } |
3387 | } | 3386 | } |
3388 | 3387 | ||
3389 | static inline void | 3388 | static void |
3390 | qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, | 3389 | qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, |
3391 | unsigned int count, struct qdio_buffer *buffers) | 3390 | unsigned int count, struct qdio_buffer *buffers) |
3392 | { | 3391 | { |
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, | |||
3407 | } | 3406 | } |
3408 | } | 3407 | } |
3409 | 3408 | ||
3410 | static inline void | 3409 | static void |
3411 | do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, | 3410 | do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, |
3412 | unsigned int qidx, unsigned int count, | 3411 | unsigned int qidx, unsigned int count, |
3413 | struct qdio_buffer *buffers) | 3412 | struct qdio_buffer *buffers) |
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, | |||
3443 | qdio_mark_q(q); | 3442 | qdio_mark_q(q); |
3444 | } | 3443 | } |
3445 | 3444 | ||
3446 | static inline void | 3445 | static void |
3447 | do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, | 3446 | do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, |
3448 | unsigned int qidx, unsigned int count, | 3447 | unsigned int qidx, unsigned int count, |
3449 | struct qdio_buffer *buffers) | 3448 | struct qdio_buffer *buffers) |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 81b5899f4010..c7d1355237b6 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev) | |||
465 | * Flush all requests from the request/pending queue of an AP device. | 465 | * Flush all requests from the request/pending queue of an AP device. |
466 | * @ap_dev: pointer to the AP device. | 466 | * @ap_dev: pointer to the AP device. |
467 | */ | 467 | */ |
468 | static inline void __ap_flush_queue(struct ap_device *ap_dev) | 468 | static void __ap_flush_queue(struct ap_device *ap_dev) |
469 | { | 469 | { |
470 | struct ap_message *ap_msg, *next; | 470 | struct ap_message *ap_msg, *next; |
471 | 471 | ||
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { | |||
587 | /** | 587 | /** |
588 | * Pick one of the 16 ap domains. | 588 | * Pick one of the 16 ap domains. |
589 | */ | 589 | */ |
590 | static inline int ap_select_domain(void) | 590 | static int ap_select_domain(void) |
591 | { | 591 | { |
592 | int queue_depth, device_type, count, max_count, best_domain; | 592 | int queue_depth, device_type, count, max_count, best_domain; |
593 | int rc, i, j; | 593 | int rc, i, j; |
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void) | |||
825 | * required, bit 2^1 is set if the poll timer needs to get armed | 825 | * required, bit 2^1 is set if the poll timer needs to get armed |
826 | * Returns 0 if the device is still present, -ENODEV if not. | 826 | * Returns 0 if the device is still present, -ENODEV if not. |
827 | */ | 827 | */ |
828 | static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) | 828 | static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) |
829 | { | 829 | { |
830 | struct ap_queue_status status; | 830 | struct ap_queue_status status; |
831 | struct ap_message *ap_msg; | 831 | struct ap_message *ap_msg; |
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) | |||
872 | * required, bit 2^1 is set if the poll timer needs to get armed | 872 | * required, bit 2^1 is set if the poll timer needs to get armed |
873 | * Returns 0 if the device is still present, -ENODEV if not. | 873 | * Returns 0 if the device is still present, -ENODEV if not. |
874 | */ | 874 | */ |
875 | static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) | 875 | static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) |
876 | { | 876 | { |
877 | struct ap_queue_status status; | 877 | struct ap_queue_status status; |
878 | struct ap_message *ap_msg; | 878 | struct ap_message *ap_msg; |
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 1edc10a7a6f2..b9e59bc9435a 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd, | |||
791 | return rc; | 791 | return rc; |
792 | } | 792 | } |
793 | 793 | ||
794 | long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, | 794 | static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, |
795 | unsigned long arg) | 795 | unsigned long arg) |
796 | { | 796 | { |
797 | if (cmd == ICARSAMODEXPO) | 797 | if (cmd == ICARSAMODEXPO) |
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = { | |||
833 | */ | 833 | */ |
834 | static struct proc_dir_entry *zcrypt_entry; | 834 | static struct proc_dir_entry *zcrypt_entry; |
835 | 835 | ||
836 | static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, | 836 | static int sprintcl(unsigned char *outaddr, unsigned char *addr, |
837 | unsigned int len) | 837 | unsigned int len) |
838 | { | 838 | { |
839 | int hl, i; | 839 | int hl, i; |
840 | 840 | ||
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, | |||
845 | return hl; | 845 | return hl; |
846 | } | 846 | } |
847 | 847 | ||
848 | static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, | 848 | static int sprintrw(unsigned char *outaddr, unsigned char *addr, |
849 | unsigned int len) | 849 | unsigned int len) |
850 | { | 850 | { |
851 | int hl, inl, c, cx; | 851 | int hl, inl, c, cx; |
852 | 852 | ||
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, | |||
865 | return hl; | 865 | return hl; |
866 | } | 866 | } |
867 | 867 | ||
868 | static inline int sprinthx(unsigned char *title, unsigned char *outaddr, | 868 | static int sprinthx(unsigned char *title, unsigned char *outaddr, |
869 | unsigned char *addr, unsigned int len) | 869 | unsigned char *addr, unsigned int len) |
870 | { | 870 | { |
871 | int hl, inl, r, rx; | 871 | int hl, inl, r, rx; |
872 | 872 | ||
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr, | |||
885 | return hl; | 885 | return hl; |
886 | } | 886 | } |
887 | 887 | ||
888 | static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, | 888 | static int sprinthx4(unsigned char *title, unsigned char *outaddr, |
889 | unsigned int *array, unsigned int len) | 889 | unsigned int *array, unsigned int len) |
890 | { | 890 | { |
891 | int hl, r; | 891 | int hl, r; |
892 | 892 | ||
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, | |||
943 | zcrypt_qdepth_mask(workarea); | 943 | zcrypt_qdepth_mask(workarea); |
944 | len += sprinthx("Waiting work element counts", | 944 | len += sprinthx("Waiting work element counts", |
945 | resp_buff+len, workarea, AP_DEVICES); | 945 | resp_buff+len, workarea, AP_DEVICES); |
946 | zcrypt_perdev_reqcnt((unsigned int *) workarea); | 946 | zcrypt_perdev_reqcnt((int *) workarea); |
947 | len += sprinthx4("Per-device successfully completed request counts", | 947 | len += sprinthx4("Per-device successfully completed request counts", |
948 | resp_buff+len,(unsigned int *) workarea, AP_DEVICES); | 948 | resp_buff+len,(unsigned int *) workarea, AP_DEVICES); |
949 | *eof = 1; | 949 | *eof = 1; |
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 32e37014345c..818ffe05ac00 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev, | |||
191 | * | 191 | * |
192 | * Returns 0 on success or -EFAULT. | 192 | * Returns 0 on success or -EFAULT. |
193 | */ | 193 | */ |
194 | static inline int convert_type84(struct zcrypt_device *zdev, | 194 | static int convert_type84(struct zcrypt_device *zdev, |
195 | struct ap_message *reply, | 195 | struct ap_message *reply, |
196 | char __user *outputdata, | 196 | char __user *outputdata, |
197 | unsigned int outputdatalength) | 197 | unsigned int outputdatalength) |
198 | { | 198 | { |
199 | struct type84_hdr *t84h = reply->message; | 199 | struct type84_hdr *t84h = reply->message; |
200 | char *data; | 200 | char *data; |
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index b7153c1e15cd..252443b6bd1b 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -709,7 +709,8 @@ out_free: | |||
709 | * PCIXCC/CEX2C device to the request distributor | 709 | * PCIXCC/CEX2C device to the request distributor |
710 | * @xcRB: pointer to the send_cprb request buffer | 710 | * @xcRB: pointer to the send_cprb request buffer |
711 | */ | 711 | */ |
712 | long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) | 712 | static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, |
713 | struct ica_xcRB *xcRB) | ||
713 | { | 714 | { |
714 | struct ap_message ap_msg; | 715 | struct ap_message ap_msg; |
715 | struct response_type resp_type = { | 716 | struct response_type resp_type = { |
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 52625153a4f0..f98fa465df0a 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -22,13 +22,6 @@ config CTC | |||
22 | available. This option is also available as a module which will be | 22 | available. This option is also available as a module which will be |
23 | called ctc.ko. If you do not know what it is, it's safe to say "Y". | 23 | called ctc.ko. If you do not know what it is, it's safe to say "Y". |
24 | 24 | ||
25 | config IUCV | ||
26 | tristate "IUCV support (VM only)" | ||
27 | help | ||
28 | Select this option if you want to use inter-user communication | ||
29 | under VM or VIF. If unsure, say "Y" to enable a fast communication | ||
30 | link between VM guests. | ||
31 | |||
32 | config NETIUCV | 25 | config NETIUCV |
33 | tristate "IUCV network device support (VM only)" | 26 | tristate "IUCV network device support (VM only)" |
34 | depends on IUCV && NETDEVICES | 27 | depends on IUCV && NETDEVICES |
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 4777e36a922f..bbe3ab2e93d9 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | ctc-objs := ctcmain.o ctcdbug.o | 5 | ctc-objs := ctcmain.o ctcdbug.o |
6 | 6 | ||
7 | obj-$(CONFIG_IUCV) += iucv.o | ||
8 | obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o | 7 | obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o |
9 | obj-$(CONFIG_SMSGIUCV) += smsgiucv.o | 8 | obj-$(CONFIG_SMSGIUCV) += smsgiucv.o |
10 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o | 9 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 95f4e105cb96..7809a79feec7 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL"); | |||
121 | #define DEBUG | 121 | #define DEBUG |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | char debug_buffer[255]; | 124 | static char debug_buffer[255]; |
125 | /** | 125 | /** |
126 | * Debug Facility Stuff | 126 | * Debug Facility Stuff |
127 | */ | 127 | */ |
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch ); | |||
223 | /* Functions */ | 223 | /* Functions */ |
224 | static int add_claw_reads(struct net_device *dev, | 224 | static int add_claw_reads(struct net_device *dev, |
225 | struct ccwbk* p_first, struct ccwbk* p_last); | 225 | struct ccwbk* p_first, struct ccwbk* p_last); |
226 | static void inline ccw_check_return_code (struct ccw_device *cdev, | 226 | static void ccw_check_return_code (struct ccw_device *cdev, int return_code); |
227 | int return_code); | 227 | static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); |
228 | static void inline ccw_check_unit_check (struct chbk * p_ch, | ||
229 | unsigned char sense ); | ||
230 | static int find_link(struct net_device *dev, char *host_name, char *ws_name ); | 228 | static int find_link(struct net_device *dev, char *host_name, char *ws_name ); |
231 | static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); | 229 | static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); |
232 | static int init_ccw_bk(struct net_device *dev); | 230 | static int init_ccw_bk(struct net_device *dev); |
233 | static void probe_error( struct ccwgroup_device *cgdev); | 231 | static void probe_error( struct ccwgroup_device *cgdev); |
234 | static struct net_device_stats *claw_stats(struct net_device *dev); | 232 | static struct net_device_stats *claw_stats(struct net_device *dev); |
235 | static int inline pages_to_order_of_mag(int num_of_pages); | 233 | static int pages_to_order_of_mag(int num_of_pages); |
236 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); | 234 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); |
237 | #ifdef DEBUG | 235 | #ifdef DEBUG |
238 | static void dumpit (char *buf, int len); | 236 | static void dumpit (char *buf, int len); |
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch ) | |||
1310 | * of magnitude get_free_pages() has an upper order of 9 * | 1308 | * of magnitude get_free_pages() has an upper order of 9 * |
1311 | *--------------------------------------------------------------------*/ | 1309 | *--------------------------------------------------------------------*/ |
1312 | 1310 | ||
1313 | static int inline | 1311 | static int |
1314 | pages_to_order_of_mag(int num_of_pages) | 1312 | pages_to_order_of_mag(int num_of_pages) |
1315 | { | 1313 | { |
1316 | int order_of_mag=1; /* assume 2 pages */ | 1314 | int order_of_mag=1; /* assume 2 pages */ |
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1482 | * * | 1480 | * * |
1483 | *-------------------------------------------------------------------*/ | 1481 | *-------------------------------------------------------------------*/ |
1484 | 1482 | ||
1485 | static void inline | 1483 | static void |
1486 | ccw_check_return_code(struct ccw_device *cdev, int return_code) | 1484 | ccw_check_return_code(struct ccw_device *cdev, int return_code) |
1487 | { | 1485 | { |
1488 | #ifdef FUNCTRACE | 1486 | #ifdef FUNCTRACE |
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) | |||
1529 | * ccw_check_unit_check * | 1527 | * ccw_check_unit_check * |
1530 | *--------------------------------------------------------------------*/ | 1528 | *--------------------------------------------------------------------*/ |
1531 | 1529 | ||
1532 | static void inline | 1530 | static void |
1533 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) | 1531 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) |
1534 | { | 1532 | { |
1535 | struct net_device *dev = p_ch->ndev; | 1533 | struct net_device *dev = p_ch->ndev; |
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 03cc263fe0da..5a84fbbc6611 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c | |||
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset) | |||
369 | * @param ch The channel where this skb has been received. | 369 | * @param ch The channel where this skb has been received. |
370 | * @param pskb The received skb. | 370 | * @param pskb The received skb. |
371 | */ | 371 | */ |
372 | static __inline__ void | 372 | static void |
373 | ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | 373 | ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) |
374 | { | 374 | { |
375 | struct net_device *dev = ch->netdev; | 375 | struct net_device *dev = ch->netdev; |
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
512 | * @param ch The channel, the error belongs to. | 512 | * @param ch The channel, the error belongs to. |
513 | * @param return_code The error code to inspect. | 513 | * @param return_code The error code to inspect. |
514 | */ | 514 | */ |
515 | static void inline | 515 | static void |
516 | ccw_check_return_code(struct channel *ch, int return_code, char *msg) | 516 | ccw_check_return_code(struct channel *ch, int return_code, char *msg) |
517 | { | 517 | { |
518 | DBF_TEXT(trace, 5, __FUNCTION__); | 518 | DBF_TEXT(trace, 5, __FUNCTION__); |
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg) | |||
547 | * @param ch The channel, the sense code belongs to. | 547 | * @param ch The channel, the sense code belongs to. |
548 | * @param sense The sense code to inspect. | 548 | * @param sense The sense code to inspect. |
549 | */ | 549 | */ |
550 | static void inline | 550 | static void |
551 | ccw_unit_check(struct channel *ch, unsigned char sense) | 551 | ccw_unit_check(struct channel *ch, unsigned char sense) |
552 | { | 552 | { |
553 | DBF_TEXT(trace, 5, __FUNCTION__); | 553 | DBF_TEXT(trace, 5, __FUNCTION__); |
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q) | |||
603 | } | 603 | } |
604 | } | 604 | } |
605 | 605 | ||
606 | static __inline__ int | 606 | static int |
607 | ctc_checkalloc_buffer(struct channel *ch, int warn) | 607 | ctc_checkalloc_buffer(struct channel *ch, int warn) |
608 | { | 608 | { |
609 | DBF_TEXT(trace, 5, __FUNCTION__); | 609 | DBF_TEXT(trace, 5, __FUNCTION__); |
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index e965f03a7291..76728ae4b843 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c | |||
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = { | |||
57 | 57 | ||
58 | static struct ccw_driver cu3088_driver; | 58 | static struct ccw_driver cu3088_driver; |
59 | 59 | ||
60 | struct device *cu3088_root_dev; | 60 | static struct device *cu3088_root_dev; |
61 | 61 | ||
62 | static ssize_t | 62 | static ssize_t |
63 | group_write(struct device_driver *drv, const char *buf, size_t count) | 63 | group_write(struct device_driver *drv, const char *buf, size_t count) |
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c deleted file mode 100644 index 229aeb5fc399..000000000000 --- a/drivers/s390/net/iucv.c +++ /dev/null | |||
@@ -1,2540 +0,0 @@ | |||
1 | /* | ||
2 | * IUCV network driver | ||
3 | * | ||
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): | ||
6 | * Original source: | ||
7 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 | ||
8 | * Xenia Tkatschow (xenia@us.ibm.com) | ||
9 | * 2Gb awareness and general cleanup: | ||
10 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
11 | * | ||
12 | * Documentation used: | ||
13 | * The original source | ||
14 | * CP Programming Service, IBM document # SC24-5760 | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License as published by | ||
18 | * the Free Software Foundation; either version 2, or (at your option) | ||
19 | * any later version. | ||
20 | * | ||
21 | * This program is distributed in the hope that it will be useful, | ||
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
24 | * GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with this program; if not, write to the Free Software | ||
28 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | /* #define DEBUG */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | |||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | #include <linux/list.h> | ||
43 | #include <linux/errno.h> | ||
44 | #include <linux/err.h> | ||
45 | #include <linux/device.h> | ||
46 | #include <asm/atomic.h> | ||
47 | #include "iucv.h" | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/s390_ext.h> | ||
50 | #include <asm/ebcdic.h> | ||
51 | #include <asm/smp.h> | ||
52 | #include <asm/s390_rdev.h> | ||
53 | |||
54 | /* FLAGS: | ||
55 | * All flags are defined in the field IPFLAGS1 of each function | ||
56 | * and can be found in CP Programming Services. | ||
57 | * IPSRCCLS - Indicates you have specified a source class | ||
58 | * IPFGMCL - Indicates you have specified a target class | ||
59 | * IPFGPID - Indicates you have specified a pathid | ||
60 | * IPFGMID - Indicates you have specified a message ID | ||
61 | * IPANSLST - Indicates that you are using an address list for | ||
62 | * reply data | ||
63 | * IPBUFLST - Indicates that you are using an address list for | ||
64 | * message data | ||
65 | */ | ||
66 | |||
67 | #define IPSRCCLS 0x01 | ||
68 | #define IPFGMCL 0x01 | ||
69 | #define IPFGPID 0x02 | ||
70 | #define IPFGMID 0x04 | ||
71 | #define IPANSLST 0x08 | ||
72 | #define IPBUFLST 0x40 | ||
73 | |||
74 | static int | ||
75 | iucv_bus_match (struct device *dev, struct device_driver *drv) | ||
76 | { | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | struct bus_type iucv_bus = { | ||
81 | .name = "iucv", | ||
82 | .match = iucv_bus_match, | ||
83 | }; | ||
84 | |||
85 | struct device *iucv_root; | ||
86 | |||
87 | /* General IUCV interrupt structure */ | ||
88 | typedef struct { | ||
89 | __u16 ippathid; | ||
90 | __u8 res1; | ||
91 | __u8 iptype; | ||
92 | __u32 res2; | ||
93 | __u8 ipvmid[8]; | ||
94 | __u8 res3[24]; | ||
95 | } iucv_GeneralInterrupt; | ||
96 | |||
97 | static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL; | ||
98 | |||
99 | /* Spin Lock declaration */ | ||
100 | |||
101 | static DEFINE_SPINLOCK(iucv_lock); | ||
102 | |||
103 | static int messagesDisabled = 0; | ||
104 | |||
105 | /***************INTERRUPT HANDLING ***************/ | ||
106 | |||
107 | typedef struct { | ||
108 | struct list_head queue; | ||
109 | iucv_GeneralInterrupt data; | ||
110 | } iucv_irqdata; | ||
111 | |||
112 | static struct list_head iucv_irq_queue; | ||
113 | static DEFINE_SPINLOCK(iucv_irq_queue_lock); | ||
114 | |||
115 | /* | ||
116 | *Internal function prototypes | ||
117 | */ | ||
118 | static void iucv_tasklet_handler(unsigned long); | ||
119 | static void iucv_irq_handler(__u16); | ||
120 | |||
121 | static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0); | ||
122 | |||
123 | /************ FUNCTION ID'S ****************************/ | ||
124 | |||
125 | #define ACCEPT 10 | ||
126 | #define CONNECT 11 | ||
127 | #define DECLARE_BUFFER 12 | ||
128 | #define PURGE 9 | ||
129 | #define QUERY 0 | ||
130 | #define QUIESCE 13 | ||
131 | #define RECEIVE 5 | ||
132 | #define REJECT 8 | ||
133 | #define REPLY 6 | ||
134 | #define RESUME 14 | ||
135 | #define RETRIEVE_BUFFER 2 | ||
136 | #define SEND 4 | ||
137 | #define SETMASK 16 | ||
138 | #define SEVER 15 | ||
139 | |||
140 | /** | ||
141 | * Structure: handler | ||
142 | * members: list - list management. | ||
143 | * structure: id | ||
144 | * userid - 8 char array of machine identification | ||
145 | * user_data - 16 char array for user identification | ||
146 | * mask - 24 char array used to compare the 2 previous | ||
147 | * interrupt_table - vector of interrupt functions. | ||
148 | * pgm_data - ulong, application data that is passed | ||
149 | * to the interrupt handlers | ||
150 | */ | ||
151 | typedef struct handler_t { | ||
152 | struct list_head list; | ||
153 | struct { | ||
154 | __u8 userid[8]; | ||
155 | __u8 user_data[16]; | ||
156 | __u8 mask[24]; | ||
157 | } id; | ||
158 | iucv_interrupt_ops_t *interrupt_table; | ||
159 | void *pgm_data; | ||
160 | } handler; | ||
161 | |||
162 | /** | ||
163 | * iucv_handler_table: List of registered handlers. | ||
164 | */ | ||
165 | static struct list_head iucv_handler_table; | ||
166 | |||
167 | /** | ||
168 | * iucv_pathid_table: an array of *handler pointing into | ||
169 | * iucv_handler_table for fast indexing by pathid; | ||
170 | */ | ||
171 | static handler **iucv_pathid_table; | ||
172 | |||
173 | static unsigned long max_connections; | ||
174 | |||
175 | /** | ||
176 | * iucv_cpuid: contains the logical cpu number of the cpu which | ||
177 | * has declared the iucv buffer by issuing DECLARE_BUFFER. | ||
178 | * If no cpu has done the initialization iucv_cpuid contains -1. | ||
179 | */ | ||
180 | static int iucv_cpuid = -1; | ||
181 | /** | ||
182 | * register_flag: is 0 when external interrupt has not been registered | ||
183 | */ | ||
184 | static int register_flag; | ||
185 | |||
186 | /****************FIVE 40-BYTE PARAMETER STRUCTURES******************/ | ||
187 | /* Data struct 1: iparml_control | ||
188 | * Used for iucv_accept | ||
189 | * iucv_connect | ||
190 | * iucv_quiesce | ||
191 | * iucv_resume | ||
192 | * iucv_sever | ||
193 | * iucv_retrieve_buffer | ||
194 | * Data struct 2: iparml_dpl (data in parameter list) | ||
195 | * Used for iucv_send_prmmsg | ||
196 | * iucv_send2way_prmmsg | ||
197 | * iucv_send2way_prmmsg_array | ||
198 | * iucv_reply_prmmsg | ||
199 | * Data struct 3: iparml_db (data in a buffer) | ||
200 | * Used for iucv_receive | ||
201 | * iucv_receive_array | ||
202 | * iucv_reject | ||
203 | * iucv_reply | ||
204 | * iucv_reply_array | ||
205 | * iucv_send | ||
206 | * iucv_send_array | ||
207 | * iucv_send2way | ||
208 | * iucv_send2way_array | ||
209 | * iucv_declare_buffer | ||
210 | * Data struct 4: iparml_purge | ||
211 | * Used for iucv_purge | ||
212 | * iucv_query | ||
213 | * Data struct 5: iparml_set_mask | ||
214 | * Used for iucv_set_mask | ||
215 | */ | ||
216 | |||
217 | typedef struct { | ||
218 | __u16 ippathid; | ||
219 | __u8 ipflags1; | ||
220 | __u8 iprcode; | ||
221 | __u16 ipmsglim; | ||
222 | __u16 res1; | ||
223 | __u8 ipvmid[8]; | ||
224 | __u8 ipuser[16]; | ||
225 | __u8 iptarget[8]; | ||
226 | } iparml_control; | ||
227 | |||
228 | typedef struct { | ||
229 | __u16 ippathid; | ||
230 | __u8 ipflags1; | ||
231 | __u8 iprcode; | ||
232 | __u32 ipmsgid; | ||
233 | __u32 iptrgcls; | ||
234 | __u8 iprmmsg[8]; | ||
235 | __u32 ipsrccls; | ||
236 | __u32 ipmsgtag; | ||
237 | __u32 ipbfadr2; | ||
238 | __u32 ipbfln2f; | ||
239 | __u32 res; | ||
240 | } iparml_dpl; | ||
241 | |||
242 | typedef struct { | ||
243 | __u16 ippathid; | ||
244 | __u8 ipflags1; | ||
245 | __u8 iprcode; | ||
246 | __u32 ipmsgid; | ||
247 | __u32 iptrgcls; | ||
248 | __u32 ipbfadr1; | ||
249 | __u32 ipbfln1f; | ||
250 | __u32 ipsrccls; | ||
251 | __u32 ipmsgtag; | ||
252 | __u32 ipbfadr2; | ||
253 | __u32 ipbfln2f; | ||
254 | __u32 res; | ||
255 | } iparml_db; | ||
256 | |||
257 | typedef struct { | ||
258 | __u16 ippathid; | ||
259 | __u8 ipflags1; | ||
260 | __u8 iprcode; | ||
261 | __u32 ipmsgid; | ||
262 | __u8 ipaudit[3]; | ||
263 | __u8 res1[5]; | ||
264 | __u32 res2; | ||
265 | __u32 ipsrccls; | ||
266 | __u32 ipmsgtag; | ||
267 | __u32 res3[3]; | ||
268 | } iparml_purge; | ||
269 | |||
270 | typedef struct { | ||
271 | __u8 ipmask; | ||
272 | __u8 res1[2]; | ||
273 | __u8 iprcode; | ||
274 | __u32 res2[9]; | ||
275 | } iparml_set_mask; | ||
276 | |||
/*
 * One entry of the pre-allocated pool of parameter lists handed to CP.
 * The union overlays all iparml_* variants; in_use marks ownership
 * (see grab_param()/release_param()). The 8-byte alignment is
 * presumably required by the IUCV instruction interface - confirm
 * against the z/VM IUCV documentation before changing.
 */
typedef struct {
	union {
		iparml_control p_ctrl;
		iparml_dpl p_dpl;
		iparml_db p_db;
		iparml_purge p_purge;
		iparml_set_mask p_set_mask;
	} param;
	atomic_t in_use;	/* 0 = free, 1 = handed out by grab_param() */
	__u32 res;		/* pad/reserved */
} __attribute__ ((aligned(8))) iucv_param;
/* number of pool entries that fit into one page */
#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))

/* the parameter list pool itself; allocated in iucv_init() */
static iucv_param * iucv_param_pool;

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");
295 | |||
296 | /* | ||
297 | * Debugging stuff | ||
298 | *******************************************************************************/ | ||
299 | |||
300 | |||
301 | #ifdef DEBUG | ||
302 | static int debuglevel = 0; | ||
303 | |||
304 | module_param(debuglevel, int, 0); | ||
305 | MODULE_PARM_DESC(debuglevel, | ||
306 | "Specifies the debug level (0=off ... 3=all)"); | ||
307 | |||
/**
 * iucv_dumpit - hex-dump a buffer to the kernel log (debug aid).
 * @title: heading printed before the dump
 * @buf: start of the area to dump
 * @len: number of bytes to dump
 *
 * Bytes are printed in groups of four, sixteen per line. Does nothing
 * unless debuglevel >= 3. Only compiled in when DEBUG is defined.
 */
static void
iucv_dumpit(char *title, void *buf, int len)
{
	int i;
	__u8 *p = (__u8 *)buf;

	if (debuglevel < 3)
		return;

	printk(KERN_DEBUG "%s\n", title);
	printk(" ");
	for (i = 0; i < len; i++) {
		if (!(i % 16) && i != 0)
			printk ("\n ");	/* start a new line of 16 bytes */
		else if (!(i % 4) && i != 0)
			printk(" ");	/* gap between groups of 4 bytes */
		printk("%02X", *p++);
	}
	if (len % 16)
		printk ("\n");
	return;
}
330 | #define iucv_debug(lvl, fmt, args...) \ | ||
331 | do { \ | ||
332 | if (debuglevel >= lvl) \ | ||
333 | printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \ | ||
334 | } while (0) | ||
335 | |||
336 | #else | ||
337 | |||
338 | #define iucv_debug(lvl, fmt, args...) do { } while (0) | ||
339 | #define iucv_dumpit(title, buf, len) do { } while (0) | ||
340 | |||
341 | #endif | ||
342 | |||
343 | /* | ||
344 | * Internal functions | ||
345 | *******************************************************************************/ | ||
346 | |||
347 | /** | ||
348 | * print start banner | ||
349 | */ | ||
350 | static void | ||
351 | iucv_banner(void) | ||
352 | { | ||
353 | printk(KERN_INFO "IUCV lowlevel driver initialized\n"); | ||
354 | } | ||
355 | |||
/**
 * iucv_init - Initialization
 *
 * Allocates and initializes various data structures: the iucv bus,
 * the iucv root device, the external interrupt buffer handed to CP
 * and the pool of parameter lists. A second call is a no-op once the
 * interrupt buffer exists. On any failure everything set up so far
 * is torn down again.
 *
 * Returns: 0 on success, or a negative errno.
 */
static int
iucv_init(void)
{
	int ret;

	/* Already initialized? */
	if (iucv_external_int_buffer)
		return 0;

	/* IUCV is a z/VM (CP) service; it cannot work elsewhere. */
	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
		return -EPROTONOSUPPORT;
	}

	ret = bus_register(&iucv_bus);
	if (ret) {
		printk(KERN_ERR "IUCV: failed to register bus.\n");
		return ret;
	}

	iucv_root = s390_root_dev_register("iucv");
	if (IS_ERR(iucv_root)) {
		printk(KERN_ERR "IUCV: failed to register iucv root.\n");
		bus_unregister(&iucv_bus);
		return PTR_ERR(iucv_root);
	}

	/* Note: GFP_DMA is used to get memory below 2G */
	iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt),
					   GFP_KERNEL|GFP_DMA);
	if (!iucv_external_int_buffer) {
		printk(KERN_WARNING
		       "%s: Could not allocate external interrupt buffer\n",
		       __FUNCTION__);
		s390_root_dev_unregister(iucv_root);
		bus_unregister(&iucv_bus);
		return -ENOMEM;
	}

	/* Initialize parameter pool (also below 2G, see note above) */
	iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
				  GFP_KERNEL|GFP_DMA);
	if (!iucv_param_pool) {
		printk(KERN_WARNING "%s: Could not allocate param pool\n",
		       __FUNCTION__);
		kfree(iucv_external_int_buffer);
		iucv_external_int_buffer = NULL;
		s390_root_dev_unregister(iucv_root);
		bus_unregister(&iucv_bus);
		return -ENOMEM;
	}

	/* Initialize irq queue */
	INIT_LIST_HEAD(&iucv_irq_queue);

	/* Initialize handler table */
	INIT_LIST_HEAD(&iucv_handler_table);

	iucv_banner();
	return 0;
}
421 | |||
/**
 * iucv_exit - De-Initialization
 *
 * Frees everything allocated from iucv_init: unregisters the buffer
 * from CP, releases both DMA allocations and tears down the iucv
 * root device and bus.
 */
/* forward declaration - iucv_exit() must retrieve the buffer first */
static int iucv_retrieve_buffer (void);

static void
iucv_exit(void)
{
	iucv_retrieve_buffer();
	kfree(iucv_external_int_buffer);
	iucv_external_int_buffer = NULL;
	kfree(iucv_param_pool);
	iucv_param_pool = NULL;
	s390_root_dev_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
}
441 | |||
/**
 * grab_param: - Get a parameter buffer from the pre-allocated pool.
 *
 * This function searches for an unused element in the pre-allocated pool
 * of parameter buffers. If one is found, it marks it "in use" and returns
 * a pointer to it (zeroed). The calling function is responsible for
 * releasing it via release_param() when it has finished its usage.
 *
 * NOTE(review): 'hint' is read and written without synchronization;
 * concurrent callers may race on it, which only costs extra scan
 * iterations since the actual claim is done with atomic_cmpxchg().
 * If every buffer in the pool is in use this loop busy-waits until
 * one is released.
 *
 * Returns: A pointer to iucv_param.
 */
static __inline__ iucv_param *
grab_param(void)
{
	iucv_param *ptr;
	static int hint = 0;	/* position of the last grab, to spread scans */

	ptr = iucv_param_pool + hint;
	do {
		ptr++;
		if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
			ptr = iucv_param_pool;	/* wrap around */
	} while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
	hint = ptr - iucv_param_pool;

	memset(&ptr->param, 0, sizeof(ptr->param));
	return ptr;
}
469 | |||
470 | /** | ||
471 | * release_param - Release a parameter buffer. | ||
472 | * @p: A pointer to a struct iucv_param, previously obtained by calling | ||
473 | * grab_param(). | ||
474 | * | ||
475 | * This function marks the specified parameter buffer "unused". | ||
476 | */ | ||
477 | static __inline__ void | ||
478 | release_param(void *p) | ||
479 | { | ||
480 | atomic_set(&((iucv_param *)p)->in_use, 0); | ||
481 | } | ||
482 | |||
/**
 * iucv_add_handler: - Add a new handler
 * @new: handler that is being entered into the chain.
 *
 * Places the new handler on iucv_handler_table, unless a handler with
 * an identical id (user_data/userid/mask) is already registered.
 *
 * Returns: 0 on success, !0 on failure (handler already in chain).
 */
static int
iucv_add_handler (handler *new)
{
	ulong flags;

	iucv_debug(1, "entering");
	iucv_dumpit("handler:", new, sizeof(handler));

	spin_lock_irqsave (&iucv_lock, flags);
	if (!list_empty(&iucv_handler_table)) {
		struct list_head *lh;

		/*
		 * Search list for handler with identical id. If one
		 * is found, the new handler is _not_ added.
		 */
		list_for_each(lh, &iucv_handler_table) {
			handler *h = list_entry(lh, handler, list);
			if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
				iucv_debug(1, "ret 1");
				spin_unlock_irqrestore (&iucv_lock, flags);
				return 1;
			}
		}
	}
	/*
	 * If we get here, no handler was found.
	 */
	INIT_LIST_HEAD(&new->list);
	list_add(&new->list, &iucv_handler_table);
	spin_unlock_irqrestore (&iucv_lock, flags);

	iucv_debug(1, "exiting");
	return 0;
}
527 | |||
528 | /** | ||
529 | * b2f0: | ||
530 | * @code: identifier of IUCV call to CP. | ||
531 | * @parm: pointer to 40 byte iparml area passed to CP | ||
532 | * | ||
533 | * Calls CP to execute IUCV commands. | ||
534 | * | ||
535 | * Returns: return code from CP's IUCV call | ||
536 | */ | ||
537 | static inline ulong b2f0(__u32 code, void *parm) | ||
538 | { | ||
539 | register unsigned long reg0 asm ("0"); | ||
540 | register unsigned long reg1 asm ("1"); | ||
541 | iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); | ||
542 | |||
543 | reg0 = code; | ||
544 | reg1 = virt_to_phys(parm); | ||
545 | asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1)); | ||
546 | |||
547 | iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); | ||
548 | |||
549 | return (unsigned long)*((__u8 *)(parm + 3)); | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * Name: iucv_add_pathid | ||
554 | * Purpose: Adds a path id to the system. | ||
555 | * Input: pathid - pathid that is going to be entered into system | ||
556 | * handle - address of handler that the pathid will be associated | ||
557 | * with. | ||
558 | * pgm_data - token passed in by application. | ||
559 | * Output: 0: successful addition of pathid | ||
560 | * - EINVAL - pathid entry is being used by another application | ||
561 | * - ENOMEM - storage allocation for a new pathid table failed | ||
562 | */ | ||
563 | static int | ||
564 | __iucv_add_pathid(__u16 pathid, handler *handler) | ||
565 | { | ||
566 | |||
567 | iucv_debug(1, "entering"); | ||
568 | |||
569 | iucv_debug(1, "handler is pointing to %p", handler); | ||
570 | |||
571 | if (pathid > (max_connections - 1)) | ||
572 | return -EINVAL; | ||
573 | |||
574 | if (iucv_pathid_table[pathid]) { | ||
575 | iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]); | ||
576 | printk(KERN_WARNING | ||
577 | "%s: Pathid being used, error.\n", __FUNCTION__); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | iucv_pathid_table[pathid] = handler; | ||
581 | |||
582 | iucv_debug(1, "exiting"); | ||
583 | return 0; | ||
584 | } /* end of add_pathid function */ | ||
585 | |||
586 | static int | ||
587 | iucv_add_pathid(__u16 pathid, handler *handler) | ||
588 | { | ||
589 | ulong flags; | ||
590 | int rc; | ||
591 | |||
592 | spin_lock_irqsave (&iucv_lock, flags); | ||
593 | rc = __iucv_add_pathid(pathid, handler); | ||
594 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
595 | return rc; | ||
596 | } | ||
597 | |||
598 | static void | ||
599 | iucv_remove_pathid(__u16 pathid) | ||
600 | { | ||
601 | ulong flags; | ||
602 | |||
603 | if (pathid > (max_connections - 1)) | ||
604 | return; | ||
605 | |||
606 | spin_lock_irqsave (&iucv_lock, flags); | ||
607 | iucv_pathid_table[pathid] = NULL; | ||
608 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
609 | } | ||
610 | |||
/**
 * iucv_declare_buffer_cpuid - register the interrupt buffer with CP.
 * @result: pointer to a ulong that receives the return code.
 *
 * Register at VM for subsequent IUCV operations. This is executed
 * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
 * When the call returns 1, the detailed reason is taken from the
 * iprcode field of the parameter list instead.
 */
static void
iucv_declare_buffer_cpuid (void *result)
{
	iparml_db *parm;

	parm = (iparml_db *)grab_param();
	parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
	if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
		*((ulong *)result) = parm->iprcode;
	release_param(parm);
}
627 | |||
628 | /** | ||
629 | * iucv_retrieve_buffer_cpuid: | ||
630 | * Unregister IUCV usage at VM. This is always executed on the same | ||
631 | * cpu that registered the buffer to VM. | ||
632 | * Called from iucv_retrieve_buffer(). | ||
633 | */ | ||
634 | static void | ||
635 | iucv_retrieve_buffer_cpuid (void *cpu) | ||
636 | { | ||
637 | iparml_control *parm; | ||
638 | |||
639 | parm = (iparml_control *)grab_param(); | ||
640 | b2f0(RETRIEVE_BUFFER, parm); | ||
641 | release_param(parm); | ||
642 | } | ||
643 | |||
/**
 * Name: iucv_declare_buffer
 * Purpose: Specifies the guest's real address of an external
 *          interrupt buffer to CP.
 * Input: void
 * Output: 0 if a buffer is already declared or the call succeeded,
 *         otherwise the b2f0/iprcode return code (or -ENODEV if the
 *         cross-cpu call never ran).
 *
 * On first use a cpu is reserved (iucv_cpuid) and DECLARE BUFFER is
 * issued on it; the buffer must later be retrieved on that same cpu
 * (see iucv_retrieve_buffer). On failure the cpu is released again.
 */
static int
iucv_declare_buffer (void)
{
	unsigned long flags;
	ulong b2f0_result;

	iucv_debug(1, "entering");
	b2f0_result = -ENODEV;
	spin_lock_irqsave (&iucv_lock, flags);
	if (iucv_cpuid == -1) {
		/* Reserve any cpu for use by iucv. */
		iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
		/* Drop the lock before the synchronous cross-cpu call. */
		spin_unlock_irqrestore (&iucv_lock, flags);
		smp_call_function_on(iucv_declare_buffer_cpuid,
			&b2f0_result, 0, 1, iucv_cpuid);
		if (b2f0_result) {
			/* Declare failed - give the reserved cpu back. */
			smp_put_cpu(iucv_cpuid);
			iucv_cpuid = -1;
		}
		iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
	} else {
		/* Buffer already declared earlier - nothing to do. */
		spin_unlock_irqrestore (&iucv_lock, flags);
		b2f0_result = 0;
	}
	iucv_debug(1, "exiting");
	return b2f0_result;
}
678 | |||
/**
 * iucv_retrieve_buffer:
 *
 * Terminates all use of IUCV: issues RETRIEVE BUFFER on the cpu that
 * declared the buffer, then releases that cpu. A no-op when no
 * buffer is currently declared.
 * Returns: always 0
 */
static int
iucv_retrieve_buffer (void)
{
	iucv_debug(1, "entering");
	if (iucv_cpuid != -1) {
		smp_call_function_on(iucv_retrieve_buffer_cpuid,
				     NULL, 0, 1, iucv_cpuid);
		/* Release the cpu reserved by iucv_declare_buffer. */
		smp_put_cpu(iucv_cpuid);
		iucv_cpuid = -1;
	}
	iucv_debug(1, "exiting");
	return 0;
}
699 | |||
/**
 * iucv_remove_handler:
 * @handler: handler to be removed
 *
 * Removes the handler from iucv_handler_table when an application
 * unregisters. When the table becomes empty, the 0x4000 external
 * interrupt is unregistered as well. The handler memory itself is
 * NOT freed here - that is up to the caller.
 */
static void
iucv_remove_handler(handler *handler)
{
	unsigned long flags;

	/* Nothing to do before the first registration set things up. */
	if ((!iucv_pathid_table) || (!handler))
		return;

	iucv_debug(1, "entering");

	spin_lock_irqsave (&iucv_lock, flags);
	list_del(&handler->list);
	if (list_empty(&iucv_handler_table)) {
		/* Last handler gone - stop receiving IUCV interrupts. */
		if (register_flag) {
			unregister_external_interrupt(0x4000, iucv_irq_handler);
			register_flag = 0;
		}
	}
	spin_unlock_irqrestore (&iucv_lock, flags);

	iucv_debug(1, "exiting");
	return;
}
729 | |||
730 | /** | ||
731 | * iucv_register_program: | ||
732 | * @pgmname: user identification | ||
733 | * @userid: machine identification | ||
734 | * @pgmmask: Indicates which bits in the pgmname and userid combined will be | ||
735 | * used to determine who is given control. | ||
736 | * @ops: Address of interrupt handler table. | ||
737 | * @pgm_data: Application data to be passed to interrupt handlers. | ||
738 | * | ||
739 | * Registers an application with IUCV. | ||
740 | * Returns: | ||
741 | * The address of handler, or NULL on failure. | ||
742 | * NOTE on pgmmask: | ||
743 | * If pgmname, userid and pgmmask are provided, pgmmask is entered into the | ||
744 | * handler as is. | ||
745 | * If pgmmask is NULL, the internal mask is set to all 0xff's | ||
746 | * When userid is NULL, the first 8 bytes of the internal mask are forced | ||
747 | * to 0x00. | ||
748 | * If pgmmask and userid are NULL, the first 8 bytes of the internal mask | ||
749 | * are forced to 0x00 and the last 16 bytes to 0xff. | ||
750 | */ | ||
751 | |||
752 | iucv_handle_t | ||
753 | iucv_register_program (__u8 pgmname[16], | ||
754 | __u8 userid[8], | ||
755 | __u8 pgmmask[24], | ||
756 | iucv_interrupt_ops_t * ops, void *pgm_data) | ||
757 | { | ||
758 | ulong rc = 0; /* return code from function calls */ | ||
759 | handler *new_handler; | ||
760 | |||
761 | iucv_debug(1, "entering"); | ||
762 | |||
763 | if (ops == NULL) { | ||
764 | /* interrupt table is not defined */ | ||
765 | printk(KERN_WARNING "%s: Interrupt table is not defined, " | ||
766 | "exiting\n", __FUNCTION__); | ||
767 | return NULL; | ||
768 | } | ||
769 | if (!pgmname) { | ||
770 | printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__); | ||
771 | return NULL; | ||
772 | } | ||
773 | |||
774 | /* Allocate handler entry */ | ||
775 | new_handler = kmalloc(sizeof(handler), GFP_ATOMIC); | ||
776 | if (new_handler == NULL) { | ||
777 | printk(KERN_WARNING "%s: storage allocation for new handler " | ||
778 | "failed.\n", __FUNCTION__); | ||
779 | return NULL; | ||
780 | } | ||
781 | |||
782 | if (!iucv_pathid_table) { | ||
783 | if (iucv_init()) { | ||
784 | kfree(new_handler); | ||
785 | return NULL; | ||
786 | } | ||
787 | |||
788 | max_connections = iucv_query_maxconn(); | ||
789 | iucv_pathid_table = kcalloc(max_connections, sizeof(handler *), | ||
790 | GFP_ATOMIC); | ||
791 | if (iucv_pathid_table == NULL) { | ||
792 | printk(KERN_WARNING "%s: iucv_pathid_table storage " | ||
793 | "allocation failed\n", __FUNCTION__); | ||
794 | kfree(new_handler); | ||
795 | return NULL; | ||
796 | } | ||
797 | } | ||
798 | memset(new_handler, 0, sizeof (handler)); | ||
799 | memcpy(new_handler->id.user_data, pgmname, | ||
800 | sizeof (new_handler->id.user_data)); | ||
801 | if (userid) { | ||
802 | memcpy (new_handler->id.userid, userid, | ||
803 | sizeof (new_handler->id.userid)); | ||
804 | ASCEBC (new_handler->id.userid, | ||
805 | sizeof (new_handler->id.userid)); | ||
806 | EBC_TOUPPER (new_handler->id.userid, | ||
807 | sizeof (new_handler->id.userid)); | ||
808 | |||
809 | if (pgmmask) { | ||
810 | memcpy (new_handler->id.mask, pgmmask, | ||
811 | sizeof (new_handler->id.mask)); | ||
812 | } else { | ||
813 | memset (new_handler->id.mask, 0xFF, | ||
814 | sizeof (new_handler->id.mask)); | ||
815 | } | ||
816 | } else { | ||
817 | if (pgmmask) { | ||
818 | memcpy (new_handler->id.mask, pgmmask, | ||
819 | sizeof (new_handler->id.mask)); | ||
820 | } else { | ||
821 | memset (new_handler->id.mask, 0xFF, | ||
822 | sizeof (new_handler->id.mask)); | ||
823 | } | ||
824 | memset (new_handler->id.userid, 0x00, | ||
825 | sizeof (new_handler->id.userid)); | ||
826 | } | ||
827 | /* fill in the rest of handler */ | ||
828 | new_handler->pgm_data = pgm_data; | ||
829 | new_handler->interrupt_table = ops; | ||
830 | |||
831 | /* | ||
832 | * Check if someone else is registered with same pgmname, userid | ||
833 | * and mask. If someone is already registered with same pgmname, | ||
834 | * userid and mask, registration will fail and NULL will be returned | ||
835 | * to the application. | ||
836 | * If identical handler not found, then handler is added to list. | ||
837 | */ | ||
838 | rc = iucv_add_handler(new_handler); | ||
839 | if (rc) { | ||
840 | printk(KERN_WARNING "%s: Someone already registered with same " | ||
841 | "pgmname, userid, pgmmask\n", __FUNCTION__); | ||
842 | kfree (new_handler); | ||
843 | return NULL; | ||
844 | } | ||
845 | |||
846 | rc = iucv_declare_buffer(); | ||
847 | if (rc) { | ||
848 | char *err = "Unknown"; | ||
849 | iucv_remove_handler(new_handler); | ||
850 | kfree(new_handler); | ||
851 | switch(rc) { | ||
852 | case 0x03: | ||
853 | err = "Directory error"; | ||
854 | break; | ||
855 | case 0x0a: | ||
856 | err = "Invalid length"; | ||
857 | break; | ||
858 | case 0x13: | ||
859 | err = "Buffer already exists"; | ||
860 | break; | ||
861 | case 0x3e: | ||
862 | err = "Buffer overlap"; | ||
863 | break; | ||
864 | case 0x5c: | ||
865 | err = "Paging or storage error"; | ||
866 | break; | ||
867 | } | ||
868 | printk(KERN_WARNING "%s: iucv_declare_buffer " | ||
869 | "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err); | ||
870 | return NULL; | ||
871 | } | ||
872 | if (!register_flag) { | ||
873 | /* request the 0x4000 external interrupt */ | ||
874 | rc = register_external_interrupt (0x4000, iucv_irq_handler); | ||
875 | if (rc) { | ||
876 | iucv_remove_handler(new_handler); | ||
877 | kfree (new_handler); | ||
878 | printk(KERN_WARNING "%s: " | ||
879 | "register_external_interrupt returned %ld\n", | ||
880 | __FUNCTION__, rc); | ||
881 | return NULL; | ||
882 | |||
883 | } | ||
884 | register_flag = 1; | ||
885 | } | ||
886 | iucv_debug(1, "exiting"); | ||
887 | return new_handler; | ||
888 | } /* end of register function */ | ||
889 | |||
890 | /** | ||
891 | * iucv_unregister_program: | ||
892 | * @handle: address of handler | ||
893 | * | ||
894 | * Unregister application with IUCV. | ||
895 | * Returns: | ||
896 | * 0 on success, -EINVAL, if specified handle is invalid. | ||
897 | */ | ||
898 | |||
899 | int | ||
900 | iucv_unregister_program (iucv_handle_t handle) | ||
901 | { | ||
902 | handler *h = NULL; | ||
903 | struct list_head *lh; | ||
904 | int i; | ||
905 | ulong flags; | ||
906 | |||
907 | iucv_debug(1, "entering"); | ||
908 | iucv_debug(1, "address of handler is %p", h); | ||
909 | |||
910 | /* Checking if handle is valid */ | ||
911 | spin_lock_irqsave (&iucv_lock, flags); | ||
912 | list_for_each(lh, &iucv_handler_table) { | ||
913 | if ((handler *)handle == list_entry(lh, handler, list)) { | ||
914 | h = (handler *)handle; | ||
915 | break; | ||
916 | } | ||
917 | } | ||
918 | if (!h) { | ||
919 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
920 | if (handle) | ||
921 | printk(KERN_WARNING | ||
922 | "%s: Handler not found in iucv_handler_table.\n", | ||
923 | __FUNCTION__); | ||
924 | else | ||
925 | printk(KERN_WARNING | ||
926 | "%s: NULL handle passed by application.\n", | ||
927 | __FUNCTION__); | ||
928 | return -EINVAL; | ||
929 | } | ||
930 | |||
931 | /** | ||
932 | * First, walk thru iucv_pathid_table and sever any pathid which is | ||
933 | * still pointing to the handler to be removed. | ||
934 | */ | ||
935 | for (i = 0; i < max_connections; i++) | ||
936 | if (iucv_pathid_table[i] == h) { | ||
937 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
938 | iucv_sever(i, h->id.user_data); | ||
939 | spin_lock_irqsave(&iucv_lock, flags); | ||
940 | } | ||
941 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
942 | |||
943 | iucv_remove_handler(h); | ||
944 | kfree(h); | ||
945 | |||
946 | iucv_debug(1, "exiting"); | ||
947 | return 0; | ||
948 | } | ||
949 | |||
/**
 * iucv_accept:
 * @pathid: Path identification number
 * @msglim_reqstd: The number of outstanding messages requested.
 * @user_data: Data specified by the iucv_connect function.
 * @flags1: Contains options for this path.
 *     - IPPRTY (0x20) Specifies if you want to send priority message.
 *     - IPRMDATA (0x80) Specifies whether your program can handle a message
 *       in the parameter list.
 *     - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
 *       established.
 * @handle: Address of handler.
 * @pgm_data: Application data passed to interrupt handlers.
 * @flags1_out: Pointer to an int. If not NULL, on return the options for
 *              the path are stored at the given location:
 *     - IPPRTY (0x20) Indicates you may send a priority message.
 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
 *          number of outstanding messages is stored at the given
 *          location.
 *
 * This function is issued after the user receives a Connection Pending external
 * interrupt and now wishes to complete the IUCV communication path.
 * Returns:
 *   return code from CP, or -EINVAL for an unknown/NULL handle
 */
int
iucv_accept(__u16 pathid, __u16 msglim_reqstd,
	     __u8 user_data[16], int flags1,
	     iucv_handle_t handle, void *pgm_data,
	     int *flags1_out, __u16 * msglim)
{
	ulong b2f0_result = 0;
	ulong flags;
	struct list_head *lh;
	handler *h = NULL;
	iparml_control *parm;

	iucv_debug(1, "entering");
	iucv_debug(1, "pathid = %d", pathid);

	/* Checking if handle is valid */
	spin_lock_irqsave (&iucv_lock, flags);
	list_for_each(lh, &iucv_handler_table) {
		if ((handler *)handle == list_entry(lh, handler, list)) {
			h = (handler *)handle;
			break;
		}
	}
	spin_unlock_irqrestore (&iucv_lock, flags);

	if (!h) {
		if (handle)
			printk(KERN_WARNING
			       "%s: Handler not found in iucv_handler_table.\n",
			       __FUNCTION__);
		else
			printk(KERN_WARNING
			       "%s: NULL handle passed by application.\n",
			       __FUNCTION__);
		return -EINVAL;
	}

	parm = (iparml_control *)grab_param();

	parm->ippathid = pathid;
	parm->ipmsglim = msglim_reqstd;
	if (user_data)
		memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));

	parm->ipflags1 = (__u8)flags1;
	b2f0_result = b2f0(ACCEPT, parm);

	if (!b2f0_result) {
		/* Pass the negotiated values back to the caller. */
		if (msglim)
			*msglim = parm->ipmsglim;
		if (pgm_data)
			h->pgm_data = pgm_data;
		if (flags1_out)
			*flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
	}
	release_param(parm);

	iucv_debug(1, "exiting");
	return b2f0_result;
}
1035 | |||
/**
 * iucv_connect:
 * @pathid: Path identification number
 * @msglim_reqstd: Number of outstanding messages requested
 * @user_data: 16-byte user data
 * @userid: 8-byte of user identification
 * @system_name: 8-byte identifying the system name
 * @flags1: Specifies options for this path:
 *     - IPPRTY (0x20) Specifies if you want to send priority message.
 *     - IPRMDATA (0x80) Specifies whether your program can handle a message
 *       in the parameter list.
 *     - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
 *       established.
 *     - IPLOCAL (0x01) Allows an application to force the partner to be on the
 *       local system. If local is specified then target class
 *       cannot be specified.
 * @flags1_out: Pointer to an int. If not NULL, on return the options for
 *              the path are stored at the given location:
 *     - IPPRTY (0x20) Indicates you may send a priority message.
 * @msglim: Pointer to an __u16. If not NULL, on return the maximum
 *          number of outstanding messages is stored at the given
 *          location.
 * @handle: Address of handler.
 * @pgm_data: Application data to be passed to interrupt handlers.
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 * Returns: return code from CP, or one of the following
 *     - ENOMEM
 *     - return code from iucv_declare_buffer
 *     - EINVAL - invalid handle passed by application
 *     - EINVAL - pathid address is NULL
 *     - ENOMEM - pathid table storage allocation failed
 *     - return code from internal function add_pathid
 */
int
iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
	      __u8 user_data[16], __u8 userid[8],
	      __u8 system_name[8], int flags1,
	      int *flags1_out, __u16 * msglim,
	      iucv_handle_t handle, void *pgm_data)
{
	iparml_control *parm;
	iparml_control local_parm;	/* copy kept after the pool slot is freed */
	struct list_head *lh;
	ulong b2f0_result = 0;
	ulong flags;
	int add_pathid_result = 0;
	handler *h = NULL;
	__u8 no_memory[16] = "NO MEMORY";

	iucv_debug(1, "entering");

	/* Checking if handle is valid */
	spin_lock_irqsave (&iucv_lock, flags);
	list_for_each(lh, &iucv_handler_table) {
		if ((handler *)handle == list_entry(lh, handler, list)) {
			h = (handler *)handle;
			break;
		}
	}
	spin_unlock_irqrestore (&iucv_lock, flags);

	if (!h) {
		if (handle)
			printk(KERN_WARNING
			       "%s: Handler not found in iucv_handler_table.\n",
			       __FUNCTION__);
		else
			printk(KERN_WARNING
			       "%s: NULL handle passed by application.\n",
			       __FUNCTION__);
		return -EINVAL;
	}

	if (pathid == NULL) {
		printk(KERN_WARNING "%s: NULL pathid pointer\n",
		       __FUNCTION__);
		return -EINVAL;
	}

	parm = (iparml_control *)grab_param();

	parm->ipmsglim = msglim_reqstd;

	if (user_data)
		memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));

	/* userid and system name go to CP in upper-case EBCDIC */
	if (userid) {
		memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
		ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
		EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
	}

	if (system_name) {
		memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
		ASCEBC(parm->iptarget, sizeof(parm->iptarget));
		EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
	}

	/* In order to establish an IUCV connection, the procedure is:
	 *
	 * b2f0(CONNECT)
	 * take the ippathid from the b2f0 call
	 * register the handler to the ippathid
	 *
	 * Unfortunately, the ConnectionEstablished message gets sent after the
	 * b2f0(CONNECT) call but before the register is handled.
	 *
	 * In order for this race condition to be eliminated, the IUCV Control
	 * Interrupts must be disabled for the above procedure.
	 *
	 * David Kennedy <dkennedy@linuxcare.com>
	 */

	/* Enable everything but IUCV Control messages */
	iucv_setmask(~(AllInterrupts));
	messagesDisabled = 1;

	spin_lock_irqsave (&iucv_lock, flags);
	parm->ipflags1 = (__u8)flags1;
	b2f0_result = b2f0(CONNECT, parm);
	/* Copy the result out so the pool slot can be released early. */
	memcpy(&local_parm, parm, sizeof(local_parm));
	release_param(parm);
	parm = &local_parm;
	if (!b2f0_result)
		add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
	spin_unlock_irqrestore (&iucv_lock, flags);

	if (b2f0_result) {
		/* CONNECT failed - re-enable all interrupt classes. */
		iucv_setmask(~0);
		messagesDisabled = 0;
		return b2f0_result;
	}

	*pathid = parm->ippathid;

	/* Enable everything again */
	iucv_setmask(IUCVControlInterruptsFlag);

	if (msglim)
		*msglim = parm->ipmsglim;
	if (flags1_out)
		*flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;

	if (add_pathid_result) {
		/* Could not track the path locally - sever it at CP again. */
		iucv_sever(*pathid, no_memory);
		printk(KERN_WARNING "%s: add_pathid failed with rc ="
			" %d\n", __FUNCTION__, add_pathid_result);
		return(add_pathid_result);
	}

	iucv_debug(1, "exiting");
	return b2f0_result;
}
1192 | |||
/**
 * iucv_purge:
 * @pathid: Path identification number
 * @msgid: Message ID of message to purge.
 * @srccls: Message class of the message to purge.
 * @audit: Pointer to an __u32. If not NULL, on return, information about
 *         asynchronous errors that may have affected the normal completion
 *         of this message is stored at the given location.
 *
 * Cancels a message you have sent.
 * Returns: return code from CP
 */
int
iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
{
	iparml_purge *parm;
	ulong b2f0_result = 0;

	iucv_debug(1, "entering");
	iucv_debug(1, "pathid = %d", pathid);

	parm = (iparml_purge *)grab_param();

	parm->ipmsgid = msgid;
	parm->ippathid = pathid;
	parm->ipsrccls = srccls;
	/* tell CP that msgid, pathid and srccls identify the message */
	parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
	b2f0_result = b2f0(PURGE, parm);

	if (!b2f0_result && audit) {
		memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
		/* parm->ipaudit has only 3 bytes */
		*audit >>= 8;
	}

	release_param(parm);

	iucv_debug(1, "b2f0_result = %ld", b2f0_result);
	iucv_debug(1, "exiting");
	return b2f0_result;
}
1234 | |||
/**
 * iucv_query_generic:
 * @want_maxconn: Flag, describing which value is to be returned.
 *
 * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
 *
 * Returns: The buffersize, if want_maxconn is 0; the maximum number of
 * connections, if want_maxconn is 1 or an error-code < 0 on failure.
 */
static int
iucv_query_generic(int want_maxconn)
{
	/* The QUERY function code returns its results in R0/R1, so the
	 * registers are bound explicitly here instead of going through
	 * the generic b2f0() wrapper. */
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	iparml_purge *parm = (iparml_purge *)grab_param();
	int bufsize, maxconn;
	int ccode;

	/**
	 * Call b2f0 and store R0 (max buffer size),
	 * R1 (max connections) and CC.
	 */
	reg0 = QUERY;
	reg1 = virt_to_phys(parm);
	/* Raw IUCV opcode; ipm/srl extract the condition code into ccode. */
	asm volatile(
		" .long 0xb2f01000\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	bufsize = reg0;
	maxconn = reg1;
	release_param(parm);

	/* Nonzero condition code: CP rejected the request. */
	if (ccode)
		return -EPERM;
	if (want_maxconn)
		return maxconn;
	return bufsize;
}
1274 | |||
/**
 * iucv_query_maxconn:
 *
 * Determines the maximum number of connections that may be established.
 *
 * Returns: Maximum number of connections that can be.
 */
ulong
iucv_query_maxconn(void)
{
	/* NOTE(review): iucv_query_generic() can return -EPERM (an int);
	 * converting it to ulong yields a huge value here — callers cannot
	 * distinguish an error. Confirm whether any caller checks this. */
	return iucv_query_generic(1);
}
1287 | |||
/**
 * iucv_query_bufsize:
 *
 * Determines the size of the external interrupt buffer.
 *
 * Returns: Size of external interrupt buffer.
 */
ulong
iucv_query_bufsize (void)
{
	/* NOTE(review): same caveat as iucv_query_maxconn() — a negative
	 * error code from iucv_query_generic() becomes a huge ulong. */
	return iucv_query_generic(0);
}
1300 | |||
1301 | /** | ||
1302 | * iucv_quiesce: | ||
1303 | * @pathid: Path identification number | ||
1304 | * @user_data: 16-byte user data | ||
1305 | * | ||
1306 | * Temporarily suspends incoming messages on an IUCV path. | ||
1307 | * You can later reactivate the path by invoking the iucv_resume function. | ||
1308 | * Returns: return code from CP | ||
1309 | */ | ||
1310 | int | ||
1311 | iucv_quiesce (__u16 pathid, __u8 user_data[16]) | ||
1312 | { | ||
1313 | iparml_control *parm; | ||
1314 | ulong b2f0_result = 0; | ||
1315 | |||
1316 | iucv_debug(1, "entering"); | ||
1317 | iucv_debug(1, "pathid = %d", pathid); | ||
1318 | |||
1319 | parm = (iparml_control *)grab_param(); | ||
1320 | |||
1321 | memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); | ||
1322 | parm->ippathid = pathid; | ||
1323 | |||
1324 | b2f0_result = b2f0(QUIESCE, parm); | ||
1325 | release_param(parm); | ||
1326 | |||
1327 | iucv_debug(1, "b2f0_result = %ld", b2f0_result); | ||
1328 | iucv_debug(1, "exiting"); | ||
1329 | |||
1330 | return b2f0_result; | ||
1331 | } | ||
1332 | |||
/**
 * iucv_receive:
 * @pathid: Path identification number.
 * @buffer: Address of buffer to receive. Must be below 2G.
 * @buflen: Length of buffer to receive.
 * @msgid: Specifies the message ID.
 * @trgcls: Specifies target class.
 * @flags1_out: Receives options for path on return.
 *    - IPNORPY (0x10)  Specifies whether a reply is required
 *    - IPPRTY (0x20)   Specifies if you want to send priority message
 *    - IPRMDATA (0x80) Specifies the data is contained in the parameter list
 * @residual_buffer: Receives the address of buffer updated by the number
 *                   of bytes you have received on return.
 * @residual_length: On return, receives one of the following values:
 *    - 0                          If the receive buffer is the same length as
 *                                 the message.
 *    - Remaining bytes in buffer  If the receive buffer is longer than the
 *                                 message.
 *    - Remaining bytes in message If the receive buffer is shorter than the
 *                                 message.
 *
 * This function receives messages that are being sent to you over established
 * paths.
 * Returns: return code from CP IUCV call; If the receive buffer is shorter
 *          than the message, always 5
 *          -EINVAL - buffer address is pointing to NULL
 */
int
iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
	      void *buffer, ulong buflen,
	      int *flags1_out, ulong * residual_buffer, ulong * residual_length)
{
	iparml_db *parm;
	ulong b2f0_result;
	int moved = 0;	/* number of bytes moved from parmlist to buffer */

	iucv_debug(2, "entering");

	if (!buffer)
		return -EINVAL;

	parm = (iparml_db *)grab_param();

	parm->ipbfadr1 = (__u32) (addr_t) buffer;
	parm->ipbfln1f = (__u32) ((ulong) buflen);
	parm->ipmsgid = msgid;
	parm->ippathid = pathid;
	parm->iptrgcls = trgcls;
	parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);

	b2f0_result = b2f0(RECEIVE, parm);

	/* rc 5 = buffer shorter than message; partial data is still valid. */
	if (!b2f0_result || b2f0_result == 5) {
		if (flags1_out) {
			iucv_debug(2, "*flags1_out = %d", *flags1_out);
			/* Mask out the low three (function-code) bits. */
			*flags1_out = (parm->ipflags1 & (~0x07));
			iucv_debug(2, "*flags1_out = %d", *flags1_out);
		}

		if (!(parm->ipflags1 & IPRMDATA)) {	/*msg not in parmlist */
			/* CP updated the length/address fields in place. */
			if (residual_length)
				*residual_length = parm->ipbfln1f;

			if (residual_buffer)
				*residual_buffer = parm->ipbfadr1;
		} else {
			/* Message was delivered in the parameter list itself;
			 * up to 8 bytes live in the ipbfadr1 field. */
			moved = min_t (unsigned long, buflen, 8);

			memcpy ((char *) buffer,
				(char *) &parm->ipbfadr1, moved);

			if (buflen < 8)
				b2f0_result = 5;

			/* NOTE(review): buflen is unsigned, so buflen - 8
			 * wraps for buflen < 8; abs() on the truncated int
			 * recovers |buflen - 8| for small buflen — confirm
			 * for very large buflen values. */
			if (residual_length)
				*residual_length = abs (buflen - 8);

			if (residual_buffer)
				*residual_buffer = (ulong) (buffer + moved);
		}
	}
	release_param(parm);

	iucv_debug(2, "exiting");
	return b2f0_result;
}
1419 | |||
/*
 * Name: iucv_receive_array
 * Purpose: This function receives messages that are being sent to you
 *          over established paths.
 * Input: pathid - path identification number
 *        buffer - address of array of buffers
 *        buflen - total length of buffers
 *        msgid - specifies the message ID.
 *        trgcls - specifies target class
 * Output:
 *        flags1_out: Options for path.
 *          IPNORPY - 0x10 specifies whether a reply is required
 *          IPPRTY - 0x20 specifies if you want to send priority message
 *          IPRMDATA - 0x80 specifies the data is contained in the parameter list
 *       residual_buffer - address points to the current list entry IUCV
 *                         is working on.
 *       residual_length -
 *              Contains one of the following values, if the receive buffer is:
 *               The same length as the message, this field is zero.
 *               Longer than the message, this field contains the number of
 *                bytes remaining in the buffer.
 *               Shorter than the message, this field contains the residual
 *                count (that is, the number of bytes remaining in the
 *                message that does not fit into the buffer. In this case
 *                b2f0_result = 5.
 * Return: b2f0_result - return code from CP
 *         (-EINVAL) - buffer address is NULL
 */
int
iucv_receive_array (__u16 pathid,
		    __u32 msgid, __u32 trgcls,
		    iucv_array_t * buffer, ulong buflen,
		    int *flags1_out,
		    ulong * residual_buffer, ulong * residual_length)
{
	iparml_db *parm;
	ulong b2f0_result;
	/* need_to_move: bytes of in-parmlist data (max 8) still to copy */
	int i = 0, moved = 0, need_to_move = 8, dyn_len;

	iucv_debug(2, "entering");

	if (!buffer)
		return -EINVAL;

	parm = (iparml_db *)grab_param();

	parm->ipbfadr1 = (__u32) ((ulong) buffer);
	parm->ipbfln1f = (__u32) buflen;
	parm->ipmsgid = msgid;
	parm->ippathid = pathid;
	parm->iptrgcls = trgcls;
	/* IPBUFLST: ipbfadr1 points to a buffer list, not a flat buffer. */
	parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);

	b2f0_result = b2f0(RECEIVE, parm);

	/* rc 5 = buffer shorter than message; partial data is still valid. */
	if (!b2f0_result || b2f0_result == 5) {

		if (flags1_out) {
			iucv_debug(2, "*flags1_out = %d", *flags1_out);
			/* Mask out the low three (function-code) bits. */
			*flags1_out = (parm->ipflags1 & (~0x07));
			iucv_debug(2, "*flags1_out = %d", *flags1_out);
		}

		if (!(parm->ipflags1 & IPRMDATA)) {	/*msg not in parmlist */

			if (residual_length)
				*residual_length = parm->ipbfln1f;

			if (residual_buffer)
				*residual_buffer = parm->ipbfadr1;

		} else {
			/* copy msg from parmlist to users array. */

			/* NOTE(review): an array entry with length 0 advances
			 * i without moving data; a run of zero-length entries
			 * would walk past the caller's array — confirm callers
			 * never pass such arrays. */
			while ((moved < 8) && (moved < buflen)) {
				dyn_len =
				    min_t (unsigned int,
					   (buffer + i)->length, need_to_move);

				memcpy ((char *)((ulong)((buffer + i)->address)),
					((char *) &parm->ipbfadr1) + moved,
					dyn_len);

				moved += dyn_len;
				need_to_move -= dyn_len;

				/* Advance this entry's address/length past the
				 * bytes just consumed. */
				(buffer + i)->address =
					(__u32)
				((ulong)(__u8 *) ((ulong)(buffer + i)->address)
					+ dyn_len);

				(buffer + i)->length -= dyn_len;
				i++;
			}

			if (need_to_move)	/* buflen < 8 bytes */
				b2f0_result = 5;

			/* NOTE(review): buflen is unsigned; see the matching
			 * abs() caveat in iucv_receive(). */
			if (residual_length)
				*residual_length = abs (buflen - 8);

			if (residual_buffer) {
				if (!moved)
					*residual_buffer = (ulong) buffer;
				else
					*residual_buffer =
					    (ulong) (buffer + (i - 1));
			}

		}
	}
	release_param(parm);

	iucv_debug(2, "exiting");
	return b2f0_result;
}
1536 | |||
1537 | /** | ||
1538 | * iucv_reject: | ||
1539 | * @pathid: Path identification number. | ||
1540 | * @msgid: Message ID of the message to reject. | ||
1541 | * @trgcls: Target class of the message to reject. | ||
1542 | * Returns: return code from CP | ||
1543 | * | ||
1544 | * Refuses a specified message. Between the time you are notified of a | ||
1545 | * message and the time that you complete the message, the message may | ||
1546 | * be rejected. | ||
1547 | */ | ||
1548 | int | ||
1549 | iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls) | ||
1550 | { | ||
1551 | iparml_db *parm; | ||
1552 | ulong b2f0_result = 0; | ||
1553 | |||
1554 | iucv_debug(1, "entering"); | ||
1555 | iucv_debug(1, "pathid = %d", pathid); | ||
1556 | |||
1557 | parm = (iparml_db *)grab_param(); | ||
1558 | |||
1559 | parm->ippathid = pathid; | ||
1560 | parm->ipmsgid = msgid; | ||
1561 | parm->iptrgcls = trgcls; | ||
1562 | parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID); | ||
1563 | |||
1564 | b2f0_result = b2f0(REJECT, parm); | ||
1565 | release_param(parm); | ||
1566 | |||
1567 | iucv_debug(1, "b2f0_result = %ld", b2f0_result); | ||
1568 | iucv_debug(1, "exiting"); | ||
1569 | |||
1570 | return b2f0_result; | ||
1571 | } | ||
1572 | |||
1573 | /* | ||
1574 | * Name: iucv_reply | ||
1575 | * Purpose: This function responds to the two-way messages that you | ||
1576 | * receive. You must identify completely the message to | ||
1577 | * which you wish to reply. ie, pathid, msgid, and trgcls. | ||
1578 | * Input: pathid - path identification number | ||
1579 | * msgid - specifies the message ID. | ||
1580 | * trgcls - specifies target class | ||
1581 | * flags1 - option for path | ||
1582 | * IPPRTY- 0x20 - specifies if you want to send priority message | ||
1583 | * buffer - address of reply buffer | ||
1584 | * buflen - length of reply buffer | ||
1585 | * Output: ipbfadr2 - Address of buffer updated by the number | ||
1586 | * of bytes you have moved. | ||
1587 | * ipbfln2f - Contains one of the following values: | ||
1588 | * If the answer buffer is the same length as the reply, this field | ||
1589 | * contains zero. | ||
1590 | * If the answer buffer is longer than the reply, this field contains | ||
1591 | * the number of bytes remaining in the buffer. | ||
1592 | * If the answer buffer is shorter than the reply, this field contains | ||
1593 | * a residual count (that is, the number of bytes remianing in the | ||
1594 | * reply that does not fit into the buffer. In this | ||
1595 | * case b2f0_result = 5. | ||
1596 | * Return: b2f0_result - return code from CP | ||
1597 | * (-EINVAL) - buffer address is NULL | ||
1598 | */ | ||
1599 | int | ||
1600 | iucv_reply (__u16 pathid, | ||
1601 | __u32 msgid, __u32 trgcls, | ||
1602 | int flags1, | ||
1603 | void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f) | ||
1604 | { | ||
1605 | iparml_db *parm; | ||
1606 | ulong b2f0_result; | ||
1607 | |||
1608 | iucv_debug(2, "entering"); | ||
1609 | |||
1610 | if (!buffer) | ||
1611 | return -EINVAL; | ||
1612 | |||
1613 | parm = (iparml_db *)grab_param(); | ||
1614 | |||
1615 | parm->ipbfadr2 = (__u32) ((ulong) buffer); | ||
1616 | parm->ipbfln2f = (__u32) buflen; /* length of message */ | ||
1617 | parm->ippathid = pathid; | ||
1618 | parm->ipmsgid = msgid; | ||
1619 | parm->iptrgcls = trgcls; | ||
1620 | parm->ipflags1 = (__u8) flags1; /* priority message */ | ||
1621 | |||
1622 | b2f0_result = b2f0(REPLY, parm); | ||
1623 | |||
1624 | if ((!b2f0_result) || (b2f0_result == 5)) { | ||
1625 | if (ipbfadr2) | ||
1626 | *ipbfadr2 = parm->ipbfadr2; | ||
1627 | if (ipbfln2f) | ||
1628 | *ipbfln2f = parm->ipbfln2f; | ||
1629 | } | ||
1630 | release_param(parm); | ||
1631 | |||
1632 | iucv_debug(2, "exiting"); | ||
1633 | |||
1634 | return b2f0_result; | ||
1635 | } | ||
1636 | |||
1637 | /* | ||
1638 | * Name: iucv_reply_array | ||
1639 | * Purpose: This function responds to the two-way messages that you | ||
1640 | * receive. You must identify completely the message to | ||
1641 | * which you wish to reply. ie, pathid, msgid, and trgcls. | ||
1642 | * The array identifies a list of addresses and lengths of | ||
1643 | * discontiguous buffers that contains the reply data. | ||
1644 | * Input: pathid - path identification number | ||
1645 | * msgid - specifies the message ID. | ||
1646 | * trgcls - specifies target class | ||
1647 | * flags1 - option for path | ||
1648 | * IPPRTY- specifies if you want to send priority message | ||
1649 | * buffer - address of array of reply buffers | ||
1650 | * buflen - total length of reply buffers | ||
1651 | * Output: ipbfadr2 - Address of buffer which IUCV is currently working on. | ||
1652 | * ipbfln2f - Contains one of the following values: | ||
1653 | * If the answer buffer is the same length as the reply, this field | ||
1654 | * contains zero. | ||
1655 | * If the answer buffer is longer than the reply, this field contains | ||
1656 | * the number of bytes remaining in the buffer. | ||
1657 | * If the answer buffer is shorter than the reply, this field contains | ||
1658 | * a residual count (that is, the number of bytes remianing in the | ||
1659 | * reply that does not fit into the buffer. In this | ||
1660 | * case b2f0_result = 5. | ||
1661 | * Return: b2f0_result - return code from CP | ||
1662 | * (-EINVAL) - buffer address is NULL | ||
1663 | */ | ||
1664 | int | ||
1665 | iucv_reply_array (__u16 pathid, | ||
1666 | __u32 msgid, __u32 trgcls, | ||
1667 | int flags1, | ||
1668 | iucv_array_t * buffer, | ||
1669 | ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f) | ||
1670 | { | ||
1671 | iparml_db *parm; | ||
1672 | ulong b2f0_result; | ||
1673 | |||
1674 | iucv_debug(2, "entering"); | ||
1675 | |||
1676 | if (!buffer) | ||
1677 | return -EINVAL; | ||
1678 | |||
1679 | parm = (iparml_db *)grab_param(); | ||
1680 | |||
1681 | parm->ipbfadr2 = (__u32) ((ulong) buffer); | ||
1682 | parm->ipbfln2f = buflen; /* length of message */ | ||
1683 | parm->ippathid = pathid; | ||
1684 | parm->ipmsgid = msgid; | ||
1685 | parm->iptrgcls = trgcls; | ||
1686 | parm->ipflags1 = (IPANSLST | flags1); | ||
1687 | |||
1688 | b2f0_result = b2f0(REPLY, parm); | ||
1689 | |||
1690 | if ((!b2f0_result) || (b2f0_result == 5)) { | ||
1691 | |||
1692 | if (ipbfadr2) | ||
1693 | *ipbfadr2 = parm->ipbfadr2; | ||
1694 | if (ipbfln2f) | ||
1695 | *ipbfln2f = parm->ipbfln2f; | ||
1696 | } | ||
1697 | release_param(parm); | ||
1698 | |||
1699 | iucv_debug(2, "exiting"); | ||
1700 | |||
1701 | return b2f0_result; | ||
1702 | } | ||
1703 | |||
1704 | /* | ||
1705 | * Name: iucv_reply_prmmsg | ||
1706 | * Purpose: This function responds to the two-way messages that you | ||
1707 | * receive. You must identify completely the message to | ||
1708 | * which you wish to reply. ie, pathid, msgid, and trgcls. | ||
1709 | * Prmmsg signifies the data is moved into the | ||
1710 | * parameter list. | ||
1711 | * Input: pathid - path identification number | ||
1712 | * msgid - specifies the message ID. | ||
1713 | * trgcls - specifies target class | ||
1714 | * flags1 - option for path | ||
1715 | * IPPRTY- specifies if you want to send priority message | ||
1716 | * prmmsg - 8-bytes of data to be placed into the parameter | ||
1717 | * list. | ||
1718 | * Output: NA | ||
1719 | * Return: b2f0_result - return code from CP | ||
1720 | */ | ||
1721 | int | ||
1722 | iucv_reply_prmmsg (__u16 pathid, | ||
1723 | __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8]) | ||
1724 | { | ||
1725 | iparml_dpl *parm; | ||
1726 | ulong b2f0_result; | ||
1727 | |||
1728 | iucv_debug(2, "entering"); | ||
1729 | |||
1730 | parm = (iparml_dpl *)grab_param(); | ||
1731 | |||
1732 | parm->ippathid = pathid; | ||
1733 | parm->ipmsgid = msgid; | ||
1734 | parm->iptrgcls = trgcls; | ||
1735 | memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg)); | ||
1736 | parm->ipflags1 = (IPRMDATA | flags1); | ||
1737 | |||
1738 | b2f0_result = b2f0(REPLY, parm); | ||
1739 | release_param(parm); | ||
1740 | |||
1741 | iucv_debug(2, "exiting"); | ||
1742 | |||
1743 | return b2f0_result; | ||
1744 | } | ||
1745 | |||
1746 | /** | ||
1747 | * iucv_resume: | ||
1748 | * @pathid: Path identification number | ||
1749 | * @user_data: 16-byte of user data | ||
1750 | * | ||
1751 | * This function restores communication over a quiesced path. | ||
1752 | * Returns: return code from CP | ||
1753 | */ | ||
1754 | int | ||
1755 | iucv_resume (__u16 pathid, __u8 user_data[16]) | ||
1756 | { | ||
1757 | iparml_control *parm; | ||
1758 | ulong b2f0_result = 0; | ||
1759 | |||
1760 | iucv_debug(1, "entering"); | ||
1761 | iucv_debug(1, "pathid = %d", pathid); | ||
1762 | |||
1763 | parm = (iparml_control *)grab_param(); | ||
1764 | |||
1765 | memcpy (parm->ipuser, user_data, sizeof (*user_data)); | ||
1766 | parm->ippathid = pathid; | ||
1767 | |||
1768 | b2f0_result = b2f0(RESUME, parm); | ||
1769 | release_param(parm); | ||
1770 | |||
1771 | iucv_debug(1, "exiting"); | ||
1772 | |||
1773 | return b2f0_result; | ||
1774 | } | ||
1775 | |||
1776 | /* | ||
1777 | * Name: iucv_send | ||
1778 | * Purpose: sends messages | ||
1779 | * Input: pathid - ushort, pathid | ||
1780 | * msgid - ulong *, id of message returned to caller | ||
1781 | * trgcls - ulong, target message class | ||
1782 | * srccls - ulong, source message class | ||
1783 | * msgtag - ulong, message tag | ||
1784 | * flags1 - Contains options for this path. | ||
1785 | * IPPRTY - Ox20 - specifies if you want to send a priority message. | ||
1786 | * buffer - pointer to buffer | ||
1787 | * buflen - ulong, length of buffer | ||
1788 | * Output: b2f0_result - return code from b2f0 call | ||
1789 | * msgid - returns message id | ||
1790 | */ | ||
1791 | int | ||
1792 | iucv_send (__u16 pathid, __u32 * msgid, | ||
1793 | __u32 trgcls, __u32 srccls, | ||
1794 | __u32 msgtag, int flags1, void *buffer, ulong buflen) | ||
1795 | { | ||
1796 | iparml_db *parm; | ||
1797 | ulong b2f0_result; | ||
1798 | |||
1799 | iucv_debug(2, "entering"); | ||
1800 | |||
1801 | if (!buffer) | ||
1802 | return -EINVAL; | ||
1803 | |||
1804 | parm = (iparml_db *)grab_param(); | ||
1805 | |||
1806 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1807 | parm->ippathid = pathid; | ||
1808 | parm->iptrgcls = trgcls; | ||
1809 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
1810 | parm->ipsrccls = srccls; | ||
1811 | parm->ipmsgtag = msgtag; | ||
1812 | parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */ | ||
1813 | |||
1814 | b2f0_result = b2f0(SEND, parm); | ||
1815 | |||
1816 | if ((!b2f0_result) && (msgid)) | ||
1817 | *msgid = parm->ipmsgid; | ||
1818 | release_param(parm); | ||
1819 | |||
1820 | iucv_debug(2, "exiting"); | ||
1821 | |||
1822 | return b2f0_result; | ||
1823 | } | ||
1824 | |||
1825 | /* | ||
1826 | * Name: iucv_send_array | ||
1827 | * Purpose: This function transmits data to another application. | ||
1828 | * The contents of buffer is the address of the array of | ||
1829 | * addresses and lengths of discontiguous buffers that hold | ||
1830 | * the message text. This is a one-way message and the | ||
1831 | * receiver will not reply to the message. | ||
1832 | * Input: pathid - path identification number | ||
1833 | * trgcls - specifies target class | ||
1834 | * srccls - specifies the source message class | ||
1835 | * msgtag - specifies a tag to be associated witht the message | ||
1836 | * flags1 - option for path | ||
1837 | * IPPRTY- specifies if you want to send priority message | ||
1838 | * buffer - address of array of send buffers | ||
1839 | * buflen - total length of send buffers | ||
1840 | * Output: msgid - specifies the message ID. | ||
1841 | * Return: b2f0_result - return code from CP | ||
1842 | * (-EINVAL) - buffer address is NULL | ||
1843 | */ | ||
1844 | int | ||
1845 | iucv_send_array (__u16 pathid, | ||
1846 | __u32 * msgid, | ||
1847 | __u32 trgcls, | ||
1848 | __u32 srccls, | ||
1849 | __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen) | ||
1850 | { | ||
1851 | iparml_db *parm; | ||
1852 | ulong b2f0_result; | ||
1853 | |||
1854 | iucv_debug(2, "entering"); | ||
1855 | |||
1856 | if (!buffer) | ||
1857 | return -EINVAL; | ||
1858 | |||
1859 | parm = (iparml_db *)grab_param(); | ||
1860 | |||
1861 | parm->ippathid = pathid; | ||
1862 | parm->iptrgcls = trgcls; | ||
1863 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1864 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
1865 | parm->ipsrccls = srccls; | ||
1866 | parm->ipmsgtag = msgtag; | ||
1867 | parm->ipflags1 = (IPNORPY | IPBUFLST | flags1); | ||
1868 | b2f0_result = b2f0(SEND, parm); | ||
1869 | |||
1870 | if ((!b2f0_result) && (msgid)) | ||
1871 | *msgid = parm->ipmsgid; | ||
1872 | release_param(parm); | ||
1873 | |||
1874 | iucv_debug(2, "exiting"); | ||
1875 | return b2f0_result; | ||
1876 | } | ||
1877 | |||
1878 | /* | ||
1879 | * Name: iucv_send_prmmsg | ||
1880 | * Purpose: This function transmits data to another application. | ||
1881 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
1882 | * into the parameter list. This is a one-way message and the | ||
1883 | * receiver will not reply to the message. | ||
1884 | * Input: pathid - path identification number | ||
1885 | * trgcls - specifies target class | ||
1886 | * srccls - specifies the source message class | ||
1887 | * msgtag - specifies a tag to be associated with the message | ||
1888 | * flags1 - option for path | ||
1889 | * IPPRTY- specifies if you want to send priority message | ||
1890 | * prmmsg - 8-bytes of data to be placed into parameter list | ||
1891 | * Output: msgid - specifies the message ID. | ||
1892 | * Return: b2f0_result - return code from CP | ||
1893 | */ | ||
1894 | int | ||
1895 | iucv_send_prmmsg (__u16 pathid, | ||
1896 | __u32 * msgid, | ||
1897 | __u32 trgcls, | ||
1898 | __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8]) | ||
1899 | { | ||
1900 | iparml_dpl *parm; | ||
1901 | ulong b2f0_result; | ||
1902 | |||
1903 | iucv_debug(2, "entering"); | ||
1904 | |||
1905 | parm = (iparml_dpl *)grab_param(); | ||
1906 | |||
1907 | parm->ippathid = pathid; | ||
1908 | parm->iptrgcls = trgcls; | ||
1909 | parm->ipsrccls = srccls; | ||
1910 | parm->ipmsgtag = msgtag; | ||
1911 | parm->ipflags1 = (IPRMDATA | IPNORPY | flags1); | ||
1912 | memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); | ||
1913 | |||
1914 | b2f0_result = b2f0(SEND, parm); | ||
1915 | |||
1916 | if ((!b2f0_result) && (msgid)) | ||
1917 | *msgid = parm->ipmsgid; | ||
1918 | release_param(parm); | ||
1919 | |||
1920 | iucv_debug(2, "exiting"); | ||
1921 | |||
1922 | return b2f0_result; | ||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Name: iucv_send2way | ||
1927 | * Purpose: This function transmits data to another application. | ||
1928 | * Data to be transmitted is in a buffer. The receiver | ||
1929 | * of the send is expected to reply to the message and | ||
1930 | * a buffer is provided into which IUCV moves the reply | ||
1931 | * to this message. | ||
1932 | * Input: pathid - path identification number | ||
1933 | * trgcls - specifies target class | ||
1934 | * srccls - specifies the source message class | ||
1935 | * msgtag - specifies a tag associated with the message | ||
1936 | * flags1 - option for path | ||
1937 | * IPPRTY- specifies if you want to send priority message | ||
1938 | * buffer - address of send buffer | ||
1939 | * buflen - length of send buffer | ||
1940 | * ansbuf - address of buffer to reply with | ||
1941 | * anslen - length of buffer to reply with | ||
1942 | * Output: msgid - specifies the message ID. | ||
1943 | * Return: b2f0_result - return code from CP | ||
1944 | * (-EINVAL) - buffer or ansbuf address is NULL | ||
1945 | */ | ||
1946 | int | ||
1947 | iucv_send2way (__u16 pathid, | ||
1948 | __u32 * msgid, | ||
1949 | __u32 trgcls, | ||
1950 | __u32 srccls, | ||
1951 | __u32 msgtag, | ||
1952 | int flags1, | ||
1953 | void *buffer, ulong buflen, void *ansbuf, ulong anslen) | ||
1954 | { | ||
1955 | iparml_db *parm; | ||
1956 | ulong b2f0_result; | ||
1957 | |||
1958 | iucv_debug(2, "entering"); | ||
1959 | |||
1960 | if (!buffer || !ansbuf) | ||
1961 | return -EINVAL; | ||
1962 | |||
1963 | parm = (iparml_db *)grab_param(); | ||
1964 | |||
1965 | parm->ippathid = pathid; | ||
1966 | parm->iptrgcls = trgcls; | ||
1967 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1968 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
1969 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
1970 | parm->ipbfln2f = (__u32) anslen; | ||
1971 | parm->ipsrccls = srccls; | ||
1972 | parm->ipmsgtag = msgtag; | ||
1973 | parm->ipflags1 = flags1; /* priority message */ | ||
1974 | |||
1975 | b2f0_result = b2f0(SEND, parm); | ||
1976 | |||
1977 | if ((!b2f0_result) && (msgid)) | ||
1978 | *msgid = parm->ipmsgid; | ||
1979 | release_param(parm); | ||
1980 | |||
1981 | iucv_debug(2, "exiting"); | ||
1982 | |||
1983 | return b2f0_result; | ||
1984 | } | ||
1985 | |||
1986 | /* | ||
1987 | * Name: iucv_send2way_array | ||
1988 | * Purpose: This function transmits data to another application. | ||
1989 | * The contents of buffer is the address of the array of | ||
1990 | * addresses and lengths of discontiguous buffers that hold | ||
1991 | * the message text. The receiver of the send is expected to | ||
1992 | * reply to the message and a buffer is provided into which | ||
1993 | * IUCV moves the reply to this message. | ||
1994 | * Input: pathid - path identification number | ||
1995 | * trgcls - specifies target class | ||
1996 | * srccls - specifies the source message class | ||
1997 | * msgtag - spcifies a tag to be associated with the message | ||
1998 | * flags1 - option for path | ||
1999 | * IPPRTY- specifies if you want to send priority message | ||
2000 | * buffer - address of array of send buffers | ||
2001 | * buflen - total length of send buffers | ||
2002 | * ansbuf - address of buffer to reply with | ||
2003 | * anslen - length of buffer to reply with | ||
2004 | * Output: msgid - specifies the message ID. | ||
2005 | * Return: b2f0_result - return code from CP | ||
2006 | * (-EINVAL) - buffer address is NULL | ||
2007 | */ | ||
2008 | int | ||
2009 | iucv_send2way_array (__u16 pathid, | ||
2010 | __u32 * msgid, | ||
2011 | __u32 trgcls, | ||
2012 | __u32 srccls, | ||
2013 | __u32 msgtag, | ||
2014 | int flags1, | ||
2015 | iucv_array_t * buffer, | ||
2016 | ulong buflen, iucv_array_t * ansbuf, ulong anslen) | ||
2017 | { | ||
2018 | iparml_db *parm; | ||
2019 | ulong b2f0_result; | ||
2020 | |||
2021 | iucv_debug(2, "entering"); | ||
2022 | |||
2023 | if (!buffer || !ansbuf) | ||
2024 | return -EINVAL; | ||
2025 | |||
2026 | parm = (iparml_db *)grab_param(); | ||
2027 | |||
2028 | parm->ippathid = pathid; | ||
2029 | parm->iptrgcls = trgcls; | ||
2030 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
2031 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
2032 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
2033 | parm->ipbfln2f = (__u32) anslen; | ||
2034 | parm->ipsrccls = srccls; | ||
2035 | parm->ipmsgtag = msgtag; | ||
2036 | parm->ipflags1 = (IPBUFLST | IPANSLST | flags1); | ||
2037 | b2f0_result = b2f0(SEND, parm); | ||
2038 | if ((!b2f0_result) && (msgid)) | ||
2039 | *msgid = parm->ipmsgid; | ||
2040 | release_param(parm); | ||
2041 | |||
2042 | iucv_debug(2, "exiting"); | ||
2043 | return b2f0_result; | ||
2044 | } | ||
2045 | |||
2046 | /* | ||
2047 | * Name: iucv_send2way_prmmsg | ||
2048 | * Purpose: This function transmits data to another application. | ||
2049 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
2050 | * into the parameter list. This is a two-way message and the | ||
2051 | * receiver of the message is expected to reply. A buffer | ||
2052 | * is provided into which IUCV moves the reply to this | ||
2053 | * message. | ||
2054 | * Input: pathid - path identification number | ||
2055 | * trgcls - specifies target class | ||
2056 | * srccls - specifies the source message class | ||
2057 | * msgtag - specifies a tag to be associated with the message | ||
2058 | * flags1 - option for path | ||
2059 | * IPPRTY- specifies if you want to send priority message | ||
2060 | * prmmsg - 8-bytes of data to be placed in parameter list | ||
2061 | * ansbuf - address of buffer to reply with | ||
2062 | * anslen - length of buffer to reply with | ||
2063 | * Output: msgid - specifies the message ID. | ||
2064 | * Return: b2f0_result - return code from CP | ||
2065 | * (-EINVAL) - buffer address is NULL | ||
2066 | */ | ||
2067 | int | ||
2068 | iucv_send2way_prmmsg (__u16 pathid, | ||
2069 | __u32 * msgid, | ||
2070 | __u32 trgcls, | ||
2071 | __u32 srccls, | ||
2072 | __u32 msgtag, | ||
2073 | ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen) | ||
2074 | { | ||
2075 | iparml_dpl *parm; | ||
2076 | ulong b2f0_result; | ||
2077 | |||
2078 | iucv_debug(2, "entering"); | ||
2079 | |||
2080 | if (!ansbuf) | ||
2081 | return -EINVAL; | ||
2082 | |||
2083 | parm = (iparml_dpl *)grab_param(); | ||
2084 | |||
2085 | parm->ippathid = pathid; | ||
2086 | parm->iptrgcls = trgcls; | ||
2087 | parm->ipsrccls = srccls; | ||
2088 | parm->ipmsgtag = msgtag; | ||
2089 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
2090 | parm->ipbfln2f = (__u32) anslen; | ||
2091 | parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */ | ||
2092 | memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); | ||
2093 | |||
2094 | b2f0_result = b2f0(SEND, parm); | ||
2095 | |||
2096 | if ((!b2f0_result) && (msgid)) | ||
2097 | *msgid = parm->ipmsgid; | ||
2098 | release_param(parm); | ||
2099 | |||
2100 | iucv_debug(2, "exiting"); | ||
2101 | |||
2102 | return b2f0_result; | ||
2103 | } | ||
2104 | |||
2105 | /* | ||
2106 | * Name: iucv_send2way_prmmsg_array | ||
2107 | * Purpose: This function transmits data to another application. | ||
2108 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
2109 | * into the parameter list. This is a two-way message and the | ||
2110 | * receiver of the message is expected to reply. A buffer | ||
2111 | * is provided into which IUCV moves the reply to this | ||
2112 | * message. The contents of ansbuf is the address of the | ||
2113 | * array of addresses and lengths of discontiguous buffers | ||
2114 | * that contain the reply. | ||
2115 | * Input: pathid - path identification number | ||
2116 | * trgcls - specifies target class | ||
2117 | * srccls - specifies the source message class | ||
2118 | * msgtag - specifies a tag to be associated with the message | ||
2119 | * flags1 - option for path | ||
2120 | * IPPRTY- specifies if you want to send priority message | ||
2121 | * prmmsg - 8-bytes of data to be placed into the parameter list | ||
2122 | * ansbuf - address of buffer to reply with | ||
2123 | * anslen - length of buffer to reply with | ||
2124 | * Output: msgid - specifies the message ID. | ||
2125 | * Return: b2f0_result - return code from CP | ||
2126 | * (-EINVAL) - ansbuf address is NULL | ||
2127 | */ | ||
2128 | int | ||
2129 | iucv_send2way_prmmsg_array (__u16 pathid, | ||
2130 | __u32 * msgid, | ||
2131 | __u32 trgcls, | ||
2132 | __u32 srccls, | ||
2133 | __u32 msgtag, | ||
2134 | int flags1, | ||
2135 | __u8 prmmsg[8], | ||
2136 | iucv_array_t * ansbuf, ulong anslen) | ||
2137 | { | ||
2138 | iparml_dpl *parm; | ||
2139 | ulong b2f0_result; | ||
2140 | |||
2141 | iucv_debug(2, "entering"); | ||
2142 | |||
2143 | if (!ansbuf) | ||
2144 | return -EINVAL; | ||
2145 | |||
2146 | parm = (iparml_dpl *)grab_param(); | ||
2147 | |||
2148 | parm->ippathid = pathid; | ||
2149 | parm->iptrgcls = trgcls; | ||
2150 | parm->ipsrccls = srccls; | ||
2151 | parm->ipmsgtag = msgtag; | ||
2152 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
2153 | parm->ipbfln2f = (__u32) anslen; | ||
2154 | parm->ipflags1 = (IPRMDATA | IPANSLST | flags1); | ||
2155 | memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); | ||
2156 | b2f0_result = b2f0(SEND, parm); | ||
2157 | if ((!b2f0_result) && (msgid)) | ||
2158 | *msgid = parm->ipmsgid; | ||
2159 | release_param(parm); | ||
2160 | |||
2161 | iucv_debug(2, "exiting"); | ||
2162 | return b2f0_result; | ||
2163 | } | ||
2164 | |||
2165 | void | ||
2166 | iucv_setmask_cpuid (void *result) | ||
2167 | { | ||
2168 | iparml_set_mask *parm; | ||
2169 | |||
2170 | iucv_debug(1, "entering"); | ||
2171 | parm = (iparml_set_mask *)grab_param(); | ||
2172 | parm->ipmask = *((__u8*)result); | ||
2173 | *((ulong *)result) = b2f0(SETMASK, parm); | ||
2174 | release_param(parm); | ||
2175 | |||
2176 | iucv_debug(1, "b2f0_result = %ld", *((ulong *)result)); | ||
2177 | iucv_debug(1, "exiting"); | ||
2178 | } | ||
2179 | |||
2180 | /* | ||
2181 | * Name: iucv_setmask | ||
2182 | * Purpose: This function enables or disables the following IUCV | ||
2183 | * external interruptions: Nonpriority and priority message | ||
2184 | * interrupts, nonpriority and priority reply interrupts. | ||
2185 | * Input: SetMaskFlag - options for interrupts | ||
2186 | * 0x80 - Nonpriority_MessagePendingInterruptsFlag | ||
2187 | * 0x40 - Priority_MessagePendingInterruptsFlag | ||
2188 | * 0x20 - Nonpriority_MessageCompletionInterruptsFlag | ||
2189 | * 0x10 - Priority_MessageCompletionInterruptsFlag | ||
2190 | * 0x08 - IUCVControlInterruptsFlag | ||
2191 | * Output: NA | ||
2192 | * Return: b2f0_result - return code from CP | ||
2193 | */ | ||
2194 | int | ||
2195 | iucv_setmask (int SetMaskFlag) | ||
2196 | { | ||
2197 | union { | ||
2198 | ulong result; | ||
2199 | __u8 param; | ||
2200 | } u; | ||
2201 | int cpu; | ||
2202 | |||
2203 | u.param = SetMaskFlag; | ||
2204 | cpu = get_cpu(); | ||
2205 | smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid); | ||
2206 | put_cpu(); | ||
2207 | |||
2208 | return u.result; | ||
2209 | } | ||
2210 | |||
2211 | /** | ||
2212 | * iucv_sever: | ||
2213 | * @pathid: Path identification number | ||
2214 | * @user_data: 16-byte of user data | ||
2215 | * | ||
2216 | * This function terminates an iucv path. | ||
2217 | * Returns: return code from CP | ||
2218 | */ | ||
2219 | int | ||
2220 | iucv_sever(__u16 pathid, __u8 user_data[16]) | ||
2221 | { | ||
2222 | iparml_control *parm; | ||
2223 | ulong b2f0_result = 0; | ||
2224 | |||
2225 | iucv_debug(1, "entering"); | ||
2226 | parm = (iparml_control *)grab_param(); | ||
2227 | |||
2228 | memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); | ||
2229 | parm->ippathid = pathid; | ||
2230 | |||
2231 | b2f0_result = b2f0(SEVER, parm); | ||
2232 | |||
2233 | if (!b2f0_result) | ||
2234 | iucv_remove_pathid(pathid); | ||
2235 | release_param(parm); | ||
2236 | |||
2237 | iucv_debug(1, "exiting"); | ||
2238 | return b2f0_result; | ||
2239 | } | ||
2240 | |||
2241 | /* | ||
2242 | * Interrupt Handlers | ||
2243 | *******************************************************************************/ | ||
2244 | |||
2245 | /** | ||
2246 | * iucv_irq_handler: | ||
2247 | * @regs: Current registers | ||
2248 | * @code: irq code | ||
2249 | * | ||
2250 | * Handles external interrupts coming in from CP. | ||
2251 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). | ||
2252 | */ | ||
2253 | static void | ||
2254 | iucv_irq_handler(__u16 code) | ||
2255 | { | ||
2256 | iucv_irqdata *irqdata; | ||
2257 | |||
2258 | irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC); | ||
2259 | if (!irqdata) { | ||
2260 | printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__); | ||
2261 | return; | ||
2262 | } | ||
2263 | |||
2264 | memcpy(&irqdata->data, iucv_external_int_buffer, | ||
2265 | sizeof(iucv_GeneralInterrupt)); | ||
2266 | |||
2267 | spin_lock(&iucv_irq_queue_lock); | ||
2268 | list_add_tail(&irqdata->queue, &iucv_irq_queue); | ||
2269 | spin_unlock(&iucv_irq_queue_lock); | ||
2270 | |||
2271 | tasklet_schedule(&iucv_tasklet); | ||
2272 | } | ||
2273 | |||
2274 | /** | ||
2275 | * iucv_do_int: | ||
2276 | * @int_buf: Pointer to copy of external interrupt buffer | ||
2277 | * | ||
2278 | * The workhorse for handling interrupts queued by iucv_irq_handler(). | ||
2279 | * This function is called from the bottom half iucv_tasklet_handler(). | ||
2280 | */ | ||
2281 | static void | ||
2282 | iucv_do_int(iucv_GeneralInterrupt * int_buf) | ||
2283 | { | ||
2284 | handler *h = NULL; | ||
2285 | struct list_head *lh; | ||
2286 | ulong flags; | ||
2287 | iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */ | ||
2288 | __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */ | ||
2289 | int rc = 0, j = 0; | ||
2290 | __u8 no_listener[16] = "NO LISTENER"; | ||
2291 | |||
2292 | iucv_debug(2, "entering, pathid %d, type %02X", | ||
2293 | int_buf->ippathid, int_buf->iptype); | ||
2294 | iucv_dumpit("External Interrupt Buffer:", | ||
2295 | int_buf, sizeof(iucv_GeneralInterrupt)); | ||
2296 | |||
2297 | ASCEBC (no_listener, 16); | ||
2298 | |||
2299 | if (int_buf->iptype != 01) { | ||
2300 | if ((int_buf->ippathid) > (max_connections - 1)) { | ||
2301 | printk(KERN_WARNING "%s: Got interrupt with pathid %d" | ||
2302 | " > max_connections (%ld)\n", __FUNCTION__, | ||
2303 | int_buf->ippathid, max_connections - 1); | ||
2304 | } else { | ||
2305 | h = iucv_pathid_table[int_buf->ippathid]; | ||
2306 | interrupt = h->interrupt_table; | ||
2307 | iucv_dumpit("Handler:", h, sizeof(handler)); | ||
2308 | } | ||
2309 | } | ||
2310 | |||
2311 | /* end of if statement */ | ||
2312 | switch (int_buf->iptype) { | ||
2313 | case 0x01: /* connection pending */ | ||
2314 | if (messagesDisabled) { | ||
2315 | iucv_setmask(~0); | ||
2316 | messagesDisabled = 0; | ||
2317 | } | ||
2318 | spin_lock_irqsave(&iucv_lock, flags); | ||
2319 | list_for_each(lh, &iucv_handler_table) { | ||
2320 | h = list_entry(lh, handler, list); | ||
2321 | memcpy(temp_buff1, &(int_buf->ipvmid), 24); | ||
2322 | memcpy(temp_buff2, &(h->id.userid), 24); | ||
2323 | for (j = 0; j < 24; j++) { | ||
2324 | temp_buff1[j] &= (h->id.mask)[j]; | ||
2325 | temp_buff2[j] &= (h->id.mask)[j]; | ||
2326 | } | ||
2327 | |||
2328 | iucv_dumpit("temp_buff1:", | ||
2329 | temp_buff1, sizeof(temp_buff1)); | ||
2330 | iucv_dumpit("temp_buff2", | ||
2331 | temp_buff2, sizeof(temp_buff2)); | ||
2332 | |||
2333 | if (!memcmp (temp_buff1, temp_buff2, 24)) { | ||
2334 | |||
2335 | iucv_debug(2, | ||
2336 | "found a matching handler"); | ||
2337 | break; | ||
2338 | } else | ||
2339 | h = NULL; | ||
2340 | } | ||
2341 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
2342 | if (h) { | ||
2343 | /* ADD PATH TO PATHID TABLE */ | ||
2344 | rc = iucv_add_pathid(int_buf->ippathid, h); | ||
2345 | if (rc) { | ||
2346 | iucv_sever (int_buf->ippathid, | ||
2347 | no_listener); | ||
2348 | iucv_debug(1, | ||
2349 | "add_pathid failed, rc = %d", | ||
2350 | rc); | ||
2351 | } else { | ||
2352 | interrupt = h->interrupt_table; | ||
2353 | if (interrupt->ConnectionPending) { | ||
2354 | EBCASC (int_buf->ipvmid, 8); | ||
2355 | interrupt->ConnectionPending( | ||
2356 | (iucv_ConnectionPending *)int_buf, | ||
2357 | h->pgm_data); | ||
2358 | } else | ||
2359 | iucv_sever(int_buf->ippathid, | ||
2360 | no_listener); | ||
2361 | } | ||
2362 | } else | ||
2363 | iucv_sever(int_buf->ippathid, no_listener); | ||
2364 | break; | ||
2365 | |||
2366 | case 0x02: /*connection complete */ | ||
2367 | if (messagesDisabled) { | ||
2368 | iucv_setmask(~0); | ||
2369 | messagesDisabled = 0; | ||
2370 | } | ||
2371 | if (h) { | ||
2372 | if (interrupt->ConnectionComplete) | ||
2373 | { | ||
2374 | interrupt->ConnectionComplete( | ||
2375 | (iucv_ConnectionComplete *)int_buf, | ||
2376 | h->pgm_data); | ||
2377 | } | ||
2378 | else | ||
2379 | iucv_debug(1, | ||
2380 | "ConnectionComplete not called"); | ||
2381 | } else | ||
2382 | iucv_sever(int_buf->ippathid, no_listener); | ||
2383 | break; | ||
2384 | |||
2385 | case 0x03: /* connection severed */ | ||
2386 | if (messagesDisabled) { | ||
2387 | iucv_setmask(~0); | ||
2388 | messagesDisabled = 0; | ||
2389 | } | ||
2390 | if (h) { | ||
2391 | if (interrupt->ConnectionSevered) | ||
2392 | interrupt->ConnectionSevered( | ||
2393 | (iucv_ConnectionSevered *)int_buf, | ||
2394 | h->pgm_data); | ||
2395 | |||
2396 | else | ||
2397 | iucv_sever (int_buf->ippathid, no_listener); | ||
2398 | } else | ||
2399 | iucv_sever(int_buf->ippathid, no_listener); | ||
2400 | break; | ||
2401 | |||
2402 | case 0x04: /* connection quiesced */ | ||
2403 | if (messagesDisabled) { | ||
2404 | iucv_setmask(~0); | ||
2405 | messagesDisabled = 0; | ||
2406 | } | ||
2407 | if (h) { | ||
2408 | if (interrupt->ConnectionQuiesced) | ||
2409 | interrupt->ConnectionQuiesced( | ||
2410 | (iucv_ConnectionQuiesced *)int_buf, | ||
2411 | h->pgm_data); | ||
2412 | else | ||
2413 | iucv_debug(1, | ||
2414 | "ConnectionQuiesced not called"); | ||
2415 | } | ||
2416 | break; | ||
2417 | |||
2418 | case 0x05: /* connection resumed */ | ||
2419 | if (messagesDisabled) { | ||
2420 | iucv_setmask(~0); | ||
2421 | messagesDisabled = 0; | ||
2422 | } | ||
2423 | if (h) { | ||
2424 | if (interrupt->ConnectionResumed) | ||
2425 | interrupt->ConnectionResumed( | ||
2426 | (iucv_ConnectionResumed *)int_buf, | ||
2427 | h->pgm_data); | ||
2428 | else | ||
2429 | iucv_debug(1, | ||
2430 | "ConnectionResumed not called"); | ||
2431 | } | ||
2432 | break; | ||
2433 | |||
2434 | case 0x06: /* priority message complete */ | ||
2435 | case 0x07: /* nonpriority message complete */ | ||
2436 | if (h) { | ||
2437 | if (interrupt->MessageComplete) | ||
2438 | interrupt->MessageComplete( | ||
2439 | (iucv_MessageComplete *)int_buf, | ||
2440 | h->pgm_data); | ||
2441 | else | ||
2442 | iucv_debug(2, | ||
2443 | "MessageComplete not called"); | ||
2444 | } | ||
2445 | break; | ||
2446 | |||
2447 | case 0x08: /* priority message pending */ | ||
2448 | case 0x09: /* nonpriority message pending */ | ||
2449 | if (h) { | ||
2450 | if (interrupt->MessagePending) | ||
2451 | interrupt->MessagePending( | ||
2452 | (iucv_MessagePending *) int_buf, | ||
2453 | h->pgm_data); | ||
2454 | else | ||
2455 | iucv_debug(2, | ||
2456 | "MessagePending not called"); | ||
2457 | } | ||
2458 | break; | ||
2459 | default: /* unknown iucv type */ | ||
2460 | printk(KERN_WARNING "%s: unknown iucv interrupt\n", | ||
2461 | __FUNCTION__); | ||
2462 | break; | ||
2463 | } /* end switch */ | ||
2464 | |||
2465 | iucv_debug(2, "exiting pathid %d, type %02X", | ||
2466 | int_buf->ippathid, int_buf->iptype); | ||
2467 | |||
2468 | return; | ||
2469 | } | ||
2470 | |||
2471 | /** | ||
2472 | * iucv_tasklet_handler: | ||
2473 | * | ||
2474 | * This function loops over the queue of irq buffers and runs iucv_do_int() | ||
2475 | * on every queue element. | ||
2476 | */ | ||
2477 | static void | ||
2478 | iucv_tasklet_handler(unsigned long ignored) | ||
2479 | { | ||
2480 | struct list_head head; | ||
2481 | struct list_head *next; | ||
2482 | ulong flags; | ||
2483 | |||
2484 | spin_lock_irqsave(&iucv_irq_queue_lock, flags); | ||
2485 | list_add(&head, &iucv_irq_queue); | ||
2486 | list_del_init(&iucv_irq_queue); | ||
2487 | spin_unlock_irqrestore (&iucv_irq_queue_lock, flags); | ||
2488 | |||
2489 | next = head.next; | ||
2490 | while (next != &head) { | ||
2491 | iucv_irqdata *p = list_entry(next, iucv_irqdata, queue); | ||
2492 | |||
2493 | next = next->next; | ||
2494 | iucv_do_int(&p->data); | ||
2495 | kfree(p); | ||
2496 | } | ||
2497 | |||
2498 | return; | ||
2499 | } | ||
2500 | |||
subsys_initcall(iucv_init);
module_exit(iucv_exit);

/**
 * Export all public stuff
 *
 * NOTE(review): the #if 0 sections keep currently-unused entry points
 * out of the export table; drop them entirely if no in-tree user appears.
 */
EXPORT_SYMBOL (iucv_bus);
EXPORT_SYMBOL (iucv_root);
EXPORT_SYMBOL (iucv_accept);
EXPORT_SYMBOL (iucv_connect);
#if 0
EXPORT_SYMBOL (iucv_purge);
EXPORT_SYMBOL (iucv_query_maxconn);
EXPORT_SYMBOL (iucv_query_bufsize);
EXPORT_SYMBOL (iucv_quiesce);
#endif
EXPORT_SYMBOL (iucv_receive);
#if 0
EXPORT_SYMBOL (iucv_receive_array);
#endif
EXPORT_SYMBOL (iucv_reject);
#if 0
EXPORT_SYMBOL (iucv_reply);
EXPORT_SYMBOL (iucv_reply_array);
EXPORT_SYMBOL (iucv_resume);
#endif
EXPORT_SYMBOL (iucv_reply_prmmsg);
EXPORT_SYMBOL (iucv_send);
EXPORT_SYMBOL (iucv_send2way);
EXPORT_SYMBOL (iucv_send2way_array);
EXPORT_SYMBOL (iucv_send2way_prmmsg);
EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
#if 0
EXPORT_SYMBOL (iucv_send_array);
EXPORT_SYMBOL (iucv_send_prmmsg);
EXPORT_SYMBOL (iucv_setmask);
#endif
EXPORT_SYMBOL (iucv_sever);
EXPORT_SYMBOL (iucv_register_program);
EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h deleted file mode 100644 index 5b6b1b7241c9..000000000000 --- a/drivers/s390/net/iucv.h +++ /dev/null | |||
@@ -1,849 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/s390/net/iucv.h | ||
3 | * IUCV base support. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 2000 IBM Corporation | ||
7 | * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com) | ||
8 | * Xenia Tkatschow (xenia@us.ibm.com) | ||
9 | * | ||
10 | * | ||
11 | * Functionality: | ||
12 | * To explore any of the IUCV functions, one must first register | ||
13 | * their program using iucv_register_program(). Once your program has | ||
14 | * successfully completed a register, it can exploit the other functions. | ||
 * For further reference on all IUCV functionality, refer to the
 * CP Programming Services book, also available on the web
 * through www.ibm.com/s390/vm/pubs, manual # SC24-5760
18 | * | ||
19 | * Definition of Return Codes | ||
20 | * -All positive return codes including zero are reflected back | ||
21 | * from CP except for iucv_register_program. The definition of each | ||
 * return code can be found in the CP Programming Services book.
 * Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
24 | * - Return Code of: | ||
25 | * (-EINVAL) Invalid value | ||
26 | * (-ENOMEM) storage allocation failed | ||
 * pgmask defined in iucv_register_program will be set depending on input
 * parameters.
29 | * | ||
30 | */ | ||
31 | |||
32 | #include <linux/types.h> | ||
33 | #include <asm/debug.h> | ||
34 | |||
/**
 * Debug Facility stuff
 *
 * Three s390 debug-feature areas are registered: "setup" for driver
 * initialization, "data" for payload dumps, "trace" for control flow.
 * For each area: record name, record length, pages, number of areas
 * and the initial debug level.
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 32
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

/* Log a fixed text record into debug area <name> at <level>. */
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

/* Log <len> raw bytes from <addr> into debug area <name>. */
#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

/* Per-cpu scratch buffer used by IUCV_DBF_TEXT_ below. */
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* printf-style text record; formats into the 256-byte per-cpu buffer.
 * NOTE(review): sprintf is unbounded here — callers must keep messages
 * short; confirm all call sites stay under 256 bytes. */
#define IUCV_DBF_TEXT_(name,level,text...) \
	do { \
		char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
		sprintf(iucv_dbf_txt_buf, text); \
		debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
		put_cpu_var(iucv_dbf_txt_buf); \
	} while (0)
75 | |||
/* Format a message straight into the trace debug area.
 * Fix: the original expansion invoked debug_sprintf_event() twice with
 * the same arguments, recording every event in duplicate; one call is
 * sufficient.  (The <name> argument is unused — output always goes to
 * the trace area, as before.) */
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, text ); \
	} while (0)
81 | |||
82 | /** | ||
83 | * some more debug stuff | ||
84 | */ | ||
/* Dump the first 32 bytes at @ptr as two rows of 16 hex bytes.
 * Fix: wrapped in do { } while (0) so the macro behaves as a single
 * statement; the original two-statement expansion would split across
 * an unbraced if/else body (CERT PRE10-C). */
#define IUCV_HEXDUMP16(importance,header,ptr) \
do { \
	PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
	PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
		   *(((char*)ptr)+30),*(((char*)ptr)+31)); \
} while (0)
104 | |||
105 | static inline void | ||
106 | iucv_hex_dump(unsigned char *buf, size_t len) | ||
107 | { | ||
108 | size_t i; | ||
109 | |||
110 | for (i = 0; i < len; i++) { | ||
111 | if (i && !(i % 16)) | ||
112 | printk("\n"); | ||
113 | printk("%02x ", *(buf + i)); | ||
114 | } | ||
115 | printk("\n"); | ||
116 | } | ||
117 | /** | ||
118 | * end of debug stuff | ||
119 | */ | ||
120 | |||
/* Shorthand types used throughout this interface.  These are plain
 * macros (not typedefs), so they textually replace any identically
 * named identifiers in including files. */
#define uchar  unsigned char
#define ushort unsigned short
#define ulong  unsigned long
#define iucv_handle_t void *
125 | |||
/* flags1:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPLOCAL  - Indicates the connect can only be satisfied on the
 *            local system
 * IPPRTY   - Indicates a priority message
 * IPQUSCE  - Indicates you do not want to receive messages on a
 *            path until an iucv_resume is issued
 * IPRMDATA - Indicates that the message is in the parameter list
 */
#define IPLOCAL 	0x01
#define IPPRTY  	0x20
#define IPQUSCE 	0x40
#define IPRMDATA	0x80

/* flags1_out:
 * All flags are defined in the output field of IPFLAGS1 for each function
 * and can be found in CP Programming Services.
 * IPNORPY - Specifies this is a one-way message and no reply is expected.
 * IPPRTY  - Indicates a priority message is permitted. Defined in flags1.
 */
#define IPNORPY 0x10

/* Interrupt-class bits accepted by iucv_setmask(). */
#define Nonpriority_MessagePendingInterruptsFlag 0x80
#define Priority_MessagePendingInterruptsFlag 0x40
#define Nonpriority_MessageCompletionInterruptsFlag 0x20
#define Priority_MessageCompletionInterruptsFlag 0x10
#define IUCVControlInterruptsFlag 0x08
#define AllInterrupts 0xf8
/*
 * Mapping of external interrupt buffers should be used with the corresponding
 * interrupt types.
 * Names: iucv_ConnectionPending  -> connection pending
 *        iucv_ConnectionComplete -> connection complete
 *        iucv_ConnectionSevered  -> connection severed
 *        iucv_ConnectionQuiesced -> connection quiesced
 *        iucv_ConnectionResumed  -> connection resumed
 *        iucv_MessagePending     -> message pending
 *        iucv_MessageComplete    -> message complete
 */
/* Interrupt type 0x01: a partner issued CONNECT to us. */
typedef struct {
	u16 ippathid;
	uchar ipflags1;
	uchar iptype;
	u16 ipmsglim;
	u16 res1;
	uchar ipvmid[8];
	uchar ipuser[16];
	u32 res3;
	uchar ippollfg;
	uchar res4[3];
} iucv_ConnectionPending;

/* Interrupt type 0x02: our CONNECT was accepted by the partner. */
typedef struct {
	u16 ippathid;
	uchar ipflags1;
	uchar iptype;
	u16 ipmsglim;
	u16 res1;
	uchar res2[8];
	uchar ipuser[16];
	u32 res3;
	uchar ippollfg;
	uchar res4[3];
} iucv_ConnectionComplete;

/* Interrupt type 0x03: the path was severed by the partner or CP. */
typedef struct {
	u16 ippathid;
	uchar res1;
	uchar iptype;
	u32 res2;
	uchar res3[8];
	uchar ipuser[16];
	u32 res4;
	uchar ippollfg;
	uchar res5[3];
} iucv_ConnectionSevered;

/* Interrupt type 0x04: the partner quiesced the path. */
typedef struct {
	u16 ippathid;
	uchar res1;
	uchar iptype;
	u32 res2;
	uchar res3[8];
	uchar ipuser[16];
	u32 res4;
	uchar ippollfg;
	uchar res5[3];
} iucv_ConnectionQuiesced;

/* Interrupt type 0x05: the partner resumed a quiesced path. */
typedef struct {
	u16 ippathid;
	uchar res1;
	uchar iptype;
	u32 res2;
	uchar res3[8];
	uchar ipuser[16];
	u32 res4;
	uchar ippollfg;
	uchar res5[3];
} iucv_ConnectionResumed;

/* Interrupt types 0x08/0x09: an incoming message awaits reception.
 * The unions overlay in-parameter-list data with the buffer length. */
typedef struct {
	u16 ippathid;
	uchar ipflags1;
	uchar iptype;
	u32 ipmsgid;
	u32 iptrgcls;
	union u2 {
		u32 iprmmsg1_u32;
		uchar iprmmsg1[4];
	} ln1msg1;
	union u1 {
		u32 ipbfln1f;
		uchar iprmmsg2[4];
	} ln1msg2;
	u32 res1[3];
	u32 ipbfln2f;
	uchar ippollfg;
	uchar res2[3];
} iucv_MessagePending;

/* Interrupt types 0x06/0x07: a message we sent has completed. */
typedef struct {
	u16 ippathid;
	uchar ipflags1;
	uchar iptype;
	u32 ipmsgid;
	u32 ipaudit;
	uchar iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res;
	u32 ipbfln2f;
	uchar ippollfg;
	uchar res2[3];
} iucv_MessageComplete;
262 | |||
263 | /* | ||
264 | * iucv_interrupt_ops_t: Is a vector of functions that handle | ||
265 | * IUCV interrupts. | ||
266 | * Parameter list: | ||
267 | * eib - is a pointer to a 40-byte area described | ||
268 | * with one of the structures above. | ||
269 | * pgm_data - this data is strictly for the | ||
270 | * interrupt handler that is passed by | ||
271 | * the application. This may be an address | ||
272 | * or token. | ||
273 | */ | ||
/* Vector of per-registration interrupt callbacks; any member may be
 * NULL, in which case the corresponding event is logged and dropped
 * (or the path refused, for ConnectionPending). */
typedef struct {
	void (*ConnectionPending) (iucv_ConnectionPending * eib,
				   void *pgm_data);
	void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
				    void *pgm_data);
	void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
				   void *pgm_data);
	void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
				    void *pgm_data);
	void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
				   void *pgm_data);
	void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
	void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
} iucv_interrupt_ops_t;
288 | |||
/*
 * iucv_array_t : Defines buffer array.
 * Inside the array may be 31-bit addresses and 31-bit lengths.
 * 8-byte aligned — presumably required by the CP interface; confirm
 * against the CP Programming Services manual.
 */
typedef struct {
	u32 address;
	u32 length;
} iucv_array_t __attribute__ ((aligned (8)));
297 | |||
298 | extern struct bus_type iucv_bus; | ||
299 | extern struct device *iucv_root; | ||
300 | |||
/* -prototypes- */
/*
 * Name: iucv_register_program
 * Purpose: Registers an application with IUCV
 * Input: pgmname - user identification
 *        userid  - machine identification
 *        pgmmask - indicates which bits in the pgmname and userid combined
 *                  will be used to determine who is given control
 *        ops     - address of vector of interrupt handlers
 *        pgm_data- application data passed to interrupt handlers
 * Output: NA
 * Return: address of handler
 *         (0) - Error occurred, registration not completed.
 * NOTE: Exact cause of failure will be recorded in syslog.
 */
iucv_handle_t iucv_register_program (uchar pgmname[16],
				     uchar userid[8],
				     uchar pgmmask[24],
				     iucv_interrupt_ops_t * ops,
				     void *pgm_data);

/*
 * Name: iucv_unregister_program
 * Purpose: Unregister application with IUCV
 * Input: address of handler
 * Output: NA
 * Return: (0) - Normal return
 *         (-EINVAL) - Internal error, wild pointer
 */
int iucv_unregister_program (iucv_handle_t handle);
331 | |||
/*
 * Name: iucv_accept
 * Purpose: This function is issued after the user receives a Connection
 *          Pending external interrupt and now wishes to complete the
 *          IUCV communication path.
 * Input: pathid - u16, Path identification number
 *        msglim_reqstd - u16, The number of outstanding messages requested.
 *        user_data - uchar[16], Data specified by the iucv_connect function.
 *        flags1 - int, Contains options for this path.
 *          - IPPRTY   - 0x20, Specifies if you want to send priority messages.
 *          - IPRMDATA - 0x80, Specifies whether your program can handle a
 *            message in the parameter list.
 *          - IPQUSCE  - 0x40, Specifies whether you want to quiesce the path
 *            being established.
 *        handle - iucv_handle_t, Address of handler.
 *        pgm_data - void *, Application data passed to interrupt handlers.
 *        flags1_out - int *, Contains information about the path
 *          - IPPRTY - 0x20, Indicates you may send priority messages.
 *        msglim - u16 *, Number of outstanding messages.
 * Output: return code from CP IUCV call.
 */

int iucv_accept (u16 pathid,
		 u16 msglim_reqstd,
		 uchar user_data[16],
		 int flags1,
		 iucv_handle_t handle,
		 void *pgm_data, int *flags1_out, u16 * msglim);
359 | |||
/*
 * Name: iucv_connect
 * Purpose: This function establishes an IUCV path. Although the connect may
 *          complete successfully, you are not able to use the path until you
 *          receive an IUCV Connection Complete external interrupt.
 * Input: pathid - u16 *, Path identification number
 *        msglim_reqstd - u16, Number of outstanding messages requested
 *        user_data - uchar[16], 16-byte user data
 *        userid - uchar[8], User identification
 *        system_name - uchar[8], 8-byte identifying the system name
 *        flags1 - int, Contains options for this path.
 *          - IPPRTY   - 0x20, Specifies if you want to send priority messages.
 *          - IPRMDATA - 0x80, Specifies whether your program can handle a
 *            message in the parameter list.
 *          - IPQUSCE  - 0x40, Specifies whether you want to quiesce the path
 *            being established.
 *          - IPLOCAL  - 0x01, Allows an application to force the partner to
 *            be on the local system. If local is specified then target class
 *            cannot be specified.
 *        flags1_out - int *, Contains information about the path
 *          - IPPRTY - 0x20, Indicates you may send priority messages.
 *        msglim - u16 *, Number of outstanding messages
 *        handle - iucv_handle_t, Address of handler
 *        pgm_data - void *, Application data passed to interrupt handlers
 * Output: return code from CP IUCV call
 *         rc - return code from iucv_declare_buffer
 *         -EINVAL - Invalid handle passed by application
 *         -EINVAL - Pathid address is NULL
 *         add_pathid_result - Return code from internal function add_pathid
 */
int
iucv_connect (u16 * pathid,
	      u16 msglim_reqstd,
	      uchar user_data[16],
	      uchar userid[8],
	      uchar system_name[8],
	      int flags1,
	      int *flags1_out,
	      u16 * msglim, iucv_handle_t handle, void *pgm_data);
399 | |||
400 | /* | ||
401 | * Name: iucv_purge | ||
402 | * Purpose: This function cancels a message that you have sent. | ||
403 | * Input: pathid - Path identification number. | ||
404 | * msgid - Specifies the message ID of the message to be purged. | ||
405 | * srccls - Specifies the source message class. | ||
406 | * Output: audit - Contains information about asynchronous error | ||
407 | * that may have affected the normal completion | ||
408 | * of this message. | ||
409 | * Return: Return code from CP IUCV call. | ||
410 | */ | ||
411 | int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit); | ||
412 | /* | ||
413 | * Name: iucv_query_maxconn | ||
414 | * Purpose: This function determines the maximum number of communication paths you | ||
415 | * may establish. | ||
416 | * Return: maxconn - ulong, Maximum number of connection the virtual machine may | ||
417 | * establish. | ||
418 | */ | ||
419 | ulong iucv_query_maxconn (void); | ||
420 | |||
421 | /* | ||
422 | * Name: iucv_query_bufsize | ||
423 | * Purpose: This function determines how large an external interrupt | ||
424 | * buffer IUCV requires to store information. | ||
425 | * Return: bufsize - ulong, Size of external interrupt buffer. | ||
426 | */ | ||
427 | ulong iucv_query_bufsize (void); | ||
428 | |||
429 | /* | ||
430 | * Name: iucv_quiesce | ||
431 | * Purpose: This function temporarily suspends incoming messages on an | ||
432 | * IUCV path. You can later reactivate the path by invoking | ||
433 | * the iucv_resume function. | ||
434 | * Input: pathid - Path identification number | ||
435 | * user_data - 16-bytes of user data | ||
436 | * Output: NA | ||
437 | * Return: Return code from CP IUCV call. | ||
438 | */ | ||
439 | int iucv_quiesce (u16 pathid, uchar user_data[16]); | ||
440 | |||
441 | /* | ||
442 | * Name: iucv_receive | ||
443 | * Purpose: This function receives messages that are being sent to you | ||
444 | * over established paths. Data will be returned in buffer for length of | ||
445 | * buflen. | ||
446 | * Input: | ||
447 | * pathid - Path identification number. | ||
448 | * buffer - Address of buffer to receive. | ||
449 | * buflen - Length of buffer to receive. | ||
450 | * msgid - Specifies the message ID. | ||
451 | * trgcls - Specifies target class. | ||
452 | * Output: | ||
453 | * flags1_out: int *, Contains information about this path. | ||
454 | * IPNORPY - 0x10 Specifies this is a one-way message and no reply is | ||
455 | * expected. | ||
456 | * IPPRTY - 0x20 Specifies if you want to send priority message. | ||
457 | * IPRMDATA - 0x80 specifies the data is contained in the parameter list | ||
458 | * residual_buffer - address of buffer updated by the number | ||
459 | * of bytes you have received. | ||
460 | * residual_length - | ||
461 | * Contains one of the following values, if the receive buffer is: | ||
462 | * The same length as the message, this field is zero. | ||
463 | * Longer than the message, this field contains the number of | ||
464 | * bytes remaining in the buffer. | ||
465 | * Shorter than the message, this field contains the residual | ||
466 | * count (that is, the number of bytes remaining in the | ||
467 | * message that does not fit into the buffer. In this | ||
468 | * case b2f0_result = 5. | ||
469 | * Return: Return code from CP IUCV call. | ||
470 | * (-EINVAL) - buffer address is pointing to NULL | ||
471 | */ | ||
472 | int iucv_receive (u16 pathid, | ||
473 | u32 msgid, | ||
474 | u32 trgcls, | ||
475 | void *buffer, | ||
476 | ulong buflen, | ||
477 | int *flags1_out, | ||
478 | ulong * residual_buffer, ulong * residual_length); | ||
479 | |||
480 | /* | ||
481 | * Name: iucv_receive_array | ||
482 | * Purpose: This function receives messages that are being sent to you | ||
483 | * over established paths. Data will be returned in first buffer for | ||
484 | * length of first buffer. | ||
485 | * Input: pathid - Path identification number. | ||
486 | * msgid - specifies the message ID. | ||
487 | * trgcls - Specifies target class. | ||
488 | * buffer - Address of array of buffers. | ||
489 | * buflen - Total length of buffers. | ||
490 | * Output: | ||
491 | * flags1_out: int *, Contains information about this path. | ||
492 | * IPNORPY - 0x10 Specifies this is a one-way message and no reply is | ||
493 | * expected. | ||
494 | * IPPRTY - 0x20 Specifies if you want to send priority message. | ||
495 | * IPRMDATA - 0x80 specifies the data is contained in the parameter list | ||
496 | * residual_buffer - address points to the current list entry IUCV | ||
497 | * is working on. | ||
498 | * residual_length - | ||
499 | * Contains one of the following values, if the receive buffer is: | ||
500 | * The same length as the message, this field is zero. | ||
501 | * Longer than the message, this field contains the number of | ||
502 | * bytes remaining in the buffer. | ||
503 | * Shorter than the message, this field contains the residual | ||
504 | * count (that is, the number of bytes remaining in the | ||
505 | * message that does not fit into the buffer. In this | ||
506 | * case b2f0_result = 5. | ||
507 | * Return: Return code from CP IUCV call. | ||
508 | * (-EINVAL) - Buffer address is NULL. | ||
509 | */ | ||
510 | int iucv_receive_array (u16 pathid, | ||
511 | u32 msgid, | ||
512 | u32 trgcls, | ||
513 | iucv_array_t * buffer, | ||
514 | ulong buflen, | ||
515 | int *flags1_out, | ||
516 | ulong * residual_buffer, ulong * residual_length); | ||
517 | |||
518 | /* | ||
519 | * Name: iucv_reject | ||
520 | * Purpose: The reject function refuses a specified message. Between the | ||
521 | * time you are notified of a message and the time that you | ||
522 | * complete the message, the message may be rejected. | ||
523 | * Input: pathid - Path identification number. | ||
524 | * msgid - Specifies the message ID. | ||
525 | * trgcls - Specifies target class. | ||
526 | * Output: NA | ||
527 | * Return: Return code from CP IUCV call. | ||
528 | */ | ||
529 | int iucv_reject (u16 pathid, u32 msgid, u32 trgcls); | ||
530 | |||
531 | /* | ||
532 | * Name: iucv_reply | ||
533 | * Purpose: This function responds to the two-way messages that you | ||
534 | * receive. You must identify completely the message to | ||
535 | * which you wish to reply. ie, pathid, msgid, and trgcls. | ||
536 | * Input: pathid - Path identification number. | ||
537 | * msgid - Specifies the message ID. | ||
538 | * trgcls - Specifies target class. | ||
539 | * flags1 - Option for path. | ||
540 | * IPPRTY- 0x20, Specifies if you want to send priority message. | ||
541 | * buffer - Address of reply buffer. | ||
542 | * buflen - Length of reply buffer. | ||
543 | * Output: residual_buffer - Address of buffer updated by the number | ||
544 | * of bytes you have moved. | ||
545 | * residual_length - Contains one of the following values: | ||
546 | * If the answer buffer is the same length as the reply, this field | ||
547 | * contains zero. | ||
548 | * If the answer buffer is longer than the reply, this field contains | ||
549 | * the number of bytes remaining in the buffer. | ||
550 | * If the answer buffer is shorter than the reply, this field contains | ||
551 | * a residual count (that is, the number of bytes remianing in the | ||
552 | * reply that does not fit into the buffer. In this | ||
553 | * case b2f0_result = 5. | ||
554 | * Return: Return code from CP IUCV call. | ||
555 | * (-EINVAL) - Buffer address is NULL. | ||
556 | */ | ||
557 | int iucv_reply (u16 pathid, | ||
558 | u32 msgid, | ||
559 | u32 trgcls, | ||
560 | int flags1, | ||
561 | void *buffer, ulong buflen, ulong * residual_buffer, | ||
562 | ulong * residual_length); | ||
563 | |||
564 | /* | ||
565 | * Name: iucv_reply_array | ||
566 | * Purpose: This function responds to the two-way messages that you | ||
567 | * receive. You must identify completely the message to | ||
568 | * which you wish to reply. ie, pathid, msgid, and trgcls. | ||
569 | * The array identifies a list of addresses and lengths of | ||
570 | * discontiguous buffers that contains the reply data. | ||
571 | * Input: pathid - Path identification number | ||
572 | * msgid - Specifies the message ID. | ||
573 | * trgcls - Specifies target class. | ||
574 | * flags1 - Option for path. | ||
575 | * IPPRTY- 0x20, Specifies if you want to send priority message. | ||
576 | * buffer - Address of array of reply buffers. | ||
577 | * buflen - Total length of reply buffers. | ||
578 | * Output: residual_buffer - Address of buffer which IUCV is currently working on. | ||
579 | * residual_length - Contains one of the following values: | ||
580 | * If the answer buffer is the same length as the reply, this field | ||
581 | * contains zero. | ||
582 | * If the answer buffer is longer than the reply, this field contains | ||
583 | * the number of bytes remaining in the buffer. | ||
584 | * If the answer buffer is shorter than the reply, this field contains | ||
585 | * a residual count (that is, the number of bytes remianing in the | ||
586 | * reply that does not fit into the buffer. In this | ||
587 | * case b2f0_result = 5. | ||
588 | * Return: Return code from CP IUCV call. | ||
589 | * (-EINVAL) - Buffer address is NULL. | ||
590 | */ | ||
591 | int iucv_reply_array (u16 pathid, | ||
592 | u32 msgid, | ||
593 | u32 trgcls, | ||
594 | int flags1, | ||
595 | iucv_array_t * buffer, | ||
596 | ulong buflen, ulong * residual_address, | ||
597 | ulong * residual_length); | ||
598 | |||
599 | /* | ||
600 | * Name: iucv_reply_prmmsg | ||
601 | * Purpose: This function responds to the two-way messages that you | ||
602 | * receive. You must identify completely the message to | ||
603 | * which you wish to reply. ie, pathid, msgid, and trgcls. | ||
604 | * Prmmsg signifies the data is moved into the | ||
605 | * parameter list. | ||
606 | * Input: pathid - Path identification number. | ||
607 | * msgid - Specifies the message ID. | ||
608 | * trgcls - Specifies target class. | ||
609 | * flags1 - Option for path. | ||
610 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
611 | * prmmsg - 8-bytes of data to be placed into the parameter. | ||
612 | * list. | ||
613 | * Output: NA | ||
614 | * Return: Return code from CP IUCV call. | ||
615 | */ | ||
616 | int iucv_reply_prmmsg (u16 pathid, | ||
617 | u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]); | ||
618 | |||
619 | /* | ||
620 | * Name: iucv_resume | ||
621 | * Purpose: This function restores communications over a quiesced path | ||
622 | * Input: pathid - Path identification number. | ||
623 | * user_data - 16-bytes of user data. | ||
624 | * Output: NA | ||
625 | * Return: Return code from CP IUCV call. | ||
626 | */ | ||
627 | int iucv_resume (u16 pathid, uchar user_data[16]); | ||
628 | |||
629 | /* | ||
630 | * Name: iucv_send | ||
631 | * Purpose: This function transmits data to another application. | ||
632 | * Data to be transmitted is in a buffer and this is a | ||
633 | * one-way message and the receiver will not reply to the | ||
634 | * message. | ||
635 | * Input: pathid - Path identification number. | ||
636 | * trgcls - Specifies target class. | ||
637 | * srccls - Specifies the source message class. | ||
638 | * msgtag - Specifies a tag to be associated with the message. | ||
639 | * flags1 - Option for path. | ||
640 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
641 | * buffer - Address of send buffer. | ||
642 | * buflen - Length of send buffer. | ||
643 | * Output: msgid - Specifies the message ID. | ||
644 | * Return: Return code from CP IUCV call. | ||
645 | * (-EINVAL) - Buffer address is NULL. | ||
646 | */ | ||
647 | int iucv_send (u16 pathid, | ||
648 | u32 * msgid, | ||
649 | u32 trgcls, | ||
650 | u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen); | ||
651 | |||
652 | /* | ||
653 | * Name: iucv_send_array | ||
654 | * Purpose: This function transmits data to another application. | ||
655 | * The contents of buffer is the address of the array of | ||
656 | * addresses and lengths of discontiguous buffers that hold | ||
657 | * the message text. This is a one-way message and the | ||
658 | * receiver will not reply to the message. | ||
659 | * Input: pathid - Path identification number. | ||
660 | * trgcls - Specifies target class. | ||
661 | * srccls - Specifies the source message class. | ||
662 | * msgtag - Specifies a tag to be associated witht the message. | ||
663 | * flags1 - Option for path. | ||
664 | * IPPRTY- specifies if you want to send priority message. | ||
665 | * buffer - Address of array of send buffers. | ||
666 | * buflen - Total length of send buffers. | ||
667 | * Output: msgid - Specifies the message ID. | ||
668 | * Return: Return code from CP IUCV call. | ||
669 | * (-EINVAL) - Buffer address is NULL. | ||
670 | */ | ||
671 | int iucv_send_array (u16 pathid, | ||
672 | u32 * msgid, | ||
673 | u32 trgcls, | ||
674 | u32 srccls, | ||
675 | u32 msgtag, | ||
676 | int flags1, iucv_array_t * buffer, ulong buflen); | ||
677 | |||
678 | /* | ||
679 | * Name: iucv_send_prmmsg | ||
680 | * Purpose: This function transmits data to another application. | ||
681 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
682 | * into the parameter list. This is a one-way message and the | ||
683 | * receiver will not reply to the message. | ||
684 | * Input: pathid - Path identification number. | ||
685 | * trgcls - Specifies target class. | ||
686 | * srccls - Specifies the source message class. | ||
687 | * msgtag - Specifies a tag to be associated with the message. | ||
688 | * flags1 - Option for path. | ||
689 | * IPPRTY- 0x20 specifies if you want to send priority message. | ||
690 | * prmmsg - 8-bytes of data to be placed into parameter list. | ||
691 | * Output: msgid - Specifies the message ID. | ||
692 | * Return: Return code from CP IUCV call. | ||
693 | */ | ||
694 | int iucv_send_prmmsg (u16 pathid, | ||
695 | u32 * msgid, | ||
696 | u32 trgcls, | ||
697 | u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]); | ||
698 | |||
699 | /* | ||
700 | * Name: iucv_send2way | ||
701 | * Purpose: This function transmits data to another application. | ||
702 | * Data to be transmitted is in a buffer. The receiver | ||
703 | * of the send is expected to reply to the message and | ||
704 | * a buffer is provided into which IUCV moves the reply | ||
705 | * to this message. | ||
706 | * Input: pathid - Path identification number. | ||
707 | * trgcls - Specifies target class. | ||
708 | * srccls - Specifies the source message class. | ||
709 | * msgtag - Specifies a tag associated with the message. | ||
710 | * flags1 - Option for path. | ||
711 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
712 | * buffer - Address of send buffer. | ||
713 | * buflen - Length of send buffer. | ||
714 | * ansbuf - Address of buffer into which IUCV moves the reply of | ||
715 | * this message. | ||
716 | * anslen - Address of length of buffer. | ||
717 | * Output: msgid - Specifies the message ID. | ||
718 | * Return: Return code from CP IUCV call. | ||
719 | * (-EINVAL) - Buffer or ansbuf address is NULL. | ||
720 | */ | ||
721 | int iucv_send2way (u16 pathid, | ||
722 | u32 * msgid, | ||
723 | u32 trgcls, | ||
724 | u32 srccls, | ||
725 | u32 msgtag, | ||
726 | int flags1, | ||
727 | void *buffer, ulong buflen, void *ansbuf, ulong anslen); | ||
728 | |||
729 | /* | ||
730 | * Name: iucv_send2way_array | ||
731 | * Purpose: This function transmits data to another application. | ||
732 | * The contents of buffer is the address of the array of | ||
733 | * addresses and lengths of discontiguous buffers that hold | ||
734 | * the message text. The receiver of the send is expected to | ||
735 | * reply to the message and a buffer is provided into which | ||
736 | * IUCV moves the reply to this message. | ||
737 | * Input: pathid - Path identification number. | ||
738 | * trgcls - Specifies target class. | ||
739 | * srccls - Specifies the source message class. | ||
740 | * msgtag - Specifies a tag to be associated with the message. | ||
741 | * flags1 - Option for path. | ||
742 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
743 | * buffer - Sddress of array of send buffers. | ||
744 | * buflen - Total length of send buffers. | ||
745 | * ansbuf - Address of array of buffer into which IUCV moves the reply | ||
746 | * of this message. | ||
747 | * anslen - Address of length reply buffers. | ||
748 | * Output: msgid - Specifies the message ID. | ||
749 | * Return: Return code from CP IUCV call. | ||
750 | * (-EINVAL) - Buffer address is NULL. | ||
751 | */ | ||
752 | int iucv_send2way_array (u16 pathid, | ||
753 | u32 * msgid, | ||
754 | u32 trgcls, | ||
755 | u32 srccls, | ||
756 | u32 msgtag, | ||
757 | int flags1, | ||
758 | iucv_array_t * buffer, | ||
759 | ulong buflen, iucv_array_t * ansbuf, ulong anslen); | ||
760 | |||
761 | /* | ||
762 | * Name: iucv_send2way_prmmsg | ||
763 | * Purpose: This function transmits data to another application. | ||
764 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
765 | * into the parameter list. This is a two-way message and the | ||
766 | * receiver of the message is expected to reply. A buffer | ||
767 | * is provided into which IUCV moves the reply to this | ||
768 | * message. | ||
769 | * Input: pathid - Rath identification number. | ||
770 | * trgcls - Specifies target class. | ||
771 | * srccls - Specifies the source message class. | ||
772 | * msgtag - Specifies a tag to be associated with the message. | ||
773 | * flags1 - Option for path. | ||
774 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
775 | * prmmsg - 8-bytes of data to be placed in parameter list. | ||
776 | * ansbuf - Address of buffer into which IUCV moves the reply of | ||
777 | * this message. | ||
778 | * anslen - Address of length of buffer. | ||
779 | * Output: msgid - Specifies the message ID. | ||
780 | * Return: Return code from CP IUCV call. | ||
781 | * (-EINVAL) - Buffer address is NULL. | ||
782 | */ | ||
783 | int iucv_send2way_prmmsg (u16 pathid, | ||
784 | u32 * msgid, | ||
785 | u32 trgcls, | ||
786 | u32 srccls, | ||
787 | u32 msgtag, | ||
788 | ulong flags1, | ||
789 | uchar prmmsg[8], void *ansbuf, ulong anslen); | ||
790 | |||
791 | /* | ||
792 | * Name: iucv_send2way_prmmsg_array | ||
793 | * Purpose: This function transmits data to another application. | ||
794 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
795 | * into the parameter list. This is a two-way message and the | ||
796 | * receiver of the message is expected to reply. A buffer | ||
797 | * is provided into which IUCV moves the reply to this | ||
798 | * message. The contents of ansbuf is the address of the | ||
799 | * array of addresses and lengths of discontiguous buffers | ||
800 | * that contain the reply. | ||
801 | * Input: pathid - Path identification number. | ||
802 | * trgcls - Specifies target class. | ||
803 | * srccls - Specifies the source message class. | ||
804 | * msgtag - Specifies a tag to be associated with the message. | ||
805 | * flags1 - Option for path. | ||
806 | * IPPRTY- 0x20 specifies if you want to send priority message. | ||
807 | * prmmsg - 8-bytes of data to be placed into the parameter list. | ||
808 | * ansbuf - Address of array of buffer into which IUCV moves the reply | ||
809 | * of this message. | ||
810 | * anslen - Address of length of reply buffers. | ||
811 | * Output: msgid - Specifies the message ID. | ||
812 | * Return: Return code from CP IUCV call. | ||
813 | * (-EINVAL) - Ansbuf address is NULL. | ||
814 | */ | ||
815 | int iucv_send2way_prmmsg_array (u16 pathid, | ||
816 | u32 * msgid, | ||
817 | u32 trgcls, | ||
818 | u32 srccls, | ||
819 | u32 msgtag, | ||
820 | int flags1, | ||
821 | uchar prmmsg[8], | ||
822 | iucv_array_t * ansbuf, ulong anslen); | ||
823 | |||
824 | /* | ||
825 | * Name: iucv_setmask | ||
826 | * Purpose: This function enables or disables the following IUCV | ||
827 | * external interruptions: Nonpriority and priority message | ||
828 | * interrupts, nonpriority and priority reply interrupts. | ||
829 | * Input: SetMaskFlag - options for interrupts | ||
830 | * 0x80 - Nonpriority_MessagePendingInterruptsFlag | ||
831 | * 0x40 - Priority_MessagePendingInterruptsFlag | ||
832 | * 0x20 - Nonpriority_MessageCompletionInterruptsFlag | ||
833 | * 0x10 - Priority_MessageCompletionInterruptsFlag | ||
834 | * 0x08 - IUCVControlInterruptsFlag | ||
835 | * Output: NA | ||
836 | * Return: Return code from CP IUCV call. | ||
837 | */ | ||
838 | int iucv_setmask (int SetMaskFlag); | ||
839 | |||
840 | /* | ||
841 | * Name: iucv_sever | ||
842 | * Purpose: This function terminates an IUCV path. | ||
843 | * Input: pathid - Path identification number. | ||
844 | * user_data - 16-bytes of user data. | ||
845 | * Output: NA | ||
846 | * Return: Return code from CP IUCV call. | ||
847 | * (-EINVAL) - Interal error, wild pointer. | ||
848 | */ | ||
849 | int iucv_sever (u16 pathid, uchar user_data[16]); | ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index e5665b6743a1..b97dd15bdb9a 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) | |||
828 | /** | 828 | /** |
829 | * Emit buffer of a lan comand. | 829 | * Emit buffer of a lan comand. |
830 | */ | 830 | */ |
831 | void | 831 | static void |
832 | lcs_lancmd_timeout(unsigned long data) | 832 | lcs_lancmd_timeout(unsigned long data) |
833 | { | 833 | { |
834 | struct lcs_reply *reply, *list_reply, *r; | 834 | struct lcs_reply *reply, *list_reply, *r; |
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
1360 | return 0; | 1360 | return 0; |
1361 | } | 1361 | } |
1362 | 1362 | ||
1363 | void | 1363 | static void |
1364 | lcs_schedule_recovery(struct lcs_card *card) | 1364 | lcs_schedule_recovery(struct lcs_card *card) |
1365 | { | 1365 | { |
1366 | LCS_DBF_TEXT(2, trace, "startrec"); | 1366 | LCS_DBF_TEXT(2, trace, "startrec"); |
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char | |||
1990 | 1990 | ||
1991 | } | 1991 | } |
1992 | 1992 | ||
1993 | DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); | 1993 | static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); |
1994 | 1994 | ||
1995 | static ssize_t | 1995 | static ssize_t |
1996 | lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, | 1996 | lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index d7d1cc0a5c8e..6387b483f2bf 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV network driver | 2 | * IUCV network driver |
3 | * | 3 | * |
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation |
5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | 5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) |
6 | * | 6 | * |
7 | * Sysfs integration and all bugs therein by Cornelia Huck | 7 | * Sysfs integration and all bugs therein by Cornelia Huck |
@@ -58,13 +58,94 @@ | |||
58 | #include <asm/io.h> | 58 | #include <asm/io.h> |
59 | #include <asm/uaccess.h> | 59 | #include <asm/uaccess.h> |
60 | 60 | ||
61 | #include "iucv.h" | 61 | #include <net/iucv/iucv.h> |
62 | #include "fsm.h" | 62 | #include "fsm.h" |
63 | 63 | ||
64 | MODULE_AUTHOR | 64 | MODULE_AUTHOR |
65 | ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); | 65 | ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); |
66 | MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); | 66 | MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); |
67 | 67 | ||
68 | /** | ||
69 | * Debug Facility stuff | ||
70 | */ | ||
71 | #define IUCV_DBF_SETUP_NAME "iucv_setup" | ||
72 | #define IUCV_DBF_SETUP_LEN 32 | ||
73 | #define IUCV_DBF_SETUP_PAGES 2 | ||
74 | #define IUCV_DBF_SETUP_NR_AREAS 1 | ||
75 | #define IUCV_DBF_SETUP_LEVEL 3 | ||
76 | |||
77 | #define IUCV_DBF_DATA_NAME "iucv_data" | ||
78 | #define IUCV_DBF_DATA_LEN 128 | ||
79 | #define IUCV_DBF_DATA_PAGES 2 | ||
80 | #define IUCV_DBF_DATA_NR_AREAS 1 | ||
81 | #define IUCV_DBF_DATA_LEVEL 2 | ||
82 | |||
83 | #define IUCV_DBF_TRACE_NAME "iucv_trace" | ||
84 | #define IUCV_DBF_TRACE_LEN 16 | ||
85 | #define IUCV_DBF_TRACE_PAGES 4 | ||
86 | #define IUCV_DBF_TRACE_NR_AREAS 1 | ||
87 | #define IUCV_DBF_TRACE_LEVEL 3 | ||
88 | |||
89 | #define IUCV_DBF_TEXT(name,level,text) \ | ||
90 | do { \ | ||
91 | debug_text_event(iucv_dbf_##name,level,text); \ | ||
92 | } while (0) | ||
93 | |||
94 | #define IUCV_DBF_HEX(name,level,addr,len) \ | ||
95 | do { \ | ||
96 | debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ | ||
97 | } while (0) | ||
98 | |||
99 | DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); | ||
100 | |||
101 | #define IUCV_DBF_TEXT_(name,level,text...) \ | ||
102 | do { \ | ||
103 | char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ | ||
104 | sprintf(iucv_dbf_txt_buf, text); \ | ||
105 | debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ | ||
106 | put_cpu_var(iucv_dbf_txt_buf); \ | ||
107 | } while (0) | ||
108 | |||
109 | #define IUCV_DBF_SPRINTF(name,level,text...) \ | ||
110 | do { \ | ||
111 | debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ | ||
112 | debug_sprintf_event(iucv_dbf_trace, level, text ); \ | ||
113 | } while (0) | ||
114 | |||
115 | /** | ||
116 | * some more debug stuff | ||
117 | */ | ||
118 | #define IUCV_HEXDUMP16(importance,header,ptr) \ | ||
119 | PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
120 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
121 | *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ | ||
122 | *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ | ||
123 | *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ | ||
124 | *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ | ||
125 | *(((char*)ptr)+12),*(((char*)ptr)+13), \ | ||
126 | *(((char*)ptr)+14),*(((char*)ptr)+15)); \ | ||
127 | PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
128 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
129 | *(((char*)ptr)+16),*(((char*)ptr)+17), \ | ||
130 | *(((char*)ptr)+18),*(((char*)ptr)+19), \ | ||
131 | *(((char*)ptr)+20),*(((char*)ptr)+21), \ | ||
132 | *(((char*)ptr)+22),*(((char*)ptr)+23), \ | ||
133 | *(((char*)ptr)+24),*(((char*)ptr)+25), \ | ||
134 | *(((char*)ptr)+26),*(((char*)ptr)+27), \ | ||
135 | *(((char*)ptr)+28),*(((char*)ptr)+29), \ | ||
136 | *(((char*)ptr)+30),*(((char*)ptr)+31)); | ||
137 | |||
138 | static inline void iucv_hex_dump(unsigned char *buf, size_t len) | ||
139 | { | ||
140 | size_t i; | ||
141 | |||
142 | for (i = 0; i < len; i++) { | ||
143 | if (i && !(i % 16)) | ||
144 | printk("\n"); | ||
145 | printk("%02x ", *(buf + i)); | ||
146 | } | ||
147 | printk("\n"); | ||
148 | } | ||
68 | 149 | ||
69 | #define PRINTK_HEADER " iucv: " /* for debugging */ | 150 | #define PRINTK_HEADER " iucv: " /* for debugging */ |
70 | 151 | ||
@@ -73,6 +154,25 @@ static struct device_driver netiucv_driver = { | |||
73 | .bus = &iucv_bus, | 154 | .bus = &iucv_bus, |
74 | }; | 155 | }; |
75 | 156 | ||
157 | static int netiucv_callback_connreq(struct iucv_path *, | ||
158 | u8 ipvmid[8], u8 ipuser[16]); | ||
159 | static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]); | ||
160 | static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); | ||
161 | static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]); | ||
162 | static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]); | ||
163 | static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); | ||
164 | static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); | ||
165 | |||
166 | static struct iucv_handler netiucv_handler = { | ||
167 | .path_pending = netiucv_callback_connreq, | ||
168 | .path_complete = netiucv_callback_connack, | ||
169 | .path_severed = netiucv_callback_connrej, | ||
170 | .path_quiesced = netiucv_callback_connsusp, | ||
171 | .path_resumed = netiucv_callback_connres, | ||
172 | .message_pending = netiucv_callback_rx, | ||
173 | .message_complete = netiucv_callback_txdone | ||
174 | }; | ||
175 | |||
76 | /** | 176 | /** |
77 | * Per connection profiling data | 177 | * Per connection profiling data |
78 | */ | 178 | */ |
@@ -92,9 +192,8 @@ struct connection_profile { | |||
92 | * Representation of one iucv connection | 192 | * Representation of one iucv connection |
93 | */ | 193 | */ |
94 | struct iucv_connection { | 194 | struct iucv_connection { |
95 | struct iucv_connection *next; | 195 | struct list_head list; |
96 | iucv_handle_t handle; | 196 | struct iucv_path *path; |
97 | __u16 pathid; | ||
98 | struct sk_buff *rx_buff; | 197 | struct sk_buff *rx_buff; |
99 | struct sk_buff *tx_buff; | 198 | struct sk_buff *tx_buff; |
100 | struct sk_buff_head collect_queue; | 199 | struct sk_buff_head collect_queue; |
@@ -112,12 +211,9 @@ struct iucv_connection { | |||
112 | /** | 211 | /** |
113 | * Linked list of all connection structs. | 212 | * Linked list of all connection structs. |
114 | */ | 213 | */ |
115 | struct iucv_connection_struct { | 214 | static struct list_head iucv_connection_list = |
116 | struct iucv_connection *iucv_connections; | 215 | LIST_HEAD_INIT(iucv_connection_list); |
117 | rwlock_t iucv_rwlock; | 216 | static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED; |
118 | }; | ||
119 | |||
120 | static struct iucv_connection_struct iucv_conns; | ||
121 | 217 | ||
122 | /** | 218 | /** |
123 | * Representation of event-data for the | 219 | * Representation of event-data for the |
@@ -142,11 +238,11 @@ struct netiucv_priv { | |||
142 | /** | 238 | /** |
143 | * Link level header for a packet. | 239 | * Link level header for a packet. |
144 | */ | 240 | */ |
145 | typedef struct ll_header_t { | 241 | struct ll_header { |
146 | __u16 next; | 242 | u16 next; |
147 | } ll_header; | 243 | }; |
148 | 244 | ||
149 | #define NETIUCV_HDRLEN (sizeof(ll_header)) | 245 | #define NETIUCV_HDRLEN (sizeof(struct ll_header)) |
150 | #define NETIUCV_BUFSIZE_MAX 32768 | 246 | #define NETIUCV_BUFSIZE_MAX 32768 |
151 | #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX | 247 | #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX |
152 | #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) | 248 | #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) |
@@ -158,36 +254,26 @@ typedef struct ll_header_t { | |||
158 | * Compatibility macros for busy handling | 254 | * Compatibility macros for busy handling |
159 | * of network devices. | 255 | * of network devices. |
160 | */ | 256 | */ |
161 | static __inline__ void netiucv_clear_busy(struct net_device *dev) | 257 | static inline void netiucv_clear_busy(struct net_device *dev) |
162 | { | 258 | { |
163 | clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy)); | 259 | struct netiucv_priv *priv = netdev_priv(dev); |
260 | clear_bit(0, &priv->tbusy); | ||
164 | netif_wake_queue(dev); | 261 | netif_wake_queue(dev); |
165 | } | 262 | } |
166 | 263 | ||
167 | static __inline__ int netiucv_test_and_set_busy(struct net_device *dev) | 264 | static inline int netiucv_test_and_set_busy(struct net_device *dev) |
168 | { | 265 | { |
266 | struct netiucv_priv *priv = netdev_priv(dev); | ||
169 | netif_stop_queue(dev); | 267 | netif_stop_queue(dev); |
170 | return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy); | 268 | return test_and_set_bit(0, &priv->tbusy); |
171 | } | 269 | } |
172 | 270 | ||
173 | static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | 271 | static u8 iucvMagic[16] = { |
174 | static __u8 iucvMagic[16] = { | ||
175 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, | 272 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, |
176 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 | 273 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 |
177 | }; | 274 | }; |
178 | 275 | ||
179 | /** | 276 | /** |
180 | * This mask means the 16-byte IUCV "magic" and the origin userid must | ||
181 | * match exactly as specified in order to give connection_pending() | ||
182 | * control. | ||
183 | */ | ||
184 | static __u8 netiucv_mask[] = { | ||
185 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
186 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
187 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
188 | }; | ||
189 | |||
190 | /** | ||
191 | * Convert an iucv userId to its printable | 277 | * Convert an iucv userId to its printable |
192 | * form (strip whitespace at end). | 278 | * form (strip whitespace at end). |
193 | * | 279 | * |
@@ -195,8 +281,7 @@ static __u8 netiucv_mask[] = { | |||
195 | * | 281 | * |
196 | * @returns The printable string (static data!!) | 282 | * @returns The printable string (static data!!) |
197 | */ | 283 | */ |
198 | static __inline__ char * | 284 | static inline char *netiucv_printname(char *name) |
199 | netiucv_printname(char *name) | ||
200 | { | 285 | { |
201 | static char tmp[9]; | 286 | static char tmp[9]; |
202 | char *p = tmp; | 287 | char *p = tmp; |
@@ -379,8 +464,7 @@ static debug_info_t *iucv_dbf_trace = NULL; | |||
379 | 464 | ||
380 | DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); | 465 | DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); |
381 | 466 | ||
382 | static void | 467 | static void iucv_unregister_dbf_views(void) |
383 | iucv_unregister_dbf_views(void) | ||
384 | { | 468 | { |
385 | if (iucv_dbf_setup) | 469 | if (iucv_dbf_setup) |
386 | debug_unregister(iucv_dbf_setup); | 470 | debug_unregister(iucv_dbf_setup); |
@@ -389,8 +473,7 @@ iucv_unregister_dbf_views(void) | |||
389 | if (iucv_dbf_trace) | 473 | if (iucv_dbf_trace) |
390 | debug_unregister(iucv_dbf_trace); | 474 | debug_unregister(iucv_dbf_trace); |
391 | } | 475 | } |
392 | static int | 476 | static int iucv_register_dbf_views(void) |
393 | iucv_register_dbf_views(void) | ||
394 | { | 477 | { |
395 | iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, | 478 | iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, |
396 | IUCV_DBF_SETUP_PAGES, | 479 | IUCV_DBF_SETUP_PAGES, |
@@ -422,125 +505,111 @@ iucv_register_dbf_views(void) | |||
422 | return 0; | 505 | return 0; |
423 | } | 506 | } |
424 | 507 | ||
425 | /** | 508 | /* |
426 | * Callback-wrappers, called from lowlevel iucv layer. | 509 | * Callback-wrappers, called from lowlevel iucv layer. |
427 | *****************************************************************************/ | 510 | */ |
428 | 511 | ||
429 | static void | 512 | static void netiucv_callback_rx(struct iucv_path *path, |
430 | netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data) | 513 | struct iucv_message *msg) |
431 | { | 514 | { |
432 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 515 | struct iucv_connection *conn = path->private; |
433 | struct iucv_event ev; | 516 | struct iucv_event ev; |
434 | 517 | ||
435 | ev.conn = conn; | 518 | ev.conn = conn; |
436 | ev.data = (void *)eib; | 519 | ev.data = msg; |
437 | |||
438 | fsm_event(conn->fsm, CONN_EVENT_RX, &ev); | 520 | fsm_event(conn->fsm, CONN_EVENT_RX, &ev); |
439 | } | 521 | } |
440 | 522 | ||
441 | static void | 523 | static void netiucv_callback_txdone(struct iucv_path *path, |
442 | netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data) | 524 | struct iucv_message *msg) |
443 | { | 525 | { |
444 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 526 | struct iucv_connection *conn = path->private; |
445 | struct iucv_event ev; | 527 | struct iucv_event ev; |
446 | 528 | ||
447 | ev.conn = conn; | 529 | ev.conn = conn; |
448 | ev.data = (void *)eib; | 530 | ev.data = msg; |
449 | fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); | 531 | fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); |
450 | } | 532 | } |
451 | 533 | ||
452 | static void | 534 | static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) |
453 | netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data) | ||
454 | { | 535 | { |
455 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 536 | struct iucv_connection *conn = path->private; |
456 | struct iucv_event ev; | ||
457 | 537 | ||
458 | ev.conn = conn; | 538 | fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); |
459 | ev.data = (void *)eib; | ||
460 | fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev); | ||
461 | } | 539 | } |
462 | 540 | ||
463 | static void | 541 | static int netiucv_callback_connreq(struct iucv_path *path, |
464 | netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data) | 542 | u8 ipvmid[8], u8 ipuser[16]) |
465 | { | 543 | { |
466 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 544 | struct iucv_connection *conn = path->private; |
467 | struct iucv_event ev; | 545 | struct iucv_event ev; |
546 | int rc; | ||
468 | 547 | ||
469 | ev.conn = conn; | 548 | if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) |
470 | ev.data = (void *)eib; | 549 | /* ipuser must match iucvMagic. */ |
471 | fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); | 550 | return -EINVAL; |
551 | rc = -EINVAL; | ||
552 | read_lock_bh(&iucv_connection_rwlock); | ||
553 | list_for_each_entry(conn, &iucv_connection_list, list) { | ||
554 | if (strncmp(ipvmid, conn->userid, 8)) | ||
555 | continue; | ||
556 | /* Found a matching connection for this path. */ | ||
557 | conn->path = path; | ||
558 | ev.conn = conn; | ||
559 | ev.data = path; | ||
560 | fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); | ||
561 | rc = 0; | ||
562 | } | ||
563 | read_unlock_bh(&iucv_connection_rwlock); | ||
564 | return rc; | ||
472 | } | 565 | } |
473 | 566 | ||
474 | static void | 567 | static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) |
475 | netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data) | ||
476 | { | 568 | { |
477 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 569 | struct iucv_connection *conn = path->private; |
478 | struct iucv_event ev; | ||
479 | 570 | ||
480 | ev.conn = conn; | 571 | fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); |
481 | ev.data = (void *)eib; | ||
482 | fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev); | ||
483 | } | 572 | } |
484 | 573 | ||
485 | static void | 574 | static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16]) |
486 | netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data) | ||
487 | { | 575 | { |
488 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 576 | struct iucv_connection *conn = path->private; |
489 | struct iucv_event ev; | ||
490 | 577 | ||
491 | ev.conn = conn; | 578 | fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); |
492 | ev.data = (void *)eib; | ||
493 | fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev); | ||
494 | } | 579 | } |
495 | 580 | ||
496 | static void | 581 | static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16]) |
497 | netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data) | ||
498 | { | 582 | { |
499 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 583 | struct iucv_connection *conn = path->private; |
500 | struct iucv_event ev; | ||
501 | 584 | ||
502 | ev.conn = conn; | 585 | fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); |
503 | ev.data = (void *)eib; | 586 | } |
504 | fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev); | ||
505 | } | ||
506 | |||
507 | static iucv_interrupt_ops_t netiucv_ops = { | ||
508 | .ConnectionPending = netiucv_callback_connreq, | ||
509 | .ConnectionComplete = netiucv_callback_connack, | ||
510 | .ConnectionSevered = netiucv_callback_connrej, | ||
511 | .ConnectionQuiesced = netiucv_callback_connsusp, | ||
512 | .ConnectionResumed = netiucv_callback_connres, | ||
513 | .MessagePending = netiucv_callback_rx, | ||
514 | .MessageComplete = netiucv_callback_txdone | ||
515 | }; | ||
516 | 587 | ||
517 | /** | 588 | /** |
518 | * Dummy NOP action for all statemachines | 589 | * Dummy NOP action for all statemachines |
519 | */ | 590 | */ |
520 | static void | 591 | static void fsm_action_nop(fsm_instance *fi, int event, void *arg) |
521 | fsm_action_nop(fsm_instance *fi, int event, void *arg) | ||
522 | { | 592 | { |
523 | } | 593 | } |
524 | 594 | ||
525 | /** | 595 | /* |
526 | * Actions of the connection statemachine | 596 | * Actions of the connection statemachine |
527 | *****************************************************************************/ | 597 | */ |
528 | 598 | ||
529 | /** | 599 | /** |
530 | * Helper function for conn_action_rx() | 600 | * netiucv_unpack_skb |
531 | * Unpack a just received skb and hand it over to | 601 | * @conn: The connection where this skb has been received. |
532 | * upper layers. | 602 | * @pskb: The received skb. |
533 | * | 603 | * |
534 | * @param conn The connection where this skb has been received. | 604 | * Unpack a just received skb and hand it over to upper layers. |
535 | * @param pskb The received skb. | 605 | * Helper function for conn_action_rx. |
536 | */ | 606 | */ |
537 | //static __inline__ void | 607 | static void netiucv_unpack_skb(struct iucv_connection *conn, |
538 | static void | 608 | struct sk_buff *pskb) |
539 | netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) | ||
540 | { | 609 | { |
541 | struct net_device *dev = conn->netdev; | 610 | struct net_device *dev = conn->netdev; |
542 | struct netiucv_priv *privptr = dev->priv; | 611 | struct netiucv_priv *privptr = netdev_priv(dev); |
543 | __u16 offset = 0; | 612 | u16 offset = 0; |
544 | 613 | ||
545 | skb_put(pskb, NETIUCV_HDRLEN); | 614 | skb_put(pskb, NETIUCV_HDRLEN); |
546 | pskb->dev = dev; | 615 | pskb->dev = dev; |
@@ -549,7 +618,7 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) | |||
549 | 618 | ||
550 | while (1) { | 619 | while (1) { |
551 | struct sk_buff *skb; | 620 | struct sk_buff *skb; |
552 | ll_header *header = (ll_header *)pskb->data; | 621 | struct ll_header *header = (struct ll_header *) pskb->data; |
553 | 622 | ||
554 | if (!header->next) | 623 | if (!header->next) |
555 | break; | 624 | break; |
@@ -595,40 +664,37 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) | |||
595 | } | 664 | } |
596 | } | 665 | } |
597 | 666 | ||
598 | static void | 667 | static void conn_action_rx(fsm_instance *fi, int event, void *arg) |
599 | conn_action_rx(fsm_instance *fi, int event, void *arg) | ||
600 | { | 668 | { |
601 | struct iucv_event *ev = (struct iucv_event *)arg; | 669 | struct iucv_event *ev = arg; |
602 | struct iucv_connection *conn = ev->conn; | 670 | struct iucv_connection *conn = ev->conn; |
603 | iucv_MessagePending *eib = (iucv_MessagePending *)ev->data; | 671 | struct iucv_message *msg = ev->data; |
604 | struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv; | 672 | struct netiucv_priv *privptr = netdev_priv(conn->netdev); |
605 | |||
606 | __u32 msglen = eib->ln1msg2.ipbfln1f; | ||
607 | int rc; | 673 | int rc; |
608 | 674 | ||
609 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); | 675 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); |
610 | 676 | ||
611 | if (!conn->netdev) { | 677 | if (!conn->netdev) { |
612 | /* FRITZ: How to tell iucv LL to drop the msg? */ | 678 | iucv_message_reject(conn->path, msg); |
613 | PRINT_WARN("Received data for unlinked connection\n"); | 679 | PRINT_WARN("Received data for unlinked connection\n"); |
614 | IUCV_DBF_TEXT(data, 2, | 680 | IUCV_DBF_TEXT(data, 2, |
615 | "Received data for unlinked connection\n"); | 681 | "Received data for unlinked connection\n"); |
616 | return; | 682 | return; |
617 | } | 683 | } |
618 | if (msglen > conn->max_buffsize) { | 684 | if (msg->length > conn->max_buffsize) { |
619 | /* FRITZ: How to tell iucv LL to drop the msg? */ | 685 | iucv_message_reject(conn->path, msg); |
620 | privptr->stats.rx_dropped++; | 686 | privptr->stats.rx_dropped++; |
621 | PRINT_WARN("msglen %d > max_buffsize %d\n", | 687 | PRINT_WARN("msglen %d > max_buffsize %d\n", |
622 | msglen, conn->max_buffsize); | 688 | msg->length, conn->max_buffsize); |
623 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", | 689 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", |
624 | msglen, conn->max_buffsize); | 690 | msg->length, conn->max_buffsize); |
625 | return; | 691 | return; |
626 | } | 692 | } |
627 | conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; | 693 | conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; |
628 | conn->rx_buff->len = 0; | 694 | conn->rx_buff->len = 0; |
629 | rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls, | 695 | rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, |
630 | conn->rx_buff->data, msglen, NULL, NULL, NULL); | 696 | msg->length, NULL); |
631 | if (rc || msglen < 5) { | 697 | if (rc || msg->length < 5) { |
632 | privptr->stats.rx_errors++; | 698 | privptr->stats.rx_errors++; |
633 | PRINT_WARN("iucv_receive returned %08x\n", rc); | 699 | PRINT_WARN("iucv_receive returned %08x\n", rc); |
634 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); | 700 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); |
@@ -637,26 +703,26 @@ conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
637 | netiucv_unpack_skb(conn, conn->rx_buff); | 703 | netiucv_unpack_skb(conn, conn->rx_buff); |
638 | } | 704 | } |
639 | 705 | ||
640 | static void | 706 | static void conn_action_txdone(fsm_instance *fi, int event, void *arg) |
641 | conn_action_txdone(fsm_instance *fi, int event, void *arg) | ||
642 | { | 707 | { |
643 | struct iucv_event *ev = (struct iucv_event *)arg; | 708 | struct iucv_event *ev = arg; |
644 | struct iucv_connection *conn = ev->conn; | 709 | struct iucv_connection *conn = ev->conn; |
645 | iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data; | 710 | struct iucv_message *msg = ev->data; |
711 | struct iucv_message txmsg; | ||
646 | struct netiucv_priv *privptr = NULL; | 712 | struct netiucv_priv *privptr = NULL; |
647 | /* Shut up, gcc! skb is always below 2G. */ | 713 | u32 single_flag = msg->tag; |
648 | __u32 single_flag = eib->ipmsgtag; | 714 | u32 txbytes = 0; |
649 | __u32 txbytes = 0; | 715 | u32 txpackets = 0; |
650 | __u32 txpackets = 0; | 716 | u32 stat_maxcq = 0; |
651 | __u32 stat_maxcq = 0; | ||
652 | struct sk_buff *skb; | 717 | struct sk_buff *skb; |
653 | unsigned long saveflags; | 718 | unsigned long saveflags; |
654 | ll_header header; | 719 | struct ll_header header; |
720 | int rc; | ||
655 | 721 | ||
656 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); | 722 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); |
657 | 723 | ||
658 | if (conn && conn->netdev && conn->netdev->priv) | 724 | if (conn && conn->netdev) |
659 | privptr = (struct netiucv_priv *)conn->netdev->priv; | 725 | privptr = netdev_priv(conn->netdev); |
660 | conn->prof.tx_pending--; | 726 | conn->prof.tx_pending--; |
661 | if (single_flag) { | 727 | if (single_flag) { |
662 | if ((skb = skb_dequeue(&conn->commit_queue))) { | 728 | if ((skb = skb_dequeue(&conn->commit_queue))) { |
@@ -688,56 +754,55 @@ conn_action_txdone(fsm_instance *fi, int event, void *arg) | |||
688 | conn->prof.maxmulti = conn->collect_len; | 754 | conn->prof.maxmulti = conn->collect_len; |
689 | conn->collect_len = 0; | 755 | conn->collect_len = 0; |
690 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); | 756 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); |
691 | if (conn->tx_buff->len) { | 757 | if (conn->tx_buff->len == 0) { |
692 | int rc; | 758 | fsm_newstate(fi, CONN_STATE_IDLE); |
693 | 759 | return; | |
694 | header.next = 0; | 760 | } |
695 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, | ||
696 | NETIUCV_HDRLEN); | ||
697 | 761 | ||
698 | conn->prof.send_stamp = xtime; | 762 | header.next = 0; |
699 | rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0, | 763 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); |
764 | conn->prof.send_stamp = xtime; | ||
765 | txmsg.class = 0; | ||
766 | txmsg.tag = 0; | ||
767 | rc = iucv_message_send(conn->path, &txmsg, 0, 0, | ||
700 | conn->tx_buff->data, conn->tx_buff->len); | 768 | conn->tx_buff->data, conn->tx_buff->len); |
701 | conn->prof.doios_multi++; | 769 | conn->prof.doios_multi++; |
702 | conn->prof.txlen += conn->tx_buff->len; | 770 | conn->prof.txlen += conn->tx_buff->len; |
703 | conn->prof.tx_pending++; | 771 | conn->prof.tx_pending++; |
704 | if (conn->prof.tx_pending > conn->prof.tx_max_pending) | 772 | if (conn->prof.tx_pending > conn->prof.tx_max_pending) |
705 | conn->prof.tx_max_pending = conn->prof.tx_pending; | 773 | conn->prof.tx_max_pending = conn->prof.tx_pending; |
706 | if (rc) { | 774 | if (rc) { |
707 | conn->prof.tx_pending--; | 775 | conn->prof.tx_pending--; |
708 | fsm_newstate(fi, CONN_STATE_IDLE); | ||
709 | if (privptr) | ||
710 | privptr->stats.tx_errors += txpackets; | ||
711 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
712 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | ||
713 | } else { | ||
714 | if (privptr) { | ||
715 | privptr->stats.tx_packets += txpackets; | ||
716 | privptr->stats.tx_bytes += txbytes; | ||
717 | } | ||
718 | if (stat_maxcq > conn->prof.maxcqueue) | ||
719 | conn->prof.maxcqueue = stat_maxcq; | ||
720 | } | ||
721 | } else | ||
722 | fsm_newstate(fi, CONN_STATE_IDLE); | 776 | fsm_newstate(fi, CONN_STATE_IDLE); |
777 | if (privptr) | ||
778 | privptr->stats.tx_errors += txpackets; | ||
779 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
780 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | ||
781 | } else { | ||
782 | if (privptr) { | ||
783 | privptr->stats.tx_packets += txpackets; | ||
784 | privptr->stats.tx_bytes += txbytes; | ||
785 | } | ||
786 | if (stat_maxcq > conn->prof.maxcqueue) | ||
787 | conn->prof.maxcqueue = stat_maxcq; | ||
788 | } | ||
723 | } | 789 | } |
724 | 790 | ||
725 | static void | 791 | static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) |
726 | conn_action_connaccept(fsm_instance *fi, int event, void *arg) | ||
727 | { | 792 | { |
728 | struct iucv_event *ev = (struct iucv_event *)arg; | 793 | struct iucv_event *ev = arg; |
729 | struct iucv_connection *conn = ev->conn; | 794 | struct iucv_connection *conn = ev->conn; |
730 | iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; | 795 | struct iucv_path *path = ev->data; |
731 | struct net_device *netdev = conn->netdev; | 796 | struct net_device *netdev = conn->netdev; |
732 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 797 | struct netiucv_priv *privptr = netdev_priv(netdev); |
733 | int rc; | 798 | int rc; |
734 | __u16 msglimit; | ||
735 | __u8 udata[16]; | ||
736 | 799 | ||
737 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 800 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
738 | 801 | ||
739 | rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0, | 802 | conn->path = path; |
740 | conn->handle, conn, NULL, &msglimit); | 803 | path->msglim = NETIUCV_QUEUELEN_DEFAULT; |
804 | path->flags = 0; | ||
805 | rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); | ||
741 | if (rc) { | 806 | if (rc) { |
742 | PRINT_WARN("%s: IUCV accept failed with error %d\n", | 807 | PRINT_WARN("%s: IUCV accept failed with error %d\n", |
743 | netdev->name, rc); | 808 | netdev->name, rc); |
@@ -745,183 +810,126 @@ conn_action_connaccept(fsm_instance *fi, int event, void *arg) | |||
745 | return; | 810 | return; |
746 | } | 811 | } |
747 | fsm_newstate(fi, CONN_STATE_IDLE); | 812 | fsm_newstate(fi, CONN_STATE_IDLE); |
748 | conn->pathid = eib->ippathid; | 813 | netdev->tx_queue_len = conn->path->msglim; |
749 | netdev->tx_queue_len = msglimit; | ||
750 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); | 814 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); |
751 | } | 815 | } |
752 | 816 | ||
753 | static void | 817 | static void conn_action_connreject(fsm_instance *fi, int event, void *arg) |
754 | conn_action_connreject(fsm_instance *fi, int event, void *arg) | ||
755 | { | 818 | { |
756 | struct iucv_event *ev = (struct iucv_event *)arg; | 819 | struct iucv_event *ev = arg; |
757 | struct iucv_connection *conn = ev->conn; | 820 | struct iucv_path *path = ev->data; |
758 | struct net_device *netdev = conn->netdev; | ||
759 | iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; | ||
760 | __u8 udata[16]; | ||
761 | 821 | ||
762 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 822 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
763 | 823 | iucv_path_sever(path, NULL); | |
764 | iucv_sever(eib->ippathid, udata); | ||
765 | if (eib->ippathid != conn->pathid) { | ||
766 | PRINT_INFO("%s: IR Connection Pending; " | ||
767 | "pathid %d does not match original pathid %d\n", | ||
768 | netdev->name, eib->ippathid, conn->pathid); | ||
769 | IUCV_DBF_TEXT_(data, 2, | ||
770 | "connreject: IR pathid %d, conn. pathid %d\n", | ||
771 | eib->ippathid, conn->pathid); | ||
772 | iucv_sever(conn->pathid, udata); | ||
773 | } | ||
774 | } | 824 | } |
775 | 825 | ||
776 | static void | 826 | static void conn_action_connack(fsm_instance *fi, int event, void *arg) |
777 | conn_action_connack(fsm_instance *fi, int event, void *arg) | ||
778 | { | 827 | { |
779 | struct iucv_event *ev = (struct iucv_event *)arg; | 828 | struct iucv_connection *conn = arg; |
780 | struct iucv_connection *conn = ev->conn; | ||
781 | iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data; | ||
782 | struct net_device *netdev = conn->netdev; | 829 | struct net_device *netdev = conn->netdev; |
783 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 830 | struct netiucv_priv *privptr = netdev_priv(netdev); |
784 | 831 | ||
785 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 832 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
786 | |||
787 | fsm_deltimer(&conn->timer); | 833 | fsm_deltimer(&conn->timer); |
788 | fsm_newstate(fi, CONN_STATE_IDLE); | 834 | fsm_newstate(fi, CONN_STATE_IDLE); |
789 | if (eib->ippathid != conn->pathid) { | 835 | netdev->tx_queue_len = conn->path->msglim; |
790 | PRINT_INFO("%s: IR Connection Complete; " | ||
791 | "pathid %d does not match original pathid %d\n", | ||
792 | netdev->name, eib->ippathid, conn->pathid); | ||
793 | IUCV_DBF_TEXT_(data, 2, | ||
794 | "connack: IR pathid %d, conn. pathid %d\n", | ||
795 | eib->ippathid, conn->pathid); | ||
796 | conn->pathid = eib->ippathid; | ||
797 | } | ||
798 | netdev->tx_queue_len = eib->ipmsglim; | ||
799 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); | 836 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); |
800 | } | 837 | } |
801 | 838 | ||
802 | static void | 839 | static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) |
803 | conn_action_conntimsev(fsm_instance *fi, int event, void *arg) | ||
804 | { | 840 | { |
805 | struct iucv_connection *conn = (struct iucv_connection *)arg; | 841 | struct iucv_connection *conn = arg; |
806 | __u8 udata[16]; | ||
807 | 842 | ||
808 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 843 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
809 | |||
810 | fsm_deltimer(&conn->timer); | 844 | fsm_deltimer(&conn->timer); |
811 | iucv_sever(conn->pathid, udata); | 845 | iucv_path_sever(conn->path, NULL); |
812 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 846 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
813 | } | 847 | } |
814 | 848 | ||
815 | static void | 849 | static void conn_action_connsever(fsm_instance *fi, int event, void *arg) |
816 | conn_action_connsever(fsm_instance *fi, int event, void *arg) | ||
817 | { | 850 | { |
818 | struct iucv_event *ev = (struct iucv_event *)arg; | 851 | struct iucv_connection *conn = arg; |
819 | struct iucv_connection *conn = ev->conn; | ||
820 | struct net_device *netdev = conn->netdev; | 852 | struct net_device *netdev = conn->netdev; |
821 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 853 | struct netiucv_priv *privptr = netdev_priv(netdev); |
822 | __u8 udata[16]; | ||
823 | 854 | ||
824 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 855 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
825 | 856 | ||
826 | fsm_deltimer(&conn->timer); | 857 | fsm_deltimer(&conn->timer); |
827 | iucv_sever(conn->pathid, udata); | 858 | iucv_path_sever(conn->path, NULL); |
828 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); | 859 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); |
829 | IUCV_DBF_TEXT(data, 2, | 860 | IUCV_DBF_TEXT(data, 2, |
830 | "conn_action_connsever: Remote dropped connection\n"); | 861 | "conn_action_connsever: Remote dropped connection\n"); |
831 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 862 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
832 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); | 863 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); |
833 | } | 864 | } |
834 | 865 | ||
835 | static void | 866 | static void conn_action_start(fsm_instance *fi, int event, void *arg) |
836 | conn_action_start(fsm_instance *fi, int event, void *arg) | ||
837 | { | 867 | { |
838 | struct iucv_event *ev = (struct iucv_event *)arg; | 868 | struct iucv_connection *conn = arg; |
839 | struct iucv_connection *conn = ev->conn; | ||
840 | __u16 msglimit; | ||
841 | int rc; | 869 | int rc; |
842 | 870 | ||
843 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 871 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
844 | 872 | ||
845 | if (!conn->handle) { | 873 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
846 | IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n"); | ||
847 | conn->handle = | ||
848 | iucv_register_program(iucvMagic, conn->userid, | ||
849 | netiucv_mask, | ||
850 | &netiucv_ops, conn); | ||
851 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | ||
852 | if (!conn->handle) { | ||
853 | fsm_newstate(fi, CONN_STATE_REGERR); | ||
854 | conn->handle = NULL; | ||
855 | IUCV_DBF_TEXT(setup, 2, | ||
856 | "NULL from iucv_register_program\n"); | ||
857 | return; | ||
858 | } | ||
859 | |||
860 | PRINT_DEBUG("%s('%s'): registered successfully\n", | ||
861 | conn->netdev->name, conn->userid); | ||
862 | } | ||
863 | |||
864 | PRINT_DEBUG("%s('%s'): connecting ...\n", | 874 | PRINT_DEBUG("%s('%s'): connecting ...\n", |
865 | conn->netdev->name, conn->userid); | 875 | conn->netdev->name, conn->userid); |
866 | 876 | ||
867 | /* We must set the state before calling iucv_connect because the callback | 877 | /* |
868 | * handler could be called at any point after the connection request is | 878 | * We must set the state before calling iucv_connect because the |
869 | * sent */ | 879 | * callback handler could be called at any point after the connection |
880 | * request is sent | ||
881 | */ | ||
870 | 882 | ||
871 | fsm_newstate(fi, CONN_STATE_SETUPWAIT); | 883 | fsm_newstate(fi, CONN_STATE_SETUPWAIT); |
872 | rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic, | 884 | conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); |
873 | conn->userid, iucv_host, 0, NULL, &msglimit, | 885 | rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, |
874 | conn->handle, conn); | 886 | NULL, iucvMagic, conn); |
875 | switch (rc) { | 887 | switch (rc) { |
876 | case 0: | 888 | case 0: |
877 | conn->netdev->tx_queue_len = msglimit; | 889 | conn->netdev->tx_queue_len = conn->path->msglim; |
878 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, | 890 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, |
879 | CONN_EVENT_TIMER, conn); | 891 | CONN_EVENT_TIMER, conn); |
880 | return; | 892 | return; |
881 | case 11: | 893 | case 11: |
882 | PRINT_INFO("%s: User %s is currently not available.\n", | 894 | PRINT_INFO("%s: User %s is currently not available.\n", |
883 | conn->netdev->name, | 895 | conn->netdev->name, |
884 | netiucv_printname(conn->userid)); | 896 | netiucv_printname(conn->userid)); |
885 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 897 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
886 | return; | 898 | break; |
887 | case 12: | 899 | case 12: |
888 | PRINT_INFO("%s: User %s is currently not ready.\n", | 900 | PRINT_INFO("%s: User %s is currently not ready.\n", |
889 | conn->netdev->name, | 901 | conn->netdev->name, |
890 | netiucv_printname(conn->userid)); | 902 | netiucv_printname(conn->userid)); |
891 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 903 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
892 | return; | 904 | break; |
893 | case 13: | 905 | case 13: |
894 | PRINT_WARN("%s: Too many IUCV connections.\n", | 906 | PRINT_WARN("%s: Too many IUCV connections.\n", |
895 | conn->netdev->name); | 907 | conn->netdev->name); |
896 | fsm_newstate(fi, CONN_STATE_CONNERR); | 908 | fsm_newstate(fi, CONN_STATE_CONNERR); |
897 | break; | 909 | break; |
898 | case 14: | 910 | case 14: |
899 | PRINT_WARN( | 911 | PRINT_WARN("%s: User %s has too many IUCV connections.\n", |
900 | "%s: User %s has too many IUCV connections.\n", | 912 | conn->netdev->name, |
901 | conn->netdev->name, | 913 | netiucv_printname(conn->userid)); |
902 | netiucv_printname(conn->userid)); | 914 | fsm_newstate(fi, CONN_STATE_CONNERR); |
903 | fsm_newstate(fi, CONN_STATE_CONNERR); | 915 | break; |
904 | break; | 916 | case 15: |
905 | case 15: | 917 | PRINT_WARN("%s: No IUCV authorization in CP directory.\n", |
906 | PRINT_WARN( | 918 | conn->netdev->name); |
907 | "%s: No IUCV authorization in CP directory.\n", | 919 | fsm_newstate(fi, CONN_STATE_CONNERR); |
908 | conn->netdev->name); | 920 | break; |
909 | fsm_newstate(fi, CONN_STATE_CONNERR); | 921 | default: |
910 | break; | 922 | PRINT_WARN("%s: iucv_connect returned error %d\n", |
911 | default: | 923 | conn->netdev->name, rc); |
912 | PRINT_WARN("%s: iucv_connect returned error %d\n", | 924 | fsm_newstate(fi, CONN_STATE_CONNERR); |
913 | conn->netdev->name, rc); | 925 | break; |
914 | fsm_newstate(fi, CONN_STATE_CONNERR); | ||
915 | break; | ||
916 | } | 926 | } |
917 | IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); | 927 | IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); |
918 | IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); | 928 | kfree(conn->path); |
919 | iucv_unregister_program(conn->handle); | 929 | conn->path = NULL; |
920 | conn->handle = NULL; | ||
921 | } | 930 | } |
922 | 931 | ||
923 | static void | 932 | static void netiucv_purge_skb_queue(struct sk_buff_head *q) |
924 | netiucv_purge_skb_queue(struct sk_buff_head *q) | ||
925 | { | 933 | { |
926 | struct sk_buff *skb; | 934 | struct sk_buff *skb; |
927 | 935 | ||
@@ -931,36 +939,34 @@ netiucv_purge_skb_queue(struct sk_buff_head *q) | |||
931 | } | 939 | } |
932 | } | 940 | } |
933 | 941 | ||
934 | static void | 942 | static void conn_action_stop(fsm_instance *fi, int event, void *arg) |
935 | conn_action_stop(fsm_instance *fi, int event, void *arg) | ||
936 | { | 943 | { |
937 | struct iucv_event *ev = (struct iucv_event *)arg; | 944 | struct iucv_event *ev = arg; |
938 | struct iucv_connection *conn = ev->conn; | 945 | struct iucv_connection *conn = ev->conn; |
939 | struct net_device *netdev = conn->netdev; | 946 | struct net_device *netdev = conn->netdev; |
940 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 947 | struct netiucv_priv *privptr = netdev_priv(netdev); |
941 | 948 | ||
942 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 949 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
943 | 950 | ||
944 | fsm_deltimer(&conn->timer); | 951 | fsm_deltimer(&conn->timer); |
945 | fsm_newstate(fi, CONN_STATE_STOPPED); | 952 | fsm_newstate(fi, CONN_STATE_STOPPED); |
946 | netiucv_purge_skb_queue(&conn->collect_queue); | 953 | netiucv_purge_skb_queue(&conn->collect_queue); |
947 | if (conn->handle) | 954 | if (conn->path) { |
948 | IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); | 955 | IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); |
949 | iucv_unregister_program(conn->handle); | 956 | iucv_path_sever(conn->path, iucvMagic); |
950 | conn->handle = NULL; | 957 | kfree(conn->path); |
958 | conn->path = NULL; | ||
959 | } | ||
951 | netiucv_purge_skb_queue(&conn->commit_queue); | 960 | netiucv_purge_skb_queue(&conn->commit_queue); |
952 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); | 961 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); |
953 | } | 962 | } |
954 | 963 | ||
955 | static void | 964 | static void conn_action_inval(fsm_instance *fi, int event, void *arg) |
956 | conn_action_inval(fsm_instance *fi, int event, void *arg) | ||
957 | { | 965 | { |
958 | struct iucv_event *ev = (struct iucv_event *)arg; | 966 | struct iucv_connection *conn = arg; |
959 | struct iucv_connection *conn = ev->conn; | ||
960 | struct net_device *netdev = conn->netdev; | 967 | struct net_device *netdev = conn->netdev; |
961 | 968 | ||
962 | PRINT_WARN("%s: Cannot connect without username\n", | 969 | PRINT_WARN("%s: Cannot connect without username\n", netdev->name); |
963 | netdev->name); | ||
964 | IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); | 970 | IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); |
965 | } | 971 | } |
966 | 972 | ||
@@ -999,29 +1005,27 @@ static const fsm_node conn_fsm[] = { | |||
999 | static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); | 1005 | static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); |
1000 | 1006 | ||
1001 | 1007 | ||
1002 | /** | 1008 | /* |
1003 | * Actions for interface - statemachine. | 1009 | * Actions for interface - statemachine. |
1004 | *****************************************************************************/ | 1010 | */ |
1005 | 1011 | ||
1006 | /** | 1012 | /** |
1007 | * Startup connection by sending CONN_EVENT_START to it. | 1013 | * dev_action_start |
1014 | * @fi: An instance of an interface statemachine. | ||
1015 | * @event: The event, just happened. | ||
1016 | * @arg: Generic pointer, casted from struct net_device * upon call. | ||
1008 | * | 1017 | * |
1009 | * @param fi An instance of an interface statemachine. | 1018 | * Startup connection by sending CONN_EVENT_START to it. |
1010 | * @param event The event, just happened. | ||
1011 | * @param arg Generic pointer, casted from struct net_device * upon call. | ||
1012 | */ | 1019 | */ |
1013 | static void | 1020 | static void dev_action_start(fsm_instance *fi, int event, void *arg) |
1014 | dev_action_start(fsm_instance *fi, int event, void *arg) | ||
1015 | { | 1021 | { |
1016 | struct net_device *dev = (struct net_device *)arg; | 1022 | struct net_device *dev = arg; |
1017 | struct netiucv_priv *privptr = dev->priv; | 1023 | struct netiucv_priv *privptr = netdev_priv(dev); |
1018 | struct iucv_event ev; | ||
1019 | 1024 | ||
1020 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1025 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1021 | 1026 | ||
1022 | ev.conn = privptr->conn; | ||
1023 | fsm_newstate(fi, DEV_STATE_STARTWAIT); | 1027 | fsm_newstate(fi, DEV_STATE_STARTWAIT); |
1024 | fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev); | 1028 | fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); |
1025 | } | 1029 | } |
1026 | 1030 | ||
1027 | /** | 1031 | /** |
@@ -1034,8 +1038,8 @@ dev_action_start(fsm_instance *fi, int event, void *arg) | |||
1034 | static void | 1038 | static void |
1035 | dev_action_stop(fsm_instance *fi, int event, void *arg) | 1039 | dev_action_stop(fsm_instance *fi, int event, void *arg) |
1036 | { | 1040 | { |
1037 | struct net_device *dev = (struct net_device *)arg; | 1041 | struct net_device *dev = arg; |
1038 | struct netiucv_priv *privptr = dev->priv; | 1042 | struct netiucv_priv *privptr = netdev_priv(dev); |
1039 | struct iucv_event ev; | 1043 | struct iucv_event ev; |
1040 | 1044 | ||
1041 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1045 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
@@ -1057,8 +1061,8 @@ dev_action_stop(fsm_instance *fi, int event, void *arg) | |||
1057 | static void | 1061 | static void |
1058 | dev_action_connup(fsm_instance *fi, int event, void *arg) | 1062 | dev_action_connup(fsm_instance *fi, int event, void *arg) |
1059 | { | 1063 | { |
1060 | struct net_device *dev = (struct net_device *)arg; | 1064 | struct net_device *dev = arg; |
1061 | struct netiucv_priv *privptr = dev->priv; | 1065 | struct netiucv_priv *privptr = netdev_priv(dev); |
1062 | 1066 | ||
1063 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1067 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1064 | 1068 | ||
@@ -1131,11 +1135,13 @@ static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); | |||
1131 | * | 1135 | * |
1132 | * @return 0 on success, -ERRNO on failure. (Never fails.) | 1136 | * @return 0 on success, -ERRNO on failure. (Never fails.) |
1133 | */ | 1137 | */ |
1134 | static int | 1138 | static int netiucv_transmit_skb(struct iucv_connection *conn, |
1135 | netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | 1139 | struct sk_buff *skb) |
1140 | { | ||
1141 | struct iucv_message msg; | ||
1136 | unsigned long saveflags; | 1142 | unsigned long saveflags; |
1137 | ll_header header; | 1143 | struct ll_header header; |
1138 | int rc = 0; | 1144 | int rc; |
1139 | 1145 | ||
1140 | if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { | 1146 | if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { |
1141 | int l = skb->len + NETIUCV_HDRLEN; | 1147 | int l = skb->len + NETIUCV_HDRLEN; |
@@ -1145,11 +1151,12 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1145 | (conn->max_buffsize - NETIUCV_HDRLEN)) { | 1151 | (conn->max_buffsize - NETIUCV_HDRLEN)) { |
1146 | rc = -EBUSY; | 1152 | rc = -EBUSY; |
1147 | IUCV_DBF_TEXT(data, 2, | 1153 | IUCV_DBF_TEXT(data, 2, |
1148 | "EBUSY from netiucv_transmit_skb\n"); | 1154 | "EBUSY from netiucv_transmit_skb\n"); |
1149 | } else { | 1155 | } else { |
1150 | atomic_inc(&skb->users); | 1156 | atomic_inc(&skb->users); |
1151 | skb_queue_tail(&conn->collect_queue, skb); | 1157 | skb_queue_tail(&conn->collect_queue, skb); |
1152 | conn->collect_len += l; | 1158 | conn->collect_len += l; |
1159 | rc = 0; | ||
1153 | } | 1160 | } |
1154 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); | 1161 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); |
1155 | } else { | 1162 | } else { |
@@ -1188,9 +1195,10 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1188 | fsm_newstate(conn->fsm, CONN_STATE_TX); | 1195 | fsm_newstate(conn->fsm, CONN_STATE_TX); |
1189 | conn->prof.send_stamp = xtime; | 1196 | conn->prof.send_stamp = xtime; |
1190 | 1197 | ||
1191 | rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, | 1198 | msg.tag = 1; |
1192 | 0, nskb->data, nskb->len); | 1199 | msg.class = 0; |
1193 | /* Shut up, gcc! nskb is always below 2G. */ | 1200 | rc = iucv_message_send(conn->path, &msg, 0, 0, |
1201 | nskb->data, nskb->len); | ||
1194 | conn->prof.doios_single++; | 1202 | conn->prof.doios_single++; |
1195 | conn->prof.txlen += skb->len; | 1203 | conn->prof.txlen += skb->len; |
1196 | conn->prof.tx_pending++; | 1204 | conn->prof.tx_pending++; |
@@ -1200,7 +1208,7 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1200 | struct netiucv_priv *privptr; | 1208 | struct netiucv_priv *privptr; |
1201 | fsm_newstate(conn->fsm, CONN_STATE_IDLE); | 1209 | fsm_newstate(conn->fsm, CONN_STATE_IDLE); |
1202 | conn->prof.tx_pending--; | 1210 | conn->prof.tx_pending--; |
1203 | privptr = (struct netiucv_priv *)conn->netdev->priv; | 1211 | privptr = netdev_priv(conn->netdev); |
1204 | if (privptr) | 1212 | if (privptr) |
1205 | privptr->stats.tx_errors++; | 1213 | privptr->stats.tx_errors++; |
1206 | if (copied) | 1214 | if (copied) |
@@ -1226,9 +1234,9 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1226 | return rc; | 1234 | return rc; |
1227 | } | 1235 | } |
1228 | 1236 | ||
1229 | /** | 1237 | /* |
1230 | * Interface API for upper network layers | 1238 | * Interface API for upper network layers |
1231 | *****************************************************************************/ | 1239 | */ |
1232 | 1240 | ||
1233 | /** | 1241 | /** |
1234 | * Open an interface. | 1242 | * Open an interface. |
@@ -1238,9 +1246,11 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1238 | * | 1246 | * |
1239 | * @return 0 on success, -ERRNO on failure. (Never fails.) | 1247 | * @return 0 on success, -ERRNO on failure. (Never fails.) |
1240 | */ | 1248 | */ |
1241 | static int | 1249 | static int netiucv_open(struct net_device *dev) |
1242 | netiucv_open(struct net_device *dev) { | 1250 | { |
1243 | fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev); | 1251 | struct netiucv_priv *priv = netdev_priv(dev); |
1252 | |||
1253 | fsm_event(priv->fsm, DEV_EVENT_START, dev); | ||
1244 | return 0; | 1254 | return 0; |
1245 | } | 1255 | } |
1246 | 1256 | ||
@@ -1252,9 +1262,11 @@ netiucv_open(struct net_device *dev) { | |||
1252 | * | 1262 | * |
1253 | * @return 0 on success, -ERRNO on failure. (Never fails.) | 1263 | * @return 0 on success, -ERRNO on failure. (Never fails.) |
1254 | */ | 1264 | */ |
1255 | static int | 1265 | static int netiucv_close(struct net_device *dev) |
1256 | netiucv_close(struct net_device *dev) { | 1266 | { |
1257 | fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev); | 1267 | struct netiucv_priv *priv = netdev_priv(dev); |
1268 | |||
1269 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); | ||
1258 | return 0; | 1270 | return 0; |
1259 | } | 1271 | } |
1260 | 1272 | ||
@@ -1271,8 +1283,8 @@ netiucv_close(struct net_device *dev) { | |||
1271 | */ | 1283 | */ |
1272 | static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | 1284 | static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) |
1273 | { | 1285 | { |
1274 | int rc = 0; | 1286 | struct netiucv_priv *privptr = netdev_priv(dev); |
1275 | struct netiucv_priv *privptr = dev->priv; | 1287 | int rc; |
1276 | 1288 | ||
1277 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); | 1289 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); |
1278 | /** | 1290 | /** |
@@ -1312,40 +1324,41 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | |||
1312 | return -EBUSY; | 1324 | return -EBUSY; |
1313 | } | 1325 | } |
1314 | dev->trans_start = jiffies; | 1326 | dev->trans_start = jiffies; |
1315 | if (netiucv_transmit_skb(privptr->conn, skb)) | 1327 | rc = netiucv_transmit_skb(privptr->conn, skb) != 0; |
1316 | rc = 1; | ||
1317 | netiucv_clear_busy(dev); | 1328 | netiucv_clear_busy(dev); |
1318 | return rc; | 1329 | return rc; |
1319 | } | 1330 | } |
1320 | 1331 | ||
1321 | /** | 1332 | /** |
1322 | * Returns interface statistics of a device. | 1333 | * netiucv_stats |
1334 | * @dev: Pointer to interface struct. | ||
1323 | * | 1335 | * |
1324 | * @param dev Pointer to interface struct. | 1336 | * Returns interface statistics of a device. |
1325 | * | 1337 | * |
1326 | * @return Pointer to stats struct of this interface. | 1338 | * Returns pointer to stats struct of this interface. |
1327 | */ | 1339 | */ |
1328 | static struct net_device_stats * | 1340 | static struct net_device_stats *netiucv_stats (struct net_device * dev) |
1329 | netiucv_stats (struct net_device * dev) | ||
1330 | { | 1341 | { |
1342 | struct netiucv_priv *priv = netdev_priv(dev); | ||
1343 | |||
1331 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); | 1344 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); |
1332 | return &((struct netiucv_priv *)dev->priv)->stats; | 1345 | return &priv->stats; |
1333 | } | 1346 | } |
1334 | 1347 | ||
1335 | /** | 1348 | /** |
1336 | * Sets MTU of an interface. | 1349 | * netiucv_change_mtu |
1350 | * @dev: Pointer to interface struct. | ||
1351 | * @new_mtu: The new MTU to use for this interface. | ||
1337 | * | 1352 | * |
1338 | * @param dev Pointer to interface struct. | 1353 | * Sets MTU of an interface. |
1339 | * @param new_mtu The new MTU to use for this interface. | ||
1340 | * | 1354 | * |
1341 | * @return 0 on success, -EINVAL if MTU is out of valid range. | 1355 | * Returns 0 on success, -EINVAL if MTU is out of valid range. |
1342 | * (valid range is 576 .. NETIUCV_MTU_MAX). | 1356 | * (valid range is 576 .. NETIUCV_MTU_MAX). |
1343 | */ | 1357 | */ |
1344 | static int | 1358 | static int netiucv_change_mtu(struct net_device * dev, int new_mtu) |
1345 | netiucv_change_mtu (struct net_device * dev, int new_mtu) | ||
1346 | { | 1359 | { |
1347 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1360 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1348 | if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) { | 1361 | if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { |
1349 | IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); | 1362 | IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); |
1350 | return -EINVAL; | 1363 | return -EINVAL; |
1351 | } | 1364 | } |
@@ -1353,12 +1366,12 @@ netiucv_change_mtu (struct net_device * dev, int new_mtu) | |||
1353 | return 0; | 1366 | return 0; |
1354 | } | 1367 | } |
1355 | 1368 | ||
1356 | /** | 1369 | /* |
1357 | * attributes in sysfs | 1370 | * attributes in sysfs |
1358 | *****************************************************************************/ | 1371 | */ |
1359 | 1372 | ||
1360 | static ssize_t | 1373 | static ssize_t user_show(struct device *dev, struct device_attribute *attr, |
1361 | user_show (struct device *dev, struct device_attribute *attr, char *buf) | 1374 | char *buf) |
1362 | { | 1375 | { |
1363 | struct netiucv_priv *priv = dev->driver_data; | 1376 | struct netiucv_priv *priv = dev->driver_data; |
1364 | 1377 | ||
@@ -1366,8 +1379,8 @@ user_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1366 | return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); | 1379 | return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); |
1367 | } | 1380 | } |
1368 | 1381 | ||
1369 | static ssize_t | 1382 | static ssize_t user_write(struct device *dev, struct device_attribute *attr, |
1370 | user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1383 | const char *buf, size_t count) |
1371 | { | 1384 | { |
1372 | struct netiucv_priv *priv = dev->driver_data; | 1385 | struct netiucv_priv *priv = dev->driver_data; |
1373 | struct net_device *ndev = priv->conn->netdev; | 1386 | struct net_device *ndev = priv->conn->netdev; |
@@ -1375,80 +1388,70 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1375 | char *tmp; | 1388 | char *tmp; |
1376 | char username[9]; | 1389 | char username[9]; |
1377 | int i; | 1390 | int i; |
1378 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | 1391 | struct iucv_connection *cp; |
1379 | unsigned long flags; | ||
1380 | 1392 | ||
1381 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1393 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1382 | if (count>9) { | 1394 | if (count > 9) { |
1383 | PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); | 1395 | PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); |
1384 | IUCV_DBF_TEXT_(setup, 2, | 1396 | IUCV_DBF_TEXT_(setup, 2, |
1385 | "%d is length of username\n", (int)count); | 1397 | "%d is length of username\n", (int) count); |
1386 | return -EINVAL; | 1398 | return -EINVAL; |
1387 | } | 1399 | } |
1388 | 1400 | ||
1389 | tmp = strsep((char **) &buf, "\n"); | 1401 | tmp = strsep((char **) &buf, "\n"); |
1390 | for (i=0, p=tmp; i<8 && *p; i++, p++) { | 1402 | for (i = 0, p = tmp; i < 8 && *p; i++, p++) { |
1391 | if (isalnum(*p) || (*p == '$')) | 1403 | if (isalnum(*p) || (*p == '$')) { |
1392 | username[i]= toupper(*p); | 1404 | username[i]= toupper(*p); |
1393 | else if (*p == '\n') { | 1405 | continue; |
1406 | } | ||
1407 | if (*p == '\n') { | ||
1394 | /* trailing lf, grr */ | 1408 | /* trailing lf, grr */ |
1395 | break; | 1409 | break; |
1396 | } else { | ||
1397 | PRINT_WARN("netiucv: Invalid char %c in username!\n", | ||
1398 | *p); | ||
1399 | IUCV_DBF_TEXT_(setup, 2, | ||
1400 | "username: invalid character %c\n", | ||
1401 | *p); | ||
1402 | return -EINVAL; | ||
1403 | } | 1410 | } |
1411 | PRINT_WARN("netiucv: Invalid char %c in username!\n", *p); | ||
1412 | IUCV_DBF_TEXT_(setup, 2, | ||
1413 | "username: invalid character %c\n", *p); | ||
1414 | return -EINVAL; | ||
1404 | } | 1415 | } |
1405 | while (i<8) | 1416 | while (i < 8) |
1406 | username[i++] = ' '; | 1417 | username[i++] = ' '; |
1407 | username[8] = '\0'; | 1418 | username[8] = '\0'; |
1408 | 1419 | ||
1409 | if (memcmp(username, priv->conn->userid, 9)) { | 1420 | if (memcmp(username, priv->conn->userid, 9) && |
1410 | /* username changed */ | 1421 | (ndev->flags & (IFF_UP | IFF_RUNNING))) { |
1411 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 1422 | /* username changed while the interface is active. */ |
1412 | PRINT_WARN( | 1423 | PRINT_WARN("netiucv: device %s active, connected to %s\n", |
1413 | "netiucv: device %s active, connected to %s\n", | 1424 | dev->bus_id, priv->conn->userid); |
1414 | dev->bus_id, priv->conn->userid); | 1425 | PRINT_WARN("netiucv: user cannot be updated\n"); |
1415 | PRINT_WARN("netiucv: user cannot be updated\n"); | 1426 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); |
1416 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); | 1427 | return -EBUSY; |
1417 | return -EBUSY; | 1428 | } |
1429 | read_lock_bh(&iucv_connection_rwlock); | ||
1430 | list_for_each_entry(cp, &iucv_connection_list, list) { | ||
1431 | if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { | ||
1432 | read_unlock_bh(&iucv_connection_rwlock); | ||
1433 | PRINT_WARN("netiucv: Connection to %s already " | ||
1434 | "exists\n", username); | ||
1435 | return -EEXIST; | ||
1418 | } | 1436 | } |
1419 | } | 1437 | } |
1420 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1438 | read_unlock_bh(&iucv_connection_rwlock); |
1421 | while (*clist) { | ||
1422 | if (!strncmp(username, (*clist)->userid, 9) || | ||
1423 | ((*clist)->netdev != ndev)) | ||
1424 | break; | ||
1425 | clist = &((*clist)->next); | ||
1426 | } | ||
1427 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
1428 | if (*clist) { | ||
1429 | PRINT_WARN("netiucv: Connection to %s already exists\n", | ||
1430 | username); | ||
1431 | return -EEXIST; | ||
1432 | } | ||
1433 | memcpy(priv->conn->userid, username, 9); | 1439 | memcpy(priv->conn->userid, username, 9); |
1434 | |||
1435 | return count; | 1440 | return count; |
1436 | |||
1437 | } | 1441 | } |
1438 | 1442 | ||
1439 | static DEVICE_ATTR(user, 0644, user_show, user_write); | 1443 | static DEVICE_ATTR(user, 0644, user_show, user_write); |
1440 | 1444 | ||
1441 | static ssize_t | 1445 | static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, |
1442 | buffer_show (struct device *dev, struct device_attribute *attr, char *buf) | 1446 | char *buf) |
1443 | { | 1447 | { struct netiucv_priv *priv = dev->driver_data; |
1444 | struct netiucv_priv *priv = dev->driver_data; | ||
1445 | 1448 | ||
1446 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); | 1449 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); |
1447 | return sprintf(buf, "%d\n", priv->conn->max_buffsize); | 1450 | return sprintf(buf, "%d\n", priv->conn->max_buffsize); |
1448 | } | 1451 | } |
1449 | 1452 | ||
1450 | static ssize_t | 1453 | static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, |
1451 | buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1454 | const char *buf, size_t count) |
1452 | { | 1455 | { |
1453 | struct netiucv_priv *priv = dev->driver_data; | 1456 | struct netiucv_priv *priv = dev->driver_data; |
1454 | struct net_device *ndev = priv->conn->netdev; | 1457 | struct net_device *ndev = priv->conn->netdev; |
@@ -1502,8 +1505,8 @@ buffer_write (struct device *dev, struct device_attribute *attr, const char *buf | |||
1502 | 1505 | ||
1503 | static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); | 1506 | static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); |
1504 | 1507 | ||
1505 | static ssize_t | 1508 | static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, |
1506 | dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | 1509 | char *buf) |
1507 | { | 1510 | { |
1508 | struct netiucv_priv *priv = dev->driver_data; | 1511 | struct netiucv_priv *priv = dev->driver_data; |
1509 | 1512 | ||
@@ -1513,8 +1516,8 @@ dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1513 | 1516 | ||
1514 | static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); | 1517 | static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); |
1515 | 1518 | ||
1516 | static ssize_t | 1519 | static ssize_t conn_fsm_show (struct device *dev, |
1517 | conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | 1520 | struct device_attribute *attr, char *buf) |
1518 | { | 1521 | { |
1519 | struct netiucv_priv *priv = dev->driver_data; | 1522 | struct netiucv_priv *priv = dev->driver_data; |
1520 | 1523 | ||
@@ -1524,8 +1527,8 @@ conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1524 | 1527 | ||
1525 | static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); | 1528 | static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); |
1526 | 1529 | ||
1527 | static ssize_t | 1530 | static ssize_t maxmulti_show (struct device *dev, |
1528 | maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) | 1531 | struct device_attribute *attr, char *buf) |
1529 | { | 1532 | { |
1530 | struct netiucv_priv *priv = dev->driver_data; | 1533 | struct netiucv_priv *priv = dev->driver_data; |
1531 | 1534 | ||
@@ -1533,8 +1536,9 @@ maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1533 | return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); | 1536 | return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); |
1534 | } | 1537 | } |
1535 | 1538 | ||
1536 | static ssize_t | 1539 | static ssize_t maxmulti_write (struct device *dev, |
1537 | maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1540 | struct device_attribute *attr, |
1541 | const char *buf, size_t count) | ||
1538 | { | 1542 | { |
1539 | struct netiucv_priv *priv = dev->driver_data; | 1543 | struct netiucv_priv *priv = dev->driver_data; |
1540 | 1544 | ||
@@ -1545,8 +1549,8 @@ maxmulti_write (struct device *dev, struct device_attribute *attr, const char *b | |||
1545 | 1549 | ||
1546 | static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); | 1550 | static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); |
1547 | 1551 | ||
1548 | static ssize_t | 1552 | static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, |
1549 | maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) | 1553 | char *buf) |
1550 | { | 1554 | { |
1551 | struct netiucv_priv *priv = dev->driver_data; | 1555 | struct netiucv_priv *priv = dev->driver_data; |
1552 | 1556 | ||
@@ -1554,8 +1558,8 @@ maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1554 | return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); | 1558 | return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); |
1555 | } | 1559 | } |
1556 | 1560 | ||
1557 | static ssize_t | 1561 | static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, |
1558 | maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1562 | const char *buf, size_t count) |
1559 | { | 1563 | { |
1560 | struct netiucv_priv *priv = dev->driver_data; | 1564 | struct netiucv_priv *priv = dev->driver_data; |
1561 | 1565 | ||
@@ -1566,8 +1570,8 @@ maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1566 | 1570 | ||
1567 | static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); | 1571 | static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); |
1568 | 1572 | ||
1569 | static ssize_t | 1573 | static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, |
1570 | sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | 1574 | char *buf) |
1571 | { | 1575 | { |
1572 | struct netiucv_priv *priv = dev->driver_data; | 1576 | struct netiucv_priv *priv = dev->driver_data; |
1573 | 1577 | ||
@@ -1575,8 +1579,8 @@ sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1575 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); | 1579 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); |
1576 | } | 1580 | } |
1577 | 1581 | ||
1578 | static ssize_t | 1582 | static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, |
1579 | sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1583 | const char *buf, size_t count) |
1580 | { | 1584 | { |
1581 | struct netiucv_priv *priv = dev->driver_data; | 1585 | struct netiucv_priv *priv = dev->driver_data; |
1582 | 1586 | ||
@@ -1587,8 +1591,8 @@ sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1587 | 1591 | ||
1588 | static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); | 1592 | static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); |
1589 | 1593 | ||
1590 | static ssize_t | 1594 | static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, |
1591 | mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | 1595 | char *buf) |
1592 | { | 1596 | { |
1593 | struct netiucv_priv *priv = dev->driver_data; | 1597 | struct netiucv_priv *priv = dev->driver_data; |
1594 | 1598 | ||
@@ -1596,8 +1600,8 @@ mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1596 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); | 1600 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); |
1597 | } | 1601 | } |
1598 | 1602 | ||
1599 | static ssize_t | 1603 | static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, |
1600 | mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1604 | const char *buf, size_t count) |
1601 | { | 1605 | { |
1602 | struct netiucv_priv *priv = dev->driver_data; | 1606 | struct netiucv_priv *priv = dev->driver_data; |
1603 | 1607 | ||
@@ -1608,8 +1612,8 @@ mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1608 | 1612 | ||
1609 | static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); | 1613 | static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); |
1610 | 1614 | ||
1611 | static ssize_t | 1615 | static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, |
1612 | txlen_show (struct device *dev, struct device_attribute *attr, char *buf) | 1616 | char *buf) |
1613 | { | 1617 | { |
1614 | struct netiucv_priv *priv = dev->driver_data; | 1618 | struct netiucv_priv *priv = dev->driver_data; |
1615 | 1619 | ||
@@ -1617,8 +1621,8 @@ txlen_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1617 | return sprintf(buf, "%ld\n", priv->conn->prof.txlen); | 1621 | return sprintf(buf, "%ld\n", priv->conn->prof.txlen); |
1618 | } | 1622 | } |
1619 | 1623 | ||
1620 | static ssize_t | 1624 | static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, |
1621 | txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1625 | const char *buf, size_t count) |
1622 | { | 1626 | { |
1623 | struct netiucv_priv *priv = dev->driver_data; | 1627 | struct netiucv_priv *priv = dev->driver_data; |
1624 | 1628 | ||
@@ -1629,8 +1633,8 @@ txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1629 | 1633 | ||
1630 | static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); | 1634 | static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); |
1631 | 1635 | ||
1632 | static ssize_t | 1636 | static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, |
1633 | txtime_show (struct device *dev, struct device_attribute *attr, char *buf) | 1637 | char *buf) |
1634 | { | 1638 | { |
1635 | struct netiucv_priv *priv = dev->driver_data; | 1639 | struct netiucv_priv *priv = dev->driver_data; |
1636 | 1640 | ||
@@ -1638,8 +1642,8 @@ txtime_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1638 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); | 1642 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); |
1639 | } | 1643 | } |
1640 | 1644 | ||
1641 | static ssize_t | 1645 | static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, |
1642 | txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1646 | const char *buf, size_t count) |
1643 | { | 1647 | { |
1644 | struct netiucv_priv *priv = dev->driver_data; | 1648 | struct netiucv_priv *priv = dev->driver_data; |
1645 | 1649 | ||
@@ -1650,8 +1654,8 @@ txtime_write (struct device *dev, struct device_attribute *attr, const char *buf | |||
1650 | 1654 | ||
1651 | static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); | 1655 | static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); |
1652 | 1656 | ||
1653 | static ssize_t | 1657 | static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, |
1654 | txpend_show (struct device *dev, struct device_attribute *attr, char *buf) | 1658 | char *buf) |
1655 | { | 1659 | { |
1656 | struct netiucv_priv *priv = dev->driver_data; | 1660 | struct netiucv_priv *priv = dev->driver_data; |
1657 | 1661 | ||
@@ -1659,8 +1663,8 @@ txpend_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1659 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); | 1663 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); |
1660 | } | 1664 | } |
1661 | 1665 | ||
1662 | static ssize_t | 1666 | static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, |
1663 | txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1667 | const char *buf, size_t count) |
1664 | { | 1668 | { |
1665 | struct netiucv_priv *priv = dev->driver_data; | 1669 | struct netiucv_priv *priv = dev->driver_data; |
1666 | 1670 | ||
@@ -1671,8 +1675,8 @@ txpend_write (struct device *dev, struct device_attribute *attr, const char *buf | |||
1671 | 1675 | ||
1672 | static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); | 1676 | static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); |
1673 | 1677 | ||
1674 | static ssize_t | 1678 | static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, |
1675 | txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) | 1679 | char *buf) |
1676 | { | 1680 | { |
1677 | struct netiucv_priv *priv = dev->driver_data; | 1681 | struct netiucv_priv *priv = dev->driver_data; |
1678 | 1682 | ||
@@ -1680,8 +1684,8 @@ txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1680 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); | 1684 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); |
1681 | } | 1685 | } |
1682 | 1686 | ||
1683 | static ssize_t | 1687 | static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, |
1684 | txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1688 | const char *buf, size_t count) |
1685 | { | 1689 | { |
1686 | struct netiucv_priv *priv = dev->driver_data; | 1690 | struct netiucv_priv *priv = dev->driver_data; |
1687 | 1691 | ||
@@ -1721,8 +1725,7 @@ static struct attribute_group netiucv_stat_attr_group = { | |||
1721 | .attrs = netiucv_stat_attrs, | 1725 | .attrs = netiucv_stat_attrs, |
1722 | }; | 1726 | }; |
1723 | 1727 | ||
1724 | static inline int | 1728 | static inline int netiucv_add_files(struct device *dev) |
1725 | netiucv_add_files(struct device *dev) | ||
1726 | { | 1729 | { |
1727 | int ret; | 1730 | int ret; |
1728 | 1731 | ||
@@ -1736,18 +1739,16 @@ netiucv_add_files(struct device *dev) | |||
1736 | return ret; | 1739 | return ret; |
1737 | } | 1740 | } |
1738 | 1741 | ||
1739 | static inline void | 1742 | static inline void netiucv_remove_files(struct device *dev) |
1740 | netiucv_remove_files(struct device *dev) | ||
1741 | { | 1743 | { |
1742 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1744 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1743 | sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); | 1745 | sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); |
1744 | sysfs_remove_group(&dev->kobj, &netiucv_attr_group); | 1746 | sysfs_remove_group(&dev->kobj, &netiucv_attr_group); |
1745 | } | 1747 | } |
1746 | 1748 | ||
1747 | static int | 1749 | static int netiucv_register_device(struct net_device *ndev) |
1748 | netiucv_register_device(struct net_device *ndev) | ||
1749 | { | 1750 | { |
1750 | struct netiucv_priv *priv = ndev->priv; | 1751 | struct netiucv_priv *priv = netdev_priv(ndev); |
1751 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); | 1752 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
1752 | int ret; | 1753 | int ret; |
1753 | 1754 | ||
@@ -1786,8 +1787,7 @@ out_unreg: | |||
1786 | return ret; | 1787 | return ret; |
1787 | } | 1788 | } |
1788 | 1789 | ||
1789 | static void | 1790 | static void netiucv_unregister_device(struct device *dev) |
1790 | netiucv_unregister_device(struct device *dev) | ||
1791 | { | 1791 | { |
1792 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1792 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1793 | netiucv_remove_files(dev); | 1793 | netiucv_remove_files(dev); |
@@ -1798,107 +1798,89 @@ netiucv_unregister_device(struct device *dev) | |||
1798 | * Allocate and initialize a new connection structure. | 1798 | * Allocate and initialize a new connection structure. |
1799 | * Add it to the list of netiucv connections; | 1799 | * Add it to the list of netiucv connections; |
1800 | */ | 1800 | */ |
1801 | static struct iucv_connection * | 1801 | static struct iucv_connection *netiucv_new_connection(struct net_device *dev, |
1802 | netiucv_new_connection(struct net_device *dev, char *username) | 1802 | char *username) |
1803 | { | 1803 | { |
1804 | unsigned long flags; | 1804 | struct iucv_connection *conn; |
1805 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1806 | struct iucv_connection *conn = | ||
1807 | kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); | ||
1808 | |||
1809 | if (conn) { | ||
1810 | skb_queue_head_init(&conn->collect_queue); | ||
1811 | skb_queue_head_init(&conn->commit_queue); | ||
1812 | spin_lock_init(&conn->collect_lock); | ||
1813 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; | ||
1814 | conn->netdev = dev; | ||
1815 | |||
1816 | conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, | ||
1817 | GFP_KERNEL | GFP_DMA); | ||
1818 | if (!conn->rx_buff) { | ||
1819 | kfree(conn); | ||
1820 | return NULL; | ||
1821 | } | ||
1822 | conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, | ||
1823 | GFP_KERNEL | GFP_DMA); | ||
1824 | if (!conn->tx_buff) { | ||
1825 | kfree_skb(conn->rx_buff); | ||
1826 | kfree(conn); | ||
1827 | return NULL; | ||
1828 | } | ||
1829 | conn->fsm = init_fsm("netiucvconn", conn_state_names, | ||
1830 | conn_event_names, NR_CONN_STATES, | ||
1831 | NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, | ||
1832 | GFP_KERNEL); | ||
1833 | if (!conn->fsm) { | ||
1834 | kfree_skb(conn->tx_buff); | ||
1835 | kfree_skb(conn->rx_buff); | ||
1836 | kfree(conn); | ||
1837 | return NULL; | ||
1838 | } | ||
1839 | fsm_settimer(conn->fsm, &conn->timer); | ||
1840 | fsm_newstate(conn->fsm, CONN_STATE_INVALID); | ||
1841 | |||
1842 | if (username) { | ||
1843 | memcpy(conn->userid, username, 9); | ||
1844 | fsm_newstate(conn->fsm, CONN_STATE_STOPPED); | ||
1845 | } | ||
1846 | 1805 | ||
1847 | write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1806 | conn = kzalloc(sizeof(*conn), GFP_KERNEL); |
1848 | conn->next = *clist; | 1807 | if (!conn) |
1849 | *clist = conn; | 1808 | goto out; |
1850 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1809 | skb_queue_head_init(&conn->collect_queue); |
1810 | skb_queue_head_init(&conn->commit_queue); | ||
1811 | spin_lock_init(&conn->collect_lock); | ||
1812 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; | ||
1813 | conn->netdev = dev; | ||
1814 | |||
1815 | conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); | ||
1816 | if (!conn->rx_buff) | ||
1817 | goto out_conn; | ||
1818 | conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); | ||
1819 | if (!conn->tx_buff) | ||
1820 | goto out_rx; | ||
1821 | conn->fsm = init_fsm("netiucvconn", conn_state_names, | ||
1822 | conn_event_names, NR_CONN_STATES, | ||
1823 | NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, | ||
1824 | GFP_KERNEL); | ||
1825 | if (!conn->fsm) | ||
1826 | goto out_tx; | ||
1827 | |||
1828 | fsm_settimer(conn->fsm, &conn->timer); | ||
1829 | fsm_newstate(conn->fsm, CONN_STATE_INVALID); | ||
1830 | |||
1831 | if (username) { | ||
1832 | memcpy(conn->userid, username, 9); | ||
1833 | fsm_newstate(conn->fsm, CONN_STATE_STOPPED); | ||
1851 | } | 1834 | } |
1835 | |||
1836 | write_lock_bh(&iucv_connection_rwlock); | ||
1837 | list_add_tail(&conn->list, &iucv_connection_list); | ||
1838 | write_unlock_bh(&iucv_connection_rwlock); | ||
1852 | return conn; | 1839 | return conn; |
1840 | |||
1841 | out_tx: | ||
1842 | kfree_skb(conn->tx_buff); | ||
1843 | out_rx: | ||
1844 | kfree_skb(conn->rx_buff); | ||
1845 | out_conn: | ||
1846 | kfree(conn); | ||
1847 | out: | ||
1848 | return NULL; | ||
1853 | } | 1849 | } |
1854 | 1850 | ||
1855 | /** | 1851 | /** |
1856 | * Release a connection structure and remove it from the | 1852 | * Release a connection structure and remove it from the |
1857 | * list of netiucv connections. | 1853 | * list of netiucv connections. |
1858 | */ | 1854 | */ |
1859 | static void | 1855 | static void netiucv_remove_connection(struct iucv_connection *conn) |
1860 | netiucv_remove_connection(struct iucv_connection *conn) | ||
1861 | { | 1856 | { |
1862 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1863 | unsigned long flags; | ||
1864 | |||
1865 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1857 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1866 | if (conn == NULL) | 1858 | write_lock_bh(&iucv_connection_rwlock); |
1867 | return; | 1859 | list_del_init(&conn->list); |
1868 | write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1860 | write_unlock_bh(&iucv_connection_rwlock); |
1869 | while (*clist) { | 1861 | if (conn->path) { |
1870 | if (*clist == conn) { | 1862 | iucv_path_sever(conn->path, iucvMagic); |
1871 | *clist = conn->next; | 1863 | kfree(conn->path); |
1872 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1864 | conn->path = NULL; |
1873 | if (conn->handle) { | ||
1874 | iucv_unregister_program(conn->handle); | ||
1875 | conn->handle = NULL; | ||
1876 | } | ||
1877 | fsm_deltimer(&conn->timer); | ||
1878 | kfree_fsm(conn->fsm); | ||
1879 | kfree_skb(conn->rx_buff); | ||
1880 | kfree_skb(conn->tx_buff); | ||
1881 | return; | ||
1882 | } | ||
1883 | clist = &((*clist)->next); | ||
1884 | } | 1865 | } |
1885 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1866 | fsm_deltimer(&conn->timer); |
1867 | kfree_fsm(conn->fsm); | ||
1868 | kfree_skb(conn->rx_buff); | ||
1869 | kfree_skb(conn->tx_buff); | ||
1886 | } | 1870 | } |
1887 | 1871 | ||
1888 | /** | 1872 | /** |
1889 | * Release everything of a net device. | 1873 | * Release everything of a net device. |
1890 | */ | 1874 | */ |
1891 | static void | 1875 | static void netiucv_free_netdevice(struct net_device *dev) |
1892 | netiucv_free_netdevice(struct net_device *dev) | ||
1893 | { | 1876 | { |
1894 | struct netiucv_priv *privptr; | 1877 | struct netiucv_priv *privptr = netdev_priv(dev); |
1895 | 1878 | ||
1896 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1879 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1897 | 1880 | ||
1898 | if (!dev) | 1881 | if (!dev) |
1899 | return; | 1882 | return; |
1900 | 1883 | ||
1901 | privptr = (struct netiucv_priv *)dev->priv; | ||
1902 | if (privptr) { | 1884 | if (privptr) { |
1903 | if (privptr->conn) | 1885 | if (privptr->conn) |
1904 | netiucv_remove_connection(privptr->conn); | 1886 | netiucv_remove_connection(privptr->conn); |
@@ -1913,11 +1895,8 @@ netiucv_free_netdevice(struct net_device *dev) | |||
1913 | /** | 1895 | /** |
1914 | * Initialize a net device. (Called from kernel in alloc_netdev()) | 1896 | * Initialize a net device. (Called from kernel in alloc_netdev()) |
1915 | */ | 1897 | */ |
1916 | static void | 1898 | static void netiucv_setup_netdevice(struct net_device *dev) |
1917 | netiucv_setup_netdevice(struct net_device *dev) | ||
1918 | { | 1899 | { |
1919 | memset(dev->priv, 0, sizeof(struct netiucv_priv)); | ||
1920 | |||
1921 | dev->mtu = NETIUCV_MTU_DEFAULT; | 1900 | dev->mtu = NETIUCV_MTU_DEFAULT; |
1922 | dev->hard_start_xmit = netiucv_tx; | 1901 | dev->hard_start_xmit = netiucv_tx; |
1923 | dev->open = netiucv_open; | 1902 | dev->open = netiucv_open; |
@@ -1936,8 +1915,7 @@ netiucv_setup_netdevice(struct net_device *dev) | |||
1936 | /** | 1915 | /** |
1937 | * Allocate and initialize everything of a net device. | 1916 | * Allocate and initialize everything of a net device. |
1938 | */ | 1917 | */ |
1939 | static struct net_device * | 1918 | static struct net_device *netiucv_init_netdevice(char *username) |
1940 | netiucv_init_netdevice(char *username) | ||
1941 | { | 1919 | { |
1942 | struct netiucv_priv *privptr; | 1920 | struct netiucv_priv *privptr; |
1943 | struct net_device *dev; | 1921 | struct net_device *dev; |
@@ -1946,40 +1924,40 @@ netiucv_init_netdevice(char *username) | |||
1946 | netiucv_setup_netdevice); | 1924 | netiucv_setup_netdevice); |
1947 | if (!dev) | 1925 | if (!dev) |
1948 | return NULL; | 1926 | return NULL; |
1949 | if (dev_alloc_name(dev, dev->name) < 0) { | 1927 | if (dev_alloc_name(dev, dev->name) < 0) |
1950 | free_netdev(dev); | 1928 | goto out_netdev; |
1951 | return NULL; | ||
1952 | } | ||
1953 | 1929 | ||
1954 | privptr = (struct netiucv_priv *)dev->priv; | 1930 | privptr = netdev_priv(dev); |
1955 | privptr->fsm = init_fsm("netiucvdev", dev_state_names, | 1931 | privptr->fsm = init_fsm("netiucvdev", dev_state_names, |
1956 | dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, | 1932 | dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, |
1957 | dev_fsm, DEV_FSM_LEN, GFP_KERNEL); | 1933 | dev_fsm, DEV_FSM_LEN, GFP_KERNEL); |
1958 | if (!privptr->fsm) { | 1934 | if (!privptr->fsm) |
1959 | free_netdev(dev); | 1935 | goto out_netdev; |
1960 | return NULL; | 1936 | |
1961 | } | ||
1962 | privptr->conn = netiucv_new_connection(dev, username); | 1937 | privptr->conn = netiucv_new_connection(dev, username); |
1963 | if (!privptr->conn) { | 1938 | if (!privptr->conn) { |
1964 | kfree_fsm(privptr->fsm); | ||
1965 | free_netdev(dev); | ||
1966 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); | 1939 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); |
1967 | return NULL; | 1940 | goto out_fsm; |
1968 | } | 1941 | } |
1969 | fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); | 1942 | fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); |
1970 | |||
1971 | return dev; | 1943 | return dev; |
1944 | |||
1945 | out_fsm: | ||
1946 | kfree_fsm(privptr->fsm); | ||
1947 | out_netdev: | ||
1948 | free_netdev(dev); | ||
1949 | return NULL; | ||
1972 | } | 1950 | } |
1973 | 1951 | ||
1974 | static ssize_t | 1952 | static ssize_t conn_write(struct device_driver *drv, |
1975 | conn_write(struct device_driver *drv, const char *buf, size_t count) | 1953 | const char *buf, size_t count) |
1976 | { | 1954 | { |
1977 | char *p; | 1955 | const char *p; |
1978 | char username[9]; | 1956 | char username[9]; |
1979 | int i, ret; | 1957 | int i, rc; |
1980 | struct net_device *dev; | 1958 | struct net_device *dev; |
1981 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | 1959 | struct netiucv_priv *priv; |
1982 | unsigned long flags; | 1960 | struct iucv_connection *cp; |
1983 | 1961 | ||
1984 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1962 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1985 | if (count>9) { | 1963 | if (count>9) { |
@@ -1988,83 +1966,82 @@ conn_write(struct device_driver *drv, const char *buf, size_t count) | |||
1988 | return -EINVAL; | 1966 | return -EINVAL; |
1989 | } | 1967 | } |
1990 | 1968 | ||
1991 | for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { | 1969 | for (i = 0, p = buf; i < 8 && *p; i++, p++) { |
1992 | if (isalnum(*p) || (*p == '$')) | 1970 | if (isalnum(*p) || *p == '$') { |
1993 | username[i]= toupper(*p); | 1971 | username[i] = toupper(*p); |
1994 | else if (*p == '\n') { | 1972 | continue; |
1973 | } | ||
1974 | if (*p == '\n') | ||
1995 | /* trailing lf, grr */ | 1975 | /* trailing lf, grr */ |
1996 | break; | 1976 | break; |
1997 | } else { | 1977 | PRINT_WARN("netiucv: Invalid character in username!\n"); |
1998 | PRINT_WARN("netiucv: Invalid character in username!\n"); | 1978 | IUCV_DBF_TEXT_(setup, 2, |
1999 | IUCV_DBF_TEXT_(setup, 2, | 1979 | "conn_write: invalid character %c\n", *p); |
2000 | "conn_write: invalid character %c\n", *p); | 1980 | return -EINVAL; |
2001 | return -EINVAL; | ||
2002 | } | ||
2003 | } | 1981 | } |
2004 | while (i<8) | 1982 | while (i < 8) |
2005 | username[i++] = ' '; | 1983 | username[i++] = ' '; |
2006 | username[8] = '\0'; | 1984 | username[8] = '\0'; |
2007 | 1985 | ||
2008 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1986 | read_lock_bh(&iucv_connection_rwlock); |
2009 | while (*clist) { | 1987 | list_for_each_entry(cp, &iucv_connection_list, list) { |
2010 | if (!strncmp(username, (*clist)->userid, 9)) | 1988 | if (!strncmp(username, cp->userid, 9)) { |
2011 | break; | 1989 | read_unlock_bh(&iucv_connection_rwlock); |
2012 | clist = &((*clist)->next); | 1990 | PRINT_WARN("netiucv: Connection to %s already " |
2013 | } | 1991 | "exists\n", username); |
2014 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1992 | return -EEXIST; |
2015 | if (*clist) { | 1993 | } |
2016 | PRINT_WARN("netiucv: Connection to %s already exists\n", | ||
2017 | username); | ||
2018 | return -EEXIST; | ||
2019 | } | 1994 | } |
1995 | read_unlock_bh(&iucv_connection_rwlock); | ||
1996 | |||
2020 | dev = netiucv_init_netdevice(username); | 1997 | dev = netiucv_init_netdevice(username); |
2021 | if (!dev) { | 1998 | if (!dev) { |
2022 | PRINT_WARN( | 1999 | PRINT_WARN("netiucv: Could not allocate network device " |
2023 | "netiucv: Could not allocate network device structure " | 2000 | "structure for user '%s'\n", |
2024 | "for user '%s'\n", netiucv_printname(username)); | 2001 | netiucv_printname(username)); |
2025 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); | 2002 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); |
2026 | return -ENODEV; | 2003 | return -ENODEV; |
2027 | } | 2004 | } |
2028 | 2005 | ||
2029 | if ((ret = netiucv_register_device(dev))) { | 2006 | rc = netiucv_register_device(dev); |
2007 | if (rc) { | ||
2030 | IUCV_DBF_TEXT_(setup, 2, | 2008 | IUCV_DBF_TEXT_(setup, 2, |
2031 | "ret %d from netiucv_register_device\n", ret); | 2009 | "ret %d from netiucv_register_device\n", rc); |
2032 | goto out_free_ndev; | 2010 | goto out_free_ndev; |
2033 | } | 2011 | } |
2034 | 2012 | ||
2035 | /* sysfs magic */ | 2013 | /* sysfs magic */ |
2036 | SET_NETDEV_DEV(dev, | 2014 | priv = netdev_priv(dev); |
2037 | (struct device*)((struct netiucv_priv*)dev->priv)->dev); | 2015 | SET_NETDEV_DEV(dev, priv->dev); |
2038 | 2016 | ||
2039 | if ((ret = register_netdev(dev))) { | 2017 | rc = register_netdev(dev); |
2040 | netiucv_unregister_device((struct device*) | 2018 | if (rc) |
2041 | ((struct netiucv_priv*)dev->priv)->dev); | 2019 | goto out_unreg; |
2042 | goto out_free_ndev; | ||
2043 | } | ||
2044 | 2020 | ||
2045 | PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); | 2021 | PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); |
2046 | 2022 | ||
2047 | return count; | 2023 | return count; |
2048 | 2024 | ||
2025 | out_unreg: | ||
2026 | netiucv_unregister_device(priv->dev); | ||
2049 | out_free_ndev: | 2027 | out_free_ndev: |
2050 | PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); | 2028 | PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); |
2051 | IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); | 2029 | IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); |
2052 | netiucv_free_netdevice(dev); | 2030 | netiucv_free_netdevice(dev); |
2053 | return ret; | 2031 | return rc; |
2054 | } | 2032 | } |
2055 | 2033 | ||
2056 | DRIVER_ATTR(connection, 0200, NULL, conn_write); | 2034 | static DRIVER_ATTR(connection, 0200, NULL, conn_write); |
2057 | 2035 | ||
2058 | static ssize_t | 2036 | static ssize_t remove_write (struct device_driver *drv, |
2059 | remove_write (struct device_driver *drv, const char *buf, size_t count) | 2037 | const char *buf, size_t count) |
2060 | { | 2038 | { |
2061 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | 2039 | struct iucv_connection *cp; |
2062 | unsigned long flags; | ||
2063 | struct net_device *ndev; | 2040 | struct net_device *ndev; |
2064 | struct netiucv_priv *priv; | 2041 | struct netiucv_priv *priv; |
2065 | struct device *dev; | 2042 | struct device *dev; |
2066 | char name[IFNAMSIZ]; | 2043 | char name[IFNAMSIZ]; |
2067 | char *p; | 2044 | const char *p; |
2068 | int i; | 2045 | int i; |
2069 | 2046 | ||
2070 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2047 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
@@ -2072,33 +2049,27 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2072 | if (count >= IFNAMSIZ) | 2049 | if (count >= IFNAMSIZ) |
2073 | count = IFNAMSIZ - 1;; | 2050 | count = IFNAMSIZ - 1;; |
2074 | 2051 | ||
2075 | for (i=0, p=(char *)buf; i<count && *p; i++, p++) { | 2052 | for (i = 0, p = buf; i < count && *p; i++, p++) { |
2076 | if ((*p == '\n') || (*p == ' ')) { | 2053 | if (*p == '\n' || *p == ' ') |
2077 | /* trailing lf, grr */ | 2054 | /* trailing lf, grr */ |
2078 | break; | 2055 | break; |
2079 | } else { | 2056 | name[i] = *p; |
2080 | name[i]=*p; | ||
2081 | } | ||
2082 | } | 2057 | } |
2083 | name[i] = '\0'; | 2058 | name[i] = '\0'; |
2084 | 2059 | ||
2085 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 2060 | read_lock_bh(&iucv_connection_rwlock); |
2086 | while (*clist) { | 2061 | list_for_each_entry(cp, &iucv_connection_list, list) { |
2087 | ndev = (*clist)->netdev; | 2062 | ndev = cp->netdev; |
2088 | priv = (struct netiucv_priv*)ndev->priv; | 2063 | priv = netdev_priv(ndev); |
2089 | dev = priv->dev; | 2064 | dev = priv->dev; |
2090 | 2065 | if (strncmp(name, ndev->name, count)) | |
2091 | if (strncmp(name, ndev->name, count)) { | 2066 | continue; |
2092 | clist = &((*clist)->next); | 2067 | read_unlock_bh(&iucv_connection_rwlock); |
2093 | continue; | ||
2094 | } | ||
2095 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
2096 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 2068 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
2097 | PRINT_WARN( | 2069 | PRINT_WARN("netiucv: net device %s active with peer " |
2098 | "netiucv: net device %s active with peer %s\n", | 2070 | "%s\n", ndev->name, priv->conn->userid); |
2099 | ndev->name, priv->conn->userid); | ||
2100 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2071 | PRINT_WARN("netiucv: %s cannot be removed\n", |
2101 | ndev->name); | 2072 | ndev->name); |
2102 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2073 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
2103 | return -EBUSY; | 2074 | return -EBUSY; |
2104 | } | 2075 | } |
@@ -2106,75 +2077,94 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2106 | netiucv_unregister_device(dev); | 2077 | netiucv_unregister_device(dev); |
2107 | return count; | 2078 | return count; |
2108 | } | 2079 | } |
2109 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 2080 | read_unlock_bh(&iucv_connection_rwlock); |
2110 | PRINT_WARN("netiucv: net device %s unknown\n", name); | 2081 | PRINT_WARN("netiucv: net device %s unknown\n", name); |
2111 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); | 2082 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); |
2112 | return -EINVAL; | 2083 | return -EINVAL; |
2113 | } | 2084 | } |
2114 | 2085 | ||
2115 | DRIVER_ATTR(remove, 0200, NULL, remove_write); | 2086 | static DRIVER_ATTR(remove, 0200, NULL, remove_write); |
2116 | 2087 | ||
2117 | static void | 2088 | static struct attribute * netiucv_drv_attrs[] = { |
2118 | netiucv_banner(void) | 2089 | &driver_attr_connection.attr, |
2090 | &driver_attr_remove.attr, | ||
2091 | NULL, | ||
2092 | }; | ||
2093 | |||
2094 | static struct attribute_group netiucv_drv_attr_group = { | ||
2095 | .attrs = netiucv_drv_attrs, | ||
2096 | }; | ||
2097 | |||
2098 | static void netiucv_banner(void) | ||
2119 | { | 2099 | { |
2120 | PRINT_INFO("NETIUCV driver initialized\n"); | 2100 | PRINT_INFO("NETIUCV driver initialized\n"); |
2121 | } | 2101 | } |
2122 | 2102 | ||
2123 | static void __exit | 2103 | static void __exit netiucv_exit(void) |
2124 | netiucv_exit(void) | ||
2125 | { | 2104 | { |
2105 | struct iucv_connection *cp; | ||
2106 | struct net_device *ndev; | ||
2107 | struct netiucv_priv *priv; | ||
2108 | struct device *dev; | ||
2109 | |||
2126 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2110 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
2127 | while (iucv_conns.iucv_connections) { | 2111 | while (!list_empty(&iucv_connection_list)) { |
2128 | struct net_device *ndev = iucv_conns.iucv_connections->netdev; | 2112 | cp = list_entry(iucv_connection_list.next, |
2129 | struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; | 2113 | struct iucv_connection, list); |
2130 | struct device *dev = priv->dev; | 2114 | list_del(&cp->list); |
2115 | ndev = cp->netdev; | ||
2116 | priv = netdev_priv(ndev); | ||
2117 | dev = priv->dev; | ||
2131 | 2118 | ||
2132 | unregister_netdev(ndev); | 2119 | unregister_netdev(ndev); |
2133 | netiucv_unregister_device(dev); | 2120 | netiucv_unregister_device(dev); |
2134 | } | 2121 | } |
2135 | 2122 | ||
2136 | driver_remove_file(&netiucv_driver, &driver_attr_connection); | 2123 | sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group); |
2137 | driver_remove_file(&netiucv_driver, &driver_attr_remove); | ||
2138 | driver_unregister(&netiucv_driver); | 2124 | driver_unregister(&netiucv_driver); |
2125 | iucv_unregister(&netiucv_handler, 1); | ||
2139 | iucv_unregister_dbf_views(); | 2126 | iucv_unregister_dbf_views(); |
2140 | 2127 | ||
2141 | PRINT_INFO("NETIUCV driver unloaded\n"); | 2128 | PRINT_INFO("NETIUCV driver unloaded\n"); |
2142 | return; | 2129 | return; |
2143 | } | 2130 | } |
2144 | 2131 | ||
2145 | static int __init | 2132 | static int __init netiucv_init(void) |
2146 | netiucv_init(void) | ||
2147 | { | 2133 | { |
2148 | int ret; | 2134 | int rc; |
2149 | 2135 | ||
2150 | ret = iucv_register_dbf_views(); | 2136 | rc = iucv_register_dbf_views(); |
2151 | if (ret) { | 2137 | if (rc) |
2152 | PRINT_WARN("netiucv_init failed, " | 2138 | goto out; |
2153 | "iucv_register_dbf_views rc = %d\n", ret); | 2139 | rc = iucv_register(&netiucv_handler, 1); |
2154 | return ret; | 2140 | if (rc) |
2155 | } | 2141 | goto out_dbf; |
2156 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2142 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
2157 | ret = driver_register(&netiucv_driver); | 2143 | rc = driver_register(&netiucv_driver); |
2158 | if (ret) { | 2144 | if (rc) { |
2159 | PRINT_ERR("NETIUCV: failed to register driver.\n"); | 2145 | PRINT_ERR("NETIUCV: failed to register driver.\n"); |
2160 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret); | 2146 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); |
2161 | iucv_unregister_dbf_views(); | 2147 | goto out_iucv; |
2162 | return ret; | ||
2163 | } | 2148 | } |
2164 | 2149 | ||
2165 | /* Add entry for specifying connections. */ | 2150 | rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group); |
2166 | ret = driver_create_file(&netiucv_driver, &driver_attr_connection); | 2151 | if (rc) { |
2167 | if (!ret) { | 2152 | PRINT_ERR("NETIUCV: failed to add driver attributes.\n"); |
2168 | ret = driver_create_file(&netiucv_driver, &driver_attr_remove); | 2153 | IUCV_DBF_TEXT_(setup, 2, |
2169 | netiucv_banner(); | 2154 | "ret %d - netiucv_drv_attr_group\n", rc); |
2170 | rwlock_init(&iucv_conns.iucv_rwlock); | 2155 | goto out_driver; |
2171 | } else { | ||
2172 | PRINT_ERR("NETIUCV: failed to add driver attribute.\n"); | ||
2173 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret); | ||
2174 | driver_unregister(&netiucv_driver); | ||
2175 | iucv_unregister_dbf_views(); | ||
2176 | } | 2156 | } |
2177 | return ret; | 2157 | netiucv_banner(); |
2158 | return rc; | ||
2159 | |||
2160 | out_driver: | ||
2161 | driver_unregister(&netiucv_driver); | ||
2162 | out_iucv: | ||
2163 | iucv_unregister(&netiucv_handler, 1); | ||
2164 | out_dbf: | ||
2165 | iucv_unregister_dbf_views(); | ||
2166 | out: | ||
2167 | return rc; | ||
2178 | } | 2168 | } |
2179 | 2169 | ||
2180 | module_init(netiucv_init); | 2170 | module_init(netiucv_init); |
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 6bb558a9a032..7c735e1fe063 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, | |||
49 | return buffers_needed; | 49 | return buffers_needed; |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline void | 52 | static void |
53 | qeth_eddp_free_context(struct qeth_eddp_context *ctx) | 53 | qeth_eddp_free_context(struct qeth_eddp_context *ctx) |
54 | { | 54 | { |
55 | int i; | 55 | int i; |
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) | |||
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | static inline int | 94 | static int |
95 | qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, | 95 | qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, |
96 | struct qeth_eddp_context *ctx) | 96 | struct qeth_eddp_context *ctx) |
97 | { | 97 | { |
@@ -196,7 +196,7 @@ out: | |||
196 | return flush_cnt; | 196 | return flush_cnt; |
197 | } | 197 | } |
198 | 198 | ||
199 | static inline void | 199 | static void |
200 | qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, | 200 | qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, |
201 | struct qeth_eddp_data *eddp, int data_len) | 201 | struct qeth_eddp_data *eddp, int data_len) |
202 | { | 202 | { |
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, | |||
256 | ctx->offset += eddp->thl; | 256 | ctx->offset += eddp->thl; |
257 | } | 257 | } |
258 | 258 | ||
259 | static inline void | 259 | static void |
260 | qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, | 260 | qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, |
261 | __wsum *hcsum) | 261 | __wsum *hcsum) |
262 | { | 262 | { |
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, | |||
302 | } | 302 | } |
303 | } | 303 | } |
304 | 304 | ||
305 | static inline void | 305 | static void |
306 | qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, | 306 | qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, |
307 | struct qeth_eddp_data *eddp, int data_len, | 307 | struct qeth_eddp_data *eddp, int data_len, |
308 | __wsum hcsum) | 308 | __wsum hcsum) |
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, | |||
349 | ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); | 349 | ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); |
350 | } | 350 | } |
351 | 351 | ||
352 | static inline __wsum | 352 | static __wsum |
353 | qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) | 353 | qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) |
354 | { | 354 | { |
355 | __wsum phcsum; /* pseudo header checksum */ | 355 | __wsum phcsum; /* pseudo header checksum */ |
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) | |||
363 | return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); | 363 | return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); |
364 | } | 364 | } |
365 | 365 | ||
366 | static inline __wsum | 366 | static __wsum |
367 | qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) | 367 | qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) |
368 | { | 368 | { |
369 | __be32 proto; | 369 | __be32 proto; |
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) | |||
381 | return phcsum; | 381 | return phcsum; |
382 | } | 382 | } |
383 | 383 | ||
384 | static inline struct qeth_eddp_data * | 384 | static struct qeth_eddp_data * |
385 | qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) | 385 | qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) |
386 | { | 386 | { |
387 | struct qeth_eddp_data *eddp; | 387 | struct qeth_eddp_data *eddp; |
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) | |||
399 | return eddp; | 399 | return eddp; |
400 | } | 400 | } |
401 | 401 | ||
402 | static inline void | 402 | static void |
403 | __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | 403 | __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, |
404 | struct qeth_eddp_data *eddp) | 404 | struct qeth_eddp_data *eddp) |
405 | { | 405 | { |
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
464 | } | 464 | } |
465 | } | 465 | } |
466 | 466 | ||
467 | static inline int | 467 | static int |
468 | qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | 468 | qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, |
469 | struct sk_buff *skb, struct qeth_hdr *qhdr) | 469 | struct sk_buff *skb, struct qeth_hdr *qhdr) |
470 | { | 470 | { |
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
505 | return 0; | 505 | return 0; |
506 | } | 506 | } |
507 | 507 | ||
508 | static inline void | 508 | static void |
509 | qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, | 509 | qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, |
510 | int hdr_len) | 510 | int hdr_len) |
511 | { | 511 | { |
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, | |||
529 | (skb_shinfo(skb)->gso_segs + 1); | 529 | (skb_shinfo(skb)->gso_segs + 1); |
530 | } | 530 | } |
531 | 531 | ||
532 | static inline struct qeth_eddp_context * | 532 | static struct qeth_eddp_context * |
533 | qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | 533 | qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, |
534 | int hdr_len) | 534 | int hdr_len) |
535 | { | 535 | { |
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | |||
581 | return ctx; | 581 | return ctx; |
582 | } | 582 | } |
583 | 583 | ||
584 | static inline struct qeth_eddp_context * | 584 | static struct qeth_eddp_context * |
585 | qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, | 585 | qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, |
586 | struct qeth_hdr *qhdr) | 586 | struct qeth_hdr *qhdr) |
587 | { | 587 | { |
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb, | |||
625 | } | 625 | } |
626 | return NULL; | 626 | return NULL; |
627 | } | 627 | } |
628 | |||
629 | |||
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index d2efa5ff125d..2257e45594b3 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo, | |||
651 | return 0; | 651 | return 0; |
652 | } | 652 | } |
653 | 653 | ||
654 | static inline int | 654 | static int |
655 | __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, | 655 | __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, |
656 | int same_type) | 656 | int same_type) |
657 | { | 657 | { |
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) | |||
795 | return rc; | 795 | return rc; |
796 | } | 796 | } |
797 | 797 | ||
798 | static inline void | 798 | static void |
799 | __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) | 799 | __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) |
800 | { | 800 | { |
801 | struct qeth_ipaddr *addr, *tmp; | 801 | struct qeth_ipaddr *addr, *tmp; |
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *); | |||
882 | static void qeth_add_multicast_ipv6(struct qeth_card *); | 882 | static void qeth_add_multicast_ipv6(struct qeth_card *); |
883 | #endif | 883 | #endif |
884 | 884 | ||
885 | static inline int | 885 | static int |
886 | qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) | 886 | qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) |
887 | { | 887 | { |
888 | unsigned long flags; | 888 | unsigned long flags; |
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) | |||
920 | wake_up(&card->wait_q); | 920 | wake_up(&card->wait_q); |
921 | } | 921 | } |
922 | 922 | ||
923 | static inline int | 923 | static int |
924 | __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) | 924 | __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) |
925 | { | 925 | { |
926 | unsigned long flags; | 926 | unsigned long flags; |
@@ -1764,9 +1764,9 @@ out: | |||
1764 | qeth_release_buffer(channel,iob); | 1764 | qeth_release_buffer(channel,iob); |
1765 | } | 1765 | } |
1766 | 1766 | ||
1767 | static inline void | 1767 | static void |
1768 | qeth_prepare_control_data(struct qeth_card *card, int len, | 1768 | qeth_prepare_control_data(struct qeth_card *card, int len, |
1769 | struct qeth_cmd_buffer *iob) | 1769 | struct qeth_cmd_buffer *iob) |
1770 | { | 1770 | { |
1771 | qeth_setup_ccw(&card->write,iob->data,len); | 1771 | qeth_setup_ccw(&card->write,iob->data,len); |
1772 | iob->callback = qeth_release_buffer; | 1772 | iob->callback = qeth_release_buffer; |
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, | |||
2160 | return 0; | 2160 | return 0; |
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | static inline struct sk_buff * | 2163 | static struct sk_buff * |
2164 | qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) | 2164 | qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) |
2165 | { | 2165 | { |
2166 | struct sk_buff* skb; | 2166 | struct sk_buff* skb; |
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) | |||
2179 | return skb; | 2179 | return skb; |
2180 | } | 2180 | } |
2181 | 2181 | ||
2182 | static inline struct sk_buff * | 2182 | static struct sk_buff * |
2183 | qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, | 2183 | qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, |
2184 | struct qdio_buffer_element **__element, int *__offset, | 2184 | struct qdio_buffer_element **__element, int *__offset, |
2185 | struct qeth_hdr **hdr) | 2185 | struct qeth_hdr **hdr) |
@@ -2264,7 +2264,7 @@ no_mem: | |||
2264 | return NULL; | 2264 | return NULL; |
2265 | } | 2265 | } |
2266 | 2266 | ||
2267 | static inline __be16 | 2267 | static __be16 |
2268 | qeth_type_trans(struct sk_buff *skb, struct net_device *dev) | 2268 | qeth_type_trans(struct sk_buff *skb, struct net_device *dev) |
2269 | { | 2269 | { |
2270 | struct qeth_card *card; | 2270 | struct qeth_card *card; |
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
2297 | return htons(ETH_P_802_2); | 2297 | return htons(ETH_P_802_2); |
2298 | } | 2298 | } |
2299 | 2299 | ||
2300 | static inline void | 2300 | static void |
2301 | qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, | 2301 | qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, |
2302 | struct qeth_hdr *hdr) | 2302 | struct qeth_hdr *hdr) |
2303 | { | 2303 | { |
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, | |||
2351 | fake_llc->ethertype = ETH_P_IP; | 2351 | fake_llc->ethertype = ETH_P_IP; |
2352 | } | 2352 | } |
2353 | 2353 | ||
2354 | static inline void | 2354 | static void |
2355 | qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, | 2355 | qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, |
2356 | struct qeth_hdr *hdr) | 2356 | struct qeth_hdr *hdr) |
2357 | { | 2357 | { |
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2420 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | 2420 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; |
2421 | } | 2421 | } |
2422 | 2422 | ||
2423 | static inline __u16 | 2423 | static __u16 |
2424 | qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | 2424 | qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, |
2425 | struct qeth_hdr *hdr) | 2425 | struct qeth_hdr *hdr) |
2426 | { | 2426 | { |
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2476 | return vlan_id; | 2476 | return vlan_id; |
2477 | } | 2477 | } |
2478 | 2478 | ||
2479 | static inline void | 2479 | static void |
2480 | qeth_process_inbound_buffer(struct qeth_card *card, | 2480 | qeth_process_inbound_buffer(struct qeth_card *card, |
2481 | struct qeth_qdio_buffer *buf, int index) | 2481 | struct qeth_qdio_buffer *buf, int index) |
2482 | { | 2482 | { |
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2528 | } | 2528 | } |
2529 | } | 2529 | } |
2530 | 2530 | ||
2531 | static inline struct qeth_buffer_pool_entry * | 2531 | static struct qeth_buffer_pool_entry * |
2532 | qeth_get_buffer_pool_entry(struct qeth_card *card) | 2532 | qeth_get_buffer_pool_entry(struct qeth_card *card) |
2533 | { | 2533 | { |
2534 | struct qeth_buffer_pool_entry *entry; | 2534 | struct qeth_buffer_pool_entry *entry; |
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card) | |||
2543 | return NULL; | 2543 | return NULL; |
2544 | } | 2544 | } |
2545 | 2545 | ||
2546 | static inline void | 2546 | static void |
2547 | qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) | 2547 | qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) |
2548 | { | 2548 | { |
2549 | struct qeth_buffer_pool_entry *pool_entry; | 2549 | struct qeth_buffer_pool_entry *pool_entry; |
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) | |||
2570 | buf->state = QETH_QDIO_BUF_EMPTY; | 2570 | buf->state = QETH_QDIO_BUF_EMPTY; |
2571 | } | 2571 | } |
2572 | 2572 | ||
2573 | static inline void | 2573 | static void |
2574 | qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | 2574 | qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, |
2575 | struct qeth_qdio_out_buffer *buf) | 2575 | struct qeth_qdio_out_buffer *buf) |
2576 | { | 2576 | { |
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | |||
2595 | atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); | 2595 | atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); |
2596 | } | 2596 | } |
2597 | 2597 | ||
2598 | static inline void | 2598 | static void |
2599 | qeth_queue_input_buffer(struct qeth_card *card, int index) | 2599 | qeth_queue_input_buffer(struct qeth_card *card, int index) |
2600 | { | 2600 | { |
2601 | struct qeth_qdio_q *queue = card->qdio.in_q; | 2601 | struct qeth_qdio_q *queue = card->qdio.in_q; |
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, | |||
2699 | card->perf_stats.inbound_start_time; | 2699 | card->perf_stats.inbound_start_time; |
2700 | } | 2700 | } |
2701 | 2701 | ||
2702 | static inline int | 2702 | static int |
2703 | qeth_handle_send_error(struct qeth_card *card, | 2703 | qeth_handle_send_error(struct qeth_card *card, |
2704 | struct qeth_qdio_out_buffer *buffer, | 2704 | struct qeth_qdio_out_buffer *buffer, |
2705 | unsigned int qdio_err, unsigned int siga_err) | 2705 | unsigned int qdio_err, unsigned int siga_err) |
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2821 | * Switched to packing state if the number of used buffers on a queue | 2821 | * Switched to packing state if the number of used buffers on a queue |
2822 | * reaches a certain limit. | 2822 | * reaches a certain limit. |
2823 | */ | 2823 | */ |
2824 | static inline void | 2824 | static void |
2825 | qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | 2825 | qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) |
2826 | { | 2826 | { |
2827 | if (!queue->do_pack) { | 2827 | if (!queue->do_pack) { |
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | |||
2842 | * In that case 1 is returned to inform the caller. If no buffer | 2842 | * In that case 1 is returned to inform the caller. If no buffer |
2843 | * has to be flushed, zero is returned. | 2843 | * has to be flushed, zero is returned. |
2844 | */ | 2844 | */ |
2845 | static inline int | 2845 | static int |
2846 | qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | 2846 | qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) |
2847 | { | 2847 | { |
2848 | struct qeth_qdio_out_buffer *buffer; | 2848 | struct qeth_qdio_out_buffer *buffer; |
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | |||
2877 | * Checks if there is a packing buffer and prepares it to be flushed. | 2877 | * Checks if there is a packing buffer and prepares it to be flushed. |
2878 | * In that case returns 1, otherwise zero. | 2878 | * In that case returns 1, otherwise zero. |
2879 | */ | 2879 | */ |
2880 | static inline int | 2880 | static int |
2881 | qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) | 2881 | qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) |
2882 | { | 2882 | { |
2883 | struct qeth_qdio_out_buffer *buffer; | 2883 | struct qeth_qdio_out_buffer *buffer; |
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) | |||
2894 | return 0; | 2894 | return 0; |
2895 | } | 2895 | } |
2896 | 2896 | ||
2897 | static inline void | 2897 | static void |
2898 | qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) | 2898 | qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) |
2899 | { | 2899 | { |
2900 | int index; | 2900 | int index; |
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev, | |||
3594 | } | 3594 | } |
3595 | } | 3595 | } |
3596 | 3596 | ||
3597 | static inline int | 3597 | static int |
3598 | qeth_send_packet(struct qeth_card *, struct sk_buff *); | 3598 | qeth_send_packet(struct qeth_card *, struct sk_buff *); |
3599 | 3599 | ||
3600 | static int | 3600 | static int |
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev) | |||
3759 | return 0; | 3759 | return 0; |
3760 | } | 3760 | } |
3761 | 3761 | ||
3762 | static inline int | 3762 | static int |
3763 | qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | 3763 | qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) |
3764 | { | 3764 | { |
3765 | int cast_type = RTN_UNSPEC; | 3765 | int cast_type = RTN_UNSPEC; |
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | |||
3806 | return cast_type; | 3806 | return cast_type; |
3807 | } | 3807 | } |
3808 | 3808 | ||
3809 | static inline int | 3809 | static int |
3810 | qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | 3810 | qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, |
3811 | int ipv, int cast_type) | 3811 | int ipv, int cast_type) |
3812 | { | 3812 | { |
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb) | |||
3853 | } | 3853 | } |
3854 | } | 3854 | } |
3855 | 3855 | ||
3856 | static inline struct qeth_hdr * | 3856 | static struct qeth_hdr * |
3857 | __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) | 3857 | __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) |
3858 | { | 3858 | { |
3859 | #ifdef CONFIG_QETH_VLAN | 3859 | #ifdef CONFIG_QETH_VLAN |
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) | |||
3882 | qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); | 3882 | qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); |
3883 | } | 3883 | } |
3884 | 3884 | ||
3885 | static inline void | 3885 | static void |
3886 | __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) | 3886 | __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) |
3887 | { | 3887 | { |
3888 | if (orig_skb != new_skb) | 3888 | if (orig_skb != new_skb) |
3889 | dev_kfree_skb_any(new_skb); | 3889 | dev_kfree_skb_any(new_skb); |
3890 | } | 3890 | } |
3891 | 3891 | ||
3892 | static inline struct sk_buff * | 3892 | static struct sk_buff * |
3893 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, | 3893 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, |
3894 | struct qeth_hdr **hdr, int ipv) | 3894 | struct qeth_hdr **hdr, int ipv) |
3895 | { | 3895 | { |
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type) | |||
3940 | return ct | QETH_CAST_UNICAST; | 3940 | return ct | QETH_CAST_UNICAST; |
3941 | } | 3941 | } |
3942 | 3942 | ||
3943 | static inline void | 3943 | static void |
3944 | qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, | 3944 | qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, |
3945 | struct sk_buff *skb) | 3945 | struct sk_buff *skb) |
3946 | { | 3946 | { |
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, | |||
3977 | } | 3977 | } |
3978 | } | 3978 | } |
3979 | 3979 | ||
3980 | static inline void | 3980 | static void |
3981 | qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | 3981 | qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, |
3982 | struct sk_buff *skb, int cast_type) | 3982 | struct sk_buff *skb, int cast_type) |
3983 | { | 3983 | { |
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
4068 | } | 4068 | } |
4069 | } | 4069 | } |
4070 | 4070 | ||
4071 | static inline void | 4071 | static void |
4072 | __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, | 4072 | __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, |
4073 | int is_tso, int *next_element_to_fill) | 4073 | int is_tso, int *next_element_to_fill) |
4074 | { | 4074 | { |
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, | |||
4112 | *next_element_to_fill = element; | 4112 | *next_element_to_fill = element; |
4113 | } | 4113 | } |
4114 | 4114 | ||
4115 | static inline int | 4115 | static int |
4116 | qeth_fill_buffer(struct qeth_qdio_out_q *queue, | 4116 | qeth_fill_buffer(struct qeth_qdio_out_q *queue, |
4117 | struct qeth_qdio_out_buffer *buf, | 4117 | struct qeth_qdio_out_buffer *buf, |
4118 | struct sk_buff *skb) | 4118 | struct sk_buff *skb) |
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue, | |||
4171 | return flush_cnt; | 4171 | return flush_cnt; |
4172 | } | 4172 | } |
4173 | 4173 | ||
4174 | static inline int | 4174 | static int |
4175 | qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, | 4175 | qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, |
4176 | struct sk_buff *skb, struct qeth_hdr *hdr, | 4176 | struct sk_buff *skb, struct qeth_hdr *hdr, |
4177 | int elements_needed, | 4177 | int elements_needed, |
@@ -4222,7 +4222,7 @@ out: | |||
4222 | return -EBUSY; | 4222 | return -EBUSY; |
4223 | } | 4223 | } |
4224 | 4224 | ||
4225 | static inline int | 4225 | static int |
4226 | qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | 4226 | qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, |
4227 | struct sk_buff *skb, struct qeth_hdr *hdr, | 4227 | struct sk_buff *skb, struct qeth_hdr *hdr, |
4228 | int elements_needed, struct qeth_eddp_context *ctx) | 4228 | int elements_needed, struct qeth_eddp_context *ctx) |
@@ -4328,7 +4328,7 @@ out: | |||
4328 | return rc; | 4328 | return rc; |
4329 | } | 4329 | } |
4330 | 4330 | ||
4331 | static inline int | 4331 | static int |
4332 | qeth_get_elements_no(struct qeth_card *card, void *hdr, | 4332 | qeth_get_elements_no(struct qeth_card *card, void *hdr, |
4333 | struct sk_buff *skb, int elems) | 4333 | struct sk_buff *skb, int elems) |
4334 | { | 4334 | { |
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
4349 | } | 4349 | } |
4350 | 4350 | ||
4351 | 4351 | ||
4352 | static inline int | 4352 | static int |
4353 | qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | 4353 | qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) |
4354 | { | 4354 | { |
4355 | int ipv = 0; | 4355 | int ipv = 0; |
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) | |||
4536 | } | 4536 | } |
4537 | 4537 | ||
4538 | 4538 | ||
4539 | static inline const char * | 4539 | static const char * |
4540 | qeth_arp_get_error_cause(int *rc) | 4540 | qeth_arp_get_error_cause(int *rc) |
4541 | { | 4541 | { |
4542 | switch (*rc) { | 4542 | switch (*rc) { |
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
4597 | return rc; | 4597 | return rc; |
4598 | } | 4598 | } |
4599 | 4599 | ||
4600 | static inline void | 4600 | static void |
4601 | qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, | 4601 | qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, |
4602 | struct qeth_arp_query_data *qdata, | 4602 | struct qeth_arp_query_data *qdata, |
4603 | int entry_size, int uentry_size) | 4603 | int entry_size, int uentry_size) |
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | |||
5214 | spin_unlock_irqrestore(&card->vlanlock, flags); | 5214 | spin_unlock_irqrestore(&card->vlanlock, flags); |
5215 | } | 5215 | } |
5216 | 5216 | ||
5217 | static inline void | 5217 | static void |
5218 | qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, | 5218 | qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, |
5219 | unsigned short vid) | 5219 | unsigned short vid) |
5220 | { | 5220 | { |
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card) | |||
5625 | spin_unlock_irqrestore(&card->ip_lock, flags); | 5625 | spin_unlock_irqrestore(&card->ip_lock, flags); |
5626 | } | 5626 | } |
5627 | 5627 | ||
5628 | static inline void | 5628 | static void |
5629 | qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) | 5629 | qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) |
5630 | { | 5630 | { |
5631 | struct qeth_ipaddr *ipm; | 5631 | struct qeth_ipaddr *ipm; |
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card) | |||
5711 | } | 5711 | } |
5712 | 5712 | ||
5713 | #ifdef CONFIG_QETH_IPV6 | 5713 | #ifdef CONFIG_QETH_IPV6 |
5714 | static inline void | 5714 | static void |
5715 | qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) | 5715 | qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) |
5716 | { | 5716 | { |
5717 | struct qeth_ipaddr *ipm; | 5717 | struct qeth_ipaddr *ipm; |
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd) | |||
6022 | 6022 | ||
6023 | return rc; | 6023 | return rc; |
6024 | } | 6024 | } |
6025 | static inline void | 6025 | static void |
6026 | qeth_fill_netmask(u8 *netmask, unsigned int len) | 6026 | qeth_fill_netmask(u8 *netmask, unsigned int len) |
6027 | { | 6027 | { |
6028 | int i,j; | 6028 | int i,j; |
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode) | |||
6626 | return rc; | 6626 | return rc; |
6627 | } | 6627 | } |
6628 | 6628 | ||
6629 | static inline int | 6629 | static int |
6630 | qeth_setadapter_hstr(struct qeth_card *card) | 6630 | qeth_setadapter_hstr(struct qeth_card *card) |
6631 | { | 6631 | { |
6632 | int rc; | 6632 | int rc; |
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card, | |||
6889 | return rc; | 6889 | return rc; |
6890 | } | 6890 | } |
6891 | 6891 | ||
6892 | static inline int | 6892 | static int |
6893 | qeth_start_ipa_arp_processing(struct qeth_card *card) | 6893 | qeth_start_ipa_arp_processing(struct qeth_card *card) |
6894 | { | 6894 | { |
6895 | int rc; | 6895 | int rc; |
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, | |||
7529 | wake_up(&card->wait_q); | 7529 | wake_up(&card->wait_q); |
7530 | } | 7530 | } |
7531 | 7531 | ||
7532 | static inline int | 7532 | static int |
7533 | qeth_threads_running(struct qeth_card *card, unsigned long threads) | 7533 | qeth_threads_running(struct qeth_card *card, unsigned long threads) |
7534 | { | 7534 | { |
7535 | unsigned long flags; | 7535 | unsigned long flags; |
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto, | |||
8118 | spin_unlock_irqrestore(&card->ip_lock, flags); | 8118 | spin_unlock_irqrestore(&card->ip_lock, flags); |
8119 | } | 8119 | } |
8120 | 8120 | ||
8121 | static inline void | 8121 | static void |
8122 | qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | 8122 | qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) |
8123 | { | 8123 | { |
8124 | int i, j; | 8124 | int i, j; |
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index 5836737ac58f..d518419cd0c6 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c | |||
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c | |||
328 | static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, | 328 | static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, |
329 | qeth_dev_bufcnt_store); | 329 | qeth_dev_bufcnt_store); |
330 | 330 | ||
331 | static inline ssize_t | 331 | static ssize_t |
332 | qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, | 332 | qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, |
333 | char *buf) | 333 | char *buf) |
334 | { | 334 | { |
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu | |||
368 | return qeth_dev_route_show(card, &card->options.route4, buf); | 368 | return qeth_dev_route_show(card, &card->options.route4, buf); |
369 | } | 369 | } |
370 | 370 | ||
371 | static inline ssize_t | 371 | static ssize_t |
372 | qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, | 372 | qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, |
373 | enum qeth_prot_versions prot, const char *buf, size_t count) | 373 | enum qeth_prot_versions prot, const char *buf, size_t count) |
374 | { | 374 | { |
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \ | |||
998 | .store = _store, \ | 998 | .store = _store, \ |
999 | }; | 999 | }; |
1000 | 1000 | ||
1001 | int | 1001 | static int |
1002 | qeth_check_layer2(struct qeth_card *card) | 1002 | qeth_check_layer2(struct qeth_card *card) |
1003 | { | 1003 | { |
1004 | if (card->options.layer2) | 1004 | if (card->options.layer2) |
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, | |||
1100 | qeth_dev_ipato_invert4_show, | 1100 | qeth_dev_ipato_invert4_show, |
1101 | qeth_dev_ipato_invert4_store); | 1101 | qeth_dev_ipato_invert4_store); |
1102 | 1102 | ||
1103 | static inline ssize_t | 1103 | static ssize_t |
1104 | qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, | 1104 | qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, |
1105 | enum qeth_prot_versions proto) | 1105 | enum qeth_prot_versions proto) |
1106 | { | 1106 | { |
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char | |||
1146 | return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); | 1146 | return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | static inline int | 1149 | static int |
1150 | qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, | 1150 | qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, |
1151 | u8 *addr, int *mask_bits) | 1151 | u8 *addr, int *mask_bits) |
1152 | { | 1152 | { |
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, | |||
1178 | return 0; | 1178 | return 0; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | static inline ssize_t | 1181 | static ssize_t |
1182 | qeth_dev_ipato_add_store(const char *buf, size_t count, | 1182 | qeth_dev_ipato_add_store(const char *buf, size_t count, |
1183 | struct qeth_card *card, enum qeth_prot_versions proto) | 1183 | struct qeth_card *card, enum qeth_prot_versions proto) |
1184 | { | 1184 | { |
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, | |||
1223 | qeth_dev_ipato_add4_show, | 1223 | qeth_dev_ipato_add4_show, |
1224 | qeth_dev_ipato_add4_store); | 1224 | qeth_dev_ipato_add4_store); |
1225 | 1225 | ||
1226 | static inline ssize_t | 1226 | static ssize_t |
1227 | qeth_dev_ipato_del_store(const char *buf, size_t count, | 1227 | qeth_dev_ipato_del_store(const char *buf, size_t count, |
1228 | struct qeth_card *card, enum qeth_prot_versions proto) | 1228 | struct qeth_card *card, enum qeth_prot_versions proto) |
1229 | { | 1229 | { |
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = { | |||
1361 | .attrs = (struct attribute **)qeth_ipato_device_attrs, | 1361 | .attrs = (struct attribute **)qeth_ipato_device_attrs, |
1362 | }; | 1362 | }; |
1363 | 1363 | ||
1364 | static inline ssize_t | 1364 | static ssize_t |
1365 | qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, | 1365 | qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, |
1366 | enum qeth_prot_versions proto) | 1366 | enum qeth_prot_versions proto) |
1367 | { | 1367 | { |
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char | |||
1407 | return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); | 1407 | return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); |
1408 | } | 1408 | } |
1409 | 1409 | ||
1410 | static inline int | 1410 | static int |
1411 | qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, | 1411 | qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, |
1412 | u8 *addr) | 1412 | u8 *addr) |
1413 | { | 1413 | { |
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, | |||
1418 | return 0; | 1418 | return 0; |
1419 | } | 1419 | } |
1420 | 1420 | ||
1421 | static inline ssize_t | 1421 | static ssize_t |
1422 | qeth_dev_vipa_add_store(const char *buf, size_t count, | 1422 | qeth_dev_vipa_add_store(const char *buf, size_t count, |
1423 | struct qeth_card *card, enum qeth_prot_versions proto) | 1423 | struct qeth_card *card, enum qeth_prot_versions proto) |
1424 | { | 1424 | { |
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, | |||
1451 | qeth_dev_vipa_add4_show, | 1451 | qeth_dev_vipa_add4_show, |
1452 | qeth_dev_vipa_add4_store); | 1452 | qeth_dev_vipa_add4_store); |
1453 | 1453 | ||
1454 | static inline ssize_t | 1454 | static ssize_t |
1455 | qeth_dev_vipa_del_store(const char *buf, size_t count, | 1455 | qeth_dev_vipa_del_store(const char *buf, size_t count, |
1456 | struct qeth_card *card, enum qeth_prot_versions proto) | 1456 | struct qeth_card *card, enum qeth_prot_versions proto) |
1457 | { | 1457 | { |
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = { | |||
1542 | .attrs = (struct attribute **)qeth_vipa_device_attrs, | 1542 | .attrs = (struct attribute **)qeth_vipa_device_attrs, |
1543 | }; | 1543 | }; |
1544 | 1544 | ||
1545 | static inline ssize_t | 1545 | static ssize_t |
1546 | qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, | 1546 | qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, |
1547 | enum qeth_prot_versions proto) | 1547 | enum qeth_prot_versions proto) |
1548 | { | 1548 | { |
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char | |||
1588 | return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); | 1588 | return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | static inline int | 1591 | static int |
1592 | qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, | 1592 | qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, |
1593 | u8 *addr) | 1593 | u8 *addr) |
1594 | { | 1594 | { |
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, | |||
1599 | return 0; | 1599 | return 0; |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | static inline ssize_t | 1602 | static ssize_t |
1603 | qeth_dev_rxip_add_store(const char *buf, size_t count, | 1603 | qeth_dev_rxip_add_store(const char *buf, size_t count, |
1604 | struct qeth_card *card, enum qeth_prot_versions proto) | 1604 | struct qeth_card *card, enum qeth_prot_versions proto) |
1605 | { | 1605 | { |
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, | |||
1632 | qeth_dev_rxip_add4_show, | 1632 | qeth_dev_rxip_add4_show, |
1633 | qeth_dev_rxip_add4_store); | 1633 | qeth_dev_rxip_add4_store); |
1634 | 1634 | ||
1635 | static inline ssize_t | 1635 | static ssize_t |
1636 | qeth_dev_rxip_del_store(const char *buf, size_t count, | 1636 | qeth_dev_rxip_del_store(const char *buf, size_t count, |
1637 | struct qeth_card *card, enum qeth_prot_versions proto) | 1637 | struct qeth_card *card, enum qeth_prot_versions proto) |
1638 | { | 1638 | { |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index b8179c27ceb6..3ccca5871fdf 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV special message driver | 2 | * IUCV special message driver |
3 | * | 3 | * |
4 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation |
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -23,10 +23,10 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/device.h> | 25 | #include <linux/device.h> |
26 | #include <net/iucv/iucv.h> | ||
26 | #include <asm/cpcmd.h> | 27 | #include <asm/cpcmd.h> |
27 | #include <asm/ebcdic.h> | 28 | #include <asm/ebcdic.h> |
28 | 29 | #include "smsgiucv.h" | |
29 | #include "iucv.h" | ||
30 | 30 | ||
31 | struct smsg_callback { | 31 | struct smsg_callback { |
32 | struct list_head list; | 32 | struct list_head list; |
@@ -39,38 +39,46 @@ MODULE_AUTHOR | |||
39 | ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); | 39 | ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); |
40 | MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); | 40 | MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); |
41 | 41 | ||
42 | static iucv_handle_t smsg_handle; | 42 | static struct iucv_path *smsg_path; |
43 | static unsigned short smsg_pathid; | 43 | |
44 | static DEFINE_SPINLOCK(smsg_list_lock); | 44 | static DEFINE_SPINLOCK(smsg_list_lock); |
45 | static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); | 45 | static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); |
46 | 46 | ||
47 | static void | 47 | static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); |
48 | smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data) | 48 | static void smsg_message_pending(struct iucv_path *, struct iucv_message *); |
49 | |||
50 | static struct iucv_handler smsg_handler = { | ||
51 | .path_pending = smsg_path_pending, | ||
52 | .message_pending = smsg_message_pending, | ||
53 | }; | ||
54 | |||
55 | static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], | ||
56 | u8 ipuser[16]) | ||
49 | { | 57 | { |
58 | if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0) | ||
59 | return -EINVAL; | ||
60 | /* Path pending from *MSG. */ | ||
61 | return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); | ||
50 | } | 62 | } |
51 | 63 | ||
52 | 64 | static void smsg_message_pending(struct iucv_path *path, | |
53 | static void | 65 | struct iucv_message *msg) |
54 | smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) | ||
55 | { | 66 | { |
56 | struct smsg_callback *cb; | 67 | struct smsg_callback *cb; |
57 | unsigned char *msg; | 68 | unsigned char *buffer; |
58 | unsigned char sender[9]; | 69 | unsigned char sender[9]; |
59 | unsigned short len; | ||
60 | int rc, i; | 70 | int rc, i; |
61 | 71 | ||
62 | len = eib->ln1msg2.ipbfln1f; | 72 | buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA); |
63 | msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA); | 73 | if (!buffer) { |
64 | if (!msg) { | 74 | iucv_message_reject(path, msg); |
65 | iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls); | ||
66 | return; | 75 | return; |
67 | } | 76 | } |
68 | rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, | 77 | rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL); |
69 | msg, len, NULL, NULL, NULL); | ||
70 | if (rc == 0) { | 78 | if (rc == 0) { |
71 | msg[len] = 0; | 79 | buffer[msg->length] = 0; |
72 | EBCASC(msg, len); | 80 | EBCASC(buffer, msg->length); |
73 | memcpy(sender, msg, 8); | 81 | memcpy(sender, buffer, 8); |
74 | sender[8] = 0; | 82 | sender[8] = 0; |
75 | /* Remove trailing whitespace from the sender name. */ | 83 | /* Remove trailing whitespace from the sender name. */ |
76 | for (i = 7; i >= 0; i--) { | 84 | for (i = 7; i >= 0; i--) { |
@@ -80,27 +88,17 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) | |||
80 | } | 88 | } |
81 | spin_lock(&smsg_list_lock); | 89 | spin_lock(&smsg_list_lock); |
82 | list_for_each_entry(cb, &smsg_list, list) | 90 | list_for_each_entry(cb, &smsg_list, list) |
83 | if (strncmp(msg + 8, cb->prefix, cb->len) == 0) { | 91 | if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) { |
84 | cb->callback(sender, msg + 8); | 92 | cb->callback(sender, buffer + 8); |
85 | break; | 93 | break; |
86 | } | 94 | } |
87 | spin_unlock(&smsg_list_lock); | 95 | spin_unlock(&smsg_list_lock); |
88 | } | 96 | } |
89 | kfree(msg); | 97 | kfree(buffer); |
90 | } | 98 | } |
91 | 99 | ||
92 | static iucv_interrupt_ops_t smsg_ops = { | 100 | int smsg_register_callback(char *prefix, |
93 | .ConnectionComplete = smsg_connection_complete, | 101 | void (*callback)(char *from, char *str)) |
94 | .MessagePending = smsg_message_pending, | ||
95 | }; | ||
96 | |||
97 | static struct device_driver smsg_driver = { | ||
98 | .name = "SMSGIUCV", | ||
99 | .bus = &iucv_bus, | ||
100 | }; | ||
101 | |||
102 | int | ||
103 | smsg_register_callback(char *prefix, void (*callback)(char *from, char *str)) | ||
104 | { | 102 | { |
105 | struct smsg_callback *cb; | 103 | struct smsg_callback *cb; |
106 | 104 | ||
@@ -110,18 +108,18 @@ smsg_register_callback(char *prefix, void (*callback)(char *from, char *str)) | |||
110 | cb->prefix = prefix; | 108 | cb->prefix = prefix; |
111 | cb->len = strlen(prefix); | 109 | cb->len = strlen(prefix); |
112 | cb->callback = callback; | 110 | cb->callback = callback; |
113 | spin_lock(&smsg_list_lock); | 111 | spin_lock_bh(&smsg_list_lock); |
114 | list_add_tail(&cb->list, &smsg_list); | 112 | list_add_tail(&cb->list, &smsg_list); |
115 | spin_unlock(&smsg_list_lock); | 113 | spin_unlock_bh(&smsg_list_lock); |
116 | return 0; | 114 | return 0; |
117 | } | 115 | } |
118 | 116 | ||
119 | void | 117 | void smsg_unregister_callback(char *prefix, |
120 | smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) | 118 | void (*callback)(char *from, char *str)) |
121 | { | 119 | { |
122 | struct smsg_callback *cb, *tmp; | 120 | struct smsg_callback *cb, *tmp; |
123 | 121 | ||
124 | spin_lock(&smsg_list_lock); | 122 | spin_lock_bh(&smsg_list_lock); |
125 | cb = NULL; | 123 | cb = NULL; |
126 | list_for_each_entry(tmp, &smsg_list, list) | 124 | list_for_each_entry(tmp, &smsg_list, list) |
127 | if (tmp->callback == callback && | 125 | if (tmp->callback == callback && |
@@ -130,55 +128,58 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) | |||
130 | list_del(&cb->list); | 128 | list_del(&cb->list); |
131 | break; | 129 | break; |
132 | } | 130 | } |
133 | spin_unlock(&smsg_list_lock); | 131 | spin_unlock_bh(&smsg_list_lock); |
134 | kfree(cb); | 132 | kfree(cb); |
135 | } | 133 | } |
136 | 134 | ||
137 | static void __exit | 135 | static struct device_driver smsg_driver = { |
138 | smsg_exit(void) | 136 | .name = "SMSGIUCV", |
137 | .bus = &iucv_bus, | ||
138 | }; | ||
139 | |||
140 | static void __exit smsg_exit(void) | ||
139 | { | 141 | { |
140 | if (smsg_handle > 0) { | 142 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
141 | cpcmd("SET SMSG OFF", NULL, 0, NULL); | 143 | iucv_unregister(&smsg_handler, 1); |
142 | iucv_sever(smsg_pathid, NULL); | 144 | driver_unregister(&smsg_driver); |
143 | iucv_unregister_program(smsg_handle); | ||
144 | driver_unregister(&smsg_driver); | ||
145 | } | ||
146 | return; | ||
147 | } | 145 | } |
148 | 146 | ||
149 | static int __init | 147 | static int __init smsg_init(void) |
150 | smsg_init(void) | ||
151 | { | 148 | { |
152 | static unsigned char pgmmask[24] = { | ||
153 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
154 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
155 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
156 | }; | ||
157 | int rc; | 149 | int rc; |
158 | 150 | ||
159 | rc = driver_register(&smsg_driver); | 151 | rc = driver_register(&smsg_driver); |
160 | if (rc != 0) { | 152 | if (rc != 0) |
161 | printk(KERN_ERR "SMSGIUCV: failed to register driver.\n"); | 153 | goto out; |
162 | return rc; | 154 | rc = iucv_register(&smsg_handler, 1); |
163 | } | 155 | if (rc) { |
164 | smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ", | ||
165 | pgmmask, &smsg_ops, NULL); | ||
166 | if (!smsg_handle) { | ||
167 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); | 156 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); |
168 | driver_unregister(&smsg_driver); | 157 | rc = -EIO; /* better errno ? */ |
169 | return -EIO; /* better errno ? */ | 158 | goto out_driver; |
159 | } | ||
160 | smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); | ||
161 | if (!smsg_path) { | ||
162 | rc = -ENOMEM; | ||
163 | goto out_register; | ||
170 | } | 164 | } |
171 | rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0, | 165 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", |
172 | NULL, NULL, smsg_handle, NULL); | 166 | NULL, NULL, NULL); |
173 | if (rc) { | 167 | if (rc) { |
174 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); | 168 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); |
175 | iucv_unregister_program(smsg_handle); | 169 | rc = -EIO; /* better errno ? */ |
176 | driver_unregister(&smsg_driver); | 170 | goto out_free; |
177 | smsg_handle = NULL; | ||
178 | return -EIO; | ||
179 | } | 171 | } |
180 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 172 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
181 | return 0; | 173 | return 0; |
174 | |||
175 | out_free: | ||
176 | iucv_path_free(smsg_path); | ||
177 | out_register: | ||
178 | iucv_unregister(&smsg_handler, 1); | ||
179 | out_driver: | ||
180 | driver_unregister(&smsg_driver); | ||
181 | out: | ||
182 | return rc; | ||
182 | } | 183 | } |
183 | 184 | ||
184 | module_init(smsg_init); | 185 | module_init(smsg_init); |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index e088b5e28711..806bb1a921eb 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -13,22 +13,18 @@ | |||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
15 | #include <linux/time.h> | 15 | #include <linux/time.h> |
16 | #include <linux/device.h> | ||
16 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
17 | 18 | #include <asm/etr.h> | |
18 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
19 | 20 | #include <asm/cio.h> | |
21 | #include "cio/cio.h" | ||
22 | #include "cio/chsc.h" | ||
23 | #include "cio/css.h" | ||
20 | #include "s390mach.h" | 24 | #include "s390mach.h" |
21 | 25 | ||
22 | static struct semaphore m_sem; | 26 | static struct semaphore m_sem; |
23 | 27 | ||
24 | extern int css_process_crw(int, int); | ||
25 | extern int chsc_process_crw(void); | ||
26 | extern int chp_process_crw(int, int); | ||
27 | extern void css_reiterate_subchannels(void); | ||
28 | |||
29 | extern struct workqueue_struct *slow_path_wq; | ||
30 | extern struct work_struct slow_path_work; | ||
31 | |||
32 | static NORET_TYPE void | 28 | static NORET_TYPE void |
33 | s390_handle_damage(char *msg) | 29 | s390_handle_damage(char *msg) |
34 | { | 30 | { |
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs) | |||
470 | s390_handle_damage("unable to revalidate registers."); | 466 | s390_handle_damage("unable to revalidate registers."); |
471 | } | 467 | } |
472 | 468 | ||
469 | if (mci->cd) { | ||
470 | /* Timing facility damage */ | ||
471 | s390_handle_damage("TOD clock damaged"); | ||
472 | } | ||
473 | |||
474 | if (mci->ed && mci->ec) { | ||
475 | /* External damage */ | ||
476 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) | ||
477 | etr_sync_check(); | ||
478 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) | ||
479 | etr_switch_to_local(); | ||
480 | } | ||
481 | |||
473 | if (mci->se) | 482 | if (mci->se) |
474 | /* Storage error uncorrected */ | 483 | /* Storage error uncorrected */ |
475 | s390_handle_damage("received storage error uncorrected " | 484 | s390_handle_damage("received storage error uncorrected " |
@@ -508,7 +517,7 @@ static int | |||
508 | machine_check_init(void) | 517 | machine_check_init(void) |
509 | { | 518 | { |
510 | init_MUTEX_LOCKED(&m_sem); | 519 | init_MUTEX_LOCKED(&m_sem); |
511 | ctl_clear_bit(14, 25); /* disable external damage MCH */ | 520 | ctl_set_bit(14, 25); /* enable external damage MCH */ |
512 | ctl_set_bit(14, 27); /* enable system recovery MCH */ | 521 | ctl_set_bit(14, 27); /* enable system recovery MCH */ |
513 | #ifdef CONFIG_MACHCHK_WARNING | 522 | #ifdef CONFIG_MACHCHK_WARNING |
514 | ctl_set_bit(14, 24); /* enable warning MCH */ | 523 | ctl_set_bit(14, 24); /* enable warning MCH */ |
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init); | |||
529 | static int __init | 538 | static int __init |
530 | machine_check_crw_init (void) | 539 | machine_check_crw_init (void) |
531 | { | 540 | { |
532 | kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); | 541 | struct task_struct *task; |
542 | |||
543 | task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); | ||
544 | if (IS_ERR(task)) | ||
545 | return PTR_ERR(task); | ||
533 | ctl_set_bit(14, 28); /* enable channel report MCH */ | 546 | ctl_set_bit(14, 28); /* enable channel report MCH */ |
534 | return 0; | 547 | return 0; |
535 | } | 548 | } |
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index 7abb42a09ae2..d3ca4281a494 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h | |||
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw ) | |||
102 | return ccode; | 102 | return ccode; |
103 | } | 103 | } |
104 | 104 | ||
105 | #define ED_ETR_SYNC 12 /* External damage ETR sync check */ | ||
106 | #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ | ||
107 | |||
105 | #endif /* __s390mach */ | 108 | #endif /* __s390mach */ |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 85093b71f9fa..39a885266790 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void); | |||
47 | static void zfcp_ns_gid_pn_handler(unsigned long); | 47 | static void zfcp_ns_gid_pn_handler(unsigned long); |
48 | 48 | ||
49 | /* miscellaneous */ | 49 | /* miscellaneous */ |
50 | static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); | 50 | static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); |
51 | static inline void zfcp_sg_list_free(struct zfcp_sg_list *); | 51 | static void zfcp_sg_list_free(struct zfcp_sg_list *); |
52 | static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, | 52 | static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, |
53 | void __user *, size_t); | 53 | void __user *, size_t); |
54 | static inline int zfcp_sg_list_copy_to_user(void __user *, | 54 | static int zfcp_sg_list_copy_to_user(void __user *, |
55 | struct zfcp_sg_list *, size_t); | 55 | struct zfcp_sg_list *, size_t); |
56 | |||
57 | static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); | 56 | static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); |
58 | 57 | ||
59 | #define ZFCP_CFDC_IOC_MAGIC 0xDD | 58 | #define ZFCP_CFDC_IOC_MAGIC 0xDD |
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | |||
605 | * elements of the scatter-gather list. The maximum size of a single element | 604 | * elements of the scatter-gather list. The maximum size of a single element |
606 | * in the scatter-gather list is PAGE_SIZE. | 605 | * in the scatter-gather list is PAGE_SIZE. |
607 | */ | 606 | */ |
608 | static inline int | 607 | static int |
609 | zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) | 608 | zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) |
610 | { | 609 | { |
611 | struct scatterlist *sg; | 610 | struct scatterlist *sg; |
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) | |||
652 | * Memory for each element in the scatter-gather list is freed. | 651 | * Memory for each element in the scatter-gather list is freed. |
653 | * Finally sg_list->sg is freed itself and sg_list->count is reset. | 652 | * Finally sg_list->sg is freed itself and sg_list->count is reset. |
654 | */ | 653 | */ |
655 | static inline void | 654 | static void |
656 | zfcp_sg_list_free(struct zfcp_sg_list *sg_list) | 655 | zfcp_sg_list_free(struct zfcp_sg_list *sg_list) |
657 | { | 656 | { |
658 | struct scatterlist *sg; | 657 | struct scatterlist *sg; |
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count) | |||
697 | * @size: number of bytes to be copied | 696 | * @size: number of bytes to be copied |
698 | * Return: 0 on success, -EFAULT if copy_from_user fails. | 697 | * Return: 0 on success, -EFAULT if copy_from_user fails. |
699 | */ | 698 | */ |
700 | static inline int | 699 | static int |
701 | zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, | 700 | zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, |
702 | void __user *user_buffer, | 701 | void __user *user_buffer, |
703 | size_t size) | 702 | size_t size) |
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, | |||
735 | * @size: number of bytes to be copied | 734 | * @size: number of bytes to be copied |
736 | * Return: 0 on success, -EFAULT if copy_to_user fails | 735 | * Return: 0 on success, -EFAULT if copy_to_user fails |
737 | */ | 736 | */ |
738 | static inline int | 737 | static int |
739 | zfcp_sg_list_copy_to_user(void __user *user_buffer, | 738 | zfcp_sg_list_copy_to_user(void __user *user_buffer, |
740 | struct zfcp_sg_list *sg_list, | 739 | struct zfcp_sg_list *sg_list, |
741 | size_t size) | 740 | size_t size) |
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = { | |||
1799 | * @code: reason code | 1798 | * @code: reason code |
1800 | * @rc_table: table of reason codes and descriptions | 1799 | * @rc_table: table of reason codes and descriptions |
1801 | */ | 1800 | */ |
1802 | static inline const char * | 1801 | static const char * |
1803 | zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) | 1802 | zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) |
1804 | { | 1803 | { |
1805 | const char *descr = "unknown reason code"; | 1804 | const char *descr = "unknown reason code"; |
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt) | |||
1847 | * @rjt_par: reject parameter acc. to FC-PH/FC-FS | 1846 | * @rjt_par: reject parameter acc. to FC-PH/FC-FS |
1848 | * @rc_table: table of reason codes and descriptions | 1847 | * @rc_table: table of reason codes and descriptions |
1849 | */ | 1848 | */ |
1850 | static inline void | 1849 | static void |
1851 | zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, | 1850 | zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, |
1852 | const struct zfcp_rc_entry *rc_table) | 1851 | const struct zfcp_rc_entry *rc_table) |
1853 | { | 1852 | { |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 0aa3b1ac76af..d8191d115c14 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize, | |||
31 | 31 | ||
32 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER | 32 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER |
33 | 33 | ||
34 | static inline int | 34 | static int |
35 | zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) | 35 | zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) |
36 | { | 36 | { |
37 | unsigned long long sec; | 37 | unsigned long long sec; |
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label, | |||
106 | return len; | 106 | return len; |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline int | 109 | static int |
110 | zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, | 110 | zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, |
111 | debug_entry_t * entry, char *out_buf) | 111 | debug_entry_t * entry, char *out_buf) |
112 | { | 112 | { |
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, | |||
130 | return len; | 130 | return len; |
131 | } | 131 | } |
132 | 132 | ||
133 | inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | 133 | void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) |
134 | { | 134 | { |
135 | struct zfcp_adapter *adapter = fsf_req->adapter; | 135 | struct zfcp_adapter *adapter = fsf_req->adapter; |
136 | struct fsf_qtcb *qtcb = fsf_req->qtcb; | 136 | struct fsf_qtcb *qtcb = fsf_req->qtcb; |
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
241 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 241 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
242 | } | 242 | } |
243 | 243 | ||
244 | inline void | 244 | void |
245 | zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | 245 | zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, |
246 | struct fsf_status_read_buffer *status_buffer) | 246 | struct fsf_status_read_buffer *status_buffer) |
247 | { | 247 | { |
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | |||
295 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 295 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
296 | } | 296 | } |
297 | 297 | ||
298 | inline void | 298 | void |
299 | zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, | 299 | zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, |
300 | unsigned int qdio_error, unsigned int siga_error, | 300 | unsigned int qdio_error, unsigned int siga_error, |
301 | int sbal_index, int sbal_count) | 301 | int sbal_index, int sbal_count) |
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, | |||
316 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 316 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
317 | } | 317 | } |
318 | 318 | ||
319 | static inline int | 319 | static int |
320 | zfcp_hba_dbf_view_response(char *out_buf, | 320 | zfcp_hba_dbf_view_response(char *out_buf, |
321 | struct zfcp_hba_dbf_record_response *rec) | 321 | struct zfcp_hba_dbf_record_response *rec) |
322 | { | 322 | { |
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf, | |||
403 | return len; | 403 | return len; |
404 | } | 404 | } |
405 | 405 | ||
406 | static inline int | 406 | static int |
407 | zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) | 407 | zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) |
408 | { | 408 | { |
409 | int len = 0; | 409 | int len = 0; |
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) | |||
424 | return len; | 424 | return len; |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline int | 427 | static int |
428 | zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) | 428 | zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) |
429 | { | 429 | { |
430 | int len = 0; | 430 | int len = 0; |
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
469 | return len; | 469 | return len; |
470 | } | 470 | } |
471 | 471 | ||
472 | struct debug_view zfcp_hba_dbf_view = { | 472 | static struct debug_view zfcp_hba_dbf_view = { |
473 | "structured", | 473 | "structured", |
474 | NULL, | 474 | NULL, |
475 | &zfcp_dbf_view_header, | 475 | &zfcp_dbf_view_header, |
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = { | |||
478 | NULL | 478 | NULL |
479 | }; | 479 | }; |
480 | 480 | ||
481 | inline void | 481 | void |
482 | _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, | 482 | _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, |
483 | u32 s_id, u32 d_id, void *buffer, int buflen) | 483 | u32 s_id, u32 d_id, void *buffer, int buflen) |
484 | { | 484 | { |
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, | |||
519 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 519 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); |
520 | } | 520 | } |
521 | 521 | ||
522 | inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | 522 | void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) |
523 | { | 523 | { |
524 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 524 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
525 | struct zfcp_port *port = ct->port; | 525 | struct zfcp_port *port = ct->port; |
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | |||
531 | ct->req->length); | 531 | ct->req->length); |
532 | } | 532 | } |
533 | 533 | ||
534 | inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | 534 | void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) |
535 | { | 535 | { |
536 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 536 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
537 | struct zfcp_port *port = ct->port; | 537 | struct zfcp_port *port = ct->port; |
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | |||
543 | ct->resp->length); | 543 | ct->resp->length); |
544 | } | 544 | } |
545 | 545 | ||
546 | static inline void | 546 | static void |
547 | _zfcp_san_dbf_event_common_els(const char *tag, int level, | 547 | _zfcp_san_dbf_event_common_els(const char *tag, int level, |
548 | struct zfcp_fsf_req *fsf_req, u32 s_id, | 548 | struct zfcp_fsf_req *fsf_req, u32 s_id, |
549 | u32 d_id, u8 ls_code, void *buffer, int buflen) | 549 | u32 d_id, u8 ls_code, void *buffer, int buflen) |
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level, | |||
585 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 585 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); |
586 | } | 586 | } |
587 | 587 | ||
588 | inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) | 588 | void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) |
589 | { | 589 | { |
590 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 590 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
591 | 591 | ||
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) | |||
597 | els->req->length); | 597 | els->req->length); |
598 | } | 598 | } |
599 | 599 | ||
600 | inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) | 600 | void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) |
601 | { | 601 | { |
602 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 602 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
603 | 603 | ||
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) | |||
608 | els->resp->length); | 608 | els->resp->length); |
609 | } | 609 | } |
610 | 610 | ||
611 | inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) | 611 | void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) |
612 | { | 612 | { |
613 | struct zfcp_adapter *adapter = fsf_req->adapter; | 613 | struct zfcp_adapter *adapter = fsf_req->adapter; |
614 | struct fsf_status_read_buffer *status_buffer = | 614 | struct fsf_status_read_buffer *status_buffer = |
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
693 | return len; | 693 | return len; |
694 | } | 694 | } |
695 | 695 | ||
696 | struct debug_view zfcp_san_dbf_view = { | 696 | static struct debug_view zfcp_san_dbf_view = { |
697 | "structured", | 697 | "structured", |
698 | NULL, | 698 | NULL, |
699 | &zfcp_dbf_view_header, | 699 | &zfcp_dbf_view_header, |
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = { | |||
702 | NULL | 702 | NULL |
703 | }; | 703 | }; |
704 | 704 | ||
705 | static inline void | 705 | static void |
706 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | 706 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, |
707 | struct zfcp_adapter *adapter, | 707 | struct zfcp_adapter *adapter, |
708 | struct scsi_cmnd *scsi_cmnd, | 708 | struct scsi_cmnd *scsi_cmnd, |
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
786 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); | 786 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); |
787 | } | 787 | } |
788 | 788 | ||
789 | inline void | 789 | void |
790 | zfcp_scsi_dbf_event_result(const char *tag, int level, | 790 | zfcp_scsi_dbf_event_result(const char *tag, int level, |
791 | struct zfcp_adapter *adapter, | 791 | struct zfcp_adapter *adapter, |
792 | struct scsi_cmnd *scsi_cmnd, | 792 | struct scsi_cmnd *scsi_cmnd, |
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level, | |||
796 | adapter, scsi_cmnd, fsf_req, 0); | 796 | adapter, scsi_cmnd, fsf_req, 0); |
797 | } | 797 | } |
798 | 798 | ||
799 | inline void | 799 | void |
800 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | 800 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, |
801 | struct scsi_cmnd *scsi_cmnd, | 801 | struct scsi_cmnd *scsi_cmnd, |
802 | struct zfcp_fsf_req *new_fsf_req, | 802 | struct zfcp_fsf_req *new_fsf_req, |
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | |||
806 | adapter, scsi_cmnd, new_fsf_req, old_req_id); | 806 | adapter, scsi_cmnd, new_fsf_req, old_req_id); |
807 | } | 807 | } |
808 | 808 | ||
809 | inline void | 809 | void |
810 | zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, | 810 | zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, |
811 | struct scsi_cmnd *scsi_cmnd) | 811 | struct scsi_cmnd *scsi_cmnd) |
812 | { | 812 | { |
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
884 | return len; | 884 | return len; |
885 | } | 885 | } |
886 | 886 | ||
887 | struct debug_view zfcp_scsi_dbf_view = { | 887 | static struct debug_view zfcp_scsi_dbf_view = { |
888 | "structured", | 888 | "structured", |
889 | NULL, | 889 | NULL, |
890 | &zfcp_dbf_view_header, | 890 | &zfcp_dbf_view_header, |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 755b754dec60..421da1e7c0ea 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) | |||
200 | * returns: 0 - initiated action successfully | 200 | * returns: 0 - initiated action successfully |
201 | * <0 - failed to initiate action | 201 | * <0 - failed to initiate action |
202 | */ | 202 | */ |
203 | int | 203 | static int |
204 | zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) | 204 | zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) |
205 | { | 205 | { |
206 | int retval; | 206 | int retval; |
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask) | |||
295 | * zfcp_erp_adisc - send ADISC ELS command | 295 | * zfcp_erp_adisc - send ADISC ELS command |
296 | * @port: port structure | 296 | * @port: port structure |
297 | */ | 297 | */ |
298 | int | 298 | static int |
299 | zfcp_erp_adisc(struct zfcp_port *port) | 299 | zfcp_erp_adisc(struct zfcp_port *port) |
300 | { | 300 | { |
301 | struct zfcp_adapter *adapter = port->adapter; | 301 | struct zfcp_adapter *adapter = port->adapter; |
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port) | |||
380 | * | 380 | * |
381 | * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. | 381 | * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. |
382 | */ | 382 | */ |
383 | void | 383 | static void |
384 | zfcp_erp_adisc_handler(unsigned long data) | 384 | zfcp_erp_adisc_handler(unsigned long data) |
385 | { | 385 | { |
386 | struct zfcp_send_els *send_els; | 386 | struct zfcp_send_els *send_els; |
@@ -3135,7 +3135,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, | |||
3135 | break; | 3135 | break; |
3136 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 3136 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
3137 | if (result != ZFCP_ERP_SUCCEEDED) { | 3137 | if (result != ZFCP_ERP_SUCCEEDED) { |
3138 | struct zfcp_port *port; | ||
3139 | list_for_each_entry(port, &adapter->port_list_head, list) | 3138 | list_for_each_entry(port, &adapter->port_list_head, list) |
3140 | if (port->rport && | 3139 | if (port->rport && |
3141 | !atomic_test_mask(ZFCP_STATUS_PORT_WKA, | 3140 | !atomic_test_mask(ZFCP_STATUS_PORT_WKA, |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 7ec8e352b1fe..01386ac688a2 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); | |||
119 | extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); | 119 | extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); |
120 | extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); | 120 | extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); |
121 | extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); | 121 | extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); |
122 | extern void set_host_byte(u32 *, char); | 122 | extern void set_host_byte(int *, char); |
123 | extern void set_driver_byte(u32 *, char); | 123 | extern void set_driver_byte(int *, char); |
124 | extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); | 124 | extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); |
125 | extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); | 125 | extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); |
126 | 126 | ||
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index eabf86bb13f5..ef16f7ca4bb1 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -4560,7 +4560,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags, | |||
4560 | /* | 4560 | /* |
4561 | * set qtcb pointer in fsf_req and initialize QTCB | 4561 | * set qtcb pointer in fsf_req and initialize QTCB |
4562 | */ | 4562 | */ |
4563 | static inline void | 4563 | static void |
4564 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) | 4564 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) |
4565 | { | 4565 | { |
4566 | if (likely(fsf_req->qtcb != NULL)) { | 4566 | if (likely(fsf_req->qtcb != NULL)) { |
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index dbd9f48e863e..1e12a78e8edd 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -21,22 +21,22 @@ | |||
21 | 21 | ||
22 | #include "zfcp_ext.h" | 22 | #include "zfcp_ext.h" |
23 | 23 | ||
24 | static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); | 24 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); |
25 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get | 25 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get |
26 | (struct zfcp_qdio_queue *, int, int); | 26 | (struct zfcp_qdio_queue *, int, int); |
27 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp | 27 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp |
28 | (struct zfcp_fsf_req *, int, int); | 28 | (struct zfcp_fsf_req *, int, int); |
29 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain | 29 | static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain |
30 | (struct zfcp_fsf_req *, unsigned long); | 30 | (struct zfcp_fsf_req *, unsigned long); |
31 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next | 31 | static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next |
32 | (struct zfcp_fsf_req *, unsigned long); | 32 | (struct zfcp_fsf_req *, unsigned long); |
33 | static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); | 33 | static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); |
34 | static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); | 34 | static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); |
35 | static inline void zfcp_qdio_sbale_fill | 35 | static void zfcp_qdio_sbale_fill |
36 | (struct zfcp_fsf_req *, unsigned long, void *, int); | 36 | (struct zfcp_fsf_req *, unsigned long, void *, int); |
37 | static inline int zfcp_qdio_sbals_from_segment | 37 | static int zfcp_qdio_sbals_from_segment |
38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); | 38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); |
39 | static inline int zfcp_qdio_sbals_from_buffer | 39 | static int zfcp_qdio_sbals_from_buffer |
40 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); | 40 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); |
41 | 41 | ||
42 | static qdio_handler_t zfcp_qdio_request_handler; | 42 | static qdio_handler_t zfcp_qdio_request_handler; |
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter) | |||
201 | * returns: error flag | 201 | * returns: error flag |
202 | * | 202 | * |
203 | */ | 203 | */ |
204 | static inline int | 204 | static int |
205 | zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, | 205 | zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, |
206 | unsigned int qdio_error, unsigned int siga_error, | 206 | unsigned int qdio_error, unsigned int siga_error, |
207 | int first_element, int elements_processed) | 207 | int first_element, int elements_processed) |
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) | |||
462 | * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for | 462 | * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for |
463 | * a struct zfcp_fsf_req | 463 | * a struct zfcp_fsf_req |
464 | */ | 464 | */ |
465 | inline volatile struct qdio_buffer_element * | 465 | volatile struct qdio_buffer_element * |
466 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | 466 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) |
467 | { | 467 | { |
468 | return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, | 468 | return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, |
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | |||
484 | * zfcp_qdio_sbale_curr - return current SBALE on request_queue for | 484 | * zfcp_qdio_sbale_curr - return current SBALE on request_queue for |
485 | * a struct zfcp_fsf_req | 485 | * a struct zfcp_fsf_req |
486 | */ | 486 | */ |
487 | inline volatile struct qdio_buffer_element * | 487 | volatile struct qdio_buffer_element * |
488 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) | 488 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) |
489 | { | 489 | { |
490 | return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, | 490 | return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, |
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) | |||
499 | * | 499 | * |
500 | * Note: We can assume at least one free SBAL in the request_queue when called. | 500 | * Note: We can assume at least one free SBAL in the request_queue when called. |
501 | */ | 501 | */ |
502 | static inline void | 502 | static void |
503 | zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | 503 | zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) |
504 | { | 504 | { |
505 | int count = atomic_read(&fsf_req->adapter->request_queue.free_count); | 505 | int count = atomic_read(&fsf_req->adapter->request_queue.free_count); |
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | |||
517 | * | 517 | * |
518 | * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. | 518 | * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. |
519 | */ | 519 | */ |
520 | static inline volatile struct qdio_buffer_element * | 520 | static volatile struct qdio_buffer_element * |
521 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 521 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
522 | { | 522 | { |
523 | volatile struct qdio_buffer_element *sbale; | 523 | volatile struct qdio_buffer_element *sbale; |
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
554 | /** | 554 | /** |
555 | * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed | 555 | * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed |
556 | */ | 556 | */ |
557 | static inline volatile struct qdio_buffer_element * | 557 | static volatile struct qdio_buffer_element * |
558 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 558 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
559 | { | 559 | { |
560 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 560 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
569 | * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue | 569 | * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue |
570 | * with zero from | 570 | * with zero from |
571 | */ | 571 | */ |
572 | static inline int | 572 | static int |
573 | zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) | 573 | zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) |
574 | { | 574 | { |
575 | struct qdio_buffer **buf = queue->buffer; | 575 | struct qdio_buffer **buf = queue->buffer; |
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) | |||
603 | * zfcp_qdio_sbale_fill - set address and lenght in current SBALE | 603 | * zfcp_qdio_sbale_fill - set address and lenght in current SBALE |
604 | * on request_queue | 604 | * on request_queue |
605 | */ | 605 | */ |
606 | static inline void | 606 | static void |
607 | zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 607 | zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
608 | void *addr, int length) | 608 | void *addr, int length) |
609 | { | 609 | { |
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
624 | * Alignment and length of the segment determine how many SBALEs are needed | 624 | * Alignment and length of the segment determine how many SBALEs are needed |
625 | * for the memory segment. | 625 | * for the memory segment. |
626 | */ | 626 | */ |
627 | static inline int | 627 | static int |
628 | zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 628 | zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
629 | void *start_addr, unsigned long total_length) | 629 | void *start_addr, unsigned long total_length) |
630 | { | 630 | { |
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
659 | * @sg_count: number of elements in scatter-gather list | 659 | * @sg_count: number of elements in scatter-gather list |
660 | * @max_sbals: upper bound for number of SBALs to be used | 660 | * @max_sbals: upper bound for number of SBALs to be used |
661 | */ | 661 | */ |
662 | inline int | 662 | int |
663 | zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 663 | zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
664 | struct scatterlist *sg, int sg_count, int max_sbals) | 664 | struct scatterlist *sg, int sg_count, int max_sbals) |
665 | { | 665 | { |
@@ -707,7 +707,7 @@ out: | |||
707 | * @length: length of buffer | 707 | * @length: length of buffer |
708 | * @max_sbals: upper bound for number of SBALs to be used | 708 | * @max_sbals: upper bound for number of SBALs to be used |
709 | */ | 709 | */ |
710 | static inline int | 710 | static int |
711 | zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 711 | zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
712 | void *buffer, unsigned long length, int max_sbals) | 712 | void *buffer, unsigned long length, int max_sbals) |
713 | { | 713 | { |
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
728 | * @scsi_cmnd: either scatter-gather list or buffer contained herein is used | 728 | * @scsi_cmnd: either scatter-gather list or buffer contained herein is used |
729 | * to fill SBALs | 729 | * to fill SBALs |
730 | */ | 730 | */ |
731 | inline int | 731 | int |
732 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, | 732 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, |
733 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) | 733 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) |
734 | { | 734 | { |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 452d96f92a14..99db02062c3b 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) | |||
90 | return fcp_sns_info_ptr; | 90 | return fcp_sns_info_ptr; |
91 | } | 91 | } |
92 | 92 | ||
93 | fcp_dl_t * | 93 | static fcp_dl_t * |
94 | zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) | 94 | zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) |
95 | { | 95 | { |
96 | int additional_length = fcp_cmd->add_fcp_cdb_length << 2; | 96 | int additional_length = fcp_cmd->add_fcp_cdb_length << 2; |
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl) | |||
124 | * regarding the specified byte | 124 | * regarding the specified byte |
125 | */ | 125 | */ |
126 | static inline void | 126 | static inline void |
127 | set_byte(u32 * result, char status, char pos) | 127 | set_byte(int *result, char status, char pos) |
128 | { | 128 | { |
129 | *result |= status << (pos * 8); | 129 | *result |= status << (pos * 8); |
130 | } | 130 | } |
131 | 131 | ||
132 | void | 132 | void |
133 | set_host_byte(u32 * result, char status) | 133 | set_host_byte(int *result, char status) |
134 | { | 134 | { |
135 | set_byte(result, status, 2); | 135 | set_byte(result, status, 2); |
136 | } | 136 | } |
137 | 137 | ||
138 | void | 138 | void |
139 | set_driver_byte(u32 * result, char status) | 139 | set_driver_byte(int *result, char status) |
140 | { | 140 | { |
141 | set_byte(result, status, 3); | 141 | set_byte(result, status, 3); |
142 | } | 142 | } |
@@ -280,7 +280,7 @@ out: | |||
280 | return retval; | 280 | return retval; |
281 | } | 281 | } |
282 | 282 | ||
283 | void | 283 | static void |
284 | zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) | 284 | zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) |
285 | { | 285 | { |
286 | struct completion *wait = (struct completion *) scpnt->SCp.ptr; | 286 | struct completion *wait = (struct completion *) scpnt->SCp.ptr; |
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, | |||
324 | * returns: 0 - success, SCSI command enqueued | 324 | * returns: 0 - success, SCSI command enqueued |
325 | * !0 - failure | 325 | * !0 - failure |
326 | */ | 326 | */ |
327 | int | 327 | static int |
328 | zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | 328 | zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, |
329 | void (*done) (struct scsi_cmnd *)) | 329 | void (*done) (struct scsi_cmnd *)) |
330 | { | 330 | { |
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, | |||
380 | * will handle late commands. (Usually, the normal completion of late | 380 | * will handle late commands. (Usually, the normal completion of late |
381 | * commands is ignored with respect to the running abort operation.) | 381 | * commands is ignored with respect to the running abort operation.) |
382 | */ | 382 | */ |
383 | int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | 383 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) |
384 | { | 384 | { |
385 | struct Scsi_Host *scsi_host; | 385 | struct Scsi_Host *scsi_host; |
386 | struct zfcp_adapter *adapter; | 386 | struct zfcp_adapter *adapter; |
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
445 | return retval; | 445 | return retval; |
446 | } | 446 | } |
447 | 447 | ||
448 | int | 448 | static int |
449 | zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) | 449 | zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) |
450 | { | 450 | { |
451 | int retval; | 451 | int retval; |
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, | |||
541 | /** | 541 | /** |
542 | * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset | 542 | * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset |
543 | */ | 543 | */ |
544 | int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | 544 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) |
545 | { | 545 | { |
546 | struct zfcp_unit *unit; | 546 | struct zfcp_unit *unit; |
547 | struct zfcp_adapter *adapter; | 547 | struct zfcp_adapter *adapter; |
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 1e788e815ce7..090743d2f914 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -9,8 +9,14 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/proc_fs.h> | 10 | #include <linux/proc_fs.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/delay.h> | ||
12 | #include <asm/ebcdic.h> | 13 | #include <asm/ebcdic.h> |
13 | 14 | ||
15 | /* Sigh, math-emu. Don't ask. */ | ||
16 | #include <asm/sfp-util.h> | ||
17 | #include <math-emu/soft-fp.h> | ||
18 | #include <math-emu/single.h> | ||
19 | |||
14 | struct sysinfo_1_1_1 { | 20 | struct sysinfo_1_1_1 { |
15 | char reserved_0[32]; | 21 | char reserved_0[32]; |
16 | char manufacturer[16]; | 22 | char manufacturer[16]; |
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) | |||
198 | * if the higher order 8 bits are not zero. Printing | 204 | * if the higher order 8 bits are not zero. Printing |
199 | * a floating point number in the kernel is a no-no, | 205 | * a floating point number in the kernel is a no-no, |
200 | * always print the number as 32 bit unsigned integer. | 206 | * always print the number as 32 bit unsigned integer. |
201 | * The user-space needs to know about the stange | 207 | * The user-space needs to know about the strange |
202 | * encoding of the alternate cpu capability. | 208 | * encoding of the alternate cpu capability. |
203 | */ | 209 | */ |
204 | len += sprintf(page + len, "Capability: %u %u\n", | 210 | len += sprintf(page + len, "Capability: %u %u\n", |
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void) | |||
351 | 357 | ||
352 | __initcall(create_proc_sysinfo); | 358 | __initcall(create_proc_sysinfo); |
353 | 359 | ||
360 | /* | ||
361 | * CPU capability might have changed. Therefore recalculate loops_per_jiffy. | ||
362 | */ | ||
363 | void s390_adjust_jiffies(void) | ||
364 | { | ||
365 | struct sysinfo_1_2_2 *info; | ||
366 | const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */ | ||
367 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
368 | FP_DECL_EX; | ||
369 | unsigned int capability; | ||
370 | |||
371 | info = (void *) get_zeroed_page(GFP_KERNEL); | ||
372 | if (!info) | ||
373 | return; | ||
374 | |||
375 | if (stsi(info, 1, 2, 2) != -ENOSYS) { | ||
376 | /* | ||
377 | * Major sigh. The cpu capability encoding is "special". | ||
378 | * If the first 9 bits of info->capability are 0 then it | ||
379 | * is a 32 bit unsigned integer in the range 0 .. 2^23. | ||
380 | * If the first 9 bits are != 0 then it is a 32 bit float. | ||
381 | * In addition a lower value indicates a proportionally | ||
382 | * higher cpu capacity. Bogomips are the other way round. | ||
383 | * To get to a halfway suitable number we divide 1e7 | ||
384 | * by the cpu capability number. Yes, that means a floating | ||
385 | * point division .. math-emu here we come :-) | ||
386 | */ | ||
387 | FP_UNPACK_SP(SA, &fmil); | ||
388 | if ((info->capability >> 23) == 0) | ||
389 | FP_FROM_INT_S(SB, info->capability, 32, int); | ||
390 | else | ||
391 | FP_UNPACK_SP(SB, &info->capability); | ||
392 | FP_DIV_S(SR, SA, SB); | ||
393 | FP_TO_INT_S(capability, SR, 32, 0); | ||
394 | } else | ||
395 | /* | ||
396 | * Really old machine without stsi block for basic | ||
397 | * cpu information. Report 42.0 bogomips. | ||
398 | */ | ||
399 | capability = 42; | ||
400 | loops_per_jiffy = capability * (500000/HZ); | ||
401 | free_page((unsigned long) info); | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * calibrate the delay loop | ||
406 | */ | ||
407 | void __init calibrate_delay(void) | ||
408 | { | ||
409 | s390_adjust_jiffies(); | ||
410 | /* Print the good old Bogomips line .. */ | ||
411 | printk(KERN_DEBUG "Calibrating delay loop (skipped)... " | ||
412 | "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ), | ||
413 | (loops_per_jiffy/(5000/HZ)) % 100); | ||
414 | } | ||